   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include <linux/bpf_trace.h>
   5#include <linux/net/intel/libie/rx.h>
   6#include <linux/prefetch.h>
   7#include <linux/sctp.h>
   8#include <net/mpls.h>
   9#include <net/xdp.h>
  10#include "i40e_txrx_common.h"
  11#include "i40e_trace.h"
  12#include "i40e_xsk.h"
  13
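/* Command bits set on the final descriptor of every frame: End Of Packet
 * (EOP) marks the end of the frame and Report Status (RS) asks the hardware
 * for a completion write-back.
 */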
  14#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
  15/**
  16 * i40e_fdir - Generate a Flow Director descriptor based on fdata
  17 * @tx_ring: Tx ring to send buffer on
  18 * @fdata: Flow director filter data
  19 * @add: Indicate if we are adding a rule or deleting one
  20 *
  21 **/
  22static void i40e_fdir(struct i40e_ring *tx_ring,
  23		      struct i40e_fdir_filter *fdata, bool add)
  24{
  25	struct i40e_filter_program_desc *fdir_desc;
  26	struct i40e_pf *pf = tx_ring->vsi->back;
  27	u32 flex_ptype, dtype_cmd, vsi_id;
  28	u16 i;
  29
  30	/* grab the next descriptor */
  31	i = tx_ring->next_to_use;
  32	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
  33
  34	i++;
  35	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  36
  37	flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
  38
  39	flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
  40				 fdata->flex_off);
  41
  42	flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
  43
  44	/* Use LAN VSI Id if not programmed by user */
  45	vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id;
  46	flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id);
  47
  48	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
  49
  50	dtype_cmd |= add ?
  51		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
  52		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
  53		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
  54		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;
  55
  56	dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
  57
  58	dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
  59				fdata->fd_status);
  60
  61	if (fdata->cnt_index) {
  62		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
  63		dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
  64					fdata->cnt_index);
  65	}
  66
  67	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
  68	fdir_desc->rsvd = cpu_to_le32(0);
  69	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
  70	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
  71}
  72
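/* Number of 1 ms sleeps i40e_program_fdir_filter() waits for two free
 * descriptors before giving up with -EAGAIN.
 */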
  73#define I40E_FD_CLEAN_DELAY 10
  74/**
  75 * i40e_program_fdir_filter - Program a Flow Director filter
  76 * @fdir_data: Packet data that will be filter parameters
  77 * @raw_packet: the pre-allocated packet buffer for FDir
  78 * @pf: The PF pointer
  79 * @add: True for add/update, False for remove
  80 **/
  81static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
  82				    u8 *raw_packet, struct i40e_pf *pf,
  83				    bool add)
  84{
  85	struct i40e_tx_buffer *tx_buf, *first;
  86	struct i40e_tx_desc *tx_desc;
  87	struct i40e_ring *tx_ring;
  88	struct i40e_vsi *vsi;
  89	struct device *dev;
  90	dma_addr_t dma;
  91	u32 td_cmd = 0;
  92	u16 i;
  93
  94	/* find existing FDIR VSI */
  95	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
  96	if (!vsi)
  97		return -ENOENT;
  98
  99	tx_ring = vsi->tx_rings[0];
 100	dev = tx_ring->dev;
 101
 102	/* we need two descriptors to add/del a filter and we can wait */
 103	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
 104		if (!i)
 105			return -EAGAIN;
 106		msleep_interruptible(1);
 107	}
 108
 109	dma = dma_map_single(dev, raw_packet,
 110			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
 111	if (dma_mapping_error(dev, dma))
 112		goto dma_fail;
 113
 114	/* grab the next descriptor */
 115	i = tx_ring->next_to_use;
 116	first = &tx_ring->tx_bi[i];
 117	i40e_fdir(tx_ring, fdir_data, add);
 118
 119	/* Now program a dummy descriptor */
 120	i = tx_ring->next_to_use;
 121	tx_desc = I40E_TX_DESC(tx_ring, i);
 122	tx_buf = &tx_ring->tx_bi[i];
 123
 124	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
 125
 126	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
 127
 128	/* record length, and DMA address */
 129	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
 130	dma_unmap_addr_set(tx_buf, dma, dma);
 131
 132	tx_desc->buffer_addr = cpu_to_le64(dma);
 133	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 134
 135	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
 136	tx_buf->raw_buf = (void *)raw_packet;
 137
 138	tx_desc->cmd_type_offset_bsz =
 139		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 140
 141	/* Force memory writes to complete before letting h/w
 142	 * know there are new descriptors to fetch.
 143	 */
 144	wmb();
 145
 146	/* Mark the data descriptor to be watched */
 147	first->next_to_watch = tx_desc;
 148
 149	writel(tx_ring->next_to_use, tx_ring->tail);
 150	return 0;
 151
 152dma_fail:
 153	return -1;
 154}
 155
 156/**
 157 * i40e_create_dummy_packet - Constructs dummy packet for HW
 158 * @dummy_packet: preallocated space for dummy packet
  159 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
 160 * @l4proto: next level protocol used in data portion of l3
 161 * @data: filter data
 162 *
 163 * Returns address of layer 4 protocol dummy packet.
 164 **/
 165static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
 166				      struct i40e_fdir_filter *data)
 167{
 168	bool is_vlan = !!data->vlan_tag;
 169	struct vlan_hdr vlan = {};
 170	struct ipv6hdr ipv6 = {};
 171	struct ethhdr eth = {};
 172	struct iphdr ip = {};
 173	u8 *tmp;
 174
 175	if (ipv4) {
 176		eth.h_proto = cpu_to_be16(ETH_P_IP);
 177		ip.protocol = l4proto;
 178		ip.version = 0x4;
 179		ip.ihl = 0x5;
 180
 181		ip.daddr = data->dst_ip;
 182		ip.saddr = data->src_ip;
 183	} else {
 184		eth.h_proto = cpu_to_be16(ETH_P_IPV6);
 185		ipv6.nexthdr = l4proto;
 186		ipv6.version = 0x6;
 187
 188		memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
 189		       sizeof(__be32) * 4);
 190		memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
 191		       sizeof(__be32) * 4);
 192	}
 193
 194	if (is_vlan) {
 195		vlan.h_vlan_TCI = data->vlan_tag;
 196		vlan.h_vlan_encapsulated_proto = eth.h_proto;
 197		eth.h_proto = data->vlan_etype;
 198	}
 199
 200	tmp = dummy_packet;
 201	memcpy(tmp, &eth, sizeof(eth));
 202	tmp += sizeof(eth);
 203
 204	if (is_vlan) {
 205		memcpy(tmp, &vlan, sizeof(vlan));
 206		tmp += sizeof(vlan);
 207	}
 208
 209	if (ipv4) {
 210		memcpy(tmp, &ip, sizeof(ip));
 211		tmp += sizeof(ip);
 212	} else {
 213		memcpy(tmp, &ipv6, sizeof(ipv6));
 214		tmp += sizeof(ipv6);
 215	}
 216
 217	return tmp;
 218}
 219
 220/**
 221 * i40e_create_dummy_udp_packet - helper function to create UDP packet
 222 * @raw_packet: preallocated space for dummy packet
  223 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
 224 * @l4proto: next level protocol used in data portion of l3
 225 * @data: filter data
 226 *
 227 * Helper function to populate udp fields.
 228 **/
 229static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
 230					 struct i40e_fdir_filter *data)
 231{
 232	struct udphdr *udp;
 233	u8 *tmp;
 234
 235	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
 236	udp = (struct udphdr *)(tmp);
 237	udp->dest = data->dst_port;
 238	udp->source = data->src_port;
 239}
 240
 241/**
 242 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
 243 * @raw_packet: preallocated space for dummy packet
  244 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
 245 * @l4proto: next level protocol used in data portion of l3
 246 * @data: filter data
 247 *
 248 * Helper function to populate tcp fields.
 249 **/
 250static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
 251					 struct i40e_fdir_filter *data)
 252{
 253	struct tcphdr *tcp;
 254	u8 *tmp;
 255	/* Dummy tcp packet */
 256	static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 257		0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
 258
 259	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
 260
 261	tcp = (struct tcphdr *)tmp;
 262	memcpy(tcp, tcp_packet, sizeof(tcp_packet));
 263	tcp->dest = data->dst_port;
 264	tcp->source = data->src_port;
 265}
 266
 267/**
 268 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
 269 * @raw_packet: preallocated space for dummy packet
  270 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
 271 * @l4proto: next level protocol used in data portion of l3
 272 * @data: filter data
 273 *
 274 * Helper function to populate sctp fields.
 275 **/
 276static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
 277					  u8 l4proto,
 278					  struct i40e_fdir_filter *data)
 279{
 280	struct sctphdr *sctp;
 281	u8 *tmp;
 282
 283	tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
 284
 285	sctp = (struct sctphdr *)tmp;
 286	sctp->dest = data->dst_port;
 287	sctp->source = data->src_port;
 288}
 289
 290/**
 291 * i40e_prepare_fdir_filter - Prepare and program fdir filter
 292 * @pf: physical function to attach filter to
 293 * @fd_data: filter data
 294 * @add: add or delete filter
 295 * @packet_addr: address of dummy packet, used in filtering
 296 * @payload_offset: offset from dummy packet address to user defined data
 297 * @pctype: Packet type for which filter is used
 298 *
 299 * Helper function to offset data of dummy packet, program it and
 300 * handle errors.
 301 **/
 302static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
 303				    struct i40e_fdir_filter *fd_data,
 304				    bool add, char *packet_addr,
 305				    int payload_offset, u8 pctype)
 306{
 307	int ret;
 308
 309	if (fd_data->flex_filter) {
 310		u8 *payload;
 311		__be16 pattern = fd_data->flex_word;
 312		u16 off = fd_data->flex_offset;
 313
 314		payload = packet_addr + payload_offset;
 315
 316		/* If user provided vlan, offset payload by vlan header length */
 317		if (!!fd_data->vlan_tag)
 318			payload += VLAN_HLEN;
 319
 320		*((__force __be16 *)(payload + off)) = pattern;
 321	}
 322
 323	fd_data->pctype = pctype;
 324	ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
 325	if (ret) {
 326		dev_info(&pf->pdev->dev,
 327			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
 328			 fd_data->pctype, fd_data->fd_id, ret);
 329		/* Free the packet buffer since it wasn't added to the ring */
 330		return -EOPNOTSUPP;
 331	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
 332		if (add)
 333			dev_info(&pf->pdev->dev,
 334				 "Filter OK for PCTYPE %d loc = %d\n",
 335				 fd_data->pctype, fd_data->fd_id);
 336		else
 337			dev_info(&pf->pdev->dev,
 338				 "Filter deleted for PCTYPE %d loc = %d\n",
 339				 fd_data->pctype, fd_data->fd_id);
 340	}
 341
 342	return ret;
 343}
 344
 345/**
  346 * i40e_change_filter_num - Update filter counters for the PF
  347 * @ipv4: true if the layer 3 packet is IPv4, false if IPv6
 348 * @add: add or delete filter
 349 * @ipv4_filter_num: field to update
 350 * @ipv6_filter_num: field to update
 351 *
 352 * Update filter number field for pf.
 353 **/
 354static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
 355				   u16 *ipv6_filter_num)
 356{
 357	if (add) {
 358		if (ipv4)
 359			(*ipv4_filter_num)++;
 360		else
 361			(*ipv6_filter_num)++;
 362	} else {
 363		if (ipv4)
 364			(*ipv4_filter_num)--;
 365		else
 366			(*ipv6_filter_num)--;
 367	}
 368}
 369
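/* Length of the dummy UDP packet headers built below, e.g. 42 = 14 (Ethernet)
 * + 20 (IPv4) + 8 (UDP); passed to i40e_prepare_fdir_filter() as the offset
 * of the user-defined flex payload.
 */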
 370#define I40E_UDPIP_DUMMY_PACKET_LEN	42
 371#define I40E_UDPIP6_DUMMY_PACKET_LEN	62
 372/**
 373 * i40e_add_del_fdir_udp - Add/Remove UDP filters
 374 * @vsi: pointer to the targeted VSI
 375 * @fd_data: the flow director data required for the FDir descriptor
 376 * @add: true adds a filter, false removes it
 377 * @ipv4: true is v4, false is v6
 378 *
 379 * Returns 0 if the filters were successfully added or removed
 380 **/
 381static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
 382				 struct i40e_fdir_filter *fd_data,
 383				 bool add,
 384				 bool ipv4)
 385{
 386	struct i40e_pf *pf = vsi->back;
 387	u8 *raw_packet;
 388	int ret;
 389
 390	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 391	if (!raw_packet)
 392		return -ENOMEM;
 393
 394	i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
 395
 396	if (ipv4)
 397		ret = i40e_prepare_fdir_filter
 398			(pf, fd_data, add, raw_packet,
 399			 I40E_UDPIP_DUMMY_PACKET_LEN,
 400			 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
 401	else
 402		ret = i40e_prepare_fdir_filter
 403			(pf, fd_data, add, raw_packet,
 404			 I40E_UDPIP6_DUMMY_PACKET_LEN,
 405			 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
 406
 407	if (ret) {
 408		kfree(raw_packet);
 409		return ret;
 410	}
 411
 412	i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
 413			       &pf->fd_udp6_filter_cnt);
 414
 415	return 0;
 416}
 417
 418#define I40E_TCPIP_DUMMY_PACKET_LEN	54
 419#define I40E_TCPIP6_DUMMY_PACKET_LEN	74
 420/**
  421 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
 422 * @vsi: pointer to the targeted VSI
 423 * @fd_data: the flow director data required for the FDir descriptor
 424 * @add: true adds a filter, false removes it
 425 * @ipv4: true is v4, false is v6
 426 *
 427 * Returns 0 if the filters were successfully added or removed
 428 **/
 429static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
 430				 struct i40e_fdir_filter *fd_data,
 431				 bool add,
 432				 bool ipv4)
 433{
 434	struct i40e_pf *pf = vsi->back;
 435	u8 *raw_packet;
 436	int ret;
 437
 438	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 439	if (!raw_packet)
 440		return -ENOMEM;
 441
 442	i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
 443	if (ipv4)
 444		ret = i40e_prepare_fdir_filter
 445			(pf, fd_data, add, raw_packet,
 446			 I40E_TCPIP_DUMMY_PACKET_LEN,
 447			 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 448	else
 449		ret = i40e_prepare_fdir_filter
 450			(pf, fd_data, add, raw_packet,
 451			 I40E_TCPIP6_DUMMY_PACKET_LEN,
 452			 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 453
 454	if (ret) {
 455		kfree(raw_packet);
 456		return ret;
 457	}
 458
 459	i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
 460			       &pf->fd_tcp6_filter_cnt);
 461
 462	if (add) {
 463		if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
 464		    I40E_DEBUG_FD & pf->hw.debug_mask)
 465			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
 466		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 467	}
 468	return 0;
 469}
 470
 471#define I40E_SCTPIP_DUMMY_PACKET_LEN	46
 472#define I40E_SCTPIP6_DUMMY_PACKET_LEN	66
 473/**
  474 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
 475 * a specific flow spec
 476 * @vsi: pointer to the targeted VSI
 477 * @fd_data: the flow director data required for the FDir descriptor
 478 * @add: true adds a filter, false removes it
 479 * @ipv4: true is v4, false is v6
 480 *
 481 * Returns 0 if the filters were successfully added or removed
 482 **/
 483static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
 484				  struct i40e_fdir_filter *fd_data,
 485				  bool add,
 486				  bool ipv4)
 487{
 488	struct i40e_pf *pf = vsi->back;
 489	u8 *raw_packet;
 490	int ret;
 491
 492	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 493	if (!raw_packet)
 494		return -ENOMEM;
 495
 496	i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
 497
 498	if (ipv4)
 499		ret = i40e_prepare_fdir_filter
 500			(pf, fd_data, add, raw_packet,
 501			 I40E_SCTPIP_DUMMY_PACKET_LEN,
 502			 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
 503	else
 504		ret = i40e_prepare_fdir_filter
 505			(pf, fd_data, add, raw_packet,
 506			 I40E_SCTPIP6_DUMMY_PACKET_LEN,
 507			 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
 508
 509	if (ret) {
 510		kfree(raw_packet);
 511		return ret;
 512	}
 513
 514	i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
 515			       &pf->fd_sctp6_filter_cnt);
 516
 517	return 0;
 518}
 519
 520#define I40E_IP_DUMMY_PACKET_LEN	34
 521#define I40E_IP6_DUMMY_PACKET_LEN	54
 522/**
  523 * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
 524 * a specific flow spec
 525 * @vsi: pointer to the targeted VSI
 526 * @fd_data: the flow director data required for the FDir descriptor
 527 * @add: true adds a filter, false removes it
 528 * @ipv4: true is v4, false is v6
 529 *
 530 * Returns 0 if the filters were successfully added or removed
 531 **/
 532static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
 533				struct i40e_fdir_filter *fd_data,
 534				bool add,
 535				bool ipv4)
 536{
 537	struct i40e_pf *pf = vsi->back;
 538	int payload_offset;
 539	u8 *raw_packet;
 540	int iter_start;
 541	int iter_end;
 542	int ret;
 543	int i;
 544
 545	if (ipv4) {
 546		iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 547		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
 548	} else {
 549		iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 550		iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
 551	}
 552
 553	for (i = iter_start; i <= iter_end; i++) {
 554		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 555		if (!raw_packet)
 556			return -ENOMEM;
 557
 558		/* IPv6 no header option differs from IPv4 */
 559		(void)i40e_create_dummy_packet
 560			(raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
 561			 fd_data);
 562
 563		payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
 564			I40E_IP6_DUMMY_PACKET_LEN;
 565		ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
 566					       payload_offset, i);
 567		if (ret)
 568			goto err;
 569	}
 570
 571	i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
 572			       &pf->fd_ip6_filter_cnt);
 573
 574	return 0;
 575err:
 576	kfree(raw_packet);
 577	return ret;
 578}
 579
 580/**
 581 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 582 * @vsi: pointer to the targeted VSI
 583 * @input: filter to add or delete
 584 * @add: true adds a filter, false removes it
 585 *
 586 **/
 587int i40e_add_del_fdir(struct i40e_vsi *vsi,
 588		      struct i40e_fdir_filter *input, bool add)
 589{
 590	enum ip_ver { ipv6 = 0, ipv4 = 1 };
 591	struct i40e_pf *pf = vsi->back;
 592	int ret;
 593
 594	switch (input->flow_type & ~FLOW_EXT) {
 595	case TCP_V4_FLOW:
 596		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
 597		break;
 598	case UDP_V4_FLOW:
 599		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
 600		break;
 601	case SCTP_V4_FLOW:
 602		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
 603		break;
 604	case TCP_V6_FLOW:
 605		ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
 606		break;
 607	case UDP_V6_FLOW:
 608		ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
 609		break;
 610	case SCTP_V6_FLOW:
 611		ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
 612		break;
 613	case IP_USER_FLOW:
 614		switch (input->ipl4_proto) {
 615		case IPPROTO_TCP:
 616			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
 617			break;
 618		case IPPROTO_UDP:
 619			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
 620			break;
 621		case IPPROTO_SCTP:
 622			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
 623			break;
 624		case IPPROTO_IP:
 625			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
 626			break;
 627		default:
 628			/* We cannot support masking based on protocol */
 629			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
 630				 input->ipl4_proto);
 631			return -EINVAL;
 632		}
 633		break;
 634	case IPV6_USER_FLOW:
 635		switch (input->ipl4_proto) {
 636		case IPPROTO_TCP:
 637			ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
 638			break;
 639		case IPPROTO_UDP:
 640			ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
 641			break;
 642		case IPPROTO_SCTP:
 643			ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
 644			break;
 645		case IPPROTO_IP:
 646			ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
 647			break;
 648		default:
 649			/* We cannot support masking based on protocol */
 650			dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
 651				 input->ipl4_proto);
 652			return -EINVAL;
 653		}
 654		break;
 655	default:
 656		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
 657			 input->flow_type);
 658		return -EINVAL;
 659	}
 660
  661	/* The buffer allocated here will normally be freed by
 662	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
 663	 * completion. In the event of an error adding the buffer to the FDIR
 664	 * ring, it will immediately be freed. It may also be freed by
 665	 * i40e_clean_tx_ring() when closing the VSI.
 666	 */
 667	return ret;
 668}
 669
 670/**
 671 * i40e_fd_handle_status - check the Programming Status for FD
 672 * @rx_ring: the Rx ring for this descriptor
 673 * @qword0_raw: qword0
 674 * @qword1: qword1 after le_to_cpu
 675 * @prog_id: the id originally used for programming
 676 *
 677 * This is used to verify if the FD programming or invalidation
 678 * requested by SW to the HW is successful or not and take actions accordingly.
 679 **/
 680static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
 681				  u64 qword1, u8 prog_id)
 682{
 683	struct i40e_pf *pf = rx_ring->vsi->back;
 684	struct pci_dev *pdev = pf->pdev;
 685	struct i40e_16b_rx_wb_qw0 *qw0;
 686	u32 fcnt_prog, fcnt_avail;
 687	u32 error;
 688
 689	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
 690	error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
 691
 692	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 693		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
 694		if (qw0->hi_dword.fd_id != 0 ||
 695		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 696			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
 697				 pf->fd_inv);
 698
 699		/* Check if the programming error is for ATR.
 700		 * If so, auto disable ATR and set a state for
 701		 * flush in progress. Next time we come here if flush is in
 702		 * progress do nothing, once flush is complete the state will
 703		 * be cleared.
 704		 */
 705		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
 706			return;
 707
 708		pf->fd_add_err++;
 709		/* store the current atr filter count */
 710		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
 711
 712		if (qw0->hi_dword.fd_id == 0 &&
 713		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
 714			/* These set_bit() calls aren't atomic with the
 715			 * test_bit() here, but worse case we potentially
 716			 * disable ATR and queue a flush right after SB
 717			 * support is re-enabled. That shouldn't cause an
 718			 * issue in practice
 719			 */
 720			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
 721			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
 722		}
 723
 724		/* filter programming failed most likely due to table full */
 725		fcnt_prog = i40e_get_global_fd_count(pf);
 726		fcnt_avail = pf->fdir_pf_filter_count;
 727		/* If ATR is running fcnt_prog can quickly change,
 728		 * if we are very close to full, it makes sense to disable
 729		 * FD ATR/SB and then re-enable it when there is room.
 730		 */
 731		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 732			if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
 733			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
 734					      pf->state))
 735				if (I40E_DEBUG_FD & pf->hw.debug_mask)
 736					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
 737		}
 738	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 739		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 740			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
 741				 qw0->hi_dword.fd_id);
 742	}
 743}
 744
 745/**
 746 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 747 * @ring:      the ring that owns the buffer
 748 * @tx_buffer: the buffer to free
 749 **/
 750static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 751					    struct i40e_tx_buffer *tx_buffer)
 752{
 753	if (tx_buffer->skb) {
 754		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
 755			kfree(tx_buffer->raw_buf);
 756		else if (ring_is_xdp(ring))
 757			xdp_return_frame(tx_buffer->xdpf);
 758		else
 759			dev_kfree_skb_any(tx_buffer->skb);
 760		if (dma_unmap_len(tx_buffer, len))
 761			dma_unmap_single(ring->dev,
 762					 dma_unmap_addr(tx_buffer, dma),
 763					 dma_unmap_len(tx_buffer, len),
 764					 DMA_TO_DEVICE);
 765	} else if (dma_unmap_len(tx_buffer, len)) {
 766		dma_unmap_page(ring->dev,
 767			       dma_unmap_addr(tx_buffer, dma),
 768			       dma_unmap_len(tx_buffer, len),
 769			       DMA_TO_DEVICE);
 770	}
 771
 772	tx_buffer->next_to_watch = NULL;
 773	tx_buffer->skb = NULL;
 774	dma_unmap_len_set(tx_buffer, len, 0);
 775	/* tx_buffer must be completely set up in the transmit path */
 776}
 777
 778/**
  779 * i40e_clean_tx_ring - Free all Tx buffers in a ring
 780 * @tx_ring: ring to be cleaned
 781 **/
 782void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 783{
 784	unsigned long bi_size;
 785	u16 i;
 786
 787	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
 788		i40e_xsk_clean_tx_ring(tx_ring);
 789	} else {
 790		/* ring already cleared, nothing to do */
 791		if (!tx_ring->tx_bi)
 792			return;
 793
 794		/* Free all the Tx ring sk_buffs */
 795		for (i = 0; i < tx_ring->count; i++)
 796			i40e_unmap_and_free_tx_resource(tx_ring,
 797							&tx_ring->tx_bi[i]);
 798	}
 799
 800	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 801	memset(tx_ring->tx_bi, 0, bi_size);
 802
 803	/* Zero out the descriptor ring */
 804	memset(tx_ring->desc, 0, tx_ring->size);
 805
 806	tx_ring->next_to_use = 0;
 807	tx_ring->next_to_clean = 0;
 808
 809	if (!tx_ring->netdev)
 810		return;
 811
 812	/* cleanup Tx queue statistics */
 813	netdev_tx_reset_queue(txring_txq(tx_ring));
 814}
 815
 816/**
 817 * i40e_free_tx_resources - Free Tx resources per queue
 818 * @tx_ring: Tx descriptor ring for a specific queue
 819 *
 820 * Free all transmit software resources
 821 **/
 822void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 823{
 824	i40e_clean_tx_ring(tx_ring);
 825	kfree(tx_ring->tx_bi);
 826	tx_ring->tx_bi = NULL;
 827
 828	if (tx_ring->desc) {
 829		dma_free_coherent(tx_ring->dev, tx_ring->size,
 830				  tx_ring->desc, tx_ring->dma);
 831		tx_ring->desc = NULL;
 832	}
 833}
 834
 835/**
 836 * i40e_get_tx_pending - how many tx descriptors not processed
 837 * @ring: the ring of descriptors
 838 * @in_sw: use SW variables
 839 *
 840 * Since there is no access to the ring head register
 841 * in XL710, we need to use our local copies
 842 **/
 843u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 844{
 845	u32 head, tail;
 846
 847	if (!in_sw) {
 848		head = i40e_get_head(ring);
 849		tail = readl(ring->tail);
 850	} else {
 851		head = ring->next_to_clean;
 852		tail = ring->next_to_use;
 853	}
 854
 855	if (head != tail)
 856		return (head < tail) ?
 857			tail - head : (tail + ring->count - head);
 858
 859	return 0;
 860}
 861
 862/**
  863 * i40e_detect_recover_hung - Function to detect and recover hung queues
 864 * @pf: pointer to PF struct
 865 *
  866 * The LAN VSI has a netdev and the netdev has Tx queues. This function
  867 * checks each of those Tx queues and, if a queue appears hung, triggers
  868 * recovery by issuing a SW interrupt.
 869 **/
 870void i40e_detect_recover_hung(struct i40e_pf *pf)
 871{
 872	struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
 873	struct i40e_ring *tx_ring = NULL;
 874	struct net_device *netdev;
 875	unsigned int i;
 876	int packets;
 877
 878	if (!vsi)
 879		return;
 880
 881	if (test_bit(__I40E_VSI_DOWN, vsi->state))
 882		return;
 883
 884	netdev = vsi->netdev;
 885	if (!netdev)
 886		return;
 887
 888	if (!netif_carrier_ok(netdev))
 889		return;
 890
 891	for (i = 0; i < vsi->num_queue_pairs; i++) {
 892		tx_ring = vsi->tx_rings[i];
 893		if (tx_ring && tx_ring->desc) {
 894			/* If packet counter has not changed the queue is
 895			 * likely stalled, so force an interrupt for this
 896			 * queue.
 897			 *
 898			 * prev_pkt_ctr would be negative if there was no
 899			 * pending work.
 900			 */
 901			packets = tx_ring->stats.packets & INT_MAX;
 902			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
 903				i40e_force_wb(vsi, tx_ring->q_vector);
 904				continue;
 905			}
 906
 907			/* Memory barrier between read of packet count and call
 908			 * to i40e_get_tx_pending()
 909			 */
 910			smp_rmb();
 911			tx_ring->tx_stats.prev_pkt_ctr =
 912			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
 913		}
 914	}
 915}
 916
 917/**
 918 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 919 * @vsi: the VSI we care about
 920 * @tx_ring: Tx ring to clean
 921 * @napi_budget: Used to determine if we are in netpoll
  922 * @tx_cleaned: Out parameter set to the number of Tx packets cleaned
 923 *
 924 * Returns true if there's any budget left (e.g. the clean is finished)
 925 **/
 926static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 927			      struct i40e_ring *tx_ring, int napi_budget,
 928			      unsigned int *tx_cleaned)
 929{
 930	int i = tx_ring->next_to_clean;
 931	struct i40e_tx_buffer *tx_buf;
 932	struct i40e_tx_desc *tx_head;
 933	struct i40e_tx_desc *tx_desc;
 934	unsigned int total_bytes = 0, total_packets = 0;
 935	unsigned int budget = vsi->work_limit;
 936
 937	tx_buf = &tx_ring->tx_bi[i];
 938	tx_desc = I40E_TX_DESC(tx_ring, i);
 939	i -= tx_ring->count;
 940
 941	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
 942
 943	do {
 944		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 945
 946		/* if next_to_watch is not set then there is no work pending */
 947		if (!eop_desc)
 948			break;
 949
 950		/* prevent any other reads prior to eop_desc */
 951		smp_rmb();
 952
 953		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
 954		/* we have caught up to head, no work left to do */
 955		if (tx_head == tx_desc)
 956			break;
 957
 958		/* clear next_to_watch to prevent false hangs */
 959		tx_buf->next_to_watch = NULL;
 960
 961		/* update the statistics for this packet */
 962		total_bytes += tx_buf->bytecount;
 963		total_packets += tx_buf->gso_segs;
 964
 965		/* free the skb/XDP data */
 966		if (ring_is_xdp(tx_ring))
 967			xdp_return_frame(tx_buf->xdpf);
 968		else
 969			napi_consume_skb(tx_buf->skb, napi_budget);
 970
 971		/* unmap skb header data */
 972		dma_unmap_single(tx_ring->dev,
 973				 dma_unmap_addr(tx_buf, dma),
 974				 dma_unmap_len(tx_buf, len),
 975				 DMA_TO_DEVICE);
 976
 977		/* clear tx_buffer data */
 978		tx_buf->skb = NULL;
 979		dma_unmap_len_set(tx_buf, len, 0);
 980
 981		/* unmap remaining buffers */
 982		while (tx_desc != eop_desc) {
 983			i40e_trace(clean_tx_irq_unmap,
 984				   tx_ring, tx_desc, tx_buf);
 985
 986			tx_buf++;
 987			tx_desc++;
 988			i++;
 989			if (unlikely(!i)) {
 990				i -= tx_ring->count;
 991				tx_buf = tx_ring->tx_bi;
 992				tx_desc = I40E_TX_DESC(tx_ring, 0);
 993			}
 994
 995			/* unmap any remaining paged data */
 996			if (dma_unmap_len(tx_buf, len)) {
 997				dma_unmap_page(tx_ring->dev,
 998					       dma_unmap_addr(tx_buf, dma),
 999					       dma_unmap_len(tx_buf, len),
1000					       DMA_TO_DEVICE);
1001				dma_unmap_len_set(tx_buf, len, 0);
1002			}
1003		}
1004
1005		/* move us one more past the eop_desc for start of next pkt */
1006		tx_buf++;
1007		tx_desc++;
1008		i++;
1009		if (unlikely(!i)) {
1010			i -= tx_ring->count;
1011			tx_buf = tx_ring->tx_bi;
1012			tx_desc = I40E_TX_DESC(tx_ring, 0);
1013		}
1014
1015		prefetch(tx_desc);
1016
1017		/* update budget accounting */
1018		budget--;
1019	} while (likely(budget));
1020
1021	i += tx_ring->count;
1022	tx_ring->next_to_clean = i;
1023	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1024	i40e_arm_wb(tx_ring, vsi, budget);
1025
1026	if (ring_is_xdp(tx_ring))
1027		return !!budget;
1028
1029	/* notify netdev of completed buffers */
1030	netdev_tx_completed_queue(txring_txq(tx_ring),
1031				  total_packets, total_bytes);
1032
1033#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
1034	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1035		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1036		/* Make sure that anybody stopping the queue after this
1037		 * sees the new next_to_clean.
1038		 */
1039		smp_mb();
1040		if (__netif_subqueue_stopped(tx_ring->netdev,
1041					     tx_ring->queue_index) &&
1042		   !test_bit(__I40E_VSI_DOWN, vsi->state)) {
1043			netif_wake_subqueue(tx_ring->netdev,
1044					    tx_ring->queue_index);
1045			++tx_ring->tx_stats.restart_queue;
1046		}
1047	}
1048
1049	*tx_cleaned = total_packets;
1050	return !!budget;
1051}
1052
1053/**
1054 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1055 * @vsi: the VSI we care about
1056 * @q_vector: the vector on which to enable writeback
1057 *
1058 **/
1059static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
1060				  struct i40e_q_vector *q_vector)
1061{
1062	u16 flags = q_vector->tx.ring[0].flags;
1063	u32 val;
1064
1065	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
1066		return;
1067
1068	if (q_vector->arm_wb_state)
1069		return;
1070
1071	if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1072		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
1073		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
1074
1075		wr32(&vsi->back->hw,
1076		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1077		     val);
1078	} else {
1079		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
1080		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
1081
1082		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1083	}
1084	q_vector->arm_wb_state = true;
1085}
1086
1087/**
1088 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1089 * @vsi: the VSI we care about
 1090 * @q_vector: the vector on which to force writeback
1091 *
1092 **/
1093void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1094{
1095	if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1096		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1097			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
1098			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
1099			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
1100			  /* allow 00 to be written to the index */
1101
1102		wr32(&vsi->back->hw,
1103		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
1104	} else {
1105		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1106			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1107			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1108			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1109			/* allow 00 to be written to the index */
1110
1111		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1112	}
1113}
1114
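/**
 * i40e_container_is_rx - check whether a ring container is the Rx container
 * @q_vector: the vector owning the container
 * @rc: the ring container to check
 **/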
1115static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1116					struct i40e_ring_container *rc)
1117{
1118	return &q_vector->rx == rc;
1119}
1120
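/**
 * i40e_itr_divisor - get the divisor used to scale the adaptive ITR step
 * @q_vector: the vector whose link speed selects the divisor
 *
 * Faster links use a larger divisor, so the byte-count based adjustment in
 * i40e_update_itr() adds a smaller delay per byte at higher speeds.
 **/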
1121static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1122{
1123	unsigned int divisor;
1124
1125	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1126	case I40E_LINK_SPEED_40GB:
1127		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1128		break;
1129	case I40E_LINK_SPEED_25GB:
1130	case I40E_LINK_SPEED_20GB:
1131		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1132		break;
1133	default:
1134	case I40E_LINK_SPEED_10GB:
1135		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1136		break;
1137	case I40E_LINK_SPEED_1GB:
1138	case I40E_LINK_SPEED_100MB:
1139		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1140		break;
1141	}
1142
1143	return divisor;
1144}
1145
1146/**
1147 * i40e_update_itr - update the dynamic ITR value based on statistics
1148 * @q_vector: structure containing interrupt and ring information
1149 * @rc: structure containing ring performance data
1150 *
1151 * Stores a new ITR value based on packets and byte
1152 * counts during the last interrupt.  The advantage of per interrupt
1153 * computation is faster updates and more accurate ITR for the current
1154 * traffic pattern.  Constants in this function were computed
1155 * based on theoretical maximum wire speed and thresholds were set based
1156 * on testing data as well as attempting to minimize response time
1157 * while increasing bulk throughput.
1158 **/
1159static void i40e_update_itr(struct i40e_q_vector *q_vector,
1160			    struct i40e_ring_container *rc)
1161{
1162	unsigned int avg_wire_size, packets, bytes, itr;
1163	unsigned long next_update = jiffies;
1164
1165	/* If we don't have any rings just leave ourselves set for maximum
1166	 * possible latency so we take ourselves out of the equation.
1167	 */
1168	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1169		return;
1170
1171	/* For Rx we want to push the delay up and default to low latency.
1172	 * for Tx we want to pull the delay down and default to high latency.
1173	 */
1174	itr = i40e_container_is_rx(q_vector, rc) ?
1175	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1176	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1177
1178	/* If we didn't update within up to 1 - 2 jiffies we can assume
1179	 * that either packets are coming in so slow there hasn't been
1180	 * any work, or that there is so much work that NAPI is dealing
1181	 * with interrupt moderation and we don't need to do anything.
1182	 */
1183	if (time_after(next_update, rc->next_update))
1184		goto clear_counts;
1185
1186	/* If itr_countdown is set it means we programmed an ITR within
1187	 * the last 4 interrupt cycles. This has a side effect of us
1188	 * potentially firing an early interrupt. In order to work around
1189	 * this we need to throw out any data received for a few
1190	 * interrupts following the update.
1191	 */
1192	if (q_vector->itr_countdown) {
1193		itr = rc->target_itr;
1194		goto clear_counts;
1195	}
1196
1197	packets = rc->total_packets;
1198	bytes = rc->total_bytes;
1199
1200	if (i40e_container_is_rx(q_vector, rc)) {
1201		/* If Rx there are 1 to 4 packets and bytes are less than
1202		 * 9000 assume insufficient data to use bulk rate limiting
1203		 * approach unless Tx is already in bulk rate limiting. We
1204		 * are likely latency driven.
1205		 */
1206		if (packets && packets < 4 && bytes < 9000 &&
1207		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1208			itr = I40E_ITR_ADAPTIVE_LATENCY;
1209			goto adjust_by_size;
1210		}
1211	} else if (packets < 4) {
1212		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
1213		 * bulk mode and we are receiving 4 or fewer packets just
1214		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1215		 * that the Rx can relax.
1216		 */
1217		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1218		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1219		     I40E_ITR_ADAPTIVE_MAX_USECS)
1220			goto clear_counts;
1221	} else if (packets > 32) {
1222		/* If we have processed over 32 packets in a single interrupt
1223		 * for Tx assume we need to switch over to "bulk" mode.
1224		 */
1225		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1226	}
1227
1228	/* We have no packets to actually measure against. This means
1229	 * either one of the other queues on this vector is active or
1230	 * we are a Tx queue doing TSO with too high of an interrupt rate.
1231	 *
1232	 * Between 4 and 56 we can assume that our current interrupt delay
1233	 * is only slightly too low. As such we should increase it by a small
1234	 * fixed amount.
1235	 */
1236	if (packets < 56) {
1237		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1238		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1239			itr &= I40E_ITR_ADAPTIVE_LATENCY;
1240			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1241		}
1242		goto clear_counts;
1243	}
1244
1245	if (packets <= 256) {
1246		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1247		itr &= I40E_ITR_MASK;
1248
1249		/* Between 56 and 112 is our "goldilocks" zone where we are
1250		 * working out "just right". Just report that our current
1251		 * ITR is good for us.
1252		 */
1253		if (packets <= 112)
1254			goto clear_counts;
1255
1256		/* If packet count is 128 or greater we are likely looking
1257		 * at a slight overrun of the delay we want. Try halving
1258		 * our delay to see if that will cut the number of packets
1259		 * in half per interrupt.
1260		 */
1261		itr /= 2;
1262		itr &= I40E_ITR_MASK;
1263		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1264			itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1265
1266		goto clear_counts;
1267	}
1268
1269	/* The paths below assume we are dealing with a bulk ITR since
1270	 * number of packets is greater than 256. We are just going to have
1271	 * to compute a value and try to bring the count under control,
1272	 * though for smaller packet sizes there isn't much we can do as
1273	 * NAPI polling will likely be kicking in sooner rather than later.
1274	 */
1275	itr = I40E_ITR_ADAPTIVE_BULK;
1276
1277adjust_by_size:
1278	/* If packet counts are 256 or greater we can assume we have a gross
1279	 * overestimation of what the rate should be. Instead of trying to fine
1280	 * tune it just use the formula below to try and dial in an exact value
1281	 * give the current packet size of the frame.
1282	 */
1283	avg_wire_size = bytes / packets;
1284
1285	/* The following is a crude approximation of:
1286	 *  wmem_default / (size + overhead) = desired_pkts_per_int
1287	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1288	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1289	 *
1290	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1291	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1292	 * formula down to
1293	 *
1294	 *  (170 * (size + 24)) / (size + 640) = ITR
1295	 *
1296	 * We first do some math on the packet size and then finally bitshift
1297	 * by 8 after rounding up. We also have to account for PCIe link speed
1298	 * difference as ITR scales based on this.
1299	 */
1300	if (avg_wire_size <= 60) {
1301		/* Start at 250k ints/sec */
1302		avg_wire_size = 4096;
1303	} else if (avg_wire_size <= 380) {
1304		/* 250K ints/sec to 60K ints/sec */
1305		avg_wire_size *= 40;
1306		avg_wire_size += 1696;
1307	} else if (avg_wire_size <= 1084) {
1308		/* 60K ints/sec to 36K ints/sec */
1309		avg_wire_size *= 15;
1310		avg_wire_size += 11452;
1311	} else if (avg_wire_size <= 1980) {
1312		/* 36K ints/sec to 30K ints/sec */
1313		avg_wire_size *= 5;
1314		avg_wire_size += 22420;
1315	} else {
1316		/* plateau at a limit of 30K ints/sec */
1317		avg_wire_size = 32256;
1318	}
1319
1320	/* If we are in low latency mode halve our delay which doubles the
1321	 * rate to somewhere between 100K to 16K ints/sec
1322	 */
1323	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1324		avg_wire_size /= 2;
1325
1326	/* Resultant value is 256 times larger than it needs to be. This
1327	 * gives us room to adjust the value as needed to either increase
1328	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1329	 *
1330	 * Use addition as we have already recorded the new latency flag
1331	 * for the ITR value.
1332	 */
1333	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1334	       I40E_ITR_ADAPTIVE_MIN_INC;
1335
1336	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1337		itr &= I40E_ITR_ADAPTIVE_LATENCY;
1338		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1339	}
1340
1341clear_counts:
1342	/* write back value */
1343	rc->target_itr = itr;
1344
1345	/* next update should occur within next jiffy */
1346	rc->next_update = next_update + 1;
1347
1348	rc->total_bytes = 0;
1349	rc->total_packets = 0;
1350}
1351
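/**
 * i40e_rx_bi - return the Rx buffer info entry for a given ring index
 * @rx_ring: Rx descriptor ring to look in
 * @idx: index of the buffer within the ring
 **/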
1352static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1353{
1354	return &rx_ring->rx_bi[idx];
1355}
1356
1357/**
1358 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1359 * @rx_ring: rx descriptor ring to store buffers on
1360 * @old_buff: donor buffer to have page reused
1361 *
1362 * Synchronizes page for reuse by the adapter
1363 **/
1364static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1365			       struct i40e_rx_buffer *old_buff)
1366{
1367	struct i40e_rx_buffer *new_buff;
1368	u16 nta = rx_ring->next_to_alloc;
1369
1370	new_buff = i40e_rx_bi(rx_ring, nta);
1371
1372	/* update, and store next to alloc */
1373	nta++;
1374	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1375
1376	/* transfer page from old buffer to new buffer */
1377	new_buff->dma		= old_buff->dma;
1378	new_buff->page		= old_buff->page;
1379	new_buff->page_offset	= old_buff->page_offset;
1380	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
1381
1382	/* clear contents of buffer_info */
1383	old_buff->page = NULL;
1384}
1385
1386/**
1387 * i40e_clean_programming_status - clean the programming status descriptor
1388 * @rx_ring: the rx ring that has this descriptor
1389 * @qword0_raw: qword0
1390 * @qword1: qword1 representing status_error_len in CPU ordering
1391 *
1392 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1393 * status being successful or not and take actions accordingly. FCoE should
1394 * handle its context/filter programming/invalidation status and take actions.
1397 **/
1398void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1399				   u64 qword1)
1400{
1401	u8 id;
1402
1403	id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
1404
1405	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1406		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1407}
1408
1409/**
1410 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1411 * @tx_ring: the tx ring to set up
1412 *
1413 * Return 0 on success, negative on error
1414 **/
1415int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1416{
1417	struct device *dev = tx_ring->dev;
1418	int bi_size;
1419
1420	if (!dev)
1421		return -ENOMEM;
1422
1423	/* warn if we are about to overwrite the pointer */
1424	WARN_ON(tx_ring->tx_bi);
1425	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1426	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1427	if (!tx_ring->tx_bi)
1428		goto err;
1429
1430	u64_stats_init(&tx_ring->syncp);
1431
1432	/* round up to nearest 4K */
1433	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1434	/* add u32 for head writeback, align after this takes care of
1435	 * guaranteeing this is at least one cache line in size
1436	 */
1437	tx_ring->size += sizeof(u32);
1438	tx_ring->size = ALIGN(tx_ring->size, 4096);
1439	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1440					   &tx_ring->dma, GFP_KERNEL);
1441	if (!tx_ring->desc) {
1442		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1443			 tx_ring->size);
1444		goto err;
1445	}
1446
1447	tx_ring->next_to_use = 0;
1448	tx_ring->next_to_clean = 0;
1449	tx_ring->tx_stats.prev_pkt_ctr = -1;
1450	return 0;
1451
1452err:
1453	kfree(tx_ring->tx_bi);
1454	tx_ring->tx_bi = NULL;
1455	return -ENOMEM;
1456}
1457
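/**
 * i40e_clear_rx_bi - zero out the Rx buffer info array of a ring
 * @rx_ring: ring whose rx_bi entries are cleared
 **/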
1458static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1459{
1460	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1461}
1462
1463/**
1464 * i40e_clean_rx_ring - Free Rx buffers
1465 * @rx_ring: ring to be cleaned
1466 **/
1467void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1468{
1469	u16 i;
1470
1471	/* ring already cleared, nothing to do */
1472	if (!rx_ring->rx_bi)
1473		return;
1474
1475	if (rx_ring->xsk_pool) {
1476		i40e_xsk_clean_rx_ring(rx_ring);
1477		goto skip_free;
1478	}
1479
1480	/* Free all the Rx ring sk_buffs */
1481	for (i = 0; i < rx_ring->count; i++) {
1482		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1483
1484		if (!rx_bi->page)
1485			continue;
1486
1487		/* Invalidate cache lines that may have been written to by
1488		 * device so that we avoid corrupting memory.
1489		 */
1490		dma_sync_single_range_for_cpu(rx_ring->dev,
1491					      rx_bi->dma,
1492					      rx_bi->page_offset,
1493					      rx_ring->rx_buf_len,
1494					      DMA_FROM_DEVICE);
1495
1496		/* free resources associated with mapping */
1497		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1498				     i40e_rx_pg_size(rx_ring),
1499				     DMA_FROM_DEVICE,
1500				     I40E_RX_DMA_ATTR);
1501
1502		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1503
1504		rx_bi->page = NULL;
1505		rx_bi->page_offset = 0;
1506	}
1507
1508skip_free:
1509	if (rx_ring->xsk_pool)
1510		i40e_clear_rx_bi_zc(rx_ring);
1511	else
1512		i40e_clear_rx_bi(rx_ring);
1513
1514	/* Zero out the descriptor ring */
1515	memset(rx_ring->desc, 0, rx_ring->size);
1516
1517	rx_ring->next_to_alloc = 0;
1518	rx_ring->next_to_clean = 0;
1519	rx_ring->next_to_process = 0;
1520	rx_ring->next_to_use = 0;
1521}
1522
1523/**
1524 * i40e_free_rx_resources - Free Rx resources
1525 * @rx_ring: ring to clean the resources from
1526 *
1527 * Free all receive software resources
1528 **/
1529void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1530{
1531	i40e_clean_rx_ring(rx_ring);
1532	if (rx_ring->vsi->type == I40E_VSI_MAIN)
1533		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1534	rx_ring->xdp_prog = NULL;
1535	kfree(rx_ring->rx_bi);
1536	rx_ring->rx_bi = NULL;
1537
1538	if (rx_ring->desc) {
1539		dma_free_coherent(rx_ring->dev, rx_ring->size,
1540				  rx_ring->desc, rx_ring->dma);
1541		rx_ring->desc = NULL;
1542	}
1543}
1544
1545/**
1546 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1547 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1548 *
1549 * Returns 0 on success, negative on failure
1550 **/
1551int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1552{
1553	struct device *dev = rx_ring->dev;
1554
1555	u64_stats_init(&rx_ring->syncp);
1556
1557	/* Round up to nearest 4K */
1558	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
1559	rx_ring->size = ALIGN(rx_ring->size, 4096);
1560	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1561					   &rx_ring->dma, GFP_KERNEL);
1562
1563	if (!rx_ring->desc) {
1564		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1565			 rx_ring->size);
1566		return -ENOMEM;
1567	}
1568
1569	rx_ring->next_to_alloc = 0;
1570	rx_ring->next_to_clean = 0;
1571	rx_ring->next_to_process = 0;
1572	rx_ring->next_to_use = 0;
1573
1574	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1575
1576	rx_ring->rx_bi =
1577		kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
1578	if (!rx_ring->rx_bi)
1579		return -ENOMEM;
1580
1581	return 0;
1582}
1583
1584/**
1585 * i40e_release_rx_desc - Store the new tail and head values
1586 * @rx_ring: ring to bump
1587 * @val: new head index
1588 **/
1589void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1590{
1591	rx_ring->next_to_use = val;
1592
1593	/* update next to alloc since we have filled the ring */
1594	rx_ring->next_to_alloc = val;
1595
1596	/* Force memory writes to complete before letting h/w
1597	 * know there are new descriptors to fetch.  (Only
1598	 * applicable for weak-ordered memory model archs,
1599	 * such as IA-64).
1600	 */
1601	wmb();
1602	writel(val, rx_ring->tail);
1603}
1604
1605#if (PAGE_SIZE >= 8192)
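/* Truesize of a received frame on architectures with pages of at least 8 KiB:
 * when an Rx offset (headroom) is in use, the headroom and skb_shared_info
 * are counted in addition to the aligned data size.
 */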
1606static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1607					   unsigned int size)
1608{
1609	unsigned int truesize;
1610
1611	truesize = rx_ring->rx_offset ?
1612		SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
1613		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1614		SKB_DATA_ALIGN(size);
1615	return truesize;
1616}
1617#endif
1618
1619/**
1620 * i40e_alloc_mapped_page - recycle or make a new page
1621 * @rx_ring: ring to use
1622 * @bi: rx_buffer struct to modify
1623 *
1624 * Returns true if the page was successfully allocated or
1625 * reused.
1626 **/
1627static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1628				   struct i40e_rx_buffer *bi)
1629{
1630	struct page *page = bi->page;
1631	dma_addr_t dma;
1632
1633	/* since we are recycling buffers we should seldom need to alloc */
1634	if (likely(page)) {
1635		rx_ring->rx_stats.page_reuse_count++;
1636		return true;
1637	}
1638
1639	/* alloc new page for storage */
1640	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1641	if (unlikely(!page)) {
1642		rx_ring->rx_stats.alloc_page_failed++;
1643		return false;
1644	}
1645
1646	rx_ring->rx_stats.page_alloc_count++;
1647
1648	/* map page for use */
1649	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1650				 i40e_rx_pg_size(rx_ring),
1651				 DMA_FROM_DEVICE,
1652				 I40E_RX_DMA_ATTR);
1653
1654	/* if mapping failed free memory back to system since
1655	 * there isn't much point in holding memory we can't use
1656	 */
1657	if (dma_mapping_error(rx_ring->dev, dma)) {
1658		__free_pages(page, i40e_rx_pg_order(rx_ring));
1659		rx_ring->rx_stats.alloc_page_failed++;
1660		return false;
1661	}
1662
1663	bi->dma = dma;
1664	bi->page = page;
1665	bi->page_offset = rx_ring->rx_offset;
1666	page_ref_add(page, USHRT_MAX - 1);
1667	bi->pagecnt_bias = USHRT_MAX;
1668
1669	return true;
1670}
1671
1672/**
1673 * i40e_alloc_rx_buffers - Replace used receive buffers
1674 * @rx_ring: ring to place buffers on
1675 * @cleaned_count: number of buffers to replace
1676 *
1677 * Returns false if all allocations were successful, true if any fail
1678 **/
1679bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1680{
1681	u16 ntu = rx_ring->next_to_use;
1682	union i40e_rx_desc *rx_desc;
1683	struct i40e_rx_buffer *bi;
1684
1685	/* do nothing if no valid netdev defined */
1686	if (!rx_ring->netdev || !cleaned_count)
1687		return false;
1688
1689	rx_desc = I40E_RX_DESC(rx_ring, ntu);
1690	bi = i40e_rx_bi(rx_ring, ntu);
1691
1692	do {
1693		if (!i40e_alloc_mapped_page(rx_ring, bi))
1694			goto no_buffers;
1695
1696		/* sync the buffer for use by the device */
1697		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1698						 bi->page_offset,
1699						 rx_ring->rx_buf_len,
1700						 DMA_FROM_DEVICE);
1701
1702		/* Refresh the desc even if buffer_addrs didn't change
1703		 * because each write-back erases this info.
1704		 */
1705		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1706
1707		rx_desc++;
1708		bi++;
1709		ntu++;
1710		if (unlikely(ntu == rx_ring->count)) {
1711			rx_desc = I40E_RX_DESC(rx_ring, 0);
1712			bi = i40e_rx_bi(rx_ring, 0);
1713			ntu = 0;
1714		}
1715
1716		/* clear the status bits for the next_to_use descriptor */
1717		rx_desc->wb.qword1.status_error_len = 0;
1718
1719		cleaned_count--;
1720	} while (cleaned_count);
1721
1722	if (rx_ring->next_to_use != ntu)
1723		i40e_release_rx_desc(rx_ring, ntu);
1724
1725	return false;
1726
1727no_buffers:
1728	if (rx_ring->next_to_use != ntu)
1729		i40e_release_rx_desc(rx_ring, ntu);
1730
1731	/* make sure to come back via polling to try again after
1732	 * allocation failure
1733	 */
1734	return true;
1735}
1736
1737/**
1738 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1739 * @vsi: the VSI we care about
1740 * @skb: skb currently being received and modified
1741 * @rx_desc: the receive descriptor
1742 **/
1743static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1744				    struct sk_buff *skb,
1745				    union i40e_rx_desc *rx_desc)
1746{
1747	struct libeth_rx_pt decoded;
1748	u32 rx_error, rx_status;
1749	bool ipv4, ipv6;
1750	u8 ptype;
1751	u64 qword;
1752
1753	skb->ip_summed = CHECKSUM_NONE;
1754
1755	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1756	ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1757
1758	decoded = libie_rx_pt_parse(ptype);
1759	if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
1760		return;
1761
1762	rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
1763	rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1764
1765	/* did the hardware decode the packet and checksum? */
1766	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1767		return;
1768
1769	ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
1770	ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
1771
1772	if (ipv4 &&
1773	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1774			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1775		goto checksum_fail;
1776
1777	/* likely incorrect csum if alternate IP extension headers found */
1778	if (ipv6 &&
1779	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1780		/* don't increment checksum err here, non-fatal err */
1781		return;
1782
1783	/* there was some L4 error, count error and punt packet to the stack */
1784	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1785		goto checksum_fail;
1786
1787	/* handle packets that were not able to be checksummed due
1788	 * to arrival speed; in this case the stack can compute
1789	 * the csum.
1790	 */
1791	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1792		return;
1793
1794	/* If there is an outer header present that might contain a checksum
1795	 * we need to bump the checksum level by 1 to reflect the fact that
1796	 * we are indicating we validated the inner checksum.
1797	 */
1798	if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
1799		skb->csum_level = 1;
1800
1801	skb->ip_summed = CHECKSUM_UNNECESSARY;
1802	return;
1803
1804checksum_fail:
1805	vsi->back->hw_csum_rx_error++;
1806}
1807
1808/**
1809 * i40e_rx_hash - set the hash value in the skb
1810 * @ring: descriptor ring
1811 * @rx_desc: specific descriptor
1812 * @skb: skb currently being received and modified
1813 * @rx_ptype: Rx packet type
1814 **/
1815static inline void i40e_rx_hash(struct i40e_ring *ring,
1816				union i40e_rx_desc *rx_desc,
1817				struct sk_buff *skb,
1818				u8 rx_ptype)
1819{
1820	struct libeth_rx_pt decoded;
1821	u32 hash;
1822	const __le64 rss_mask =
1823		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1824			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1825
1826	decoded = libie_rx_pt_parse(rx_ptype);
1827	if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
1828		return;
1829
1830	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1831		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1832		libeth_rx_pt_set_hash(skb, hash, decoded);
1833	}
1834}
1835
1836/**
1837 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1838 * @rx_ring: rx descriptor ring packet is being transacted on
1839 * @rx_desc: pointer to the EOP Rx descriptor
1840 * @skb: pointer to current skb being populated
1841 *
1842 * This function checks the ring, descriptor, and packet information in
1843 * order to populate the hash, checksum, VLAN, protocol, and
1844 * other fields within the skb.
1845 **/
1846void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1847			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1848{
1849	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1850	u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1851	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1852	u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
1853	u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1854
1855	if (unlikely(tsynvalid))
1856		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1857
1858	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1859
1860	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1861
1862	skb_record_rx_queue(skb, rx_ring->queue_index);
1863
1864	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1865		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1866
1867		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1868				       le16_to_cpu(vlan_tag));
1869	}
1870
1871	/* modifies the skb - consumes the enet header */
1872	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1873}
1874
1875/**
1876 * i40e_cleanup_headers - Correct empty headers
1877 * @rx_ring: rx descriptor ring packet is being transacted on
1878 * @skb: pointer to current skb being fixed
1879 * @rx_desc: pointer to the EOP Rx descriptor
1880 *
1881 * If the skb is not at least 60 bytes, we need to pad it so that it is
1882 * large enough to qualify as a valid Ethernet frame.
1883 *
1884 * Returns true if an error was encountered and skb was freed.
1885 **/
1886static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1887				 union i40e_rx_desc *rx_desc)
1888
1889{
1890	/* ERR_MASK will only have valid bits if EOP set, and
1891	 * what we are doing here is actually checking
1892	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1893	 * the error field
1894	 */
1895	if (unlikely(i40e_test_staterr(rx_desc,
1896				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1897		dev_kfree_skb_any(skb);
1898		return true;
1899	}
1900
1901	/* if eth_skb_pad returns an error the skb was freed */
1902	if (eth_skb_pad(skb))
1903		return true;
1904
1905	return false;
1906}
1907
1908/**
1909 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1910 * @rx_buffer: buffer containing the page
1911 * @rx_stats: rx stats structure for the rx ring
1912 *
1913 * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
1914 * which will assign the current buffer to the buffer that next_to_alloc is
1915 * pointing to; otherwise, the DMA mapping needs to be destroyed and
1916 * page freed.
1917 *
1918 * rx_stats will be updated to indicate whether the page was waived
1919 * or busy if it could not be reused.
1920 */
1921static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1922				   struct i40e_rx_queue_stats *rx_stats)
1923{
1924	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1925	struct page *page = rx_buffer->page;
1926
1927	/* Is any reuse possible? */
1928	if (!dev_page_is_reusable(page)) {
1929		rx_stats->page_waive_count++;
1930		return false;
1931	}
1932
1933#if (PAGE_SIZE < 8192)
1934	/* if we are the only owner of the page we can reuse it */
1935	if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {
1936		rx_stats->page_busy_count++;
1937		return false;
1938	}
1939#else
1940#define I40E_LAST_OFFSET \
1941	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
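	/* On pages of 8K or more the offset only moves forward; stop reusing
	 * once there is no longer room for another maximum-size (2K) buffer.
	 */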
1942	if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
1943		rx_stats->page_busy_count++;
1944		return false;
1945	}
1946#endif
1947
1948	/* If we have drained the page fragment pool we need to update
1949	 * the pagecnt_bias and page count so that we fully restock the
1950	 * number of references the driver holds.
1951	 */
1952	if (unlikely(pagecnt_bias == 1)) {
1953		page_ref_add(page, USHRT_MAX - 1);
1954		rx_buffer->pagecnt_bias = USHRT_MAX;
1955	}
1956
1957	return true;
1958}
1959
1960/**
1961 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
1962 * @rx_buffer: Rx buffer to adjust
1963 * @truesize: Size of adjustment
1964 **/
1965static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
1966				unsigned int truesize)
1967{
1968#if (PAGE_SIZE < 8192)
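	/* with pages below 8K, XOR by truesize toggles between the two
	 * buffer halves of the page
	 */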
1969	rx_buffer->page_offset ^= truesize;
1970#else
1971	rx_buffer->page_offset += truesize;
1972#endif
1973}
1974
1975/**
1976 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1977 * @rx_ring: rx descriptor ring to transact packets on
1978 * @size: size of buffer to add to skb
1979 *
1980 * This function will pull an Rx buffer from the ring and synchronize it
1981 * for use by the CPU.
1982 */
1983static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1984						 const unsigned int size)
1985{
1986	struct i40e_rx_buffer *rx_buffer;
1987
1988	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
1989	rx_buffer->page_count =
1990#if (PAGE_SIZE < 8192)
1991		page_count(rx_buffer->page);
1992#else
1993		0;
1994#endif
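	/* the refcount snapshot above is only consulted on small-page systems,
	 * where i40e_can_reuse_rx_page() compares it against pagecnt_bias to
	 * detect other owners of the page
	 */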
1995	prefetch_page_address(rx_buffer->page);
1996
1997	/* we are reusing so sync this buffer for CPU use */
1998	dma_sync_single_range_for_cpu(rx_ring->dev,
1999				      rx_buffer->dma,
2000				      rx_buffer->page_offset,
2001				      size,
2002				      DMA_FROM_DEVICE);
2003
2004	/* We have pulled a buffer for use, so decrement pagecnt_bias */
2005	rx_buffer->pagecnt_bias--;
2006
2007	return rx_buffer;
2008}
2009
2010/**
2011 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2012 * @rx_ring: rx descriptor ring to transact packets on
2013 * @rx_buffer: rx buffer to pull data from
2014 *
2015 * This function will clean up the contents of the rx_buffer.  It will
2016 * either recycle the buffer or unmap it and free the associated resources.
2017 */
2018static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2019			       struct i40e_rx_buffer *rx_buffer)
2020{
2021	if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
2022		/* hand second half of page back to the ring */
2023		i40e_reuse_rx_page(rx_ring, rx_buffer);
2024	} else {
2025		/* we are not reusing the buffer so unmap it */
2026		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2027				     i40e_rx_pg_size(rx_ring),
2028				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2029		__page_frag_cache_drain(rx_buffer->page,
2030					rx_buffer->pagecnt_bias);
2031		/* clear contents of buffer_info */
2032		rx_buffer->page = NULL;
2033	}
2034}
2035
2036/**
2037 * i40e_process_rx_buffs - Process buffers after the XDP program ran or on error
2038 * @rx_ring: Rx descriptor ring to transact packets on
2039 * @xdp_res: Result of the XDP program
2040 * @xdp: xdp_buff pointing to the data
2041 **/
2042static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
2043				  struct xdp_buff *xdp)
2044{
2045	u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
2046	u32 next = rx_ring->next_to_clean, i = 0;
2047	struct i40e_rx_buffer *rx_buffer;
2048
2049	xdp->flags = 0;
2050
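	/* walk every buffer of the frame from next_to_clean up to (but not
	 * including) next_to_process; the EOP buffer itself is handled by
	 * the caller in i40e_clean_rx_irq()
	 */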
2051	while (1) {
2052		rx_buffer = i40e_rx_bi(rx_ring, next);
2053		if (++next == rx_ring->count)
2054			next = 0;
2055
2056		if (!rx_buffer->page)
2057			continue;
2058
2059		if (xdp_res != I40E_XDP_CONSUMED)
2060			i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2061		else if (i++ <= nr_frags)
2062			rx_buffer->pagecnt_bias++;
2063
2064		/* EOP buffer will be put in i40e_clean_rx_irq() */
2065		if (next == rx_ring->next_to_process)
2066			return;
2067
2068		i40e_put_rx_buffer(rx_ring, rx_buffer);
2069	}
2070}
2071
2072/**
2073 * i40e_construct_skb - Allocate skb and populate it
2074 * @rx_ring: rx descriptor ring to transact packets on
2075 * @xdp: xdp_buff pointing to the data
2076 *
2077 * This function allocates an skb.  It then populates it with the page
2078 * data from the current receive descriptor, taking care to set up the
2079 * skb correctly.
2080 */
2081static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2082					  struct xdp_buff *xdp)
2083{
2084	unsigned int size = xdp->data_end - xdp->data;
2085	struct i40e_rx_buffer *rx_buffer;
2086	struct skb_shared_info *sinfo;
2087	unsigned int headlen;
2088	struct sk_buff *skb;
2089	u32 nr_frags = 0;
2090
2091	/* prefetch first cache line of first page */
2092	net_prefetch(xdp->data);
2093
2094	/* Note, we get here by enabling legacy-rx via:
2095	 *
2096	 *    ethtool --set-priv-flags <dev> legacy-rx on
2097	 *
2098	 * In this mode, we currently get 0 extra XDP headroom as
2099	 * opposed to having legacy-rx off, where we process XDP
2100	 * packets going to stack via i40e_build_skb(). The latter
2101	 * provides us currently with 192 bytes of headroom.
2102	 *
2103	 * For i40e_construct_skb() mode it means that the
2104	 * xdp->data_meta will always point to xdp->data, since
2105	 * the helper cannot expand the head. Should this ever
2106	 * change in the future for legacy-rx mode on, then let's also
2107	 * add xdp->data_meta handling here.
2108	 */
2109
2110	/* allocate a skb to store the frags */
2111	skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
2112	if (unlikely(!skb))
2113		return NULL;
2114
2115	/* Determine how much of the packet header to copy into the linear area */
2116	headlen = size;
2117	if (headlen > I40E_RX_HDR_SIZE)
2118		headlen = eth_get_headlen(skb->dev, xdp->data,
2119					  I40E_RX_HDR_SIZE);
2120
2121	/* align pull length to size of long to optimize memcpy performance */
2122	memcpy(__skb_put(skb, headlen), xdp->data,
2123	       ALIGN(headlen, sizeof(long)));
2124
2125	if (unlikely(xdp_buff_has_frags(xdp))) {
2126		sinfo = xdp_get_shared_info_from_buff(xdp);
2127		nr_frags = sinfo->nr_frags;
2128	}
2129	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2130	/* update all of the pointers */
2131	size -= headlen;
2132	if (size) {
2133		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2134			dev_kfree_skb(skb);
2135			return NULL;
2136		}
2137		skb_add_rx_frag(skb, 0, rx_buffer->page,
2138				rx_buffer->page_offset + headlen,
2139				size, xdp->frame_sz);
2140		/* buffer is used by skb, update page_offset */
2141		i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2142	} else {
2143		/* buffer is unused, reset bias back to rx_buffer */
2144		rx_buffer->pagecnt_bias++;
2145	}
2146
2147	if (unlikely(xdp_buff_has_frags(xdp))) {
2148		struct skb_shared_info *skinfo = skb_shinfo(skb);
2149
2150		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
2151		       sizeof(skb_frag_t) * nr_frags);
2152
2153		xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
2154					   sinfo->xdp_frags_size,
2155					   nr_frags * xdp->frame_sz,
2156					   xdp_buff_is_frag_pfmemalloc(xdp));
2157
2158		/* First buffer has already been processed, so bump ntc */
2159		if (++rx_ring->next_to_clean == rx_ring->count)
2160			rx_ring->next_to_clean = 0;
2161
2162		i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2163	}
2164
2165	return skb;
2166}
2167
2168/**
2169 * i40e_build_skb - Build skb around an existing buffer
2170 * @rx_ring: Rx descriptor ring to transact packets on
2171 * @xdp: xdp_buff pointing to the data
2172 *
2173 * This function builds an skb around an existing Rx buffer, taking care
2174 * to set up the skb correctly and avoid any memcpy overhead.
2175 */
2176static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2177				      struct xdp_buff *xdp)
2178{
2179	unsigned int metasize = xdp->data - xdp->data_meta;
2180	struct skb_shared_info *sinfo;
2181	struct sk_buff *skb;
2182	u32 nr_frags;
2183
2184	/* Prefetch first cache line of first page. If xdp->data_meta
2185	 * is unused, this points exactly at xdp->data; otherwise we
2186	 * likely have a consumer accessing first few bytes of meta
2187	 * data, and then actual data.
2188	 */
2189	net_prefetch(xdp->data_meta);
2190
2191	if (unlikely(xdp_buff_has_frags(xdp))) {
2192		sinfo = xdp_get_shared_info_from_buff(xdp);
2193		nr_frags = sinfo->nr_frags;
2194	}
2195
2196	/* build an skb around the page buffer */
2197	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
2198	if (unlikely(!skb))
2199		return NULL;
2200
2201	/* update pointers within the skb to store the data */
2202	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2203	__skb_put(skb, xdp->data_end - xdp->data);
2204	if (metasize)
2205		skb_metadata_set(skb, metasize);
2206
2207	if (unlikely(xdp_buff_has_frags(xdp))) {
2208		xdp_update_skb_shared_info(skb, nr_frags,
2209					   sinfo->xdp_frags_size,
2210					   nr_frags * xdp->frame_sz,
2211					   xdp_buff_is_frag_pfmemalloc(xdp));
2212
2213		i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2214	} else {
2215		struct i40e_rx_buffer *rx_buffer;
2216
2217		rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2218		/* buffer is used by skb, update page_offset */
2219		i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2220	}
2221
2222	return skb;
2223}
2224
2225/**
2226 * i40e_is_non_eop - process handling of non-EOP buffers
2227 * @rx_ring: Rx ring being processed
2228 * @rx_desc: Rx descriptor for current buffer
2229 *
2230 * If the buffer is an EOP buffer, this function exits returning false;
2231 * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
2232 */
2233bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2234		     union i40e_rx_desc *rx_desc)
2235{
2236	/* if we are the last buffer then there is nothing else to do */
2237#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2238	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2239		return false;
2240
2241	rx_ring->rx_stats.non_eop_descs++;
2242
2243	return true;
2244}
2245
2246static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2247			      struct i40e_ring *xdp_ring);
2248
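/**
 * i40e_xmit_xdp_tx_ring - Transmit an XDP buffer on the XDP Tx ring
 * @xdp: xdp_buff to transmit
 * @xdp_ring: XDP Tx ring to use
 *
 * Converts the xdp_buff to an xdp_frame and hands it to i40e_xmit_xdp_ring();
 * returns I40E_XDP_CONSUMED if the conversion fails.
 **/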
2249int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2250{
2251	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2252
2253	if (unlikely(!xdpf))
2254		return I40E_XDP_CONSUMED;
2255
2256	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2257}
2258
2259/**
2260 * i40e_run_xdp - run an XDP program
2261 * @rx_ring: Rx ring being processed
2262 * @xdp: XDP buffer containing the frame
2263 * @xdp_prog: XDP program to run
2264 **/
2265static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
2266{
2267	int err, result = I40E_XDP_PASS;
2268	struct i40e_ring *xdp_ring;
2269	u32 act;
2270
2271	if (!xdp_prog)
2272		goto xdp_out;
2273
2274	prefetchw(xdp->data_hard_start); /* xdp_frame write */
2275
2276	act = bpf_prog_run_xdp(xdp_prog, xdp);
2277	switch (act) {
2278	case XDP_PASS:
2279		break;
2280	case XDP_TX:
2281		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2282		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2283		if (result == I40E_XDP_CONSUMED)
2284			goto out_failure;
2285		break;
2286	case XDP_REDIRECT:
2287		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2288		if (err)
2289			goto out_failure;
2290		result = I40E_XDP_REDIR;
2291		break;
2292	default:
2293		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2294		fallthrough;
2295	case XDP_ABORTED:
2296out_failure:
2297		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2298		fallthrough; /* handle aborts by dropping packet */
2299	case XDP_DROP:
2300		result = I40E_XDP_CONSUMED;
2301		break;
2302	}
2303xdp_out:
2304	return result;
2305}
2306
2307/**
2308 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2309 * @xdp_ring: XDP Tx ring
2310 *
2311 * This function updates the XDP Tx ring tail register.
2312 **/
2313void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2314{
2315	/* Force memory writes to complete before letting h/w
2316	 * know there are new descriptors to fetch.
2317	 */
2318	wmb();
2319	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2320}
2321
2322/**
2323 * i40e_update_rx_stats - Update Rx ring statistics
2324 * @rx_ring: rx descriptor ring
2325 * @total_rx_bytes: number of bytes received
2326 * @total_rx_packets: number of packets received
2327 *
2328 * This function updates the Rx ring statistics.
2329 **/
2330void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2331			  unsigned int total_rx_bytes,
2332			  unsigned int total_rx_packets)
2333{
2334	u64_stats_update_begin(&rx_ring->syncp);
2335	rx_ring->stats.packets += total_rx_packets;
2336	rx_ring->stats.bytes += total_rx_bytes;
2337	u64_stats_update_end(&rx_ring->syncp);
2338	rx_ring->q_vector->rx.total_packets += total_rx_packets;
2339	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2340}
2341
2342/**
2343 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2344 * @rx_ring: Rx ring
2345 * @xdp_res: Result of the receive batch
2346 *
2347 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2348 * should be called when a batch of packets has been processed in the
2349 * napi loop.
2350 **/
2351void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2352{
2353	if (xdp_res & I40E_XDP_REDIR)
2354		xdp_do_flush();
2355
2356	if (xdp_res & I40E_XDP_TX) {
2357		struct i40e_ring *xdp_ring =
2358			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2359
2360		i40e_xdp_ring_update_tail(xdp_ring);
2361	}
2362}
2363
2364/**
2365 * i40e_inc_ntp - Advance the next_to_process index
2366 * @rx_ring: Rx ring
2367 **/
2368static void i40e_inc_ntp(struct i40e_ring *rx_ring)
2369{
2370	u32 ntp = rx_ring->next_to_process + 1;
2371
2372	ntp = (ntp < rx_ring->count) ? ntp : 0;
2373	rx_ring->next_to_process = ntp;
2374	prefetch(I40E_RX_DESC(rx_ring, ntp));
2375}
2376
2377/**
2378 * i40e_add_xdp_frag - Add a frag to xdp_buff
2379 * @xdp: xdp_buff pointing to the data
2380 * @nr_frags: updated with the total number of frags for the packet
2381 * @rx_buffer: rx_buffer holding data of the current frag
2382 * @size: size of data of current frag
2383 */
2384static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags,
2385			     struct i40e_rx_buffer *rx_buffer, u32 size)
2386{
2387	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2388
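	/* first frag of this packet: initialize the shared info and mark the
	 * xdp_buff as fragmented
	 */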
2389	if (!xdp_buff_has_frags(xdp)) {
2390		sinfo->nr_frags = 0;
2391		sinfo->xdp_frags_size = 0;
2392		xdp_buff_set_frags_flag(xdp);
2393	} else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
2394		/* Overflowing packet: All frags need to be dropped */
2395		return -ENOMEM;
2396	}
2397
2398	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page,
2399				   rx_buffer->page_offset, size);
2400
2401	sinfo->xdp_frags_size += size;
2402
2403	if (page_is_pfmemalloc(rx_buffer->page))
2404		xdp_buff_set_frag_pfmemalloc(xdp);
2405	*nr_frags = sinfo->nr_frags;
2406
2407	return 0;
2408}
2409
2410/**
2411 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2412 * @rx_ring: rx descriptor ring to transact packets on
2413 * @xdp: xdp_buff pointing to the data
2414 * @rx_buffer: rx_buffer of eop desc
2415 */
2416static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring,
2417				  struct xdp_buff *xdp,
2418				  struct i40e_rx_buffer *rx_buffer)
2419{
2420	i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp);
2421	i40e_put_rx_buffer(rx_ring, rx_buffer);
2422	rx_ring->next_to_clean = rx_ring->next_to_process;
2423	xdp->data = NULL;
2424}
2425
2426/**
2427 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2428 * @rx_ring: rx descriptor ring to transact packets on
2429 * @budget: Total limit on number of packets to process
2430 * @rx_cleaned: Out parameter of the number of packets processed
2431 *
2432 * This function provides a "bounce buffer" approach to Rx interrupt
2433 * processing.  The advantage is that on systems with expensive IOMMU
2434 * access overhead, it avoids that cost by maintaining the mapping of the
2435 * page and reusing it.
2436 *
2437 * Returns amount of work completed
2438 **/
2439static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
2440			     unsigned int *rx_cleaned)
2441{
2442	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2443	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2444	u16 clean_threshold = rx_ring->count / 2;
2445	unsigned int offset = rx_ring->rx_offset;
2446	struct xdp_buff *xdp = &rx_ring->xdp;
2447	unsigned int xdp_xmit = 0;
2448	struct bpf_prog *xdp_prog;
2449	bool failure = false;
2450	int xdp_res = 0;
2451
2452	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2453
2454	while (likely(total_rx_packets < (unsigned int)budget)) {
2455		u16 ntp = rx_ring->next_to_process;
2456		struct i40e_rx_buffer *rx_buffer;
2457		union i40e_rx_desc *rx_desc;
2458		struct sk_buff *skb;
2459		unsigned int size;
2460		u32 nfrags = 0;
2461		bool neop;
2462		u64 qword;
2463
2464		/* return some buffers to hardware, one at a time is too slow */
2465		if (cleaned_count >= clean_threshold) {
2466			failure = failure ||
2467				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2468			cleaned_count = 0;
2469		}
2470
2471		rx_desc = I40E_RX_DESC(rx_ring, ntp);
2472
2473		/* status_error_len will always be zero for unused descriptors
2474		 * because it's cleared in cleanup and overlaps with hdr_addr,
2475		 * which is always zero because packet split isn't used.  If the
2476		 * hardware wrote DD then the length will be non-zero.
2477		 */
2478		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2479
2480		/* This memory barrier is needed to keep us from reading
2481		 * any other fields out of the rx_desc until we have
2482		 * verified the descriptor has been written back.
2483		 */
2484		dma_rmb();
2485
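		/* programming status descriptors carry no packet data; process
		 * the status, recycle the buffer and move on to the next
		 * descriptor
		 */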
2486		if (i40e_rx_is_programming_status(qword)) {
2487			i40e_clean_programming_status(rx_ring,
2488						      rx_desc->raw.qword[0],
2489						      qword);
2490			rx_buffer = i40e_rx_bi(rx_ring, ntp);
2491			i40e_inc_ntp(rx_ring);
2492			i40e_reuse_rx_page(rx_ring, rx_buffer);
2493			/* Update ntc and bump the cleaned count if not in the
2494			 * middle of a multi-buffer packet.
2495			 */
2496			if (rx_ring->next_to_clean == ntp) {
2497				rx_ring->next_to_clean =
2498					rx_ring->next_to_process;
2499				cleaned_count++;
2500			}
2501			continue;
2502		}
2503
2504		size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
2505		if (!size)
2506			break;
2507
2508		i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
2509		/* retrieve a buffer from the ring */
2510		rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2511
2512		neop = i40e_is_non_eop(rx_ring, rx_desc);
2513		i40e_inc_ntp(rx_ring);
2514
2515		if (!xdp->data) {
2516			unsigned char *hard_start;
2517
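			/* hard_start points at the beginning of the headroom,
			 * i.e. rx_offset bytes before the packet data
			 */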
2518			hard_start = page_address(rx_buffer->page) +
2519				     rx_buffer->page_offset - offset;
2520			xdp_prepare_buff(xdp, hard_start, offset, size, true);
2521#if (PAGE_SIZE > 4096)
2522			/* At larger PAGE_SIZE, frame_sz depends on the buffer length */
2523			xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2524#endif
2525		} else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
2526			   !neop) {
2527			/* Overflowing packet: Drop all frags on EOP */
2528			i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2529			break;
2530		}
2531
2532		if (neop)
2533			continue;
2534
2535		xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
2536
2537		if (xdp_res) {
2538			xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
2539
2540			if (unlikely(xdp_buff_has_frags(xdp))) {
2541				i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
2542				size = xdp_get_buff_len(xdp);
2543			} else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2544				i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2545			} else {
2546				rx_buffer->pagecnt_bias++;
2547			}
2548			total_rx_bytes += size;
2549		} else {
2550			if (ring_uses_build_skb(rx_ring))
2551				skb = i40e_build_skb(rx_ring, xdp);
2552			else
2553				skb = i40e_construct_skb(rx_ring, xdp);
2554
2555			/* drop the packet if we failed to build an skb */
2556			if (!skb) {
2557				rx_ring->rx_stats.alloc_buff_failed++;
2558				i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2559				break;
2560			}
2561
2562			if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
2563				goto process_next;
2564
2565			/* probably a little skewed due to removing CRC */
2566			total_rx_bytes += skb->len;
2567
2568			/* populate checksum, VLAN, and protocol */
2569			i40e_process_skb_fields(rx_ring, rx_desc, skb);
2570
2571			i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
2572			napi_gro_receive(&rx_ring->q_vector->napi, skb);
2573		}
2574
2575		/* update budget accounting */
2576		total_rx_packets++;
2577process_next:
2578		cleaned_count += nfrags + 1;
2579		i40e_put_rx_buffer(rx_ring, rx_buffer);
2580		rx_ring->next_to_clean = rx_ring->next_to_process;
2581
2582		xdp->data = NULL;
2583	}
2584
2585	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2586
2587	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2588
2589	*rx_cleaned = total_rx_packets;
2590
2591	/* guarantee a trip back through this routine if there was a failure */
2592	return failure ? budget : (int)total_rx_packets;
2593}
2594
2595/**
2596 * i40e_buildreg_itr - build a value for writing to I40E_PFINT_DYN_CTLN register
2597 * @itr_idx: interrupt throttling index
2598 * @interval: interrupt throttling interval value in usecs
2599 * @force_swint: force software interrupt
2600 *
2601 * The function builds a value for the I40E_PFINT_DYN_CTLN register that
2602 * is used to update the interrupt throttling interval for the specified ITR
2603 * index and optionally enforces a software interrupt. If @itr_idx is equal
2604 * to I40E_ITR_NONE then no interval change is applied and only the
2605 * @force_swint parameter is taken into account. If neither an interval
2606 * change nor an enforced software interrupt is requested then the built
2607 * value just enables the appropriate vector interrupt.
2608 **/
2609static u32 i40e_buildreg_itr(enum i40e_dyn_idx itr_idx, u16 interval,
2610			     bool force_swint)
2611{
2612	u32 val;
2613
2614	/* We don't bother with setting the CLEARPBA bit as the data sheet
2615	 * points out doing so is "meaningless since it was already
2616	 * auto-cleared". The auto-clearing happens when the interrupt is
2617	 * asserted.
2618	 *
2619	 * Hardware errata 28 also indicates that writing to a
2620	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2621	 * an event in the PBA anyway so we need to rely on the automask
2622	 * to hold pending events for us until the interrupt is re-enabled
2623	 *
2624	 * We have to shift the given value as it is reported in microseconds
2625	 * and the register value is recorded in 2 microsecond units.
2626	 */
2627	interval >>= 1;
2628
2629	/* 1. Enable vector interrupt
2630	 * 2. Update the interval for the specified ITR index
2631	 *    (I40E_ITR_NONE in the register is used to indicate that
2632	 *     no interval update is requested)
2633	 */
2634	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2635	      FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX_MASK, itr_idx) |
2636	      FIELD_PREP(I40E_PFINT_DYN_CTLN_INTERVAL_MASK, interval);
2637
2638	/* 3. Enforce software interrupt trigger if requested
2639	 *    (The rate of these software interrupts is limited by ITR2,
2640	 *     which is set to 20K interrupts per second)
2641	 */
2642	if (force_swint)
2643		val |= I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
2644		       I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
2645		       FIELD_PREP(I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK,
2646				  I40E_SW_ITR);
2647
2648	return val;
2649}
2650
2651/* The act of updating the ITR will cause it to immediately trigger. In order
2652 * to prevent this from throwing off adaptive update statistics we defer the
2653 * update so that it can only happen so often. So after either Tx or Rx are
2654 * updated we make the adaptive scheme wait until either the ITR completely
2655 * expires via the next_update expiration or we have been through at least
2656 * 3 interrupts.
2657 */
2658#define ITR_COUNTDOWN_START 3
2659
2660/**
2661 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2662 * @vsi: the VSI we care about
2663 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2664 *
2665 **/
2666static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2667					  struct i40e_q_vector *q_vector)
2668{
2669	enum i40e_dyn_idx itr_idx = I40E_ITR_NONE;
2670	struct i40e_hw *hw = &vsi->back->hw;
2671	u16 interval = 0;
2672	u32 itr_val;
2673
2674	/* If we don't have MSIX, then we only need to re-enable icr0 */
2675	if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
2676		i40e_irq_dynamic_enable_icr0(vsi->back);
2677		return;
2678	}
2679
2680	/* These will do nothing if dynamic updates are not enabled */
2681	i40e_update_itr(q_vector, &q_vector->tx);
2682	i40e_update_itr(q_vector, &q_vector->rx);
2683
2684	/* This block of logic allows us to get away with only updating
2685	 * one ITR value with each interrupt. The idea is to perform a
2686	 * pseudo-lazy update with the following criteria.
2687	 *
2688	 * 1. Rx is given higher priority than Tx if both are in same state
2689	 * 2. If we must reduce an ITR, that is given the highest priority.
2690	 * 3. We then give priority to increasing the ITR based on the amount of change.
2691	 */
2692	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2693		/* Rx ITR needs to be reduced, this is highest priority */
2694		itr_idx = I40E_RX_ITR;
2695		interval = q_vector->rx.target_itr;
2696		q_vector->rx.current_itr = q_vector->rx.target_itr;
2697		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2698	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2699		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2700		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2701		/* Tx ITR needs to be reduced, this is second priority
2702		 * Tx ITR needs to be increased more than Rx, fourth priority
2703		 */
2704		itr_idx = I40E_TX_ITR;
2705		interval = q_vector->tx.target_itr;
2706		q_vector->tx.current_itr = q_vector->tx.target_itr;
2707		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2708	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2709		/* Rx ITR needs to be increased, third priority */
2710		itr_idx = I40E_RX_ITR;
2711		interval = q_vector->rx.target_itr;
2712		q_vector->rx.current_itr = q_vector->rx.target_itr;
2713		q_vector->itr_countdown = ITR_COUNTDOWN_START;
2714	} else {
2715		/* No ITR update, lowest priority */
2716		if (q_vector->itr_countdown)
2717			q_vector->itr_countdown--;
2718	}
2719
2720	/* Do not update interrupt control register if VSI is down */
2721	if (test_bit(__I40E_VSI_DOWN, vsi->state))
2722		return;
2723
2724	/* Update ITR interval if necessary and enforce software interrupt
2725	 * if we are exiting busy poll.
2726	 */
2727	if (q_vector->in_busy_poll) {
2728		itr_val = i40e_buildreg_itr(itr_idx, interval, true);
2729		q_vector->in_busy_poll = false;
2730	} else {
2731		itr_val = i40e_buildreg_itr(itr_idx, interval, false);
2732	}
2733	wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->reg_idx), itr_val);
2734}
2735
2736/**
2737 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2738 * @napi: napi struct with our devices info in it
2739 * @budget: amount of work driver is allowed to do this pass, in packets
2740 *
2741 * This function will clean all queues associated with a q_vector.
2742 *
2743 * Returns the amount of work done
2744 **/
2745int i40e_napi_poll(struct napi_struct *napi, int budget)
2746{
2747	struct i40e_q_vector *q_vector =
2748			       container_of(napi, struct i40e_q_vector, napi);
2749	struct i40e_vsi *vsi = q_vector->vsi;
2750	struct i40e_ring *ring;
2751	bool tx_clean_complete = true;
2752	bool rx_clean_complete = true;
2753	unsigned int tx_cleaned = 0;
2754	unsigned int rx_cleaned = 0;
2755	bool clean_complete = true;
2756	bool arm_wb = false;
2757	int budget_per_ring;
2758	int work_done = 0;
2759
2760	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2761		napi_complete(napi);
2762		return 0;
2763	}
2764
2765	/* Since the actual Tx work is minimal, we can give the Tx a larger
2766	 * budget and be more aggressive about cleaning up the Tx descriptors.
2767	 */
2768	i40e_for_each_ring(ring, q_vector->tx) {
2769		bool wd = ring->xsk_pool ?
2770			  i40e_clean_xdp_tx_irq(vsi, ring) :
2771			  i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
2772
2773		if (!wd) {
2774			clean_complete = tx_clean_complete = false;
2775			continue;
2776		}
2777		arm_wb |= ring->arm_wb;
2778		ring->arm_wb = false;
2779	}
2780
2781	/* Handle case where we are called by netpoll with a budget of 0 */
2782	if (budget <= 0)
2783		goto tx_only;
2784
2785	/* normally we have 1 Rx ring per q_vector */
2786	if (unlikely(q_vector->num_ringpairs > 1))
2787		/* We attempt to distribute budget to each Rx queue fairly, but
2788		 * don't allow the budget to go below 1 because that would exit
2789		 * polling early.
2790		 */
2791		budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2792	else
2793		/* Max of 1 Rx ring in this q_vector so give it the budget */
2794		budget_per_ring = budget;
2795
2796	i40e_for_each_ring(ring, q_vector->rx) {
2797		int cleaned = ring->xsk_pool ?
2798			      i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2799			      i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
2800
2801		work_done += cleaned;
2802		/* if we clean as many as budgeted, we must not be done */
2803		if (cleaned >= budget_per_ring)
2804			clean_complete = rx_clean_complete = false;
2805	}
2806
2807	if (!i40e_enabled_xdp_vsi(vsi))
2808		trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
2809				     tx_cleaned, rx_clean_complete, tx_clean_complete);
2810
2811	/* If work not completed, return budget and polling will return */
2812	if (!clean_complete) {
2813		int cpu_id = smp_processor_id();
2814
2815		/* It is possible that the interrupt affinity has changed but,
2816		 * if the cpu is pegged at 100%, polling will never exit while
2817		 * traffic continues and the interrupt will be stuck on this
2818		 * cpu.  We check to make sure affinity is correct before we
2819		 * continue to poll, otherwise we must stop polling so the
2820		 * interrupt can move to the correct cpu.
2821		 */
2822		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2823			/* Tell napi that we are done polling */
2824			napi_complete_done(napi, work_done);
2825
2826			/* Force an interrupt */
2827			i40e_force_wb(vsi, q_vector);
2828
2829			/* Return budget-1 so that polling stops */
2830			return budget - 1;
2831		}
2832tx_only:
2833		if (arm_wb) {
2834			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2835			i40e_enable_wb_on_itr(vsi, q_vector);
2836		}
2837		return budget;
2838	}
2839
2840	if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
2841		q_vector->arm_wb_state = false;
2842
2843	/* Exit the polling mode, but don't re-enable interrupts if stack might
2844	 * poll us due to busy-polling
2845	 */
2846	if (likely(napi_complete_done(napi, work_done)))
2847		i40e_update_enable_itr(vsi, q_vector);
2848	else
2849		q_vector->in_busy_poll = true;
2850
2851	return min(work_done, budget - 1);
2852}
2853
2854/**
2855 * i40e_atr - Add a Flow Director ATR filter
2856 * @tx_ring:  ring to add programming descriptor to
2857 * @skb:      send buffer
2858 * @tx_flags: send tx flags
2859 **/
2860static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2861		     u32 tx_flags)
2862{
2863	struct i40e_filter_program_desc *fdir_desc;
2864	struct i40e_pf *pf = tx_ring->vsi->back;
2865	union {
2866		unsigned char *network;
2867		struct iphdr *ipv4;
2868		struct ipv6hdr *ipv6;
2869	} hdr;
2870	struct tcphdr *th;
2871	unsigned int hlen;
2872	u32 flex_ptype, dtype_cmd;
2873	int l4_proto;
2874	u16 i;
2875
2876	/* make sure ATR is enabled */
2877	if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
2878		return;
2879
2880	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2881		return;
2882
2883	/* if sampling is disabled do nothing */
2884	if (!tx_ring->atr_sample_rate)
2885		return;
2886
2887	/* Currently only IPv4/IPv6 with TCP is supported */
2888	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2889		return;
2890
2891	/* snag network header to get L4 type and address */
2892	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2893		      skb_inner_network_header(skb) : skb_network_header(skb);
2894
2895	/* Note: tx_flags gets modified to reflect inner protocols in
2896	 * tx_enable_csum function if encap is enabled.
2897	 */
2898	if (tx_flags & I40E_TX_FLAGS_IPV4) {
2899		/* access ihl as u8 to avoid unaligned access on ia64 */
2900		hlen = (hdr.network[0] & 0x0F) << 2;
2901		l4_proto = hdr.ipv4->protocol;
2902	} else {
2903		/* find the start of the innermost ipv6 header */
2904		unsigned int inner_hlen = hdr.network - skb->data;
2905		unsigned int h_offset = inner_hlen;
2906
2907		/* this function updates h_offset to the end of the header */
2908		l4_proto =
2909		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2910		/* hlen is our best estimate of the tcp header offset from hdr.network */
2911		hlen = h_offset - inner_hlen;
2912	}
2913
2914	if (l4_proto != IPPROTO_TCP)
2915		return;
2916
2917	th = (struct tcphdr *)(hdr.network + hlen);
2918
2919	/* Due to lack of space, no more new filters can be programmed */
2920	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2921		return;
2922	if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
2923		/* HW ATR eviction will take care of removing filters on FIN
2924		 * and RST packets.
2925		 */
2926		if (th->fin || th->rst)
2927			return;
2928	}
2929
2930	tx_ring->atr_count++;
2931
2932	/* sample on all syn/fin/rst packets or once every atr sample rate */
2933	if (!th->fin &&
2934	    !th->syn &&
2935	    !th->rst &&
2936	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
2937		return;
2938
2939	tx_ring->atr_count = 0;
2940
2941	/* grab the next descriptor */
2942	i = tx_ring->next_to_use;
2943	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2944
2945	i++;
2946	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2947
2948	flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
2949				tx_ring->queue_index);
2950	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2951		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2952		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2953		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2954		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2955
2956	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2957
2958	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2959
2960	dtype_cmd |= (th->fin || th->rst) ?
2961		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2962		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2963		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2964		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2965
2966	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2967		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
2968
2969	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2970		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2971
2972	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2973	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2974		dtype_cmd |=
2975			FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
2976				   I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
2977	else
2978		dtype_cmd |=
2979			FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
2980				   I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
2981
2982	if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
2983		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2984
2985	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2986	fdir_desc->rsvd = cpu_to_le32(0);
2987	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2988	fdir_desc->fd_id = cpu_to_le32(0);
2989}
2990
2991/**
2992 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2993 * @skb:     send buffer
2994 * @tx_ring: ring to send buffer on
2995 * @flags:   the tx flags to be set
2996 *
2997 * Checks the skb and correspondingly sets up several generic transmit flags
2998 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2999 *
3000 * Returns an error code to indicate the frame should be dropped upon error,
3001 * otherwise returns 0 to indicate the flags have been set properly.
3002 **/
3003static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
3004					     struct i40e_ring *tx_ring,
3005					     u32 *flags)
3006{
3007	__be16 protocol = skb->protocol;
3008	u32  tx_flags = 0;
3009
3010	if (protocol == htons(ETH_P_8021Q) &&
3011	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
3012		/* When HW VLAN acceleration is turned off by the user the
3013		 * stack sets the protocol to 8021q so that the driver
3014		 * can take any steps required to support the SW only
3015		 * VLAN handling.  In our case the driver doesn't need
3016		 * to take any further steps so just set the protocol
3017		 * to the encapsulated ethertype.
3018		 */
3019		skb->protocol = vlan_get_protocol(skb);
3020		goto out;
3021	}
3022
3023	/* if we have a HW VLAN tag being added, default to the HW one */
3024	if (skb_vlan_tag_present(skb)) {
3025		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
3026		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3027	/* else if it is a SW VLAN, check the next protocol and store the tag */
3028	} else if (protocol == htons(ETH_P_8021Q)) {
3029		struct vlan_hdr *vhdr, _vhdr;
3030
3031		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
3032		if (!vhdr)
3033			return -EINVAL;
3034
3035		protocol = vhdr->h_vlan_encapsulated_proto;
3036		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
3037		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
3038	}
3039
3040	if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
3041		goto out;
3042
3043	/* Insert 802.1p priority into VLAN header */
3044	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
3045	    (skb->priority != TC_PRIO_CONTROL)) {
3046		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
3047		tx_flags |= (skb->priority & 0x7) <<
3048				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
3049		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
3050			struct vlan_ethhdr *vhdr;
3051			int rc;
3052
3053			rc = skb_cow_head(skb, 0);
3054			if (rc < 0)
3055				return rc;
3056			vhdr = skb_vlan_eth_hdr(skb);
3057			vhdr->h_vlan_TCI = htons(tx_flags >>
3058						 I40E_TX_FLAGS_VLAN_SHIFT);
3059		} else {
3060			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3061		}
3062	}
3063
3064out:
3065	*flags = tx_flags;
3066	return 0;
3067}
3068
3069/**
3070 * i40e_tso - set up the tso context descriptor
3071 * @first:    pointer to first Tx buffer for xmit
3072 * @hdr_len:  ptr to the size of the packet header
3073 * @cd_type_cmd_tso_mss: Quad Word 1
3074 *
3075 * Returns 0 if no TSO can happen, 1 if TSO is in progress, or a negative error
3076 **/
3077static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3078		    u64 *cd_type_cmd_tso_mss)
3079{
3080	struct sk_buff *skb = first->skb;
3081	u64 cd_cmd, cd_tso_len, cd_mss;
3082	__be16 protocol;
3083	union {
3084		struct iphdr *v4;
3085		struct ipv6hdr *v6;
3086		unsigned char *hdr;
3087	} ip;
3088	union {
3089		struct tcphdr *tcp;
3090		struct udphdr *udp;
3091		unsigned char *hdr;
3092	} l4;
3093	u32 paylen, l4_offset;
3094	u16 gso_size;
3095	int err;
3096
3097	if (skb->ip_summed != CHECKSUM_PARTIAL)
3098		return 0;
3099
3100	if (!skb_is_gso(skb))
3101		return 0;
3102
3103	err = skb_cow_head(skb, 0);
3104	if (err < 0)
3105		return err;
3106
3107	protocol = vlan_get_protocol(skb);
3108
3109	if (eth_p_mpls(protocol))
3110		ip.hdr = skb_inner_network_header(skb);
3111	else
3112		ip.hdr = skb_network_header(skb);
3113	l4.hdr = skb_checksum_start(skb);
3114
3115	/* initialize outer IP header fields */
3116	if (ip.v4->version == 4) {
3117		ip.v4->tot_len = 0;
3118		ip.v4->check = 0;
3119
3120		first->tx_flags |= I40E_TX_FLAGS_TSO;
3121	} else {
3122		ip.v6->payload_len = 0;
3123		first->tx_flags |= I40E_TX_FLAGS_TSO;
3124	}
3125
3126	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
3127					 SKB_GSO_GRE_CSUM |
3128					 SKB_GSO_IPXIP4 |
3129					 SKB_GSO_IPXIP6 |
3130					 SKB_GSO_UDP_TUNNEL |
3131					 SKB_GSO_UDP_TUNNEL_CSUM)) {
3132		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3133		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
3134			l4.udp->len = 0;
3135
3136			/* determine offset of outer transport header */
3137			l4_offset = l4.hdr - skb->data;
3138
3139			/* remove payload length from outer checksum */
3140			paylen = skb->len - l4_offset;
3141			csum_replace_by_diff(&l4.udp->check,
3142					     (__force __wsum)htonl(paylen));
3143		}
3144
3145		/* reset pointers to inner headers */
3146		ip.hdr = skb_inner_network_header(skb);
3147		l4.hdr = skb_inner_transport_header(skb);
3148
3149		/* initialize inner IP header fields */
3150		if (ip.v4->version == 4) {
3151			ip.v4->tot_len = 0;
3152			ip.v4->check = 0;
3153		} else {
3154			ip.v6->payload_len = 0;
3155		}
3156	}
3157
3158	/* determine offset of inner transport header */
3159	l4_offset = l4.hdr - skb->data;
3160
3161	/* remove payload length from inner checksum */
3162	paylen = skb->len - l4_offset;
3163
3164	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3165		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
3166		/* compute length of segmentation header */
3167		*hdr_len = sizeof(*l4.udp) + l4_offset;
3168	} else {
3169		csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3170		/* compute length of segmentation header */
3171		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
3172	}
3173
3174	/* pull values out of skb_shinfo */
3175	gso_size = skb_shinfo(skb)->gso_size;
3176
3177	/* update GSO size and bytecount with header size */
3178	first->gso_segs = skb_shinfo(skb)->gso_segs;
3179	first->bytecount += (first->gso_segs - 1) * *hdr_len;
3180
3181	/* find the field values */
3182	cd_cmd = I40E_TX_CTX_DESC_TSO;
3183	cd_tso_len = skb->len - *hdr_len;
3184	cd_mss = gso_size;
3185	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3186				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3187				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3188	return 1;
3189}
3190
3191/**
3192 * i40e_tsyn - set up the tsyn context descriptor
3193 * @tx_ring:  ptr to the ring to send
3194 * @skb:      ptr to the skb we're sending
3195 * @tx_flags: the collected send information
3196 * @cd_type_cmd_tso_mss: Quad Word 1
3197 *
3198 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3199 **/
3200static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3201		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3202{
3203	struct i40e_pf *pf;
3204
3205	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3206		return 0;
3207
3208	/* Tx timestamps cannot be sampled when doing TSO */
3209	if (tx_flags & I40E_TX_FLAGS_TSO)
3210		return 0;
3211
3212	/* only timestamp the outbound packet if the user has requested it and
3213	 * we are not already transmitting a packet to be timestamped
3214	 */
3215	pf = i40e_netdev_to_pf(tx_ring->netdev);
3216	if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
3217		return 0;
3218
3219	if (pf->ptp_tx &&
3220	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3221		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3222		pf->ptp_tx_start = jiffies;
3223		pf->ptp_tx_skb = skb_get(skb);
3224	} else {
3225		pf->tx_hwtstamp_skipped++;
3226		return 0;
3227	}
3228
3229	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3230				I40E_TXD_CTX_QW1_CMD_SHIFT;
3231
3232	return 1;
3233}
3234
3235/**
3236 * i40e_tx_enable_csum - Enable Tx checksum offloads
3237 * @skb: send buffer
3238 * @tx_flags: pointer to Tx flags currently set
3239 * @td_cmd: Tx descriptor command bits to set
3240 * @td_offset: Tx descriptor header offsets to set
3241 * @tx_ring: Tx descriptor ring
3242 * @cd_tunneling: ptr to context desc bits
3243 **/
3244static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3245			       u32 *td_cmd, u32 *td_offset,
3246			       struct i40e_ring *tx_ring,
3247			       u32 *cd_tunneling)
3248{
3249	union {
3250		struct iphdr *v4;
3251		struct ipv6hdr *v6;
3252		unsigned char *hdr;
3253	} ip;
3254	union {
3255		struct tcphdr *tcp;
3256		struct udphdr *udp;
3257		unsigned char *hdr;
3258	} l4;
3259	unsigned char *exthdr;
3260	u32 offset, cmd = 0;
3261	__be16 frag_off;
3262	__be16 protocol;
3263	u8 l4_proto = 0;
3264
3265	if (skb->ip_summed != CHECKSUM_PARTIAL)
3266		return 0;
3267
3268	protocol = vlan_get_protocol(skb);
3269
3270	if (eth_p_mpls(protocol)) {
3271		ip.hdr = skb_inner_network_header(skb);
3272		l4.hdr = skb_checksum_start(skb);
3273	} else {
3274		ip.hdr = skb_network_header(skb);
3275		l4.hdr = skb_transport_header(skb);
3276	}
3277
3278	/* Set the tx_flags to indicate the IP protocol type.  This is
3279	 * required so that the header checksum computation below is accurate.
3280	 */
3281	if (ip.v4->version == 4)
3282		*tx_flags |= I40E_TX_FLAGS_IPV4;
3283	else
3284		*tx_flags |= I40E_TX_FLAGS_IPV6;
3285
3286	/* compute outer L2 header size */
3287	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3288
3289	if (skb->encapsulation) {
3290		u32 tunnel = 0;
3291		/* define outer network header type */
3292		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3293			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3294				  I40E_TX_CTX_EXT_IP_IPV4 :
3295				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3296
3297			l4_proto = ip.v4->protocol;
3298		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3299			int ret;
3300
3301			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3302
3303			exthdr = ip.hdr + sizeof(*ip.v6);
3304			l4_proto = ip.v6->nexthdr;
3305			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
3306					       &l4_proto, &frag_off);
3307			if (ret < 0)
3308				return -1;
3309		}
3310
3311		/* define outer transport */
3312		switch (l4_proto) {
3313		case IPPROTO_UDP:
3314			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3315			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3316			break;
3317		case IPPROTO_GRE:
3318			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3319			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3320			break;
3321		case IPPROTO_IPIP:
3322		case IPPROTO_IPV6:
3323			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3324			l4.hdr = skb_inner_network_header(skb);
3325			break;
3326		default:
3327			if (*tx_flags & I40E_TX_FLAGS_TSO)
3328				return -1;
3329
3330			skb_checksum_help(skb);
3331			return 0;
3332		}
3333
3334		/* compute outer L3 header size */
3335		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3336			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3337
3338		/* switch IP header pointer from outer to inner header */
3339		ip.hdr = skb_inner_network_header(skb);
3340
3341		/* compute tunnel header size */
3342		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3343			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3344
3345		/* indicate if we need to offload outer UDP header */
3346		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3347		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3348		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3349			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3350
3351		/* record tunnel offload values */
3352		*cd_tunneling |= tunnel;
3353
3354		/* switch L4 header pointer from outer to inner */
3355		l4.hdr = skb_inner_transport_header(skb);
3356		l4_proto = 0;
3357
3358		/* reset type as we transition from outer to inner headers */
3359		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3360		if (ip.v4->version == 4)
3361			*tx_flags |= I40E_TX_FLAGS_IPV4;
3362		if (ip.v6->version == 6)
3363			*tx_flags |= I40E_TX_FLAGS_IPV6;
3364	}
3365
3366	/* Enable IP checksum offloads */
3367	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3368		l4_proto = ip.v4->protocol;
3369		/* the stack already computes the IP header checksum; the only
3370		 * time we need the hardware to recompute it is in the case of TSO.
3371		 */
3372		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3373		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3374		       I40E_TX_DESC_CMD_IIPT_IPV4;
3375	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3376		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3377
3378		exthdr = ip.hdr + sizeof(*ip.v6);
3379		l4_proto = ip.v6->nexthdr;
3380		if (l4.hdr != exthdr)
3381			ipv6_skip_exthdr(skb, exthdr - skb->data,
3382					 &l4_proto, &frag_off);
3383	}
3384
3385	/* compute inner L3 header size */
3386	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3387
3388	/* Enable L4 checksum offloads */
3389	switch (l4_proto) {
3390	case IPPROTO_TCP:
3391		/* enable checksum offloads */
3392		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3393		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3394		break;
3395	case IPPROTO_SCTP:
3396		/* enable SCTP checksum offload */
3397		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3398		offset |= (sizeof(struct sctphdr) >> 2) <<
3399			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3400		break;
3401	case IPPROTO_UDP:
3402		/* enable UDP checksum offload */
3403		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3404		offset |= (sizeof(struct udphdr) >> 2) <<
3405			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3406		break;
3407	default:
3408		if (*tx_flags & I40E_TX_FLAGS_TSO)
3409			return -1;
3410		skb_checksum_help(skb);
3411		return 0;
3412	}
3413
3414	*td_cmd |= cmd;
3415	*td_offset |= offset;
3416
3417	return 1;
3418}
3419
3420/**
3421 * i40e_create_tx_ctx - Build the Tx context descriptor
3422 * @tx_ring:  ring to create the descriptor on
3423 * @cd_type_cmd_tso_mss: Quad Word 1
3424 * @cd_tunneling: Quad Word 0 - bits 0-31
3425 * @cd_l2tag2: Quad Word 0 - bits 32-63
3426 **/
3427static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3428			       const u64 cd_type_cmd_tso_mss,
3429			       const u32 cd_tunneling, const u32 cd_l2tag2)
3430{
3431	struct i40e_tx_context_desc *context_desc;
3432	int i = tx_ring->next_to_use;
3433
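	/* skip the context descriptor entirely if nothing beyond the bare
	 * context type is set
	 */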
3434	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3435	    !cd_tunneling && !cd_l2tag2)
3436		return;
3437
3438	/* grab the next descriptor */
3439	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3440
3441	i++;
3442	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3443
3444	/* cpu_to_le32 and assign to struct fields */
3445	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3446	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3447	context_desc->rsvd = cpu_to_le16(0);
3448	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3449}
3450
3451/**
3452 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3453 * @tx_ring: the ring to be checked
3454 * @size:    the size buffer we want to assure is available
3455 *
3456 * Returns -EBUSY if a stop is needed, else 0
3457 **/
3458int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3459{
3460	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3461	/* Memory barrier before checking head and tail */
3462	smp_mb();
3463
3464	++tx_ring->tx_stats.tx_stopped;
3465
3466	/* Check again in case another CPU has just made room available. */
3467	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3468		return -EBUSY;
3469
3470	/* A reprieve! - use start_queue because it doesn't call schedule */
3471	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3472	++tx_ring->tx_stats.restart_queue;
3473	return 0;
3474}
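/* Reviewer's note (not part of the original source): this helper is the
 * usual stop-then-recheck pattern.  The queue is stopped first, smp_mb()
 * orders that store against the reread of the free-descriptor count, and if
 * the cleanup path freed descriptors in the meantime the queue is restarted
 * right away instead of waiting for the next Tx completion.
 */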
3475
3476/**
3477 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3478 * @skb:      send buffer
3479 *
3480 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3481 * and so we need to figure out the cases where we need to linearize the skb.
3482 *
3483 * For TSO we need to count the TSO header and segment payload separately.
3484 * As such we need to check cases where we have 7 fragments or more as we
3485 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3486 * the segment payload in the first descriptor, and another 7 for the
3487 * fragments.
3488 **/
3489bool __i40e_chk_linearize(struct sk_buff *skb)
3490{
3491	const skb_frag_t *frag, *stale;
3492	int nr_frags, sum;
3493
3494	/* no need to check if number of frags is less than 7 */
3495	nr_frags = skb_shinfo(skb)->nr_frags;
3496	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3497		return false;
3498
3499	/* We need to walk through the list and validate that each group
3500	 * of 6 fragments totals at least gso_size.
3501	 */
3502	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3503	frag = &skb_shinfo(skb)->frags[0];
3504
3505	/* Initialize size to the negative value of gso_size minus 1.  We
3506	 * use this as the worst case scenario in which the frag ahead
3507	 * of us only provides one byte which is why we are limited to 6
3508	 * descriptors for a single transmit as the header and previous
3509	 * fragment are already consuming 2 descriptors.
3510	 */
3511	sum = 1 - skb_shinfo(skb)->gso_size;
3512
3513	/* Add size of frags 0 through 4 to create our initial sum */
3514	sum += skb_frag_size(frag++);
3515	sum += skb_frag_size(frag++);
3516	sum += skb_frag_size(frag++);
3517	sum += skb_frag_size(frag++);
3518	sum += skb_frag_size(frag++);
3519
3520	/* Walk through fragments adding latest fragment, testing it, and
3521	 * then removing stale fragments from the sum.
3522	 */
3523	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3524		int stale_size = skb_frag_size(stale);
3525
3526		sum += skb_frag_size(frag++);
3527
3528		/* The stale fragment may present us with a smaller
3529		 * descriptor than the actual fragment size. To account
3530		 * for that we need to remove all the data on the front and
3531		 * figure out what the remainder would be in the last
3532		 * descriptor associated with the fragment.
3533		 */
3534		if (stale_size > I40E_MAX_DATA_PER_TXD) {
3535			int align_pad = -(skb_frag_off(stale)) &
3536					(I40E_MAX_READ_REQ_SIZE - 1);
3537
3538			sum -= align_pad;
3539			stale_size -= align_pad;
3540
3541			do {
3542				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3543				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3544			} while (stale_size > I40E_MAX_DATA_PER_TXD);
3545		}
3546
3547		/* if sum is negative we failed to make sufficient progress */
3548		if (sum < 0)
3549			return true;
3550
3551		if (!nr_frags--)
3552			break;
3553
3554		sum -= stale_size;
3555	}
3556
3557	return false;
3558}
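/* Reviewer's note with an illustrative (made-up) case: with gso_size = 2000
 * and seven fragments of 256 bytes each, the first window evaluates to
 * sum = 1 - 2000 + 6 * 256 = -463, which is negative, so the function
 * returns true and the caller linearizes the skb: no run of six fragments
 * can carry a full 2000-byte segment alongside the header descriptor.
 */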
3559
3560/**
3561 * i40e_tx_map - Build the Tx descriptor
3562 * @tx_ring:  ring to send buffer on
3563 * @skb:      send buffer
3564 * @first:    first buffer info buffer to use
3565 * @tx_flags: collected send information
3566 * @hdr_len:  size of the packet header
3567 * @td_cmd:   the command field in the descriptor
3568 * @td_offset: offset for checksum or crc
3569 *
3570 * Returns 0 on success, -1 on failure to DMA
3571 **/
3572static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3573			      struct i40e_tx_buffer *first, u32 tx_flags,
3574			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
3575{
3576	unsigned int data_len = skb->data_len;
3577	unsigned int size = skb_headlen(skb);
3578	skb_frag_t *frag;
3579	struct i40e_tx_buffer *tx_bi;
3580	struct i40e_tx_desc *tx_desc;
3581	u16 i = tx_ring->next_to_use;
3582	u32 td_tag = 0;
3583	dma_addr_t dma;
3584	u16 desc_count = 1;
3585
3586	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3587		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3588		td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
 
3589	}
3590
3591	first->tx_flags = tx_flags;
3592
3593	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3594
3595	tx_desc = I40E_TX_DESC(tx_ring, i);
3596	tx_bi = first;
3597
3598	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3599		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3600
3601		if (dma_mapping_error(tx_ring->dev, dma))
3602			goto dma_error;
3603
3604		/* record length, and DMA address */
3605		dma_unmap_len_set(tx_bi, len, size);
3606		dma_unmap_addr_set(tx_bi, dma, dma);
3607
3608		/* align size to end of page */
3609		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3610		tx_desc->buffer_addr = cpu_to_le64(dma);
3611
3612		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3613			tx_desc->cmd_type_offset_bsz =
3614				build_ctob(td_cmd, td_offset,
3615					   max_data, td_tag);
3616
3617			tx_desc++;
3618			i++;
3619			desc_count++;
3620
3621			if (i == tx_ring->count) {
3622				tx_desc = I40E_TX_DESC(tx_ring, 0);
3623				i = 0;
3624			}
3625
3626			dma += max_data;
3627			size -= max_data;
3628
3629			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3630			tx_desc->buffer_addr = cpu_to_le64(dma);
3631		}
3632
3633		if (likely(!data_len))
3634			break;
3635
3636		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3637							  size, td_tag);
3638
3639		tx_desc++;
3640		i++;
3641		desc_count++;
3642
3643		if (i == tx_ring->count) {
3644			tx_desc = I40E_TX_DESC(tx_ring, 0);
3645			i = 0;
3646		}
3647
3648		size = skb_frag_size(frag);
3649		data_len -= size;
3650
3651		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3652				       DMA_TO_DEVICE);
3653
3654		tx_bi = &tx_ring->tx_bi[i];
3655	}
3656
3657	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3658
3659	i++;
3660	if (i == tx_ring->count)
3661		i = 0;
3662
3663	tx_ring->next_to_use = i;
3664
3665	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3666
3667	/* write last descriptor with EOP bit */
3668	td_cmd |= I40E_TX_DESC_CMD_EOP;
3669
3670	/* We OR these values together to check both against 4 (WB_STRIDE)
3671	 * below. This is safe since we don't re-use desc_count afterwards.
3672	 */
3673	desc_count |= ++tx_ring->packet_stride;
3674
3675	if (desc_count >= WB_STRIDE) {
3676		/* write last descriptor with RS bit set */
3677		td_cmd |= I40E_TX_DESC_CMD_RS;
3678		tx_ring->packet_stride = 0;
3679	}
3680
3681	tx_desc->cmd_type_offset_bsz =
3682			build_ctob(td_cmd, td_offset, size, td_tag);
 
3683
3684	skb_tx_timestamp(skb);
 
3685
3686	/* Force memory writes to complete before letting h/w know there
3687	 * are new descriptors to fetch.
3688	 *
3689	 * We also use this memory barrier to make certain all of the
3690	 * status bits have been updated before next_to_watch is written.
3691	 */
3692	wmb();
3693
3694	/* set next_to_watch value indicating a packet is present */
3695	first->next_to_watch = tx_desc;
3696
3697	/* notify HW of packet */
3698	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
3699		writel(i, tx_ring->tail);
3700	}
3701
3702	return 0;
3703
3704dma_error:
3705	dev_info(tx_ring->dev, "TX DMA map failed\n");
3706
3707	/* clear dma mappings for failed tx_bi map */
3708	for (;;) {
3709		tx_bi = &tx_ring->tx_bi[i];
3710		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3711		if (tx_bi == first)
3712			break;
3713		if (i == 0)
3714			i = tx_ring->count;
3715		i--;
3716	}
3717
3718	tx_ring->next_to_use = i;
3719
3720	return -1;
3721}
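/* Reviewer's note (not part of the original source): the mapping loop above
 * splits any buffer larger than I40E_MAX_DATA_PER_TXD across several data
 * descriptors, sizing each chunk so it ends on an I40E_MAX_READ_REQ_SIZE
 * boundary (the "max_data += -dma & ..." adjustment).  Only the final
 * descriptor of the packet carries EOP, and RS is requested only when
 * desc_count ORed with the ring's packet_stride reaches WB_STRIDE, which
 * throttles descriptor write-backs.
 */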
3722
3723static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
3724				  const struct sk_buff *skb,
3725				  u16 num_tx_queues)
3726{
3727	u32 jhash_initval_salt = 0xd631614b;
3728	u32 hash;
3729
3730	if (skb->sk && skb->sk->sk_hash)
3731		hash = skb->sk->sk_hash;
3732	else
3733		hash = (__force u16)skb->protocol ^ skb->hash;
3734
3735	hash = jhash_1word(hash, jhash_initval_salt);
 
 
3736
3737	return (u16)(((u64)hash * num_tx_queues) >> 32);
3738}
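/* Reviewer's note (not part of the original source): the final
 * multiply-and-shift maps the 32-bit jhash value onto [0, num_tx_queues)
 * without a modulo: treating hash as a fraction of 2^32,
 * (hash * num_tx_queues) >> 32 scales it uniformly into the queue range.
 */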
3739
3740u16 i40e_lan_select_queue(struct net_device *netdev,
3741			  struct sk_buff *skb,
3742			  struct net_device __always_unused *sb_dev)
3743{
3744	struct i40e_netdev_priv *np = netdev_priv(netdev);
3745	struct i40e_vsi *vsi = np->vsi;
3746	struct i40e_hw *hw;
3747	u16 qoffset;
3748	u16 qcount;
3749	u8 tclass;
3750	u16 hash;
3751	u8 prio;
3752
3753	/* is DCB enabled at all? */
3754	if (vsi->tc_config.numtc == 1 ||
3755	    i40e_is_tc_mqprio_enabled(vsi->back))
3756		return netdev_pick_tx(netdev, skb, sb_dev);
3757
3758	prio = skb->priority;
3759	hw = &vsi->back->hw;
3760	tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
3761	/* sanity check */
3762	if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
3763		tclass = 0;
3764
3765	/* select a queue assigned for the given TC */
3766	qcount = vsi->tc_config.tc_info[tclass].qcount;
3767	hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
3768
3769	qoffset = vsi->tc_config.tc_info[tclass].qoffset;
3770	return qoffset + hash;
3771}
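/* Reviewer's note (not part of the original source): when DCB is active the
 * queue is chosen in two steps: skb->priority indexes the DCBX priority
 * table to pick a traffic class, then the hash above selects one of that
 * class's qcount queues starting at qoffset.
 */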
3772
3773/**
3774 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3775 * @xdpf: data to transmit
3776 * @xdp_ring: XDP Tx ring
3777 **/
3778static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3779			      struct i40e_ring *xdp_ring)
3780{
3781	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
3782	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
3783	u16 i = 0, index = xdp_ring->next_to_use;
3784	struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
3785	struct i40e_tx_buffer *tx_bi = tx_head;
3786	struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
3787	void *data = xdpf->data;
3788	u32 size = xdpf->len;
3789
3790	if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
3791		xdp_ring->tx_stats.tx_busy++;
3792		return I40E_XDP_CONSUMED;
3793	}
3794
3795	tx_head->bytecount = xdp_get_frame_len(xdpf);
3796	tx_head->gso_segs = 1;
3797	tx_head->xdpf = xdpf;
3798
3799	for (;;) {
3800		dma_addr_t dma;
3801
3802		dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3803		if (dma_mapping_error(xdp_ring->dev, dma))
3804			goto unmap;
3805
3806		/* record length, and DMA address */
3807		dma_unmap_len_set(tx_bi, len, size);
3808		dma_unmap_addr_set(tx_bi, dma, dma);
3809
3810		tx_desc->buffer_addr = cpu_to_le64(dma);
3811		tx_desc->cmd_type_offset_bsz =
3812			build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
3813
3814		if (++index == xdp_ring->count)
3815			index = 0;
3816
3817		if (i == nr_frags)
3818			break;
3819
3820		tx_bi = &xdp_ring->tx_bi[index];
3821		tx_desc = I40E_TX_DESC(xdp_ring, index);
3822
3823		data = skb_frag_address(&sinfo->frags[i]);
3824		size = skb_frag_size(&sinfo->frags[i]);
3825		i++;
3826	}
3827
3828	tx_desc->cmd_type_offset_bsz |=
3829		cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
3830
3831	/* Make certain all of the status bits have been updated
3832	 * before next_to_watch is written.
3833	 */
3834	smp_wmb();
3835
3836	xdp_ring->xdp_tx_active++;
3837
3838	tx_head->next_to_watch = tx_desc;
3839	xdp_ring->next_to_use = index;
3840
3841	return I40E_XDP_TX;
3842
3843unmap:
3844	for (;;) {
3845		tx_bi = &xdp_ring->tx_bi[index];
3846		if (dma_unmap_len(tx_bi, len))
3847			dma_unmap_page(xdp_ring->dev,
3848				       dma_unmap_addr(tx_bi, dma),
3849				       dma_unmap_len(tx_bi, len),
3850				       DMA_TO_DEVICE);
3851		dma_unmap_len_set(tx_bi, len, 0);
3852		if (tx_bi == tx_head)
3853			break;
3854
3855		if (!index)
3856			index += xdp_ring->count;
3857		index--;
3858	}
3859
3860	return I40E_XDP_CONSUMED;
3861}
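/* Reviewer's note (not part of the original source): only the last
 * descriptor of the XDP frame gets I40E_TXD_CMD (EOP | RS) ORed in, and the
 * tail register is not written here; the caller bumps it via
 * i40e_xdp_ring_update_tail() once a batch of frames has been queued.
 */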
3862
3863/**
3864 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3865 * @skb:     send buffer
3866 * @tx_ring: ring to send buffer on
3867 *
3868 * Returns NETDEV_TX_OK if sent, else an error code
3869 **/
3870static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3871					struct i40e_ring *tx_ring)
3872{
3873	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3874	u32 cd_tunneling = 0, cd_l2tag2 = 0;
3875	struct i40e_tx_buffer *first;
3876	u32 td_offset = 0;
3877	u32 tx_flags = 0;
 
3878	u32 td_cmd = 0;
3879	u8 hdr_len = 0;
3880	int tso, count;
3881	int tsyn;
3882
3883	/* prefetch the data, we'll need it later */
3884	prefetch(skb->data);
3885
3886	i40e_trace(xmit_frame_ring, skb, tx_ring);
3887
3888	count = i40e_xmit_descriptor_count(skb);
3889	if (i40e_chk_linearize(skb, count)) {
3890		if (__skb_linearize(skb)) {
3891			dev_kfree_skb_any(skb);
3892			return NETDEV_TX_OK;
3893		}
3894		count = i40e_txd_use_count(skb->len);
3895		tx_ring->tx_stats.tx_linearize++;
3896	}
3897
3898	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3899	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3900	 *       + 4 desc gap to avoid the cache line where head is,
3901	 *       + 1 desc for context descriptor,
3902	 * otherwise try next time
3903	 */
3904	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3905		tx_ring->tx_stats.tx_busy++;
3906		return NETDEV_TX_BUSY;
3907	}
3908
3909	/* record the location of the first descriptor for this packet */
3910	first = &tx_ring->tx_bi[tx_ring->next_to_use];
3911	first->skb = skb;
3912	first->bytecount = skb->len;
3913	first->gso_segs = 1;
3914
3915	/* prepare the xmit flags */
3916	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3917		goto out_drop;
 
 
3918
3919	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
 
3920
3921	if (tso < 0)
3922		goto out_drop;
3923	else if (tso)
3924		tx_flags |= I40E_TX_FLAGS_TSO;
3925
3926	/* Always offload the checksum, since it's in the data descriptor */
3927	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3928				  tx_ring, &cd_tunneling);
3929	if (tso < 0)
3930		goto out_drop;
3931
3932	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3933
3934	if (tsyn)
3935		tx_flags |= I40E_TX_FLAGS_TSYN;
3936
3937	/* always enable CRC insertion offload */
3938	td_cmd |= I40E_TX_DESC_CMD_ICRC;
3939
3940	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3941			   cd_tunneling, cd_l2tag2);
3942
3943	/* Add Flow Director ATR if it's enabled.
3944	 *
3945	 * NOTE: this must always be directly before the data descriptor.
3946	 */
3947	i40e_atr(tx_ring, skb, tx_flags);
3948
3949	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3950			td_cmd, td_offset))
3951		goto cleanup_tx_tstamp;
 
3952
3953	return NETDEV_TX_OK;
3954
3955out_drop:
3956	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3957	dev_kfree_skb_any(first->skb);
3958	first->skb = NULL;
3959cleanup_tx_tstamp:
3960	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3961		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3962
3963		dev_kfree_skb_any(pf->ptp_tx_skb);
3964		pf->ptp_tx_skb = NULL;
3965		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3966	}
3967
3968	return NETDEV_TX_OK;
3969}
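/* Reviewer's note (not part of the original source): the drop paths above
 * still return NETDEV_TX_OK so the stack does not retry the skb; only the
 * "ring full" case returns NETDEV_TX_BUSY, and by then the queue has
 * already been stopped by i40e_maybe_stop_tx().
 */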
3970
3971/**
3972 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3973 * @skb:    send buffer
3974 * @netdev: network interface device structure
3975 *
3976 * Returns NETDEV_TX_OK if sent, else an error code
3977 **/
3978netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3979{
3980	struct i40e_netdev_priv *np = netdev_priv(netdev);
3981	struct i40e_vsi *vsi = np->vsi;
3982	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3983
3984	/* hardware can't handle really short frames, hardware padding works
3985	 * beyond this point
3986	 */
3987	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3988		return NETDEV_TX_OK;
3989
3990	return i40e_xmit_frame_ring(skb, tx_ring);
3991}
3992
3993/**
3994 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3995 * @dev: netdev
3996 * @n: number of frames
3997 * @frames: array of XDP buffer pointers
3998 * @flags: XDP extra info
3999 *
4000 * Returns number of frames successfully sent. Failed frames
4001 * will be freed by the XDP core.
4002 *
4003 * For error cases, a negative errno code is returned and no frames
4004 * are transmitted (caller must handle freeing frames).
4005 **/
4006int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
4007		  u32 flags)
4008{
4009	struct i40e_netdev_priv *np = netdev_priv(dev);
4010	unsigned int queue_index = smp_processor_id();
4011	struct i40e_vsi *vsi = np->vsi;
4012	struct i40e_pf *pf = vsi->back;
4013	struct i40e_ring *xdp_ring;
4014	int nxmit = 0;
4015	int i;
4016
4017	if (test_bit(__I40E_VSI_DOWN, vsi->state))
4018		return -ENETDOWN;
4019
4020	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
4021	    test_bit(__I40E_CONFIG_BUSY, pf->state))
4022		return -ENXIO;
4023
4024	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
4025		return -EINVAL;
4026
4027	xdp_ring = vsi->xdp_rings[queue_index];
4028
4029	for (i = 0; i < n; i++) {
4030		struct xdp_frame *xdpf = frames[i];
4031		int err;
4032
4033		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
4034		if (err != I40E_XDP_TX)
4035			break;
4036		nxmit++;
4037	}
4038
4039	if (unlikely(flags & XDP_XMIT_FLUSH))
4040		i40e_xdp_ring_update_tail(xdp_ring);
4041
4042	return nxmit;
4043}
v3.15
   1/*******************************************************************************
   2 *
   3 * Intel Ethernet Controller XL710 Family Linux Driver
   4 * Copyright(c) 2013 - 2014 Intel Corporation.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 *
  18 * The full GNU General Public License is included in this distribution in
  19 * the file called "COPYING".
  20 *
  21 * Contact Information:
  22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  24 *
  25 ******************************************************************************/
  26
  27#include "i40e.h"
  28#include "i40e_prototype.h"
  29
  30static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
  31				u32 td_tag)
  32{
  33	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
  34			   ((u64)td_cmd  << I40E_TXD_QW1_CMD_SHIFT) |
  35			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
  36			   ((u64)size  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
  37			   ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
  38}
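/* Reviewer's note (not part of the original source): build_ctob() packs
 * quad word 1 of a data descriptor: DTYPE_DATA in the low bits, then the
 * command, offset, buffer-size and L2 tag fields at their respective
 * shifts, so a single cpu_to_le64() write programs the whole word.
 */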
  39
  40#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
  41/**
  42 * i40e_program_fdir_filter - Program a Flow Director filter
  43 * @fdir_data: Packet data that will be filter parameters
  44 * @raw_packet: the pre-allocated packet buffer for FDir
  45 * @pf: The pf pointer
  46 * @add: True for add/update, False for remove
  47 **/
  48int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
  49			     struct i40e_pf *pf, bool add)
 
  50{
  51	struct i40e_filter_program_desc *fdir_desc;
  52	struct i40e_tx_buffer *tx_buf;
  53	struct i40e_tx_desc *tx_desc;
  54	struct i40e_ring *tx_ring;
  55	unsigned int fpt, dcc;
  56	struct i40e_vsi *vsi;
  57	struct device *dev;
  58	dma_addr_t dma;
  59	u32 td_cmd = 0;
  60	u16 i;
  61
  62	/* find existing FDIR VSI */
  63	vsi = NULL;
  64	for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
  65		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
  66			vsi = pf->vsi[i];
  67	if (!vsi)
  68		return -ENOENT;
  69
  70	tx_ring = vsi->tx_rings[0];
  71	dev = tx_ring->dev;
  72
  73	dma = dma_map_single(dev, raw_packet,
  74			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
  75	if (dma_mapping_error(dev, dma))
  76		goto dma_fail;
  77
  78	/* grab the next descriptor */
  79	i = tx_ring->next_to_use;
  80	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
  81
  82	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
  83
  84	fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
  85	      I40E_TXD_FLTR_QW0_QINDEX_MASK;
  86
  87	fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
  88	       I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
  89
  90	fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
  91	       I40E_TXD_FLTR_QW0_PCTYPE_MASK;
  92
  93	/* Use LAN VSI Id if not programmed by user */
  94	if (fdir_data->dest_vsi == 0)
  95		fpt |= (pf->vsi[pf->lan_vsi]->id) <<
  96		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
  97	else
  98		fpt |= ((u32)fdir_data->dest_vsi <<
  99			I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
 100		       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 101
 102	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
 103
 104	dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 105
 106	if (add)
 107		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
 108		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 109	else
 110		dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
 111		       I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 112
 113	dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
 114	       I40E_TXD_FLTR_QW1_DEST_MASK;
 115
 116	dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
 117	       I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
 118
 119	if (fdir_data->cnt_index != 0) {
 120		dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
 121		dcc |= ((u32)fdir_data->cnt_index <<
 122			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 123		       I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 124	}
 125
 126	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
 127	fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 128
 129	/* Now program a dummy descriptor */
 130	i = tx_ring->next_to_use;
 131	tx_desc = I40E_TX_DESC(tx_ring, i);
 132	tx_buf = &tx_ring->tx_bi[i];
 133
 134	tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
 
 135
 136	/* record length, and DMA address */
 137	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
 138	dma_unmap_addr_set(tx_buf, dma, dma);
 139
 140	tx_desc->buffer_addr = cpu_to_le64(dma);
 141	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 142
 143	tx_desc->cmd_type_offset_bsz =
 144		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 145
 146	/* set the timestamp */
 147	tx_buf->time_stamp = jiffies;
 148
 149	/* Force memory writes to complete before letting h/w
 150	 * know there are new descriptors to fetch.  (Only
 151	 * applicable for weak-ordered memory model archs,
 152	 * such as IA-64).
 153	 */
 154	wmb();
 155
 156	/* Mark the data descriptor to be watched */
 157	tx_buf->next_to_watch = tx_desc;
 158
 159	writel(tx_ring->next_to_use, tx_ring->tail);
 160	return 0;
 161
 162dma_fail:
 163	return -1;
 164}
 165
 166#define IP_HEADER_OFFSET 14
 167#define I40E_UDPIP_DUMMY_PACKET_LEN 42
 168/**
 169 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 170 * @vsi: pointer to the targeted VSI
 171 * @fd_data: the flow director data required for the FDir descriptor
 172 * @raw_packet: the pre-allocated packet buffer for FDir
 173 * @add: true adds a filter, false removes it
 
 174 *
 175 * Returns 0 if the filters were successfully added or removed
 176 **/
 177static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
 178				   struct i40e_fdir_filter *fd_data,
 179				   u8 *raw_packet, bool add)
 
 180{
 181	struct i40e_pf *pf = vsi->back;
 182	struct udphdr *udp;
 183	struct iphdr *ip;
 184	bool err = false;
 185	int ret;
 186	int i;
 187	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
 188		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
 189		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 190
 191	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
 192
 193	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
 194	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
 195	      + sizeof(struct iphdr));
 196
 197	ip->daddr = fd_data->dst_ip[0];
 198	udp->dest = fd_data->dst_port;
 199	ip->saddr = fd_data->src_ip[0];
 200	udp->source = fd_data->src_port;
 201
 202	for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
 203	     i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
 204		fd_data->pctype = i;
 205		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 206
 207		if (ret) {
 208			dev_info(&pf->pdev->dev,
 209				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
 210				 fd_data->pctype, ret);
 211			err = true;
 212		} else {
 213			dev_info(&pf->pdev->dev,
 214				 "Filter OK for PCTYPE %d (ret = %d)\n",
 215				 fd_data->pctype, ret);
 216		}
 217	}
 218
 219	return err ? -EOPNOTSUPP : 0;
 220}
 221
 222#define I40E_TCPIP_DUMMY_PACKET_LEN 54
 
 223/**
 224 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 225 * @vsi: pointer to the targeted VSI
 226 * @fd_data: the flow director data required for the FDir descriptor
 227 * @raw_packet: the pre-allocated packet buffer for FDir
 228 * @add: true adds a filter, false removes it
 
 229 *
 230 * Returns 0 if the filters were successfully added or removed
 231 **/
 232static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
 233				   struct i40e_fdir_filter *fd_data,
 234				   u8 *raw_packet, bool add)
 
 235{
 236	struct i40e_pf *pf = vsi->back;
 237	struct tcphdr *tcp;
 238	struct iphdr *ip;
 239	bool err = false;
 240	int ret;
 241	/* Dummy packet */
 242	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
 243		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
 244		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
 245		0x0, 0x72, 0, 0, 0, 0};
 246
 247	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
 248
 249	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
 250	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
 251	      + sizeof(struct iphdr));
 252
 253	ip->daddr = fd_data->dst_ip[0];
 254	tcp->dest = fd_data->dst_port;
 255	ip->saddr = fd_data->src_ip[0];
 256	tcp->source = fd_data->src_port;
 257
 258	if (add) {
 259		if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
 260			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
 261			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 262		}
 263	}
 264
 265	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
 266	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 267
 268	if (ret) {
 269		dev_info(&pf->pdev->dev,
 270			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
 271			 fd_data->pctype, ret);
 272		err = true;
 273	} else {
 274		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
 275			 fd_data->pctype, ret);
 276	}
 277
 278	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
 
 279
 280	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 281	if (ret) {
 282		dev_info(&pf->pdev->dev,
 283			 "Filter command send failed for PCTYPE %d (ret = %d)\n",
 284			 fd_data->pctype, ret);
 285		err = true;
 286	} else {
 287		dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
 288			  fd_data->pctype, ret);
 289	}
 290
 291	return err ? -EOPNOTSUPP : 0;
 292}
 293
 
 
 294/**
 295 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 296 * a specific flow spec
 297 * @vsi: pointer to the targeted VSI
 298 * @fd_data: the flow director data required for the FDir descriptor
 299 * @raw_packet: the pre-allocated packet buffer for FDir
 300 * @add: true adds a filter, false removes it
 
 301 *
 302 * Always returns -EOPNOTSUPP
 303 **/
 304static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
 305				    struct i40e_fdir_filter *fd_data,
 306				    u8 *raw_packet, bool add)
 
 307{
 308	return -EOPNOTSUPP;
 309}
 310
 311#define I40E_IP_DUMMY_PACKET_LEN 34
 
 312/**
 313 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 314 * a specific flow spec
 315 * @vsi: pointer to the targeted VSI
 316 * @fd_data: the flow director data required for the FDir descriptor
 317 * @raw_packet: the pre-allocated packet buffer for FDir
 318 * @add: true adds a filter, false removes it
 
 319 *
 320 * Returns 0 if the filters were successfully added or removed
 321 **/
 322static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
 323				  struct i40e_fdir_filter *fd_data,
 324				  u8 *raw_packet, bool add)
 
 325{
 326	struct i40e_pf *pf = vsi->back;
 327	struct iphdr *ip;
 328	bool err = false;
 
 
 329	int ret;
 330	int i;
 331	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
 332		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
 333		0, 0, 0, 0};
 334
 335	memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
 336	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
 337
 338	ip->saddr = fd_data->src_ip[0];
 339	ip->daddr = fd_data->dst_ip[0];
 340	ip->protocol = 0;
 341
 342	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 343	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4;	i++) {
 344		fd_data->pctype = i;
 345		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 346
 347		if (ret) {
 348			dev_info(&pf->pdev->dev,
 349				 "Filter command send failed for PCTYPE %d (ret = %d)\n",
 350				 fd_data->pctype, ret);
 351			err = true;
 352		} else {
 353			dev_info(&pf->pdev->dev,
 354				 "Filter OK for PCTYPE %d (ret = %d)\n",
 355				 fd_data->pctype, ret);
 356		}
 357	}
 358
 359	return err ? -EOPNOTSUPP : 0;
 360}
 361
 362/**
 363 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 364 * @vsi: pointer to the targeted VSI
 365 * @input: the flow director filter data to add or delete
 366 * @add: true adds a filter, false removes it
 367 *
 368 **/
 369int i40e_add_del_fdir(struct i40e_vsi *vsi,
 370		      struct i40e_fdir_filter *input, bool add)
 371{
 
 372	struct i40e_pf *pf = vsi->back;
 373	u8 *raw_packet;
 374	int ret;
 375
 376	/* Populate the Flow Director that we have at the moment
 377	 * and allocate the raw packet buffer for the calling functions
 378	 */
 379	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
 380	if (!raw_packet)
 381		return -ENOMEM;
 382
 383	switch (input->flow_type & ~FLOW_EXT) {
 384	case TCP_V4_FLOW:
 385		ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
 386					      add);
 387		break;
 388	case UDP_V4_FLOW:
 389		ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
 390					      add);
 391		break;
 392	case SCTP_V4_FLOW:
 393		ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
 394					       add);
 395		break;
 396	case IPV4_FLOW:
 397		ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
 398					     add);
 399		break;
 400	case IP_USER_FLOW:
 401		switch (input->ip4_proto) {
 402		case IPPROTO_TCP:
 403			ret = i40e_add_del_fdir_tcpv4(vsi, input,
 404						      raw_packet, add);
 405			break;
 406		case IPPROTO_UDP:
 407			ret = i40e_add_del_fdir_udpv4(vsi, input,
 408						      raw_packet, add);
 409			break;
 410		case IPPROTO_SCTP:
 411			ret = i40e_add_del_fdir_sctpv4(vsi, input,
 412						       raw_packet, add);
 
 
 413			break;
 414		default:
 415			ret = i40e_add_del_fdir_ipv4(vsi, input,
 416						     raw_packet, add);
 417			break;
 418		}
 419		break;
 420	default:
 421		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
 422			 input->flow_type);
 423		ret = -EINVAL;
 424	}
 425
 426	kfree(raw_packet);
 427	return ret;
 428}
 429
 430/**
 431 * i40e_fd_handle_status - check the Programming Status for FD
 432 * @rx_ring: the Rx ring for this descriptor
 433 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 
 434 * @prog_id: the id originally used for programming
 435 *
 436 * This is used to verify if the FD programming or invalidation
 437 * requested by SW to the HW is successful or not and take actions accordingly.
 438 **/
 439static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
 440				  union i40e_rx_desc *rx_desc, u8 prog_id)
 441{
 442	struct i40e_pf *pf = rx_ring->vsi->back;
 443	struct pci_dev *pdev = pf->pdev;
 
 444	u32 fcnt_prog, fcnt_avail;
 445	u32 error;
 446	u64 qw;
 447
 448	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 449	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
 450		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 451
 452	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 453		dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
 454			 rx_desc->wb.qword0.hi_dword.fd_id);
 455
 456		/* filter programming failed most likely due to table full */
 457		fcnt_prog = i40e_get_current_fd_count(pf);
 458		fcnt_avail = pf->hw.fdir_shared_filter_count +
 459						       pf->fdir_pf_filter_count;
 460
 461		/* If ATR is running fcnt_prog can quickly change,
 462		 * if we are very close to full, it makes sense to disable
 463		 * FD ATR/SB and then re-enable it when there is room.
 464		 */
 465		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
 466			/* Turn off ATR first */
 467			if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
 468				pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 469				dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
 470				pf->auto_disable_flags |=
 471						       I40E_FLAG_FD_ATR_ENABLED;
 472				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 473			} else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
 474				pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
 475				dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
 476				pf->auto_disable_flags |=
 477							I40E_FLAG_FD_SB_ENABLED;
 478				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 479			}
 480		} else {
 481			dev_info(&pdev->dev, "FD filter programming error\n");
 482		}
 483	} else if (error ==
 484			  (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 485		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 486			dev_info(&pdev->dev, "ntuple filter loc = %d, could not be removed\n",
 487				 rx_desc->wb.qword0.hi_dword.fd_id);
 488	}
 489}
 490
 491/**
 492 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 493 * @ring:      the ring that owns the buffer
 494 * @tx_buffer: the buffer to free
 495 **/
 496static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 497					    struct i40e_tx_buffer *tx_buffer)
 498{
 499	if (tx_buffer->skb) {
 500		dev_kfree_skb_any(tx_buffer->skb);
 501		if (dma_unmap_len(tx_buffer, len))
 502			dma_unmap_single(ring->dev,
 503					 dma_unmap_addr(tx_buffer, dma),
 504					 dma_unmap_len(tx_buffer, len),
 505					 DMA_TO_DEVICE);
 506	} else if (dma_unmap_len(tx_buffer, len)) {
 507		dma_unmap_page(ring->dev,
 508			       dma_unmap_addr(tx_buffer, dma),
 509			       dma_unmap_len(tx_buffer, len),
 510			       DMA_TO_DEVICE);
 511	}
 
 512	tx_buffer->next_to_watch = NULL;
 513	tx_buffer->skb = NULL;
 514	dma_unmap_len_set(tx_buffer, len, 0);
 515	/* tx_buffer must be completely set up in the transmit path */
 516}
 517
 518/**
 519 * i40e_clean_tx_ring - Free any empty Tx buffers
 520 * @tx_ring: ring to be cleaned
 521 **/
 522void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 523{
 524	unsigned long bi_size;
 525	u16 i;
 526
 527	/* ring already cleared, nothing to do */
 528	if (!tx_ring->tx_bi)
 529		return;
 530
 531	/* Free all the Tx ring sk_buffs */
 532	for (i = 0; i < tx_ring->count; i++)
 533		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
 
 
 534
 535	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 536	memset(tx_ring->tx_bi, 0, bi_size);
 537
 538	/* Zero out the descriptor ring */
 539	memset(tx_ring->desc, 0, tx_ring->size);
 540
 541	tx_ring->next_to_use = 0;
 542	tx_ring->next_to_clean = 0;
 543
 544	if (!tx_ring->netdev)
 545		return;
 546
 547	/* cleanup Tx queue statistics */
 548	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
 549						  tx_ring->queue_index));
 550}
 551
 552/**
 553 * i40e_free_tx_resources - Free Tx resources per queue
 554 * @tx_ring: Tx descriptor ring for a specific queue
 555 *
 556 * Free all transmit software resources
 557 **/
 558void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 559{
 560	i40e_clean_tx_ring(tx_ring);
 561	kfree(tx_ring->tx_bi);
 562	tx_ring->tx_bi = NULL;
 563
 564	if (tx_ring->desc) {
 565		dma_free_coherent(tx_ring->dev, tx_ring->size,
 566				  tx_ring->desc, tx_ring->dma);
 567		tx_ring->desc = NULL;
 568	}
 569}
 570
 571/**
 572 * i40e_get_tx_pending - how many tx descriptors not processed
 573 * @ring: the ring of descriptors
 
 574 *
 575 * Since there is no access to the ring head register
 576 * in XL710, we need to use our local copies
 577 **/
 578static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 579{
 580	u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
 581			? ring->next_to_use
 582			: ring->next_to_use + ring->count);
 583	return ntu - ring->next_to_clean;
 584}
 585
 586/**
 587 * i40e_check_tx_hang - Is there a hang in the Tx queue
 588 * @tx_ring: the ring of descriptors
 589 **/
 590static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 591{
 592	u32 tx_pending = i40e_get_tx_pending(tx_ring);
 593	bool ret = false;
 594
 595	clear_check_for_tx_hang(tx_ring);
 596
 597	/* Check for a hung queue, but be thorough. This verifies
 598	 * that a transmit has been completed since the previous
 599	 * check AND there is at least one packet pending. The
 600	 * ARMED bit is set to indicate a potential hang. The
 601	 * bit is cleared if a pause frame is received to remove
 602	 * false hang detection due to PFC or 802.3x frames. By
 603	 * requiring this to fail twice we avoid races with
 604	 * PFC clearing the ARMED bit and conditions where we
 605	 * run the check_tx_hang logic with a transmit completion
 606	 * pending but without time to complete it yet.
 607	 */
 608	if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
 609	    tx_pending) {
 610		/* make sure it is true for two checks in a row */
 611		ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
 612				       &tx_ring->state);
 613	} else {
 614		/* update completed stats and disarm the hang check */
 615		tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
 616		clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
 617	}
 618
 619	return ret;
 620}
 621
 622/**
 623 * i40e_get_head - Retrieve head from head writeback
 624 * @tx_ring:  tx ring to fetch head of
 625 *
 626 * Returns value of Tx ring head based on value stored
 627 * in head write-back location
 628 **/
 629static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
 630{
 631	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
 632
 633	return le32_to_cpu(*(volatile __le32 *)head);
 634}
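/* Reviewer's note (not part of the original source): the head write-back
 * slot is the extra u32 allocated just past the last descriptor (see the
 * tx_ring->size += sizeof(u32) in i40e_setup_tx_descriptors()); hardware
 * updates it with the index of the last completed descriptor, which is why
 * the read above goes through a volatile pointer.
 */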
 635
 636/**
 637 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 638 * @tx_ring:  tx ring to clean
 639 * @budget:   how many cleans we're allowed
 
 
 640 *
 641 * Returns true if there's any budget left (i.e. the clean is finished)
 642 **/
 643static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 
 
 644{
 645	u16 i = tx_ring->next_to_clean;
 646	struct i40e_tx_buffer *tx_buf;
 647	struct i40e_tx_desc *tx_head;
 648	struct i40e_tx_desc *tx_desc;
 649	unsigned int total_packets = 0;
 650	unsigned int total_bytes = 0;
 651
 652	tx_buf = &tx_ring->tx_bi[i];
 653	tx_desc = I40E_TX_DESC(tx_ring, i);
 654	i -= tx_ring->count;
 655
 656	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
 657
 658	do {
 659		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 660
 661		/* if next_to_watch is not set then there is no work pending */
 662		if (!eop_desc)
 663			break;
 664
 665		/* prevent any other reads prior to eop_desc */
 666		read_barrier_depends();
 667
 
 668		/* we have caught up to head, no work left to do */
 669		if (tx_head == tx_desc)
 670			break;
 671
 672		/* clear next_to_watch to prevent false hangs */
 673		tx_buf->next_to_watch = NULL;
 674
 675		/* update the statistics for this packet */
 676		total_bytes += tx_buf->bytecount;
 677		total_packets += tx_buf->gso_segs;
 678
 679		/* free the skb */
 680		dev_kfree_skb_any(tx_buf->skb);
 681
 682		/* unmap skb header data */
 683		dma_unmap_single(tx_ring->dev,
 684				 dma_unmap_addr(tx_buf, dma),
 685				 dma_unmap_len(tx_buf, len),
 686				 DMA_TO_DEVICE);
 687
 688		/* clear tx_buffer data */
 689		tx_buf->skb = NULL;
 690		dma_unmap_len_set(tx_buf, len, 0);
 691
 692		/* unmap remaining buffers */
 693		while (tx_desc != eop_desc) {
 
 
 694
 695			tx_buf++;
 696			tx_desc++;
 697			i++;
 698			if (unlikely(!i)) {
 699				i -= tx_ring->count;
 700				tx_buf = tx_ring->tx_bi;
 701				tx_desc = I40E_TX_DESC(tx_ring, 0);
 702			}
 703
 704			/* unmap any remaining paged data */
 705			if (dma_unmap_len(tx_buf, len)) {
 706				dma_unmap_page(tx_ring->dev,
 707					       dma_unmap_addr(tx_buf, dma),
 708					       dma_unmap_len(tx_buf, len),
 709					       DMA_TO_DEVICE);
 710				dma_unmap_len_set(tx_buf, len, 0);
 711			}
 712		}
 713
 714		/* move us one more past the eop_desc for start of next pkt */
 715		tx_buf++;
 716		tx_desc++;
 717		i++;
 718		if (unlikely(!i)) {
 719			i -= tx_ring->count;
 720			tx_buf = tx_ring->tx_bi;
 721			tx_desc = I40E_TX_DESC(tx_ring, 0);
 722		}
 723
 
 
 724		/* update budget accounting */
 725		budget--;
 726	} while (likely(budget));
 727
 728	i += tx_ring->count;
 729	tx_ring->next_to_clean = i;
 730	u64_stats_update_begin(&tx_ring->syncp);
 731	tx_ring->stats.bytes += total_bytes;
 732	tx_ring->stats.packets += total_packets;
 733	u64_stats_update_end(&tx_ring->syncp);
 734	tx_ring->q_vector->tx.total_bytes += total_bytes;
 735	tx_ring->q_vector->tx.total_packets += total_packets;
 736
 737	if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
 738		/* schedule immediate reset if we believe we hung */
 739		dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
 740			 "  VSI                  <%d>\n"
 741			 "  Tx Queue             <%d>\n"
 742			 "  next_to_use          <%x>\n"
 743			 "  next_to_clean        <%x>\n",
 744			 tx_ring->vsi->seid,
 745			 tx_ring->queue_index,
 746			 tx_ring->next_to_use, i);
 747		dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
 748			 "  time_stamp           <%lx>\n"
 749			 "  jiffies              <%lx>\n",
 750			 tx_ring->tx_bi[i].time_stamp, jiffies);
 751
 752		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 753
 754		dev_info(tx_ring->dev,
 755			 "tx hang detected on queue %d, resetting adapter\n",
 756			 tx_ring->queue_index);
 757
 758		tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
 759
 760		/* the adapter is about to reset, no point in enabling stuff */
 761		return true;
 762	}
 763
 764	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
 765						      tx_ring->queue_index),
 766				  total_packets, total_bytes);
 767
 768#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
 769	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
 770		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 771		/* Make sure that anybody stopping the queue after this
 772		 * sees the new next_to_clean.
 773		 */
 774		smp_mb();
 775		if (__netif_subqueue_stopped(tx_ring->netdev,
 776					     tx_ring->queue_index) &&
 777		   !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
 778			netif_wake_subqueue(tx_ring->netdev,
 779					    tx_ring->queue_index);
 780			++tx_ring->tx_stats.restart_queue;
 781		}
 782	}
 783
 784	return budget > 0;
 
 785}
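/* Reviewer's note (not part of the original source): the smp_mb() before
 * the __netif_subqueue_stopped() check pairs with the barrier in the xmit
 * path's stop/recheck helper, so a queue stopped just after this clean
 * either sees the updated next_to_clean or is woken here once at least
 * TX_WAKE_THRESHOLD descriptors are free.
 */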
 786
 787/**
 788 * i40e_set_new_dynamic_itr - Find new ITR level
 789 * @rc: structure containing ring performance data
 
 790 *
 791 * Stores a new ITR value based on packets and byte counts during
 792 * the last interrupt.  The advantage of per interrupt computation
 793 * is faster updates and more accurate ITR for the current traffic
 794 * pattern.  Constants in this function were computed based on
 795 * theoretical maximum wire speed and thresholds were set based on
 796 * testing data as well as attempting to minimize response time
 797 * while increasing bulk throughput.
 798 **/
 799static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
 
 800{
 801	enum i40e_latency_range new_latency_range = rc->latency_range;
 802	u32 new_itr = rc->itr;
 803	int bytes_per_int;
 
 
 804
 805	if (rc->total_packets == 0 || !rc->itr)
 806		return;
 807
 808	/* simple throttle rate management
 809	 *   0-10MB/s   lowest (100000 ints/s)
 810	 *  10-20MB/s   low    (20000 ints/s)
 811	 *  20-1249MB/s bulk   (8000 ints/s)
 812	 */
 813	bytes_per_int = rc->total_bytes / rc->itr;
 814	switch (rc->itr) {
 815	case I40E_LOWEST_LATENCY:
 816		if (bytes_per_int > 10)
 817			new_latency_range = I40E_LOW_LATENCY;
 818		break;
 819	case I40E_LOW_LATENCY:
 820		if (bytes_per_int > 20)
 821			new_latency_range = I40E_BULK_LATENCY;
 822		else if (bytes_per_int <= 10)
 823			new_latency_range = I40E_LOWEST_LATENCY;
 824		break;
 825	case I40E_BULK_LATENCY:
 826		if (bytes_per_int <= 20)
 827			rc->latency_range = I40E_LOW_LATENCY;
 828		break;
 829	}
 
 830
 831	switch (new_latency_range) {
 832	case I40E_LOWEST_LATENCY:
 833		new_itr = I40E_ITR_100K;
 834		break;
 835	case I40E_LOW_LATENCY:
 836		new_itr = I40E_ITR_20K;
 
 837		break;
 838	case I40E_BULK_LATENCY:
 839		new_itr = I40E_ITR_8K;
 
 840		break;
 841	default:
 
 
 842		break;
 843	}
 844
 845	if (new_itr != rc->itr) {
 846		/* do an exponential smoothing */
 847		new_itr = (10 * new_itr * rc->itr) /
 848			  ((9 * new_itr) + rc->itr);
 849		rc->itr = new_itr & I40E_MAX_ITR;
 850	}
 851
 852	rc->total_bytes = 0;
 853	rc->total_packets = 0;
 854}
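/* Reviewer's note with made-up numbers: the exponential smoothing step
 * new_itr = (10 * new_itr * rc->itr) / ((9 * new_itr) + rc->itr) moves the
 * stored ITR only part of the way toward the target; e.g. with a current
 * value of 200 and a target of 50 it yields 100000 / 650, roughly 153, so
 * the interrupt rate changes gradually rather than jumping between latency
 * ranges.
 */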
 855
 856/**
 857 * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
 858 * @q_vector: the vector to adjust
 859 **/
 860static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
 
 861{
 862	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
 863	struct i40e_hw *hw = &q_vector->vsi->back->hw;
 864	u32 reg_addr;
 865	u16 old_itr;
 866
 867	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
 868	old_itr = q_vector->rx.itr;
 869	i40e_set_new_dynamic_itr(&q_vector->rx);
 870	if (old_itr != q_vector->rx.itr)
 871		wr32(hw, reg_addr, q_vector->rx.itr);
 872
 873	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
 874	old_itr = q_vector->tx.itr;
 875	i40e_set_new_dynamic_itr(&q_vector->tx);
 876	if (old_itr != q_vector->tx.itr)
 877		wr32(hw, reg_addr, q_vector->tx.itr);
 
 878}
 879
 880/**
 881 * i40e_clean_programming_status - clean the programming status descriptor
 882 * @rx_ring: the rx ring that has this descriptor
 883 * @rx_desc: the rx descriptor written back by HW
 
 884 *
 885 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 886 * status being successful or not and take actions accordingly. FCoE should
 887 * handle its context/filter programming/invalidation status and take actions.
 888 *
 
 889 **/
 890static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 891					  union i40e_rx_desc *rx_desc)
 892{
 893	u64 qw;
 894	u8 id;
 895
 896	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 897	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 898		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 899
 900	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
 901		i40e_fd_handle_status(rx_ring, rx_desc, id);
 902}
 903
 904/**
 905 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 906 * @tx_ring: the tx ring to set up
 907 *
 908 * Return 0 on success, negative on error
 909 **/
 910int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
 911{
 912	struct device *dev = tx_ring->dev;
 913	int bi_size;
 914
 915	if (!dev)
 916		return -ENOMEM;
 917
 
 
 918	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
 919	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
 920	if (!tx_ring->tx_bi)
 921		goto err;
 922
 
 
 923	/* round up to nearest 4K */
 924	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
 925	/* add u32 for head writeback, align after this takes care of
 926	 * guaranteeing this is at least one cache line in size
 927	 */
 928	tx_ring->size += sizeof(u32);
 929	tx_ring->size = ALIGN(tx_ring->size, 4096);
 930	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
 931					   &tx_ring->dma, GFP_KERNEL);
 932	if (!tx_ring->desc) {
 933		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 934			 tx_ring->size);
 935		goto err;
 936	}
 937
 938	tx_ring->next_to_use = 0;
 939	tx_ring->next_to_clean = 0;
 
 940	return 0;
 941
 942err:
 943	kfree(tx_ring->tx_bi);
 944	tx_ring->tx_bi = NULL;
 945	return -ENOMEM;
 946}
 947
 948/**
 949 * i40e_clean_rx_ring - Free Rx buffers
 950 * @rx_ring: ring to be cleaned
 951 **/
 952void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 953{
 954	struct device *dev = rx_ring->dev;
 955	struct i40e_rx_buffer *rx_bi;
 956	unsigned long bi_size;
 957	u16 i;
 958
 959	/* ring already cleared, nothing to do */
 960	if (!rx_ring->rx_bi)
 961		return;
 962
 963	/* Free all the Rx ring sk_buffs */
 964	for (i = 0; i < rx_ring->count; i++) {
 965		rx_bi = &rx_ring->rx_bi[i];
 966		if (rx_bi->dma) {
 967			dma_unmap_single(dev,
 968					 rx_bi->dma,
 969					 rx_ring->rx_buf_len,
 970					 DMA_FROM_DEVICE);
 971			rx_bi->dma = 0;
 972		}
 973		if (rx_bi->skb) {
 974			dev_kfree_skb(rx_bi->skb);
 975			rx_bi->skb = NULL;
 976		}
 977		if (rx_bi->page) {
 978			if (rx_bi->page_dma) {
 979				dma_unmap_page(dev,
 980					       rx_bi->page_dma,
 981					       PAGE_SIZE / 2,
 982					       DMA_FROM_DEVICE);
 983				rx_bi->page_dma = 0;
 984			}
 985			__free_page(rx_bi->page);
 986			rx_bi->page = NULL;
 987			rx_bi->page_offset = 0;
 988		}
 989	}
 990
 991	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
 992	memset(rx_ring->rx_bi, 0, bi_size);
 993
 994	/* Zero out the descriptor ring */
 995	memset(rx_ring->desc, 0, rx_ring->size);
 996
 
 997	rx_ring->next_to_clean = 0;
 
 998	rx_ring->next_to_use = 0;
 999}
1000
1001/**
1002 * i40e_free_rx_resources - Free Rx resources
1003 * @rx_ring: ring to clean the resources from
1004 *
1005 * Free all receive software resources
1006 **/
1007void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1008{
1009	i40e_clean_rx_ring(rx_ring);
1010	kfree(rx_ring->rx_bi);
1011	rx_ring->rx_bi = NULL;
1012
1013	if (rx_ring->desc) {
1014		dma_free_coherent(rx_ring->dev, rx_ring->size,
1015				  rx_ring->desc, rx_ring->dma);
1016		rx_ring->desc = NULL;
1017	}
1018}
1019
1020/**
1021 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1022 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1023 *
1024 * Returns 0 on success, negative on failure
1025 **/
1026int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1027{
1028	struct device *dev = rx_ring->dev;
1029	int bi_size;
1030
1031	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1032	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1033	if (!rx_ring->rx_bi)
1034		goto err;
1035
1036	/* Round up to nearest 4K */
1037	rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
1038		? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
1039		: rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1040	rx_ring->size = ALIGN(rx_ring->size, 4096);
1041	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1042					   &rx_ring->dma, GFP_KERNEL);
1043
1044	if (!rx_ring->desc) {
1045		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1046			 rx_ring->size);
1047		goto err;
1048	}
1049
 
1050	rx_ring->next_to_clean = 0;
 
1051	rx_ring->next_to_use = 0;
1052
1053	return 0;
1054err:
1055	kfree(rx_ring->rx_bi);
1056	rx_ring->rx_bi = NULL;
1057	return -ENOMEM;
1058}
1059
1060/**
1061 * i40e_release_rx_desc - Store the new tail and head values
1062 * @rx_ring: ring to bump
1063 * @val: new head index
1064 **/
1065static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1066{
1067	rx_ring->next_to_use = val;
1068	/* Force memory writes to complete before letting h/w
1069	 * know there are new descriptors to fetch.  (Only
1070	 * applicable for weak-ordered memory model archs,
1071	 * such as IA-64).
1072	 */
1073	wmb();
1074	writel(val, rx_ring->tail);
1075}
1076
1077/**
1078 * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
1079 * @rx_ring: ring to place buffers on
1080 * @cleaned_count: number of buffers to replace
 
 
1081 **/
1082void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1083{
1084	u16 i = rx_ring->next_to_use;
1085	union i40e_rx_desc *rx_desc;
1086	struct i40e_rx_buffer *bi;
1087	struct sk_buff *skb;
1088
1089	/* do nothing if no valid netdev defined */
1090	if (!rx_ring->netdev || !cleaned_count)
1091		return;
1092
1093	while (cleaned_count--) {
1094		rx_desc = I40E_RX_DESC(rx_ring, i);
1095		bi = &rx_ring->rx_bi[i];
1096		skb = bi->skb;
1097
1098		if (!skb) {
1099			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1100							rx_ring->rx_buf_len);
1101			if (!skb) {
1102				rx_ring->rx_stats.alloc_buff_failed++;
1103				goto no_buffers;
1104			}
1105			/* initialize queue mapping */
1106			skb_record_rx_queue(skb, rx_ring->queue_index);
1107			bi->skb = skb;
1108		}
1109
1110		if (!bi->dma) {
1111			bi->dma = dma_map_single(rx_ring->dev,
1112						 skb->data,
1113						 rx_ring->rx_buf_len,
1114						 DMA_FROM_DEVICE);
1115			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1116				rx_ring->rx_stats.alloc_buff_failed++;
1117				bi->dma = 0;
1118				goto no_buffers;
1119			}
1120		}
1121
1122		if (ring_is_ps_enabled(rx_ring)) {
1123			if (!bi->page) {
1124				bi->page = alloc_page(GFP_ATOMIC);
1125				if (!bi->page) {
1126					rx_ring->rx_stats.alloc_page_failed++;
1127					goto no_buffers;
1128				}
1129			}
1130
1131			if (!bi->page_dma) {
1132				/* use a half page if we're re-using */
1133				bi->page_offset ^= PAGE_SIZE / 2;
1134				bi->page_dma = dma_map_page(rx_ring->dev,
1135							    bi->page,
1136							    bi->page_offset,
1137							    PAGE_SIZE / 2,
1138							    DMA_FROM_DEVICE);
1139				if (dma_mapping_error(rx_ring->dev,
1140						      bi->page_dma)) {
1141					rx_ring->rx_stats.alloc_page_failed++;
1142					bi->page_dma = 0;
1143					goto no_buffers;
1144				}
1145			}
1146
1147			/* Refresh the desc even if buffer_addrs didn't change
1148			 * because each write-back erases this info.
1149			 */
1150			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1151			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1152		} else {
1153			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1154			rx_desc->read.hdr_addr = 0;
1155		}
1156		i++;
1157		if (i == rx_ring->count)
1158			i = 0;
1159	}
1160
1161no_buffers:
1162	if (rx_ring->next_to_use != i)
1163		i40e_release_rx_desc(rx_ring, i);
1164}
1165
1166/**
1167 * i40e_receive_skb - Send a completed packet up the stack
1168 * @rx_ring:  rx ring in play
1169 * @skb: packet to send up
1170 * @vlan_tag: vlan tag for packet
1171 **/
1172static void i40e_receive_skb(struct i40e_ring *rx_ring,
1173			     struct sk_buff *skb, u16 vlan_tag)
1174{
1175	struct i40e_q_vector *q_vector = rx_ring->q_vector;
1176	struct i40e_vsi *vsi = rx_ring->vsi;
1177	u64 flags = vsi->back->flags;
1178
1179	if (vlan_tag & VLAN_VID_MASK)
1180		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1181
1182	if (flags & I40E_FLAG_IN_NETPOLL)
1183		netif_rx(skb);
1184	else
1185		napi_gro_receive(&q_vector->napi, skb);
1186}
1187
1188/**
1189 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1190 * @vsi: the VSI we care about
1191 * @skb: skb currently being received and modified
1192 * @rx_status: status value of last descriptor in packet
1193 * @rx_error: error value of last descriptor in packet
1194 * @rx_ptype: ptype value of last descriptor in packet
1195 **/
1196static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1197				    struct sk_buff *skb,
1198				    u32 rx_status,
1199				    u32 rx_error,
1200				    u16 rx_ptype)
1201{
1202	bool ipv4_tunnel, ipv6_tunnel;
1203	__wsum rx_udp_csum;
1204	__sum16 csum;
1205	struct iphdr *iph;
1206
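	/* ptypes inside the GRENAT4/GRENAT6 ranges are the tunneled
	 * (GRE/UDP encapsulated) packet types for that outer IP version
	 */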
1207	ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
1208		      (rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
1209	ipv6_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
1210		      (rx_ptype < I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
1211
1212	skb->encapsulation = ipv4_tunnel || ipv6_tunnel;
1213	skb->ip_summed = CHECKSUM_NONE;
1214
1215	/* Rx csum enabled and ip headers found? */
1216	if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
1217	      rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1218		return;
1219
1220	/* likely incorrect csum if alternate IP extension headers found */
1221	if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1222		return;
1223
1224	/* IP or L4 or outmost IP checksum error */
1225	if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
1226			(1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
1227			(1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
1228		vsi->back->hw_csum_rx_error++;
1229		return;
1230	}
1231
1232	if (ipv4_tunnel &&
1233	    !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
1234		/* If VXLAN traffic has an outer UDPv4 checksum we need to check
1235		 * it in the driver, hardware does not do it for us.
1236		 * Since L3L4P bit was set we assume a valid IHL value (>=5)
1237		 * so the total length of IPv4 header is IHL*4 bytes
1238		 */
1239		skb->transport_header = skb->mac_header +
1240					sizeof(struct ethhdr) +
1241					(ip_hdr(skb)->ihl * 4);
1242
1243		/* Add 4 bytes for VLAN tagged packets */
1244		skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) ||
1245					  skb->protocol == htons(ETH_P_8021AD))
1246					  ? VLAN_HLEN : 0;
1247
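		/* recompute the outer UDP checksum in software and compare
		 * it against the value that arrived in the packet
		 */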
1248		rx_udp_csum = udp_csum(skb);
1249		iph = ip_hdr(skb);
1250		csum = csum_tcpudp_magic(
1251				iph->saddr, iph->daddr,
1252				(skb->len - skb_transport_offset(skb)),
1253				IPPROTO_UDP, rx_udp_csum);
1254
1255		if (udp_hdr(skb)->check != csum) {
1256			vsi->back->hw_csum_rx_error++;
1257			return;
1258		}
1259	}
1260
1261	skb->ip_summed = CHECKSUM_UNNECESSARY;
1262}
1263
1264/**
1265 * i40e_rx_hash - returns the hash value from the Rx descriptor
1266 * @ring: descriptor ring
1267 * @rx_desc: specific descriptor
1268 **/
1269static inline u32 i40e_rx_hash(struct i40e_ring *ring,
1270			       union i40e_rx_desc *rx_desc)
1271{
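	/* qword0 only carries an RSS hash when the FLTSTAT field of the
	 * status word reports one, hence the mask test below
	 */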
1272	const __le64 rss_mask =
1273		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1274			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1275
1276	if ((ring->netdev->features & NETIF_F_RXHASH) &&
1277	    (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
1278		return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1279	else
1280		return 0;
1281}
1282
1283/**
1284 * i40e_ptype_to_hash - get a hash type
1285 * @ptype: the ptype value from the descriptor
1286 *
1287 * Returns a hash type to be used by skb_set_hash
1288 **/
1289static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1290{
1291	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1292
1293	if (!decoded.known)
1294		return PKT_HASH_TYPE_NONE;
1295
1296	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1297	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1298		return PKT_HASH_TYPE_L4;
1299	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1300		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1301		return PKT_HASH_TYPE_L3;
1302	else
1303		return PKT_HASH_TYPE_L2;
1304}
1305
1306/**
1307 * i40e_clean_rx_irq - Reclaim resources after receive completes
1308 * @rx_ring:  rx ring to clean
1309 * @budget:   how many cleans we're allowed
1310 *
1311 * Returns true if there's any budget left (i.e. the clean is finished)
1312 **/
1313static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1314{
1315	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1316	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
1317	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
1318	const int current_node = numa_node_id();
1319	struct i40e_vsi *vsi = rx_ring->vsi;
1320	u16 i = rx_ring->next_to_clean;
1321	union i40e_rx_desc *rx_desc;
1322	u32 rx_error, rx_status;
1323	u8 rx_ptype;
1324	u64 qword;
1325
1326	if (budget <= 0)
1327		return 0;
1328
1329	rx_desc = I40E_RX_DESC(rx_ring, i);
1330	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1331	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1332		    I40E_RXD_QW1_STATUS_SHIFT;
1333
1334	while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1335		union i40e_rx_desc *next_rxd;
1336		struct i40e_rx_buffer *rx_bi;
1337		struct sk_buff *skb;
1338		u16 vlan_tag;
1339		if (i40e_rx_is_programming_status(qword)) {
1340			i40e_clean_programming_status(rx_ring, rx_desc);
1341			I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1342			goto next_desc;
1343		}
1344		rx_bi = &rx_ring->rx_bi[i];
1345		skb = rx_bi->skb;
1346		prefetch(skb->data);
1347
1348		rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
1349				I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
1350		rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
1351				I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
1352		rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
1353			 I40E_RXD_QW1_LENGTH_SPH_SHIFT;
1354
1355		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1356			   I40E_RXD_QW1_ERROR_SHIFT;
1357		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1358		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
1359
1360		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
1361			   I40E_RXD_QW1_PTYPE_SHIFT;
1362		rx_bi->skb = NULL;
1363
1364		/* This memory barrier is needed to keep us from reading
1365		 * any other fields out of the rx_desc until we know the
1366		 * STATUS_DD bit is set
1367		 */
1368		rmb();
1369
1370		/* Get the header and possibly the whole packet
1371		 * If this is an skb from a previous receive, dma will be 0
1372		 */
1373		if (rx_bi->dma) {
1374			u16 len;
1375
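			/* pick how much landed in the header buffer: HBO
			 * means it overflowed, SPH means a clean header
			 * split, a bare packet length means no split was
			 * done for this buffer
			 */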
1376			if (rx_hbo)
1377				len = I40E_RX_HDR_SIZE;
1378			else if (rx_sph)
1379				len = rx_header_len;
1380			else if (rx_packet_len)
1381				len = rx_packet_len;   /* 1buf/no split found */
1382			else
1383				len = rx_header_len;   /* split always mode */
1384
1385			skb_put(skb, len);
1386			dma_unmap_single(rx_ring->dev,
1387					 rx_bi->dma,
1388					 rx_ring->rx_buf_len,
1389					 DMA_FROM_DEVICE);
1390			rx_bi->dma = 0;
1391		}
1392
1393		/* Get the rest of the data if this was a header split */
1394		if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
1395
1396			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1397					   rx_bi->page,
1398					   rx_bi->page_offset,
1399					   rx_packet_len);
1400
1401			skb->len += rx_packet_len;
1402			skb->data_len += rx_packet_len;
1403			skb->truesize += rx_packet_len;
1404
1405			if ((page_count(rx_bi->page) == 1) &&
1406			    (page_to_nid(rx_bi->page) == current_node))
1407				get_page(rx_bi->page);
1408			else
1409				rx_bi->page = NULL;
1410
1411			dma_unmap_page(rx_ring->dev,
1412				       rx_bi->page_dma,
1413				       PAGE_SIZE / 2,
1414				       DMA_FROM_DEVICE);
1415			rx_bi->page_dma = 0;
1416		}
1417		I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
1418
1419		if (unlikely(
1420		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
1421			struct i40e_rx_buffer *next_buffer;
1422
1423			next_buffer = &rx_ring->rx_bi[i];
1424
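			/* not end of packet: park the in-progress skb on the
			 * next buffer so the following descriptor keeps
			 * appending to it, and keep its spare skb/dma here
			 */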
1425			if (ring_is_ps_enabled(rx_ring)) {
1426				rx_bi->skb = next_buffer->skb;
1427				rx_bi->dma = next_buffer->dma;
1428				next_buffer->skb = skb;
1429				next_buffer->dma = 0;
1430			}
1431			rx_ring->rx_stats.non_eop_descs++;
1432			goto next_desc;
1433		}
1434
1435		/* ERR_MASK will only have valid bits if EOP set */
1436		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
1437			dev_kfree_skb_any(skb);
1438			goto next_desc;
1439		}
1440
1441		skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1442			     i40e_ptype_to_hash(rx_ptype));
1443		if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1444			i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1445					   I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1446					   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
1447			rx_ring->last_rx_timestamp = jiffies;
1448		}
1449
1450		/* probably a little skewed due to removing CRC */
1451		total_rx_bytes += skb->len;
1452		total_rx_packets++;
1453
1454		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1455
1456		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
1457
1458		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
1459			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
1460			 : 0;
1461		i40e_receive_skb(rx_ring, skb, vlan_tag);
1462
1463		rx_ring->netdev->last_rx = jiffies;
1464		budget--;
1465next_desc:
1466		rx_desc->wb.qword1.status_error_len = 0;
1467		if (!budget)
1468			break;
1469
1470		cleaned_count++;
1471		/* return some buffers to hardware, one at a time is too slow */
1472		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
1473			i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1474			cleaned_count = 0;
1475		}
1476
1477		/* use prefetched values */
1478		rx_desc = next_rxd;
1479		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1480		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1481			    I40E_RXD_QW1_STATUS_SHIFT;
1482	}
1483
1484	rx_ring->next_to_clean = i;
1485	u64_stats_update_begin(&rx_ring->syncp);
1486	rx_ring->stats.packets += total_rx_packets;
1487	rx_ring->stats.bytes += total_rx_bytes;
1488	u64_stats_update_end(&rx_ring->syncp);
1489	rx_ring->q_vector->rx.total_packets += total_rx_packets;
1490	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1491
1492	if (cleaned_count)
1493		i40e_alloc_rx_buffers(rx_ring, cleaned_count);
1494
1495	return budget > 0;
1496}
1497
1498/**
1499 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
1500 * @napi: napi struct with our devices info in it
1501 * @budget: amount of work driver is allowed to do this pass, in packets
1502 *
1503 * This function will clean all queues associated with a q_vector.
1504 *
1505 * Returns the amount of work done
1506 **/
1507int i40e_napi_poll(struct napi_struct *napi, int budget)
1508{
1509	struct i40e_q_vector *q_vector =
1510			       container_of(napi, struct i40e_q_vector, napi);
1511	struct i40e_vsi *vsi = q_vector->vsi;
1512	struct i40e_ring *ring;
1513	bool clean_complete = true;
1514	int budget_per_ring;
1515
1516	if (test_bit(__I40E_DOWN, &vsi->state)) {
1517		napi_complete(napi);
1518		return 0;
1519	}
1520
1521	/* Since the actual Tx work is minimal, we can give the Tx a larger
1522	 * budget and be more aggressive about cleaning up the Tx descriptors.
1523	 */
1524	i40e_for_each_ring(ring, q_vector->tx)
1525		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
1526
1527	/* We attempt to distribute budget to each Rx queue fairly, but don't
1528	 * allow the budget to go below 1 because that would exit polling early.
1529	 */
1530	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
1531
1532	i40e_for_each_ring(ring, q_vector->rx)
1533		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
1534
1535	/* If work not completed, return budget and polling will return */
1536	if (!clean_complete)
1537		return budget;
1538
1539	/* Work is done so exit the polling mode and re-enable the interrupt */
1540	napi_complete(napi);
1541	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
1542	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
1543		i40e_update_dynamic_itr(q_vector);
1544
1545	if (!test_bit(__I40E_DOWN, &vsi->state)) {
1546		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
1547			i40e_irq_dynamic_enable(vsi,
1548					q_vector->v_idx + vsi->base_vector);
1549		} else {
1550			struct i40e_hw *hw = &vsi->back->hw;
1551			/* We re-enable the queue 0 cause, but
1552			 * don't worry about dynamic_enable
1553			 * because we left it on for the other
1554			 * possible interrupts during napi
1555			 */
1556			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
1557			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
1558			wr32(hw, I40E_QINT_RQCTL(0), qval);
1559
1560			qval = rd32(hw, I40E_QINT_TQCTL(0));
1561			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
1562			wr32(hw, I40E_QINT_TQCTL(0), qval);
1563
1564			i40e_irq_dynamic_enable_icr0(vsi->back);
1565		}
1566	}
1567
1568	return 0;
1569}
1570
1571/**
1572 * i40e_atr - Add a Flow Director ATR filter
1573 * @tx_ring:  ring to add programming descriptor to
1574 * @skb:      send buffer
1575 * @flags:    send flags
1576 * @protocol: wire protocol
1577 **/
1578static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1579		     u32 flags, __be16 protocol)
1580{
1581	struct i40e_filter_program_desc *fdir_desc;
1582	struct i40e_pf *pf = tx_ring->vsi->back;
1583	union {
1584		unsigned char *network;
1585		struct iphdr *ipv4;
1586		struct ipv6hdr *ipv6;
1587	} hdr;
1588	struct tcphdr *th;
1589	unsigned int hlen;
1590	u32 flex_ptype, dtype_cmd;
1591	u16 i;
1592
1593	/* make sure ATR is enabled */
1594	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
1595		return;
1596
1597	/* if sampling is disabled do nothing */
1598	if (!tx_ring->atr_sample_rate)
1599		return;
1600
1601	/* snag network header to get L4 type and address */
1602	hdr.network = skb_network_header(skb);
1603
1604	/* Currently only IPv4/IPv6 with TCP is supported */
1605	if (protocol == htons(ETH_P_IP)) {
1606		if (hdr.ipv4->protocol != IPPROTO_TCP)
1607			return;
1608
1609		/* access ihl as a u8 to avoid unaligned access on ia64 */
1610		hlen = (hdr.network[0] & 0x0F) << 2;
1611	} else if (protocol == htons(ETH_P_IPV6)) {
1612		if (hdr.ipv6->nexthdr != IPPROTO_TCP)
1613			return;
1614
1615		hlen = sizeof(struct ipv6hdr);
1616	} else {
1617		return;
1618	}
1619
1620	th = (struct tcphdr *)(hdr.network + hlen);
1621
1622	/* Due to lack of space, no more new filters can be programmed */
1623	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1624		return;
1625
1626	tx_ring->atr_count++;
1627
1628	/* sample on all syn/fin/rst packets or once every atr sample rate */
1629	if (!th->fin &&
1630	    !th->syn &&
1631	    !th->rst &&
1632	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
1633		return;
1634
1635	tx_ring->atr_count = 0;
1636
1637	/* grab the next descriptor */
1638	i = tx_ring->next_to_use;
1639	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
1640
1641	i++;
1642	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1643
1644	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1645		      I40E_TXD_FLTR_QW0_QINDEX_MASK;
1646	flex_ptype |= (protocol == htons(ETH_P_IP)) ?
1647		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
1648		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
1649		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
1650		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
1651
1652	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
1653
1654	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
1655
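	/* FIN/RST mean the flow is ending, so remove the filter rather
	 * than add or refresh it
	 */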
1656	dtype_cmd |= (th->fin || th->rst) ?
1657		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1658		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
1659		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1660		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1661
1662	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
1663		     I40E_TXD_FLTR_QW1_DEST_SHIFT;
1664
1665	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
1666		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
1667
1668	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
1669	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
1670}
1671
1672/**
1673 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
1674 * @skb:     send buffer
1675 * @tx_ring: ring to send buffer on
1676 * @flags:   the tx flags to be set
1677 *
1678 * Checks the skb and sets up the corresponding generic transmit flags
1679 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1680 *
1681 * Returns an error code to indicate the frame should be dropped upon error,
1682 * otherwise returns 0 to indicate the flags have been set properly.
1683 **/
1684static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
1685				      struct i40e_ring *tx_ring,
1686				      u32 *flags)
1687{
1688	__be16 protocol = skb->protocol;
1689	u32  tx_flags = 0;
1690
1691	/* if we have a HW VLAN tag being added, default to the HW one */
1692	if (vlan_tx_tag_present(skb)) {
1693		tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
1694		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1695	/* else if it is a SW VLAN, check the next protocol and store the tag */
1696	} else if (protocol == htons(ETH_P_8021Q)) {
1697		struct vlan_hdr *vhdr, _vhdr;
1698		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
1699		if (!vhdr)
1700			return -EINVAL;
1701
1702		protocol = vhdr->h_vlan_encapsulated_proto;
1703		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
1704		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
1705	}
1706
1707	/* Insert 802.1p priority into VLAN header */
1708	if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
1709	    ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
1710	     (skb->priority != TC_PRIO_CONTROL))) {
1711		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
1712		tx_flags |= (skb->priority & 0x7) <<
1713				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
1714		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
1715			struct vlan_ethhdr *vhdr;
1716			int rc;
1717
1718			rc = skb_cow_head(skb, 0);
1719			if (rc < 0)
1720				return rc;
1721			vhdr = (struct vlan_ethhdr *)skb->data;
1722			vhdr->h_vlan_TCI = htons(tx_flags >>
1723						 I40E_TX_FLAGS_VLAN_SHIFT);
1724		} else {
1725			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
1726		}
1727	}
1728	*flags = tx_flags;
1729	return 0;
1730}
1731
1732/**
1733 * i40e_tso - set up the tso context descriptor
1734 * @tx_ring:  ptr to the ring to send
1735 * @skb:      ptr to the skb we're sending
1736 * @tx_flags: the collected send information
1737 * @protocol: the send protocol
1738 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1, context descriptor command/TSO/MSS bits
1739 * @cd_tunneling: ptr to context descriptor bits
1740 *
1741 * Returns 0 if no TSO can happen, 1 if tso is going, or error
1742 **/
1743static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1744		    u32 tx_flags, __be16 protocol, u8 *hdr_len,
1745		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
1746{
1747	u32 cd_cmd, cd_tso_len, cd_mss;
1748	struct ipv6hdr *ipv6h;
1749	struct tcphdr *tcph;
1750	struct iphdr *iph;
1751	u32 l4len;
1752	int err;
1753
1754	if (!skb_is_gso(skb))
1755		return 0;
1756
1757	err = skb_cow_head(skb, 0);
1758	if (err < 0)
1759		return err;
1760
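	/* for TSO, zero the stack's L3 length/checksum fields and seed the
	 * TCP checksum with a pseudo-header (length omitted) so the hardware
	 * can finish the checksum on every segment
	 */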
1761	if (protocol == htons(ETH_P_IP)) {
1762		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1763		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1764		iph->tot_len = 0;
1765		iph->check = 0;
1766		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1767						 0, IPPROTO_TCP, 0);
1768	} else if (skb_is_gso_v6(skb)) {
1769
1770		ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1771					   : ipv6_hdr(skb);
1772		tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1773		ipv6h->payload_len = 0;
1774		tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
1775					       0, IPPROTO_TCP, 0);
1776	}
1777
1778	l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1779	*hdr_len = (skb->encapsulation
1780		    ? (skb_inner_transport_header(skb) - skb->data)
1781		    : skb_transport_offset(skb)) + l4len;
1782
1783	/* find the field values */
1784	cd_cmd = I40E_TX_CTX_DESC_TSO;
1785	cd_tso_len = skb->len - *hdr_len;
1786	cd_mss = skb_shinfo(skb)->gso_size;
1787	*cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
1788				((u64)cd_tso_len <<
1789				 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
1790				((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
1791	return 1;
1792}
1793
1794/**
1795 * i40e_tsyn - set up the tsyn context descriptor
1796 * @tx_ring:  ptr to the ring to send
1797 * @skb:      ptr to the skb we're sending
1798 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1, context descriptor command/TSO/MSS bits
1799 *
1800 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
1801 **/
1802static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
1803		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
1804{
1805	struct i40e_pf *pf;
1806
1807	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1808		return 0;
1809
1810	/* Tx timestamps cannot be sampled when doing TSO */
1811	if (tx_flags & I40E_TX_FLAGS_TSO)
1812		return 0;
1813
1814	/* only timestamp the outbound packet if the user has requested it and
1815	 * we are not already transmitting a packet to be timestamped
1816	 */
1817	pf = i40e_netdev_to_pf(tx_ring->netdev);
1818	if (pf->ptp_tx && !pf->ptp_tx_skb) {
1819		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1820		pf->ptp_tx_skb = skb_get(skb);
1821	} else {
1822		return 0;
1823	}
1824
1825	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
1826				I40E_TXD_CTX_QW1_CMD_SHIFT;
1827
1828	pf->ptp_tx_start = jiffies;
1829	schedule_work(&pf->ptp_tx_work);
1830
1831	return 1;
1832}
1833
1834/**
1835 * i40e_tx_enable_csum - Enable Tx checksum offloads
1836 * @skb: send buffer
1837 * @tx_flags: Tx flags currently set
1838 * @td_cmd: Tx descriptor command bits to set
1839 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx ring the packet is being sent on
1840 * @cd_tunneling: ptr to context desc bits
1841 **/
1842static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1843				u32 *td_cmd, u32 *td_offset,
1844				struct i40e_ring *tx_ring,
1845				u32 *cd_tunneling)
1846{
1847	struct ipv6hdr *this_ipv6_hdr;
1848	unsigned int this_tcp_hdrlen;
1849	struct iphdr *this_ip_hdr;
1850	u32 network_hdr_len;
1851	u8 l4_hdr = 0;
1852
1853	if (skb->encapsulation) {
1854		network_hdr_len = skb_inner_network_header_len(skb);
1855		this_ip_hdr = inner_ip_hdr(skb);
1856		this_ipv6_hdr = inner_ipv6_hdr(skb);
1857		this_tcp_hdrlen = inner_tcp_hdrlen(skb);
1858
1859		if (tx_flags & I40E_TX_FLAGS_IPV4) {
1860
1861			if (tx_flags & I40E_TX_FLAGS_TSO) {
1862				*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
1863				ip_hdr(skb)->check = 0;
1864			} else {
1865				*cd_tunneling |=
1866					 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1867			}
1868		} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1869			/* the outer IPv6 header has no checksum field to clear,
1870			 * so just flag the extended IP type for the hardware
1871			 */
1872			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1876		}
1877
1878		/* Now set the ctx descriptor fields */
1879		*cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
1880					I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
1881				   I40E_TXD_CTX_UDP_TUNNELING            |
1882				   ((skb_inner_network_offset(skb) -
1883					skb_transport_offset(skb)) >> 1) <<
1884				   I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1885
1886	} else {
1887		network_hdr_len = skb_network_header_len(skb);
1888		this_ip_hdr = ip_hdr(skb);
1889		this_ipv6_hdr = ipv6_hdr(skb);
1890		this_tcp_hdrlen = tcp_hdrlen(skb);
1891	}
1892
1893	/* Enable IP checksum offloads */
1894	if (tx_flags & I40E_TX_FLAGS_IPV4) {
1895		l4_hdr = this_ip_hdr->protocol;
1896		/* the stack computes the IP header already, the only time we
1897		 * need the hardware to recompute it is in the case of TSO.
1898		 */
1899		if (tx_flags & I40E_TX_FLAGS_TSO) {
1900			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
1901			this_ip_hdr->check = 0;
1902		} else {
1903			*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
1904		}
1905		/* Now set the td_offset for IP header length */
1906		*td_offset = (network_hdr_len >> 2) <<
1907			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1908	} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1909		l4_hdr = this_ipv6_hdr->nexthdr;
1910		*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
1911		/* Now set the td_offset for IP header length */
1912		*td_offset = (network_hdr_len >> 2) <<
1913			      I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
1914	}
1915	/* words in MACLEN + dwords in IPLEN + dwords in L4Len */
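	/* MACLEN is counted in 2-byte words, IPLEN and L4LEN in 4-byte
	 * dwords, hence the >> 1 and >> 2 conversions
	 */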
1916	*td_offset |= (skb_network_offset(skb) >> 1) <<
1917		       I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
1918
1919	/* Enable L4 checksum offloads */
1920	switch (l4_hdr) {
1921	case IPPROTO_TCP:
1922		/* enable checksum offloads */
1923		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
1924		*td_offset |= (this_tcp_hdrlen >> 2) <<
1925			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1926		break;
1927	case IPPROTO_SCTP:
1928		/* enable SCTP checksum offload */
1929		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
1930		*td_offset |= (sizeof(struct sctphdr) >> 2) <<
1931			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1932		break;
1933	case IPPROTO_UDP:
1934		/* enable UDP checksum offload */
1935		*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
1936		*td_offset |= (sizeof(struct udphdr) >> 2) <<
1937			       I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
1938		break;
1939	default:
1940		break;
1941	}
1942}
1943
1944/**
1945 * i40e_create_tx_ctx - Build the Tx context descriptor
1946 * @tx_ring:  ring to create the descriptor on
1947 * @cd_type_cmd_tso_mss: Quad Word 1
1948 * @cd_tunneling: Quad Word 0 - bits 0-31
1949 * @cd_l2tag2: Quad Word 0 - bits 32-63
1950 **/
1951static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1952			       const u64 cd_type_cmd_tso_mss,
1953			       const u32 cd_tunneling, const u32 cd_l2tag2)
1954{
1955	struct i40e_tx_context_desc *context_desc;
1956	int i = tx_ring->next_to_use;
1957
1958	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1959	    !cd_tunneling && !cd_l2tag2)
1960		return;
1961
1962	/* grab the next descriptor */
1963	context_desc = I40E_TX_CTXTDESC(tx_ring, i);
1964
1965	i++;
1966	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1967
1968	/* cpu_to_le32 and assign to struct fields */
1969	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
1970	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
1971	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1972}
1973
1974/**
1975 * i40e_tx_map - Build the Tx descriptor
1976 * @tx_ring:  ring to send buffer on
1977 * @skb:      send buffer
1978 * @first:    first buffer info buffer to use
1979 * @tx_flags: collected send information
1980 * @hdr_len:  size of the packet header
1981 * @td_cmd:   the command field in the descriptor
1982 * @td_offset: offset for checksum or crc
1983 **/
1984static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1985			struct i40e_tx_buffer *first, u32 tx_flags,
1986			const u8 hdr_len, u32 td_cmd, u32 td_offset)
1987{
1988	unsigned int data_len = skb->data_len;
1989	unsigned int size = skb_headlen(skb);
1990	struct skb_frag_struct *frag;
1991	struct i40e_tx_buffer *tx_bi;
1992	struct i40e_tx_desc *tx_desc;
1993	u16 i = tx_ring->next_to_use;
1994	u32 td_tag = 0;
1995	dma_addr_t dma;
1996	u16 gso_segs;
1997
1998	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
1999		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
2000		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
2001			 I40E_TX_FLAGS_VLAN_SHIFT;
2002	}
2003
2004	if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
2005		gso_segs = skb_shinfo(skb)->gso_segs;
2006	else
2007		gso_segs = 1;
2008
2009	/* bytes on the wire: the payload plus one copy of the headers per segment */
2010	first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
2011	first->gso_segs = gso_segs;
2012	first->skb = skb;
2013	first->tx_flags = tx_flags;
2014
2015	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
2016
2017	tx_desc = I40E_TX_DESC(tx_ring, i);
2018	tx_bi = first;
2019
2020	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
2021		if (dma_mapping_error(tx_ring->dev, dma))
2022			goto dma_error;
2023
2024		/* record length, and DMA address */
2025		dma_unmap_len_set(tx_bi, len, size);
2026		dma_unmap_addr_set(tx_bi, dma, dma);
2027
2028		tx_desc->buffer_addr = cpu_to_le64(dma);
2029
2030		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
2031			tx_desc->cmd_type_offset_bsz =
2032				build_ctob(td_cmd, td_offset,
2033					   I40E_MAX_DATA_PER_TXD, td_tag);
2034
2035			tx_desc++;
2036			i++;
2037			if (i == tx_ring->count) {
2038				tx_desc = I40E_TX_DESC(tx_ring, 0);
2039				i = 0;
2040			}
2041
2042			dma += I40E_MAX_DATA_PER_TXD;
2043			size -= I40E_MAX_DATA_PER_TXD;
2044
2045			tx_desc->buffer_addr = cpu_to_le64(dma);
2046		}
2047
2048		if (likely(!data_len))
2049			break;
2050
2051		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
2052							  size, td_tag);
2053
2054		tx_desc++;
2055		i++;
2056		if (i == tx_ring->count) {
2057			tx_desc = I40E_TX_DESC(tx_ring, 0);
2058			i = 0;
2059		}
2060
2061		size = skb_frag_size(frag);
2062		data_len -= size;
2063
2064		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
2065				       DMA_TO_DEVICE);
2066
2067		tx_bi = &tx_ring->tx_bi[i];
2068	}
2069
2070	/* Place RS bit on last descriptor of any packet that spans across the
2071	 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
2072	 */
2073#define WB_STRIDE 0x3
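	/* skipping RS on packets that end inside the current stride lets the
	 * hardware batch descriptor write-backs instead of reporting each
	 * packet individually
	 */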
2074	if (((i & WB_STRIDE) != WB_STRIDE) &&
2075	    (first <= &tx_ring->tx_bi[i]) &&
2076	    (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2077		tx_desc->cmd_type_offset_bsz =
2078			build_ctob(td_cmd, td_offset, size, td_tag) |
2079			cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2080					 I40E_TXD_QW1_CMD_SHIFT);
2081	} else {
2082		tx_desc->cmd_type_offset_bsz =
2083			build_ctob(td_cmd, td_offset, size, td_tag) |
2084			cpu_to_le64((u64)I40E_TXD_CMD <<
2085					 I40E_TXD_QW1_CMD_SHIFT);
2086	}
2087
2088	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
2089						 tx_ring->queue_index),
2090			     first->bytecount);
2091
2092	/* set the timestamp */
2093	first->time_stamp = jiffies;
2094
2095	/* Force memory writes to complete before letting h/w
2096	 * know there are new descriptors to fetch.  (Only
2097	 * applicable for weak-ordered memory model archs,
2098	 * such as IA-64).
2099	 */
2100	wmb();
2101
2102	/* set next_to_watch value indicating a packet is present */
2103	first->next_to_watch = tx_desc;
2104
2105	i++;
2106	if (i == tx_ring->count)
2107		i = 0;
2108
2109	tx_ring->next_to_use = i;
2110
2111	/* notify HW of packet */
2112	writel(i, tx_ring->tail);
2113
2114	return;
2115
2116dma_error:
2117	dev_info(tx_ring->dev, "TX DMA map failed\n");
2118
2119	/* clear dma mappings for failed tx_bi map */
2120	for (;;) {
2121		tx_bi = &tx_ring->tx_bi[i];
2122		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
2123		if (tx_bi == first)
2124			break;
2125		if (i == 0)
2126			i = tx_ring->count;
2127		i--;
2128	}
2129
2130	tx_ring->next_to_use = i;
2131}
2132
2133/**
2134 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2135 * @tx_ring: the ring to be checked
2136 * @size:    the size buffer we want to assure is available
2137 *
2138 * Returns -EBUSY if a stop is needed, else 0
2139 **/
2140static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2141{
2142	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2143	/* Memory barrier before checking head and tail */
2144	smp_mb();
2145
2146	/* Check again in case another CPU has just made room available. */
2147	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2148		return -EBUSY;
2149
2150	/* A reprieve! - use start_queue because it doesn't call schedule */
2151	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2152	++tx_ring->tx_stats.restart_queue;
2153	return 0;
2154}
2155
2156/**
2157 * i40e_maybe_stop_tx - 1st level check for tx stop conditions
2158 * @tx_ring: the ring to be checked
2159 * @size:    the size buffer we want to assure is available
2160 *
2161 * Returns 0 if stop is not needed
2162 **/
2163static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2164{
2165	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
2166		return 0;
2167	return __i40e_maybe_stop_tx(tx_ring, size);
2168}
2169
2170/**
2171 * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
2172 * @skb:     send buffer
2173 * @tx_ring: ring to send buffer on
2174 *
2175 * Returns number of data descriptors needed for this skb. Returns 0 to indicate
2176 * there are not enough descriptors available in this ring since we need at least
2177 * one descriptor.
2178 **/
2179static int i40e_xmit_descriptor_count(struct sk_buff *skb,
2180				      struct i40e_ring *tx_ring)
2181{
2182#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2183	unsigned int f;
2184#endif
2185	int count = 0;
2186
2187	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
2188	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
2189	 *       + 4 desc gap to avoid the cache line where head is,
2190	 *       + 1 desc for context descriptor,
2191	 * otherwise try next time
2192	 */
2193#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
2194	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2195		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2196#else
2197	count += skb_shinfo(skb)->nr_frags;
2198#endif
2199	count += TXD_USE_COUNT(skb_headlen(skb));
2200	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
2201		tx_ring->tx_stats.tx_busy++;
2202		return 0;
2203	}
2204	return count;
2205}
2206
2207/**
2208 * i40e_xmit_frame_ring - Sends buffer on Tx ring
2209 * @skb:     send buffer
2210 * @tx_ring: ring to send buffer on
2211 *
2212 * Returns NETDEV_TX_OK if sent, else an error code
2213 **/
2214static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2215					struct i40e_ring *tx_ring)
2216{
2217	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
2218	u32 cd_tunneling = 0, cd_l2tag2 = 0;
2219	struct i40e_tx_buffer *first;
2220	u32 td_offset = 0;
2221	u32 tx_flags = 0;
2222	__be16 protocol;
2223	u32 td_cmd = 0;
2224	u8 hdr_len = 0;
2225	int tsyn;
2226	int tso;
2227	if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
2228		return NETDEV_TX_BUSY;
2229
2230	/* prepare the xmit flags */
2231	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
2232		goto out_drop;
2233
2234	/* obtain protocol of skb */
2235	protocol = skb->protocol;
2236
2237	/* record the location of the first descriptor for this packet */
2238	first = &tx_ring->tx_bi[tx_ring->next_to_use];
2239
2240	/* setup IPv4/IPv6 offloads */
2241	if (protocol == htons(ETH_P_IP))
2242		tx_flags |= I40E_TX_FLAGS_IPV4;
2243	else if (protocol == htons(ETH_P_IPV6))
2244		tx_flags |= I40E_TX_FLAGS_IPV6;
2245
2246	tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
2247		       &cd_type_cmd_tso_mss, &cd_tunneling);
2248
2249	if (tso < 0)
2250		goto out_drop;
2251	else if (tso)
2252		tx_flags |= I40E_TX_FLAGS_TSO;
2253
2254	skb_tx_timestamp(skb);
2255
2256	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
2257
2258	if (tsyn)
2259		tx_flags |= I40E_TX_FLAGS_TSYN;
2260
2261	/* always enable CRC insertion offload */
2262	td_cmd |= I40E_TX_DESC_CMD_ICRC;
2263
2264	/* Always offload the checksum, since it's in the data descriptor */
2265	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2266		tx_flags |= I40E_TX_FLAGS_CSUM;
2267
2268		i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
2269				    tx_ring, &cd_tunneling);
2270	}
2271
2272	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
2273			   cd_tunneling, cd_l2tag2);
2274
2275	/* Add Flow Director ATR if it's enabled.
2276	 *
2277	 * NOTE: this must always be directly before the data descriptor.
2278	 */
2279	i40e_atr(tx_ring, skb, tx_flags, protocol);
2280
2281	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
2282		    td_cmd, td_offset);
2283
2284	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
2285
2286	return NETDEV_TX_OK;
2287
2288out_drop:
2289	dev_kfree_skb_any(skb);
2290	return NETDEV_TX_OK;
2291}
2292
2293/**
2294 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
2295 * @skb:    send buffer
2296 * @netdev: network interface device structure
2297 *
2298 * Returns NETDEV_TX_OK if sent, else an error code
2299 **/
2300netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2301{
2302	struct i40e_netdev_priv *np = netdev_priv(netdev);
2303	struct i40e_vsi *vsi = np->vsi;
2304	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
2305
2306	/* hardware can't handle really short frames, hardware padding works
2307	 * beyond this point
2308	 */
2309	if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
2310		if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
2311			return NETDEV_TX_OK;
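		/* skb_pad() only grows the buffer, so bump len and the tail
		 * pointer by hand
		 */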
2312		skb->len = I40E_MIN_TX_LEN;
2313		skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
2314	}
2315
2316	return i40e_xmit_frame_ring(skb, tx_ring);
2317}