v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#ifndef _IDPF_TXRX_H_
   5#define _IDPF_TXRX_H_
   6
   7#include <linux/dim.h>
   8
   9#include <net/libeth/cache.h>
  10#include <net/tcp.h>
  11#include <net/netdev_queues.h>
  12
  13#include "idpf_lan_txrx.h"
  14#include "virtchnl2_lan_desc.h"
  15
  16#define IDPF_LARGE_MAX_Q			256
  17#define IDPF_MAX_Q				16
  18#define IDPF_MIN_Q				2
  19/* Mailbox Queue */
  20#define IDPF_MAX_MBXQ				1
  21
  22#define IDPF_MIN_TXQ_DESC			64
  23#define IDPF_MIN_RXQ_DESC			64
  24#define IDPF_MIN_TXQ_COMPLQ_DESC		256
  25#define IDPF_MAX_QIDS				256
  26
  27/* Number of descriptors in a queue should be a multiple of 32. RX queue
  28 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
  29 * to achieve BufQ descriptors aligned to 32
  30 */
  31#define IDPF_REQ_DESC_MULTIPLE			32
  32#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
  33#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
  34#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
  35
  36#define IDPF_MAX_DESCS				8160
  37#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
  38#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
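A quick check of the arithmetic behind the two maxima above (illustrative only, not part of the driver): 8160 is already a multiple of 32, but with two buffer queues per RX queue group IDPF_REQ_RXQ_DESC_MULTIPLE is 2 * 32 = 64, so the RX maximum rounds down one extra step.

static_assert(ALIGN_DOWN(8160, 32) == 8160);	/* IDPF_MAX_TXQ_DESC */
static_assert(ALIGN_DOWN(8160, 64) == 8128);	/* IDPF_MAX_RXQ_DESC */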
  39#define MIN_SUPPORT_TXDID (\
  40	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
  41	VIRTCHNL2_TXDID_FLEX_TSO_CTX)
  42
  43#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
  44#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
  45#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
  46#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4
  47
  48#define IDPF_COMPLQ_PER_GROUP			1
  49#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
  50#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
  51#define IDPF_BUFQ2_ENA				1
  52#define IDPF_NUMQ_PER_CHUNK			1
  53
  54#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
  55#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1
  56
  57/* Default vector sharing */
  58#define IDPF_MBX_Q_VEC		1
  59#define IDPF_MIN_Q_VEC		1
  60
  61#define IDPF_DFLT_TX_Q_DESC_COUNT		512
  62#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
  63#define IDPF_DFLT_RX_Q_DESC_COUNT		512
  64
  65/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
  66 * given RX completion queue has descriptors. This includes _ALL_ buffer
  67 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
  68 * you have a total of 1024 buffers so your RX queue _must_ have at least that
  69 * many descriptors. This macro divides a given number of RX descriptors by
  70 * number of buffer queues to calculate how many descriptors each buffer queue
  71 * can have without overrunning the RX queue.
  72 *
   73 * If you give hardware more buffers than completion descriptors, then if
   74 * hardware gets a chance to post more than a full ring's worth of
   75 * descriptors before SW takes an interrupt and overwrites the SW head, the
   76 * gen bit in the descriptor will be wrong. Any overwritten descriptors'
   77 * buffers are gone for good, and SW has no reasonable way to tell that this
   78 * has happened. From the SW perspective, when we finally get an interrupt,
   79 * it looks like we're still waiting for a descriptor, stalling forever.
  80 */
  81#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
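A worked example of the sizing rule above (illustrative numbers): an RX completion queue with 1024 descriptors backed by two buffer queues leaves each buffer queue at most IDPF_RX_BUFQ_DESC_COUNT(1024, 2) = 512 descriptors, so the total number of posted buffers can never exceed the 1024 completion slots.

static_assert(IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512);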
  82
  83#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)
  84
  85#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
  86do {								\
  87	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
  88		ntc = 0;					\
  89		idpf_queue_change(GEN_CHK, rxq);		\
  90	}							\
  91} while (0)
  92
  93#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
  94do {								\
  95	if (unlikely(++(idx) == (q)->desc_count))		\
  96		idx = 0;					\
  97} while (0)
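A minimal sketch, not taken from the driver, of how a splitq clean path might advance its index with IDPF_RX_BUMP_NTC: the macro wraps the index at desc_count and flips the software GEN_CHK bit so descriptors written on the previous pass are not mistaken for new writebacks. The helper name is an assumption.

/* Hypothetical helper: advance next_to_clean by "count" descriptors. */
static void example_advance_ntc(struct idpf_rx_queue *rxq, u16 count)
{
	u16 ntc = rxq->next_to_clean;

	while (count--)
		IDPF_RX_BUMP_NTC(rxq, ntc);	/* wraps to 0 and flips GEN_CHK */

	rxq->next_to_clean = ntc;
}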
  98
  99#define IDPF_RX_BUF_STRIDE			32
 100#define IDPF_RX_BUF_POST_STRIDE			16
 101#define IDPF_LOW_WATERMARK			64
 102
 103#define IDPF_TX_TSO_MIN_MSS			88
 104
 105/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 106 * only relevant in flow scheduling mode
 107 */
 108#define IDPF_TX_SPLITQ_RE_MIN_GAP	64
 109
 110#define IDPF_RX_BI_GEN_M		BIT(16)
 111#define IDPF_RX_BI_BUFID_M		GENMASK(15, 0)
 112
 113#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 114#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
 115
 116#define IDPF_DESC_UNUSED(txq)     \
 117	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
 118	(txq)->next_to_clean - (txq)->next_to_use - 1)
 119
 120#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->stash->buf_stack.top)
 121#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
 122					 (txq)->desc_count >> 2)
 123
 124#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
 125/* Determine the absolute number of completions pending, i.e. the number of
 126 * completions that are expected to arrive on the TX completion queue.
 127 */
 128#define IDPF_TX_COMPLQ_PENDING(txq)	\
 129	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
 130	0 : U32_MAX) + \
 131	(txq)->num_completions_pending - (txq)->complq->num_completions)
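To make the wrap handling concrete with made-up numbers: in the common case where num_completions_pending (say 1000) is still ahead of num_completions (say 900), the ternary contributes 0 and the macro yields the plain difference of 100. Only when the pending counter has wrapped around while the completion counter has not does the U32_MAX term kick in, so the unsigned subtraction still comes out as a small positive count rather than a huge bogus value.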
 132
 133#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
 134/* Adjust the generation for the completion tag and wrap if necessary */
 135#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
 136	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
 137	0 : (txq)->compl_tag_cur_gen)
 138
 139#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
 140
 141#define IDPF_TX_FLAGS_TSO		BIT(0)
 142#define IDPF_TX_FLAGS_IPV4		BIT(1)
 143#define IDPF_TX_FLAGS_IPV6		BIT(2)
 144#define IDPF_TX_FLAGS_TUNNEL		BIT(3)
 145
 146union idpf_tx_flex_desc {
 147	struct idpf_flex_tx_desc q; /* queue based scheduling */
 148	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
 149};
 150
 151#define idpf_tx_buf libeth_sqe
 152
 153/**
 154 * struct idpf_buf_lifo - LIFO for managing OOO completions
 155 * @top: Used to know how many buffers are left
 156 * @size: Total size of LIFO
 157 * @bufs: Backing array
 158 */
 159struct idpf_buf_lifo {
 160	u16 top;
 161	u16 size;
 162	struct idpf_tx_stash **bufs;
 163};
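A hedged sketch of how such a LIFO is typically driven (helper names are assumptions, not the driver's own): a free stash slot is popped when a buffer must be stashed for a later out-of-order completion and pushed back once that completion arrives, with top counting the entries currently available.

/* Hypothetical push/pop helpers for struct idpf_buf_lifo. */
static inline int example_buf_lifo_push(struct idpf_buf_lifo *stack,
					struct idpf_tx_stash *stash)
{
	if (stack->top == stack->size)
		return -ENOSPC;			/* backing array is full */

	stack->bufs[stack->top++] = stash;
	return 0;
}

static inline struct idpf_tx_stash *example_buf_lifo_pop(struct idpf_buf_lifo *stack)
{
	return stack->top ? stack->bufs[--stack->top] : NULL;
}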
 164
 165/**
 166 * struct idpf_tx_offload_params - Offload parameters for a given packet
 167 * @tx_flags: Feature flags enabled for this packet
 168 * @hdr_offsets: Offset parameter for single queue model
 169 * @cd_tunneling: Type of tunneling enabled for single queue model
 170 * @tso_len: Total length of payload to segment
 171 * @mss: Segment size
 172 * @tso_segs: Number of segments to be sent
 173 * @tso_hdr_len: Length of headers to be duplicated
 174 * @td_cmd: Command field to be inserted into descriptor
 175 */
 176struct idpf_tx_offload_params {
 177	u32 tx_flags;
 178
 179	u32 hdr_offsets;
 180	u32 cd_tunneling;
 181
 182	u32 tso_len;
 183	u16 mss;
 184	u16 tso_segs;
 185	u16 tso_hdr_len;
 186
 187	u16 td_cmd;
 188};
 189
 190/**
 191 * struct idpf_tx_splitq_params
 192 * @dtype: General descriptor info
 193 * @eop_cmd: Type of EOP
 194 * @compl_tag: Associated tag for completion
 195 * @td_tag: Descriptor tunneling tag
 196 * @offload: Offload parameters
 197 */
 198struct idpf_tx_splitq_params {
 199	enum idpf_tx_desc_dtype_value dtype;
 200	u16 eop_cmd;
 201	union {
 202		u16 compl_tag;
 203		u16 td_tag;
 204	};
 205
 206	struct idpf_tx_offload_params offload;
 207};
 208
 209enum idpf_tx_ctx_desc_eipt_offload {
 210	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
 211	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
 212	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
 213	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
 214};
 215
 216/* Checksum offload bits decoded from the receive descriptor. */
 217struct idpf_rx_csum_decoded {
 218	u32 l3l4p : 1;
 219	u32 ipe : 1;
 220	u32 eipe : 1;
 221	u32 eudpe : 1;
 222	u32 ipv6exadd : 1;
 223	u32 l4e : 1;
 224	u32 pprs : 1;
 225	u32 nat : 1;
 226	u32 raw_csum_inv : 1;
 227	u32 raw_csum : 16;
 228};
 229
 230struct idpf_rx_extracted {
 231	unsigned int size;
 232	u16 rx_ptype;
 233};
 234
 235#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
 236#define IDPF_TX_MIN_PKT_LEN		17
 237#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
 238#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
 239					 sizeof(struct idpf_flex_tx_desc))
 240#define IDPF_TX_DESCS_FOR_CTX		1
 241/* TX descriptors needed, worst case */
 242#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
 243			     IDPF_TX_DESCS_PER_CACHE_LINE + \
 244			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
 245
 246/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 247 * In order to align with the read requests we will align the value to
 248 * the nearest 4K which represents our maximum read request size.
 249 */
 250#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
 251#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
 252#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
 253	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
 254
 255#define idpf_rx_buf libeth_fqe
 256
 257#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
 258#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
 259				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
 260#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
 261#define IDPF_RX_MAX_PTYPES_PER_BUF	\
 262	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
 263			   IDPF_RX_MAX_PTYPE_SZ)
 264
 265#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
 266
 267#define IDPF_TUN_IP_GRE (\
 268	IDPF_PTYPE_TUNNEL_IP |\
 269	IDPF_PTYPE_TUNNEL_IP_GRENAT)
 270
 271#define IDPF_TUN_IP_GRE_MAC (\
 272	IDPF_TUN_IP_GRE |\
 273	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
 274
 275#define IDPF_RX_MAX_PTYPE	1024
 276#define IDPF_RX_MAX_BASE_PTYPE	256
 277#define IDPF_INVALID_PTYPE_ID	0xFFFF
 278
 279enum idpf_tunnel_state {
 280	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
 281	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
 282	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
 283};
 284
 285struct idpf_ptype_state {
 286	bool outer_ip:1;
 287	bool outer_frag:1;
 288	u8 tunnel_state:6;
 289};
 290
 291/**
 292 * enum idpf_queue_flags_t
 293 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 294 *		      identify new descriptor writebacks on the ring. HW sets
 295 *		      the gen bit to 1 on the first writeback of any given
 296 *		      descriptor. After the ring wraps, HW sets the gen bit of
 297 *		      those descriptors to 0, and continues flipping
 298 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 299 *		      gen bit to know what value will indicate writebacks on
 300 *		      the next pass around the ring. E.g. it is initialized
 301 *		      to 1 and knows that reading a gen bit of 1 in any
 302 *		      descriptor on the initial pass of the ring indicates a
 303 *		      writeback. It also flips on every ring wrap.
 304 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
 305 *			  bit and Q_RFL_GEN is the SW bit.
 306 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 307 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 308 * @__IDPF_Q_POLL_MODE: Enable poll mode
 309 * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
 310 * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
 311 * @__IDPF_Q_FLAGS_NBITS: Must be last
 312 */
 313enum idpf_queue_flags_t {
 314	__IDPF_Q_GEN_CHK,
 315	__IDPF_Q_RFL_GEN_CHK,
 316	__IDPF_Q_FLOW_SCH_EN,
 317	__IDPF_Q_SW_MARKER,
 318	__IDPF_Q_POLL_MODE,
 319	__IDPF_Q_CRC_EN,
 320	__IDPF_Q_HSPLIT_EN,
 321
 322	__IDPF_Q_FLAGS_NBITS,
 323};
 324
 325#define idpf_queue_set(f, q)		__set_bit(__IDPF_Q_##f, (q)->flags)
 326#define idpf_queue_clear(f, q)		__clear_bit(__IDPF_Q_##f, (q)->flags)
 327#define idpf_queue_change(f, q)		__change_bit(__IDPF_Q_##f, (q)->flags)
 328#define idpf_queue_has(f, q)		test_bit(__IDPF_Q_##f, (q)->flags)
 329
 330#define idpf_queue_has_clear(f, q)			\
 331	__test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
 332#define idpf_queue_assign(f, q, v)			\
 333	__assign_bit(__IDPF_Q_##f, (q)->flags, v)
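Illustrative call sites for the helpers above (the function below is made up, not from the driver): the first argument is the flag name without the __IDPF_Q_ prefix, which the macros token-paste onto the enum constant.

/* Hypothetical example showing how the token-pasting helpers are used. */
static bool example_queue_flags(struct idpf_rx_queue *rxq, bool hsplit)
{
	idpf_queue_assign(HSPLIT_EN, rxq, hsplit);	/* __assign_bit(__IDPF_Q_HSPLIT_EN, ...) */
	idpf_queue_set(GEN_CHK, rxq);			/* __set_bit(__IDPF_Q_GEN_CHK, ...) */

	/* test_bit(__IDPF_Q_POLL_MODE, rxq->flags) */
	return idpf_queue_has(POLL_MODE, rxq);
}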
 334
 335/**
 336 * struct idpf_vec_regs
 337 * @dyn_ctl_reg: Dynamic control interrupt register offset
 338 * @itrn_reg: Interrupt Throttling Rate register offset
 339 * @itrn_index_spacing: Register spacing between ITR registers of the same
 340 *			vector
 341 */
 342struct idpf_vec_regs {
 343	u32 dyn_ctl_reg;
 344	u32 itrn_reg;
 345	u32 itrn_index_spacing;
 346};
 347
 348/**
 349 * struct idpf_intr_reg
 350 * @dyn_ctl: Dynamic control interrupt register
 351 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 352 * @dyn_ctl_intena_msk_m: Mask for dyn_ctl interrupt enable mask
 353 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 354 * @dyn_ctl_itridx_m: Mask for ITR index
 355 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 356 * @dyn_ctl_wb_on_itr_m: Mask for WB on ITR feature
 357 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
 358 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
 359 * @rx_itr: RX ITR register
 360 * @tx_itr: TX ITR register
 361 * @icr_ena: Interrupt cause register offset
 362 * @icr_ena_ctlq_m: Mask for ICR
 363 */
 364struct idpf_intr_reg {
 365	void __iomem *dyn_ctl;
 366	u32 dyn_ctl_intena_m;
 367	u32 dyn_ctl_intena_msk_m;
 368	u32 dyn_ctl_itridx_s;
 369	u32 dyn_ctl_itridx_m;
 370	u32 dyn_ctl_intrvl_s;
 371	u32 dyn_ctl_wb_on_itr_m;
 372	u32 dyn_ctl_sw_itridx_ena_m;
 373	u32 dyn_ctl_swint_trig_m;
 374	void __iomem *rx_itr;
 375	void __iomem *tx_itr;
 376	void __iomem *icr_ena;
 377	u32 icr_ena_ctlq_m;
 378};
 379
 380/**
 381 * struct idpf_q_vector
 382 * @vport: Vport back pointer
 383 * @num_rxq: Number of RX queues
 384 * @num_txq: Number of TX queues
 385 * @num_bufq: Number of buffer queues
 386 * @num_complq: number of completion queues
 387 * @rx: Array of RX queues to service
 388 * @tx: Array of TX queues to service
 389 * @bufq: Array of buffer queues to service
 390 * @complq: array of completion queues
 391 * @intr_reg: See struct idpf_intr_reg
 392 * @napi: napi handler
 393 * @total_events: Number of interrupts processed
 394 * @wb_on_itr: whether WB on ITR is enabled
 395 * @tx_dim: Data for TX net_dim algorithm
 396 * @tx_itr_value: TX interrupt throttling rate
 397 * @tx_intr_mode: Dynamic ITR or not
 398 * @tx_itr_idx: TX ITR index
 399 * @rx_dim: Data for RX net_dim algorithm
 400 * @rx_itr_value: RX interrupt throttling rate
 401 * @rx_intr_mode: Dynamic ITR or not
 402 * @rx_itr_idx: RX ITR index
 403 * @v_idx: Vector index
 404 * @affinity_mask: CPU affinity mask
 405 */
 406struct idpf_q_vector {
 407	__cacheline_group_begin_aligned(read_mostly);
 408	struct idpf_vport *vport;
 409
 410	u16 num_rxq;
 411	u16 num_txq;
 412	u16 num_bufq;
 413	u16 num_complq;
 414	struct idpf_rx_queue **rx;
 415	struct idpf_tx_queue **tx;
 416	struct idpf_buf_queue **bufq;
 417	struct idpf_compl_queue **complq;
 418
 419	struct idpf_intr_reg intr_reg;
 420	__cacheline_group_end_aligned(read_mostly);
 421
 422	__cacheline_group_begin_aligned(read_write);
 423	struct napi_struct napi;
 424	u16 total_events;
 425	bool wb_on_itr;
 426
 427	struct dim tx_dim;
 428	u16 tx_itr_value;
 429	bool tx_intr_mode;
 430	u32 tx_itr_idx;
 431
 432	struct dim rx_dim;
 433	u16 rx_itr_value;
 434	bool rx_intr_mode;
 435	u32 rx_itr_idx;
 436	__cacheline_group_end_aligned(read_write);
 437
 438	__cacheline_group_begin_aligned(cold);
 439	u16 v_idx;
 440
 441	cpumask_var_t affinity_mask;
 442	__cacheline_group_end_aligned(cold);
 443};
 444libeth_cacheline_set_assert(struct idpf_q_vector, 120,
 445			    24 + sizeof(struct napi_struct) +
 446			    2 * sizeof(struct dim),
 447			    8 + sizeof(cpumask_var_t));
 448
 449struct idpf_rx_queue_stats {
 450	u64_stats_t packets;
 451	u64_stats_t bytes;
 452	u64_stats_t rsc_pkts;
 453	u64_stats_t hw_csum_err;
 454	u64_stats_t hsplit_pkts;
 455	u64_stats_t hsplit_buf_ovf;
 456	u64_stats_t bad_descs;
 457};
 458
 459struct idpf_tx_queue_stats {
 460	u64_stats_t packets;
 461	u64_stats_t bytes;
 462	u64_stats_t lso_pkts;
 463	u64_stats_t linearize;
 464	u64_stats_t q_busy;
 465	u64_stats_t skb_drops;
 466	u64_stats_t dma_map_errs;
 467};
 468
 469#define IDPF_ITR_DYNAMIC	1
 470#define IDPF_ITR_MAX		0x1FE0
 471#define IDPF_ITR_20K		0x0032
 472#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
 473#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
 474#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
 475#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
 476#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
 477#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
 478/* Index used for 'SW ITR' update in DYN_CTL register */
 479#define IDPF_SW_ITR_UPDATE_IDX	2
 480/* Index used for 'No ITR' update in DYN_CTL register */
 481#define IDPF_NO_ITR_UPDATE_IDX	3
 482#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
 483#define IDPF_DIM_DEFAULT_PROFILE_IX		1
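Worked example for the ITR encoding above (interpretation hedged from the comments): IDPF_ITR_20K is 0x0032 = 50, i.e. roughly a 50 us interval or about 20,000 interrupts per second, and ITR_REG_ALIGN() clears bit 0 because the assumed register granularity is 2 us.

/* Illustrative: an odd value is rounded down to the 2 us grain. */
static_assert(ITR_REG_ALIGN(51) == 50);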
 484
 485/**
 486 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
 487 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 488 *	       buffer completions. See struct idpf_buf_lifo
 489 * @sched_buf_hash: Hash table to store buffers
 490 */
 491struct idpf_txq_stash {
 492	struct idpf_buf_lifo buf_stack;
 493	DECLARE_HASHTABLE(sched_buf_hash, 12);
 494} ____cacheline_aligned;
 495
 496/**
 497 * struct idpf_rx_queue - software structure representing a receive queue
 498 * @rx: universal receive descriptor array
 499 * @single_buf: buffer descriptor array in singleq
 500 * @desc_ring: virtual descriptor ring address
 501 * @bufq_sets: Pointer to the array of buffer queues in splitq mode
 502 * @napi: NAPI instance corresponding to this queue (splitq)
 503 * @rx_buf: See struct &libeth_fqe
 504 * @pp: Page pool pointer in singleq mode
 505 * @netdev: &net_device corresponding to this queue
 506 * @tail: Tail offset. Used for both queue models single and split.
 507 * @flags: See enum idpf_queue_flags_t
 508 * @idx: For RX queue, it is used to index to total RX queue across groups and
 509 *	 used for skb reporting.
 510 * @desc_count: Number of descriptors
 511 * @rxdids: Supported RX descriptor ids
 512 * @rx_ptype_lkup: LUT of Rx ptypes
 513 * @next_to_use: Next descriptor to use
 514 * @next_to_clean: Next descriptor to clean
 515 * @next_to_alloc: RX buffer to allocate at
 516 * @skb: Pointer to the skb
 517 * @truesize: data buffer truesize in singleq
 518 * @stats_sync: See struct u64_stats_sync
 519 * @q_stats: See union idpf_rx_queue_stats
 520 * @q_id: Queue id
 521 * @size: Length of descriptor ring in bytes
 522 * @dma: Physical address of ring
 523 * @q_vector: Backreference to associated vector
 524 * @rx_buffer_low_watermark: RX buffer low watermark
 525 * @rx_hbuf_size: Header buffer size
 526 * @rx_buf_size: Buffer size
 527 * @rx_max_pkt_size: RX max packet size
 528 */
 529struct idpf_rx_queue {
 530	__cacheline_group_begin_aligned(read_mostly);
 531	union {
 532		union virtchnl2_rx_desc *rx;
 533		struct virtchnl2_singleq_rx_buf_desc *single_buf;
 534
 535		void *desc_ring;
 536	};
 537	union {
 538		struct {
 539			struct idpf_bufq_set *bufq_sets;
 540			struct napi_struct *napi;
 541		};
 542		struct {
 543			struct libeth_fqe *rx_buf;
 544			struct page_pool *pp;
 545		};
 546	};
 547	struct net_device *netdev;
 548	void __iomem *tail;
 549
 550	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 551	u16 idx;
 552	u16 desc_count;
 553
 554	u32 rxdids;
 555	const struct libeth_rx_pt *rx_ptype_lkup;
 556	__cacheline_group_end_aligned(read_mostly);
 557
 558	__cacheline_group_begin_aligned(read_write);
 559	u16 next_to_use;
 560	u16 next_to_clean;
 561	u16 next_to_alloc;
 562
 563	struct sk_buff *skb;
 564	u32 truesize;
 565
 566	struct u64_stats_sync stats_sync;
 567	struct idpf_rx_queue_stats q_stats;
 568	__cacheline_group_end_aligned(read_write);
 569
 570	__cacheline_group_begin_aligned(cold);
 571	u32 q_id;
 572	u32 size;
 573	dma_addr_t dma;
 574
 575	struct idpf_q_vector *q_vector;
 576
 577	u16 rx_buffer_low_watermark;
 578	u16 rx_hbuf_size;
 579	u16 rx_buf_size;
 580	u16 rx_max_pkt_size;
 581	__cacheline_group_end_aligned(cold);
 582};
 583libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
 584			    80 + sizeof(struct u64_stats_sync),
 585			    32);
 586
 587/**
 588 * struct idpf_tx_queue - software structure representing a transmit queue
 589 * @base_tx: base Tx descriptor array
 590 * @base_ctx: base Tx context descriptor array
 591 * @flex_tx: flex Tx descriptor array
 592 * @flex_ctx: flex Tx context descriptor array
 593 * @desc_ring: virtual descriptor ring address
 594 * @tx_buf: See struct idpf_tx_buf
 595 * @txq_grp: See struct idpf_txq_group
 596 * @dev: Device back pointer for DMA mapping
 597 * @tail: Tail offset. Used for both queue models single and split
 598 * @flags: See enum idpf_queue_flags_t
 599 * @idx: For TX queue, it is used as index to map between TX queue group and
 600 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 601 * @desc_count: Number of descriptors
 602 * @tx_min_pkt_len: Min supported packet length
 603 * @compl_tag_gen_s: Completion tag generation bit
 604 *	The format of the completion tag will change based on the TXQ
 605 *	descriptor ring size so that we can maintain roughly the same level
 606 *	of "uniqueness" across all descriptor sizes. For example, if the
 607 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 608 *	completion tag will be formatted as below:
 609 *	15                 6 5         0
 610 *	--------------------------------
 611 *	|    GEN=0-1023     |IDX = 0-63|
 612 *	--------------------------------
 613 *
 614 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 615 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 616 *	the completion tag will be formatted as below:
 617 *	15 13 12                       0
 618 *	--------------------------------
 619 *	|GEN |       IDX = 0-8159      |
 620 *	--------------------------------
 621 *
 622 *	This gives us 8*8160 = 65280 possible unique values.
 623 * @netdev: &net_device corresponding to this queue
 624 * @next_to_use: Next descriptor to use
 625 * @next_to_clean: Next descriptor to clean
 626 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 627 *		   the TX completion queue, it can be for any TXQ associated
 628 *		   with that completion queue. This means we can clean up to
 629 *		   N TXQs during a single call to clean the completion queue.
 630 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 631 *		   that single call to clean the completion queue. By doing so,
 632 *		   we can update BQL with aggregate cleaned stats for each TXQ
 633 *		   only once at the end of the cleaning routine.
 634 * @clean_budget: singleq only, queue cleaning budget
 635 * @cleaned_pkts: Number of packets cleaned for the above said case
 636 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 637 * @stash: Tx buffer stash for Flow-based scheduling mode
 638 * @compl_tag_bufid_m: Completion tag buffer id mask
 639 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 640 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 641 * @stats_sync: See struct u64_stats_sync
 642 * @q_stats: See union idpf_tx_queue_stats
 643 * @q_id: Queue id
 644 * @size: Length of descriptor ring in bytes
 645 * @dma: Physical address of ring
 646 * @q_vector: Backreference to associated vector
 647 */
 648struct idpf_tx_queue {
 649	__cacheline_group_begin_aligned(read_mostly);
 650	union {
 651		struct idpf_base_tx_desc *base_tx;
 652		struct idpf_base_tx_ctx_desc *base_ctx;
 653		union idpf_tx_flex_desc *flex_tx;
 654		struct idpf_flex_tx_ctx_desc *flex_ctx;
 655
 656		void *desc_ring;
 657	};
 658	struct libeth_sqe *tx_buf;
 659	struct idpf_txq_group *txq_grp;
 660	struct device *dev;
 661	void __iomem *tail;
 662
 663	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 664	u16 idx;
 665	u16 desc_count;
 666
 667	u16 tx_min_pkt_len;
 668	u16 compl_tag_gen_s;
 669
 670	struct net_device *netdev;
 671	__cacheline_group_end_aligned(read_mostly);
 672
 673	__cacheline_group_begin_aligned(read_write);
 674	u16 next_to_use;
 675	u16 next_to_clean;
 676
 677	union {
 678		u32 cleaned_bytes;
 679		u32 clean_budget;
 680	};
 681	u16 cleaned_pkts;
 682
 683	u16 tx_max_bufs;
 684	struct idpf_txq_stash *stash;
 685
 686	u16 compl_tag_bufid_m;
 687	u16 compl_tag_cur_gen;
 688	u16 compl_tag_gen_max;
 689
 690	struct u64_stats_sync stats_sync;
 691	struct idpf_tx_queue_stats q_stats;
 692	__cacheline_group_end_aligned(read_write);
 693
 694	__cacheline_group_begin_aligned(cold);
 695	u32 q_id;
 696	u32 size;
 697	dma_addr_t dma;
 698
 699	struct idpf_q_vector *q_vector;
 700	__cacheline_group_end_aligned(cold);
 701};
 702libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
 703			    88 + sizeof(struct u64_stats_sync),
 704			    24);
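To make the completion tag layout described in the comment above concrete (illustrative, with assumed helper names): for the minimum 64-entry ring the buffer-id field occupies the low 6 bits, so compl_tag_bufid_m would be GENMASK(5, 0), compl_tag_gen_s would be 6 and compl_tag_gen_max would be 1024, giving 64 * 1024 = 65536 distinct tags. A hedged pack/unpack sketch under those assumptions:

/* Hypothetical helpers for the 16-bit completion tag. */
static inline u16 example_pack_compl_tag(const struct idpf_tx_queue *txq,
					 u16 buf_id)
{
	return (txq->compl_tag_cur_gen << txq->compl_tag_gen_s) |
	       (buf_id & txq->compl_tag_bufid_m);
}

static inline u16 example_unpack_buf_id(const struct idpf_tx_queue *txq,
					u16 compl_tag)
{
	return compl_tag & txq->compl_tag_bufid_m;
}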
 705
 706/**
 707 * struct idpf_buf_queue - software structure representing a buffer queue
 708 * @split_buf: buffer descriptor array
 709 * @hdr_buf: &libeth_fqe for header buffers
 710 * @hdr_pp: &page_pool for header buffers
 711 * @buf: &libeth_fqe for data buffers
 712 * @pp: &page_pool for data buffers
 713 * @tail: Tail offset
 714 * @flags: See enum idpf_queue_flags_t
 715 * @desc_count: Number of descriptors
 716 * @next_to_use: Next descriptor to use
 717 * @next_to_clean: Next descriptor to clean
 718 * @next_to_alloc: RX buffer to allocate at
 719 * @hdr_truesize: truesize for buffer headers
 720 * @truesize: truesize for data buffers
 721 * @q_id: Queue id
 722 * @size: Length of descriptor ring in bytes
 723 * @dma: Physical address of ring
 724 * @q_vector: Backreference to associated vector
 725 * @rx_buffer_low_watermark: RX buffer low watermark
 726 * @rx_hbuf_size: Header buffer size
 727 * @rx_buf_size: Buffer size
 728 */
 729struct idpf_buf_queue {
 730	__cacheline_group_begin_aligned(read_mostly);
 731	struct virtchnl2_splitq_rx_buf_desc *split_buf;
 732	struct libeth_fqe *hdr_buf;
 733	struct page_pool *hdr_pp;
 734	struct libeth_fqe *buf;
 735	struct page_pool *pp;
 736	void __iomem *tail;
 737
 738	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 739	u32 desc_count;
 740	__cacheline_group_end_aligned(read_mostly);
 741
 742	__cacheline_group_begin_aligned(read_write);
 743	u32 next_to_use;
 744	u32 next_to_clean;
 745	u32 next_to_alloc;
 746
 747	u32 hdr_truesize;
 748	u32 truesize;
 749	__cacheline_group_end_aligned(read_write);
 750
 751	__cacheline_group_begin_aligned(cold);
 752	u32 q_id;
 753	u32 size;
 754	dma_addr_t dma;
 755
 756	struct idpf_q_vector *q_vector;
 757
 758	u16 rx_buffer_low_watermark;
 759	u16 rx_hbuf_size;
 760	u16 rx_buf_size;
 761	__cacheline_group_end_aligned(cold);
 762};
 763libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
 764
 765/**
 766 * struct idpf_compl_queue - software structure representing a completion queue
 767 * @comp: completion descriptor array
 768 * @txq_grp: See struct idpf_txq_group
 769 * @flags: See enum idpf_queue_flags_t
 770 * @desc_count: Number of descriptors
 771 * @clean_budget: queue cleaning budget
 772 * @netdev: &net_device corresponding to this queue
 773 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 774 *		 and bufq.
 775 * @next_to_clean: Next descriptor to clean
 776 * @num_completions: Only relevant for TX completion queue. It tracks the
 777 *		     number of completions received to compare against the
 778 *		     number of completions pending, as accumulated by the
 779 *		     TX queues.
 780 * @q_id: Queue id
 781 * @size: Length of descriptor ring in bytes
 782 * @dma: Physical address of ring
 783 * @q_vector: Backreference to associated vector
 784 */
 785struct idpf_compl_queue {
 786	__cacheline_group_begin_aligned(read_mostly);
 787	struct idpf_splitq_tx_compl_desc *comp;
 788	struct idpf_txq_group *txq_grp;
 789
 790	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 791	u32 desc_count;
 792
 793	u32 clean_budget;
 794	struct net_device *netdev;
 795	__cacheline_group_end_aligned(read_mostly);
 796
 797	__cacheline_group_begin_aligned(read_write);
 798	u32 next_to_use;
 799	u32 next_to_clean;
 800
 801	aligned_u64 num_completions;
 802	__cacheline_group_end_aligned(read_write);
 803
 804	__cacheline_group_begin_aligned(cold);
 805	u32 q_id;
 806	u32 size;
 807	dma_addr_t dma;
 808
 809	struct idpf_q_vector *q_vector;
 810	__cacheline_group_end_aligned(cold);
 811};
 812libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
 813
 814/**
 815 * struct idpf_sw_queue
 816 * @ring: Pointer to the ring
 817 * @flags: See enum idpf_queue_flags_t
 818 * @desc_count: Descriptor count
 819 * @next_to_use: Buffer to allocate at
 820 * @next_to_clean: Next descriptor to clean
 821 *
 822 * Software queues are used in splitq mode to manage buffers between rxq
 823 * producer and the bufq consumer.  These are required in order to maintain a
 824 * lockless buffer management system and are strictly software only constructs.
 825 */
 826struct idpf_sw_queue {
 827	__cacheline_group_begin_aligned(read_mostly);
 828	u32 *ring;
 829
 830	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 831	u32 desc_count;
 832	__cacheline_group_end_aligned(read_mostly);
 833
 834	__cacheline_group_begin_aligned(read_write);
 835	u32 next_to_use;
 836	u32 next_to_clean;
 837	__cacheline_group_end_aligned(read_write);
 838};
 839libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
 840libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
 841libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
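A hedged sketch of the refill-queue producer side (function name and details are assumptions): the rxq encodes the returned buffer id together with its software generation bit, using the IDPF_RX_BI_* masks defined earlier, so the consuming bufq can tell fresh ring entries from stale ones.

/* Hypothetical refill-queue producer. */
static void example_post_buf_refill(struct idpf_sw_queue *refillq, u32 buf_id)
{
	u32 nta = refillq->next_to_use;

	/* Store the buffer id plus the current SW generation bit. */
	refillq->ring[nta] = FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
			     FIELD_PREP(IDPF_RX_BI_GEN_M,
					idpf_queue_has(RFL_GEN_CHK, refillq));

	if (unlikely(++nta == refillq->desc_count)) {
		nta = 0;
		idpf_queue_change(RFL_GEN_CHK, refillq);	/* flip on wrap */
	}

	refillq->next_to_use = nta;
}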
 842
 843/**
 844 * struct idpf_rxq_set
 845 * @rxq: RX queue
 846 * @refillq: pointers to refill queues
 847 *
  848 * Splitq only.  idpf_rxq_set associates an rxq with an array of refillqs.
 849 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 850 * Bufqs then clean these refillqs for buffers to give to hardware.
 851 */
 852struct idpf_rxq_set {
 853	struct idpf_rx_queue rxq;
 854	struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
 855};
 856
 857/**
 858 * struct idpf_bufq_set
 859 * @bufq: Buffer queue
 860 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 861 *		  in idpf_rxq_group.
 862 * @refillqs: Pointer to refill queues array.
 863 *
 864 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 865 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 866 * Used buffers received by rxqs will be put on refillqs which bufqs will
 867 * clean to return new buffers back to hardware.
 868 *
 869 * Buffers needed by some number of rxqs associated in this rxq_group are
 870 * managed by at most two bufqs (depending on performance configuration).
 871 */
 872struct idpf_bufq_set {
 873	struct idpf_buf_queue bufq;
 874	int num_refillqs;
 875	struct idpf_sw_queue *refillqs;
 876};
 877
 878/**
 879 * struct idpf_rxq_group
 880 * @vport: Vport back pointer
 881 * @singleq: Struct with single queue related members
 882 * @singleq.num_rxq: Number of RX queues associated
 883 * @singleq.rxqs: Array of RX queue pointers
 884 * @splitq: Struct with split queue related members
 885 * @splitq.num_rxq_sets: Number of RX queue sets
 886 * @splitq.rxq_sets: Array of RX queue sets
 887 * @splitq.bufq_sets: Buffer queue set pointer
 888 *
 889 * In singleq mode, an rxq_group is simply an array of rxqs.  In splitq, a
 890 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 891 * manage buffers in splitq mode.
 892 */
 893struct idpf_rxq_group {
 894	struct idpf_vport *vport;
 895
 896	union {
 897		struct {
 898			u16 num_rxq;
 899			struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
 900		} singleq;
 901		struct {
 902			u16 num_rxq_sets;
 903			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
 904			struct idpf_bufq_set *bufq_sets;
 905		} splitq;
 906	};
 907};
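An illustrative walk over a splitq RX queue group (loop only, nothing is processed; the function is hypothetical): each rxq_set pairs one rxq with a per-bufq refill queue, while bufq_sets holds the (at most IDPF_MAX_BUFQS_PER_RXQ_GRP) buffer queues shared by the whole group.

/* Hypothetical iteration over a splitq RX queue group. */
static void example_walk_rxq_group(struct idpf_rxq_group *grp, u16 num_bufq)
{
	u16 i, j;

	for (i = 0; i < grp->splitq.num_rxq_sets; i++) {
		struct idpf_rxq_set *rxq_set = grp->splitq.rxq_sets[i];

		for (j = 0; j < num_bufq; j++) {
			/* rxq i returns used buffers through refillq j;
			 * bufq_sets[j].bufq later drains that refillq.
			 */
			struct idpf_sw_queue *refillq = rxq_set->refillq[j];

			(void)refillq;
		}
	}
}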
 908
 909/**
 910 * struct idpf_txq_group
 911 * @vport: Vport back pointer
 912 * @num_txq: Number of TX queues associated
 913 * @txqs: Array of TX queue pointers
 914 * @stashes: array of OOO stashes for the queues
 915 * @complq: Associated completion queue pointer, split queue only
 916 * @num_completions_pending: Total number of completions pending for the
  917 *			     completion queue, accumulated for all TX queues
 918 *			     associated with that completion queue.
 919 *
 920 * Between singleq and splitq, a txq_group is largely the same except for the
 921 * complq. In splitq a single complq is responsible for handling completions
 922 * for some number of txqs associated in this txq_group.
 923 */
 924struct idpf_txq_group {
 925	struct idpf_vport *vport;
 926
 927	u16 num_txq;
 928	struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
 929	struct idpf_txq_stash *stashes;
 930
 931	struct idpf_compl_queue *complq;
 932
 933	aligned_u64 num_completions_pending;
 934};
 935
 936static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
 937{
 938	u32 cpu;
 939
 940	if (!q_vector)
 941		return NUMA_NO_NODE;
 942
 943	cpu = cpumask_first(q_vector->affinity_mask);
 944
 945	return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
 946}
 947
 948/**
 949 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 950 * @size: transmit request size in bytes
 951 *
 952 * In the case where a large frag (>= 16K) needs to be split across multiple
 953 * descriptors, we need to assume that we can have no more than 12K of data
 954 * per descriptor due to hardware alignment restrictions (4K alignment).
 955 */
 956static inline u32 idpf_size_to_txd_count(unsigned int size)
 957{
 958	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
 959}
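Worked example of the arithmetic above (illustrative size): IDPF_TX_MAX_DESC_DATA_ALIGNED is ALIGN_DOWN(16K - 1, 4K) = 12288 bytes, so a hypothetical 40000-byte fragment needs DIV_ROUND_UP(40000, 12288) = 4 descriptors.

/* Illustrative checks of the constants used above. */
static_assert(IDPF_TX_MAX_DESC_DATA_ALIGNED == 12288);
static_assert(DIV_ROUND_UP(40000, IDPF_TX_MAX_DESC_DATA_ALIGNED) == 4);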
 960
 961/**
 962 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 963 * @td_cmd: Command to be filled in desc
 964 * @td_offset: Offset to be filled in desc
 965 * @size: Size of the buffer
 966 * @td_tag: td tag to be filled
 967 *
 968 * Returns the 64 bit value populated with the input parameters
 969 */
 970static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
 971						unsigned int size, u64 td_tag)
 972{
 973	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
 974			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
 975			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
 976			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
 977			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
 978}
 979
 980void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
 981			      struct idpf_tx_splitq_params *params,
 982			      u16 td_cmd, u16 size);
 983void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 984				    struct idpf_tx_splitq_params *params,
 985				    u16 td_cmd, u16 size);
 986/**
 987 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 988 * @desc: descriptor to populate
 989 * @params: pointer to tx params struct
 990 * @td_cmd: command to be filled in desc
 991 * @size: size of buffer
 992 */
 993static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
 994					     struct idpf_tx_splitq_params *params,
 995					     u16 td_cmd, u16 size)
 996{
 997	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
 998		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
 999	else
1000		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
1001}
1002
1003/**
1004 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
1005 * @q_vector: pointer to queue vector struct
1006 */
1007static inline void idpf_vport_intr_set_wb_on_itr(struct idpf_q_vector *q_vector)
1008{
1009	struct idpf_intr_reg *reg;
1010
1011	if (q_vector->wb_on_itr)
1012		return;
1013
1014	q_vector->wb_on_itr = true;
1015	reg = &q_vector->intr_reg;
1016
1017	writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m |
1018	       (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s),
1019	       reg->dyn_ctl);
1020}
1021
1022int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
1023void idpf_vport_init_num_qs(struct idpf_vport *vport,
1024			    struct virtchnl2_create_vport *vport_msg);
1025void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
1026int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
1027			     struct virtchnl2_create_vport *vport_msg,
1028			     struct idpf_vport_max_q *max_q);
1029void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
1030int idpf_vport_queues_alloc(struct idpf_vport *vport);
1031void idpf_vport_queues_rel(struct idpf_vport *vport);
1032void idpf_vport_intr_rel(struct idpf_vport *vport);
1033int idpf_vport_intr_alloc(struct idpf_vport *vport);
1034void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
1035void idpf_vport_intr_deinit(struct idpf_vport *vport);
1036int idpf_vport_intr_init(struct idpf_vport *vport);
1037void idpf_vport_intr_ena(struct idpf_vport *vport);
1038int idpf_config_rss(struct idpf_vport *vport);
1039int idpf_init_rss(struct idpf_vport *vport);
1040void idpf_deinit_rss(struct idpf_vport *vport);
1041int idpf_rx_bufs_init_all(struct idpf_vport *vport);
1042void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
1043		      unsigned int size);
1044struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
1045void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
1046			   bool xmit_more);
1047unsigned int idpf_size_to_txd_count(unsigned int size);
1048netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
1049void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
1050			   struct idpf_tx_buf *first, u16 ring_idx);
1051unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
1052					 struct sk_buff *skb);
1053void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
1054netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
1055				  struct idpf_tx_queue *tx_q);
1056netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
1057bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
1058				      u16 cleaned_count);
1059int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
1060
1061static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
1062					     u32 needed)
1063{
1064	return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
1065					  IDPF_DESC_UNUSED(tx_q),
1066					  needed, needed);
1067}
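A hedged sketch of how a transmit path could combine the helpers declared above: size the request first, drop frames that cannot be laid out, and back off with NETDEV_TX_BUSY when the ring cannot take the worst case. The function name and the exact descriptor budget are assumptions, not the driver's real flow.

/* Hypothetical pre-transmit check. */
static netdev_tx_t example_tx_prologue(struct idpf_tx_queue *tx_q,
				       struct sk_buff *skb)
{
	unsigned int count = idpf_tx_desc_count_required(tx_q, skb);

	if (unlikely(!count))
		return idpf_tx_drop_skb(tx_q, skb);

	if (idpf_tx_maybe_stop_common(tx_q, count + IDPF_TX_DESCS_FOR_CTX))
		return NETDEV_TX_BUSY;

	/* ... build context and data descriptors, then bump the tail ... */
	return NETDEV_TX_OK;
}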
1068
1069#endif /* !_IDPF_TXRX_H_ */
v6.9.4
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/* Copyright (C) 2023 Intel Corporation */
   3
   4#ifndef _IDPF_TXRX_H_
   5#define _IDPF_TXRX_H_
   6
   7#include <net/page_pool/helpers.h>
 
 
   8#include <net/tcp.h>
   9#include <net/netdev_queues.h>
  10
 
 
 
  11#define IDPF_LARGE_MAX_Q			256
  12#define IDPF_MAX_Q				16
  13#define IDPF_MIN_Q				2
  14/* Mailbox Queue */
  15#define IDPF_MAX_MBXQ				1
  16
  17#define IDPF_MIN_TXQ_DESC			64
  18#define IDPF_MIN_RXQ_DESC			64
  19#define IDPF_MIN_TXQ_COMPLQ_DESC		256
  20#define IDPF_MAX_QIDS				256
  21
  22/* Number of descriptors in a queue should be a multiple of 32. RX queue
  23 * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
  24 * to achieve BufQ descriptors aligned to 32
  25 */
  26#define IDPF_REQ_DESC_MULTIPLE			32
  27#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
  28#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
  29#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
  30
  31#define IDPF_MAX_DESCS				8160
  32#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
  33#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
  34#define MIN_SUPPORT_TXDID (\
  35	VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
  36	VIRTCHNL2_TXDID_FLEX_TSO_CTX)
  37
  38#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS		1
  39#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS		1
  40#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP		4
  41#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP		4
  42
  43#define IDPF_COMPLQ_PER_GROUP			1
  44#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP		1
  45#define IDPF_MAX_BUFQS_PER_RXQ_GRP		2
  46#define IDPF_BUFQ2_ENA				1
  47#define IDPF_NUMQ_PER_CHUNK			1
  48
  49#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP		1
  50#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP		1
  51
  52/* Default vector sharing */
  53#define IDPF_MBX_Q_VEC		1
  54#define IDPF_MIN_Q_VEC		1
  55
  56#define IDPF_DFLT_TX_Q_DESC_COUNT		512
  57#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT		512
  58#define IDPF_DFLT_RX_Q_DESC_COUNT		512
  59
  60/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
  61 * given RX completion queue has descriptors. This includes _ALL_ buffer
  62 * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
  63 * you have a total of 1024 buffers so your RX queue _must_ have at least that
  64 * many descriptors. This macro divides a given number of RX descriptors by
  65 * number of buffer queues to calculate how many descriptors each buffer queue
  66 * can have without overrunning the RX queue.
  67 *
  68 * If you give hardware more buffers than completion descriptors what will
  69 * happen is that if hardware gets a chance to post more than ring wrap of
  70 * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
  71 * in the descriptor will be wrong. Any overwritten descriptors' buffers will
  72 * be gone forever and SW has no reasonable way to tell that this has happened.
  73 * From SW perspective, when we finally get an interrupt, it looks like we're
  74 * still waiting for descriptor to be done, stalling forever.
  75 */
  76#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ)	((RXD) / (NUM_BUFQ))
  77
  78#define IDPF_RX_BUFQ_WORKING_SET(rxq)		((rxq)->desc_count - 1)
  79
  80#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
  81do {								\
  82	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
  83		ntc = 0;					\
  84		change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags);	\
  85	}							\
  86} while (0)
  87
  88#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
  89do {								\
  90	if (unlikely(++(idx) == (q)->desc_count))		\
  91		idx = 0;					\
  92} while (0)
  93
  94#define IDPF_RX_HDR_SIZE			256
  95#define IDPF_RX_BUF_2048			2048
  96#define IDPF_RX_BUF_4096			4096
  97#define IDPF_RX_BUF_STRIDE			32
  98#define IDPF_RX_BUF_POST_STRIDE			16
  99#define IDPF_LOW_WATERMARK			64
 100/* Size of header buffer specifically for header split */
 101#define IDPF_HDR_BUF_SIZE			256
 102#define IDPF_PACKET_HDR_PAD	\
 103	(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
 104#define IDPF_TX_TSO_MIN_MSS			88
 105
 106/* Minimum number of descriptors between 2 descriptors with the RE bit set;
 107 * only relevant in flow scheduling mode
 108 */
 109#define IDPF_TX_SPLITQ_RE_MIN_GAP	64
 110
 111#define IDPF_RX_BI_BUFID_S		0
 112#define IDPF_RX_BI_BUFID_M		GENMASK(14, 0)
 113#define IDPF_RX_BI_GEN_S		15
 114#define IDPF_RX_BI_GEN_M		BIT(IDPF_RX_BI_GEN_S)
 115#define IDPF_RXD_EOF_SPLITQ		VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
 116#define IDPF_RXD_EOF_SINGLEQ		VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
 117
 118#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i)	\
 119	(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
 120#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i)	\
 121	(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
 122#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
 123
 124#define IDPF_BASE_TX_DESC(txq, i)	\
 125	(&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
 126#define IDPF_BASE_TX_CTX_DESC(txq, i) \
 127	(&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
 128#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i)	\
 129	(&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
 130
 131#define IDPF_FLEX_TX_DESC(txq, i) \
 132	(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
 133#define IDPF_FLEX_TX_CTX_DESC(txq, i)	\
 134	(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
 135
 136#define IDPF_DESC_UNUSED(txq)     \
 137	((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
 138	(txq)->next_to_clean - (txq)->next_to_use - 1)
 139
 140#define IDPF_TX_BUF_RSV_UNUSED(txq)	((txq)->buf_stack.top)
 141#define IDPF_TX_BUF_RSV_LOW(txq)	(IDPF_TX_BUF_RSV_UNUSED(txq) < \
 142					 (txq)->desc_count >> 2)
 143
 144#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq)	((txcq)->desc_count >> 1)
 145/* Determine the absolute number of completions pending, i.e. the number of
 146 * completions that are expected to arrive on the TX completion queue.
 147 */
 148#define IDPF_TX_COMPLQ_PENDING(txq)	\
 149	(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
 150	0 : U64_MAX) + \
 151	(txq)->num_completions_pending - (txq)->complq->num_completions)
 152
 153#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH	16
 154#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG	-1
 155/* Adjust the generation for the completion tag and wrap if necessary */
 156#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
 157	((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
 158	0 : (txq)->compl_tag_cur_gen)
 159
 160#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
 161
 162#define IDPF_TX_FLAGS_TSO		BIT(0)
 163#define IDPF_TX_FLAGS_IPV4		BIT(1)
 164#define IDPF_TX_FLAGS_IPV6		BIT(2)
 165#define IDPF_TX_FLAGS_TUNNEL		BIT(3)
 166
 167union idpf_tx_flex_desc {
 168	struct idpf_flex_tx_desc q; /* queue based scheduling */
 169	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
 170};
 171
 172/**
 173 * struct idpf_tx_buf
 174 * @next_to_watch: Next descriptor to clean
 175 * @skb: Pointer to the skb
 176 * @dma: DMA address
 177 * @len: DMA length
 178 * @bytecount: Number of bytes
 179 * @gso_segs: Number of GSO segments
 180 * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
 181 *	       with completion tag returned in buffer completion event.
 182 *	       Because the completion tag is expected to be the same in all
 183 *	       data descriptors for a given packet, and a single packet can
 184 *	       span multiple buffers, we need this field to track all
 185 *	       buffers associated with this completion tag independently of
 186 *	       the buf_id. The tag consists of a N bit buf_id and M upper
 187 *	       order "generation bits". See compl_tag_bufid_m and
 188 *	       compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
 189 *	       to indicate the tag is not valid.
 190 * @ctx_entry: Singleq only. Used to indicate the corresponding entry
 191 *	       in the descriptor ring was used for a context descriptor and
 192 *	       this buffer entry should be skipped.
 193 */
 194struct idpf_tx_buf {
 195	void *next_to_watch;
 196	struct sk_buff *skb;
 197	DEFINE_DMA_UNMAP_ADDR(dma);
 198	DEFINE_DMA_UNMAP_LEN(len);
 199	unsigned int bytecount;
 200	unsigned short gso_segs;
 201
 202	union {
 203		int compl_tag;
 204
 205		bool ctx_entry;
 206	};
 207};
 208
 209struct idpf_tx_stash {
 210	struct hlist_node hlist;
 211	struct idpf_tx_buf buf;
 212};
 213
 214/**
 215 * struct idpf_buf_lifo - LIFO for managing OOO completions
 216 * @top: Used to know how many buffers are left
 217 * @size: Total size of LIFO
 218 * @bufs: Backing array
 219 */
 220struct idpf_buf_lifo {
 221	u16 top;
 222	u16 size;
 223	struct idpf_tx_stash **bufs;
 224};
 225
 226/**
 227 * struct idpf_tx_offload_params - Offload parameters for a given packet
 228 * @tx_flags: Feature flags enabled for this packet
 229 * @hdr_offsets: Offset parameter for single queue model
 230 * @cd_tunneling: Type of tunneling enabled for single queue model
 231 * @tso_len: Total length of payload to segment
 232 * @mss: Segment size
 233 * @tso_segs: Number of segments to be sent
 234 * @tso_hdr_len: Length of headers to be duplicated
 235 * @td_cmd: Command field to be inserted into descriptor
 236 */
 237struct idpf_tx_offload_params {
 238	u32 tx_flags;
 239
 240	u32 hdr_offsets;
 241	u32 cd_tunneling;
 242
 243	u32 tso_len;
 244	u16 mss;
 245	u16 tso_segs;
 246	u16 tso_hdr_len;
 247
 248	u16 td_cmd;
 249};
 250
 251/**
 252 * struct idpf_tx_splitq_params
 253 * @dtype: General descriptor info
 254 * @eop_cmd: Type of EOP
 255 * @compl_tag: Associated tag for completion
 256 * @td_tag: Descriptor tunneling tag
 257 * @offload: Offload parameters
 258 */
 259struct idpf_tx_splitq_params {
 260	enum idpf_tx_desc_dtype_value dtype;
 261	u16 eop_cmd;
 262	union {
 263		u16 compl_tag;
 264		u16 td_tag;
 265	};
 266
 267	struct idpf_tx_offload_params offload;
 268};
 269
 270enum idpf_tx_ctx_desc_eipt_offload {
 271	IDPF_TX_CTX_EXT_IP_NONE         = 0x0,
 272	IDPF_TX_CTX_EXT_IP_IPV6         = 0x1,
 273	IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
 274	IDPF_TX_CTX_EXT_IP_IPV4         = 0x3
 275};
 276
 277/* Checksum offload bits decoded from the receive descriptor. */
 278struct idpf_rx_csum_decoded {
 279	u32 l3l4p : 1;
 280	u32 ipe : 1;
 281	u32 eipe : 1;
 282	u32 eudpe : 1;
 283	u32 ipv6exadd : 1;
 284	u32 l4e : 1;
 285	u32 pprs : 1;
 286	u32 nat : 1;
 287	u32 raw_csum_inv : 1;
 288	u32 raw_csum : 16;
 289};
 290
 291struct idpf_rx_extracted {
 292	unsigned int size;
 293	u16 rx_ptype;
 294};
 295
 296#define IDPF_TX_COMPLQ_CLEAN_BUDGET	256
 297#define IDPF_TX_MIN_PKT_LEN		17
 298#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR	1
 299#define IDPF_TX_DESCS_PER_CACHE_LINE	(L1_CACHE_BYTES / \
 300					 sizeof(struct idpf_flex_tx_desc))
 301#define IDPF_TX_DESCS_FOR_CTX		1
 302/* TX descriptors needed, worst case */
 303#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
 304			     IDPF_TX_DESCS_PER_CACHE_LINE + \
 305			     IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
 306
 307/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 308 * In order to align with the read requests we will align the value to
 309 * the nearest 4K which represents our maximum read request size.
 310 */
 311#define IDPF_TX_MAX_READ_REQ_SIZE	SZ_4K
 312#define IDPF_TX_MAX_DESC_DATA		(SZ_16K - 1)
 313#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
 314	ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
 315
 316#define IDPF_RX_DMA_ATTR \
 317	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
 318#define IDPF_RX_DESC(rxq, i)	\
 319	(&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
 320
 321struct idpf_rx_buf {
 322	struct page *page;
 323	unsigned int page_offset;
 324	u16 truesize;
 325};
 326
 327#define IDPF_RX_MAX_PTYPE_PROTO_IDS    32
 328#define IDPF_RX_MAX_PTYPE_SZ	(sizeof(struct virtchnl2_ptype) + \
 329				 (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
 330#define IDPF_RX_PTYPE_HDR_SZ	sizeof(struct virtchnl2_get_ptype_info)
 331#define IDPF_RX_MAX_PTYPES_PER_BUF	\
 332	DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
 333			   IDPF_RX_MAX_PTYPE_SZ)
 334
 335#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
 336
 337#define IDPF_TUN_IP_GRE (\
 338	IDPF_PTYPE_TUNNEL_IP |\
 339	IDPF_PTYPE_TUNNEL_IP_GRENAT)
 340
 341#define IDPF_TUN_IP_GRE_MAC (\
 342	IDPF_TUN_IP_GRE |\
 343	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
 344
 345#define IDPF_RX_MAX_PTYPE	1024
 346#define IDPF_RX_MAX_BASE_PTYPE	256
 347#define IDPF_INVALID_PTYPE_ID	0xFFFF
 348
 349/* Packet type non-ip values */
 350enum idpf_rx_ptype_l2 {
 351	IDPF_RX_PTYPE_L2_RESERVED	= 0,
 352	IDPF_RX_PTYPE_L2_MAC_PAY2	= 1,
 353	IDPF_RX_PTYPE_L2_TIMESYNC_PAY2	= 2,
 354	IDPF_RX_PTYPE_L2_FIP_PAY2	= 3,
 355	IDPF_RX_PTYPE_L2_OUI_PAY2	= 4,
 356	IDPF_RX_PTYPE_L2_MACCNTRL_PAY2	= 5,
 357	IDPF_RX_PTYPE_L2_LLDP_PAY2	= 6,
 358	IDPF_RX_PTYPE_L2_ECP_PAY2	= 7,
 359	IDPF_RX_PTYPE_L2_EVB_PAY2	= 8,
 360	IDPF_RX_PTYPE_L2_QCN_PAY2	= 9,
 361	IDPF_RX_PTYPE_L2_EAPOL_PAY2	= 10,
 362	IDPF_RX_PTYPE_L2_ARP		= 11,
 363};
 364
 365enum idpf_rx_ptype_outer_ip {
 366	IDPF_RX_PTYPE_OUTER_L2	= 0,
 367	IDPF_RX_PTYPE_OUTER_IP	= 1,
 368};
 369
 370#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv)			\
 371	(((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) &&	\
 372	 ((ptype)->outer_ip_ver == (ipv)))
 373
 374enum idpf_rx_ptype_outer_ip_ver {
 375	IDPF_RX_PTYPE_OUTER_NONE	= 0,
 376	IDPF_RX_PTYPE_OUTER_IPV4	= 1,
 377	IDPF_RX_PTYPE_OUTER_IPV6	= 2,
 378};
 379
 380enum idpf_rx_ptype_outer_fragmented {
 381	IDPF_RX_PTYPE_NOT_FRAG	= 0,
 382	IDPF_RX_PTYPE_FRAG	= 1,
 383};
 384
 385enum idpf_rx_ptype_tunnel_type {
 386	IDPF_RX_PTYPE_TUNNEL_NONE		= 0,
 387	IDPF_RX_PTYPE_TUNNEL_IP_IP		= 1,
 388	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
 389	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
 390	IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
 391};
 392
 393enum idpf_rx_ptype_tunnel_end_prot {
 394	IDPF_RX_PTYPE_TUNNEL_END_NONE	= 0,
 395	IDPF_RX_PTYPE_TUNNEL_END_IPV4	= 1,
 396	IDPF_RX_PTYPE_TUNNEL_END_IPV6	= 2,
 397};
 398
 399enum idpf_rx_ptype_inner_prot {
 400	IDPF_RX_PTYPE_INNER_PROT_NONE		= 0,
 401	IDPF_RX_PTYPE_INNER_PROT_UDP		= 1,
 402	IDPF_RX_PTYPE_INNER_PROT_TCP		= 2,
 403	IDPF_RX_PTYPE_INNER_PROT_SCTP		= 3,
 404	IDPF_RX_PTYPE_INNER_PROT_ICMP		= 4,
 405	IDPF_RX_PTYPE_INNER_PROT_TIMESYNC	= 5,
 406};
 407
 408enum idpf_rx_ptype_payload_layer {
 409	IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
 410	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
 411	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
 412	IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
 413};
 414
 415enum idpf_tunnel_state {
 416	IDPF_PTYPE_TUNNEL_IP                    = BIT(0),
 417	IDPF_PTYPE_TUNNEL_IP_GRENAT             = BIT(1),
 418	IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC         = BIT(2),
 419};
 420
 421struct idpf_ptype_state {
 422	bool outer_ip;
 423	bool outer_frag;
 424	u8 tunnel_state;
 425};
 426
 427struct idpf_rx_ptype_decoded {
 428	u32 ptype:10;
 429	u32 known:1;
 430	u32 outer_ip:1;
 431	u32 outer_ip_ver:2;
 432	u32 outer_frag:1;
 433	u32 tunnel_type:3;
 434	u32 tunnel_end_prot:2;
 435	u32 tunnel_end_frag:1;
 436	u32 inner_prot:4;
 437	u32 payload_layer:3;
 438};
 439
 440/**
 441 * enum idpf_queue_flags_t
 442 * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
 443 *		      identify new descriptor writebacks on the ring. HW sets
 444 *		      the gen bit to 1 on the first writeback of any given
 445 *		      descriptor. After the ring wraps, HW sets the gen bit of
 446 *		      those descriptors to 0, and continues flipping
 447 *		      0->1 or 1->0 on each ring wrap. SW maintains its own
 448 *		      gen bit to know what value will indicate writebacks on
 449 *		      the next pass around the ring. E.g. it is initialized
 450 *		      to 1 and knows that reading a gen bit of 1 in any
 451 *		      descriptor on the initial pass of the ring indicates a
 452 *		      writeback. It also flips on every ring wrap.
 453 * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
 454 *			 and RFLGQ_GEN is the SW bit.
 455 * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
 456 * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
 457 * @__IDPF_Q_POLL_MODE: Enable poll mode
 
 
 458 * @__IDPF_Q_FLAGS_NBITS: Must be last
 459 */
 460enum idpf_queue_flags_t {
 461	__IDPF_Q_GEN_CHK,
 462	__IDPF_RFLQ_GEN_CHK,
 463	__IDPF_Q_FLOW_SCH_EN,
 464	__IDPF_Q_SW_MARKER,
 465	__IDPF_Q_POLL_MODE,
 
 
 466
 467	__IDPF_Q_FLAGS_NBITS,
 468};
 469
 
 
 
 
 
 
 
 
 
 
 470/**
 471 * struct idpf_vec_regs
 472 * @dyn_ctl_reg: Dynamic control interrupt register offset
 473 * @itrn_reg: Interrupt Throttling Rate register offset
 474 * @itrn_index_spacing: Register spacing between ITR registers of the same
 475 *			vector
 476 */
 477struct idpf_vec_regs {
 478	u32 dyn_ctl_reg;
 479	u32 itrn_reg;
 480	u32 itrn_index_spacing;
 481};
 482
 483/**
 484 * struct idpf_intr_reg
 485 * @dyn_ctl: Dynamic control interrupt register
 486 * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
 
 487 * @dyn_ctl_itridx_s: Register bit offset for ITR index
 488 * @dyn_ctl_itridx_m: Mask for ITR index
 489 * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
 
 
 
 490 * @rx_itr: RX ITR register
 491 * @tx_itr: TX ITR register
 492 * @icr_ena: Interrupt cause register offset
 493 * @icr_ena_ctlq_m: Mask for ICR
 494 */
 495struct idpf_intr_reg {
 496	void __iomem *dyn_ctl;
 497	u32 dyn_ctl_intena_m;
 498	u32 dyn_ctl_itridx_s;
 499	u32 dyn_ctl_itridx_m;
 500	u32 dyn_ctl_intrvl_s;
 501	void __iomem *rx_itr;
 502	void __iomem *tx_itr;
 503	void __iomem *icr_ena;
 504	u32 icr_ena_ctlq_m;
 505};
 506
 507/**
 508 * struct idpf_q_vector
 509 * @vport: Vport back pointer
 510 * @affinity_mask: CPU affinity mask
 511 * @napi: napi handler
 512 * @v_idx: Vector index
 513 * @intr_reg: See struct idpf_intr_reg
 514 * @num_txq: Number of TX queues
 515 * @tx: Array of TX queues to service
 516 * @tx_dim: Data for TX net_dim algorithm
 517 * @tx_itr_value: TX interrupt throttling rate
 518 * @tx_intr_mode: Dynamic ITR or not
 519 * @tx_itr_idx: TX ITR index
 520 * @num_rxq: Number of RX queues
 521 * @rx: Array of RX queues to service
 522 * @rx_dim: Data for RX net_dim algorithm
 523 * @rx_itr_value: RX interrupt throttling rate
 524 * @rx_intr_mode: Dynamic ITR or not
 525 * @rx_itr_idx: RX ITR index
 526 * @num_bufq: Number of buffer queues
 527 * @bufq: Array of buffer queues to service
 528 * @total_events: Number of interrupts processed
 529 * @name: Queue vector name
 530 */
 531struct idpf_q_vector {
 532	struct idpf_vport *vport;
 533	cpumask_t affinity_mask;
 534	struct napi_struct napi;
 535	u16 v_idx;
 536	struct idpf_intr_reg intr_reg;
 537
 538	u16 num_txq;
 539	struct idpf_queue **tx;
 540	struct dim tx_dim;
 541	u16 tx_itr_value;
 542	bool tx_intr_mode;
 543	u32 tx_itr_idx;
 544
 545	u16 num_rxq;
 546	struct idpf_queue **rx;
 547	struct dim rx_dim;
 548	u16 rx_itr_value;
 549	bool rx_intr_mode;
 550	u32 rx_itr_idx;
 551
 552	u16 num_bufq;
 553	struct idpf_queue **bufq;
 554
 555	u16 total_events;
 556	char *name;
 557};
 558
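/* A sketch of how the per-vector queue arrays above are typically
 * serviced: the NAPI budget is divided across the vector's RX queues.
 * The helper is hypothetical and only illustrates the bookkeeping.
 */
static inline int idpf_example_rxq_budget(const struct idpf_q_vector *q_vec,
					  int budget)
{
	/* keep at least one unit of budget per RX queue */
	return q_vec->num_rxq ? max(budget / q_vec->num_rxq, 1) : 0;
}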
 559struct idpf_rx_queue_stats {
 560	u64_stats_t packets;
 561	u64_stats_t bytes;
 562	u64_stats_t rsc_pkts;
 563	u64_stats_t hw_csum_err;
 564	u64_stats_t hsplit_pkts;
 565	u64_stats_t hsplit_buf_ovf;
 566	u64_stats_t bad_descs;
 567};
 568
 569struct idpf_tx_queue_stats {
 570	u64_stats_t packets;
 571	u64_stats_t bytes;
 572	u64_stats_t lso_pkts;
 573	u64_stats_t linearize;
 574	u64_stats_t q_busy;
 575	u64_stats_t skb_drops;
 576	u64_stats_t dma_map_errs;
 577};
 578
 579struct idpf_cleaned_stats {
 580	u32 packets;
 581	u32 bytes;
 582};
 583
 584union idpf_queue_stats {
 585	struct idpf_rx_queue_stats rx;
 586	struct idpf_tx_queue_stats tx;
 587};
 588
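/* Sketch of how the u64_stats_t counters above are intended to be read;
 * @sync is the queue's stats_sync member.  Illustrative only, the helper
 * name is hypothetical.
 *
 * The writer side (clean/poll path) would look like:
 *	u64_stats_update_begin(&q->stats_sync);
 *	u64_stats_inc(&q->q_stats.rx.packets);
 *	u64_stats_update_end(&q->stats_sync);
 */
static inline u64
idpf_example_read_rx_packets(const union idpf_queue_stats *stats,
			     struct u64_stats_sync *sync)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(sync);
		packets = u64_stats_read(&stats->rx.packets);
	} while (u64_stats_fetch_retry(sync, start));

	return packets;
}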
 589#define IDPF_ITR_DYNAMIC	1
 590#define IDPF_ITR_MAX		0x1FE0
 591#define IDPF_ITR_20K		0x0032
 592#define IDPF_ITR_GRAN_S		1	/* Assume ITR granularity is 2us */
 593#define IDPF_ITR_MASK		0x1FFE  /* ITR register value alignment mask */
 594#define ITR_REG_ALIGN(setting)	((setting) & IDPF_ITR_MASK)
 595#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
 596#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
 597#define IDPF_ITR_RX_DEF		IDPF_ITR_20K
 598/* Index used for 'No ITR' update in DYN_CTL register */
 599#define IDPF_NO_ITR_UPDATE_IDX	3
 600#define IDPF_ITR_IDX_SPACING(spacing, dflt)	(spacing ? spacing : dflt)
 601#define IDPF_DIM_DEFAULT_PROFILE_IX		1
 602
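/* Worked example (one interpretation, not normative): IDPF_ITR_20K is
 * 0x32 == 50, i.e. a 50 usec interval and therefore ~20K interrupts/sec.
 * Assuming the hardware counts ITR in 2 usec units, as the
 * IDPF_ITR_GRAN_S comment suggests, the value written to an ITR register
 * would be the aligned setting shifted down by the granularity:
 * ITR_REG_ALIGN(50) >> 1 == 25 hardware units.
 */
static inline u32 idpf_example_itr_reg_val(u16 itr_setting)
{
	return ITR_REG_ALIGN(itr_setting) >> IDPF_ITR_GRAN_S;
}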
 603/**
 604 * struct idpf_queue
 605 * @dev: Device back pointer for DMA mapping
 606 * @vport: Back pointer to associated vport
 607 * @txq_grp: See struct idpf_txq_group
 608 * @rxq_grp: See struct idpf_rxq_group
 609 * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
 610 *	 buffer queue uses this index to determine which group of refill queues
 611 *	 to clean.
 612 *	 For TX queue, it is used as index to map between TX queue group and
 613 *	 hot path TX pointers stored in vport. Used in both singleq/splitq.
 614 *	 For RX queue, it is used as an index into the total RX queues
 615 *	 across groups and is used for skb reporting.
 616 * @tail: Tail offset. Used for both queue models, single and split. In splitq
 617 *	  model, relevant only for TX queue and RX queue.
 618 * @tx_buf: See struct idpf_tx_buf
 619 * @rx_buf: Struct with RX buffer related members
 620 * @rx_buf.buf: See struct idpf_rx_buf
 621 * @rx_buf.hdr_buf_pa: DMA handle
 622 * @rx_buf.hdr_buf_va: Virtual address
 623 * @pp: Page pool pointer
 624 * @skb: Pointer to the skb
 625 * @q_type: Queue type (TX, RX, TX completion, RX buffer)
 626 * @q_id: Queue id
 627 * @desc_count: Number of descriptors
 628 * @next_to_use: Next descriptor to use. Relevant in both split & single txq
 629 *		 and bufq.
 630 * @next_to_clean: Next descriptor to clean. In split queue model, only
 631 *		   relevant to TX completion queue and RX queue.
 632 * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
 633 *		   only relevant to RX queue.
 634 * @flags: See enum idpf_queue_flags_t
 635 * @q_stats: See union idpf_queue_stats
 636 * @stats_sync: See struct u64_stats_sync
 637 * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
 638 *		   the TX completion queue, it can be for any TXQ associated
 639 *		   with that completion queue. This means we can clean up to
 640 *		   N TXQs during a single call to clean the completion queue.
 641 *		   cleaned_bytes|pkts tracks the clean stats per TXQ during
 642 *		   that single call to clean the completion queue. By doing so,
 643 *		   we can update BQL with aggregate cleaned stats for each TXQ
 644 *		   only once at the end of the cleaning routine.
 645 * @cleaned_pkts: Number of packets cleaned for the above said case
 646 * @rx_hsplit_en: RX headsplit enable
 647 * @rx_hbuf_size: Header buffer size
 648 * @rx_buf_size: Buffer size
 649 * @rx_max_pkt_size: RX max packet size
 650 * @rx_buf_stride: RX buffer stride
 651 * @rx_buffer_low_watermark: RX buffer low watermark
 652 * @rxdids: Supported RX descriptor ids
 653 * @q_vector: Backreference to associated vector
 654 * @size: Length of descriptor ring in bytes
 655 * @dma: Physical address of ring
 656 * @desc_ring: Descriptor ring memory
 657 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
 658 * @tx_min_pkt_len: Min supported packet length
 659 * @num_completions: Only relevant for TX completion queue. It tracks the
 660 *		     number of completions received to compare against the
 661 *		     number of completions pending, as accumulated by the
 662 *		     TX queues.
 663 * @buf_stack: Stack of empty buffers to store buffer info for out of order
 664 *	       buffer completions. See struct idpf_buf_lifo.
 665 * @compl_tag_bufid_m: Completion tag buffer id mask
 666 * @compl_tag_gen_s: Completion tag generation bit
 667 *	The format of the completion tag will change based on the TXQ
 668 *	descriptor ring size so that we can maintain roughly the same level
 669 *	of "uniqueness" across all descriptor sizes. For example, if the
 670 *	TXQ descriptor ring size is 64 (the minimum size supported), the
 671 *	completion tag will be formatted as below:
 672 *	15                 6 5         0
 673 *	--------------------------------
 674 *	|    GEN=0-1023     |IDX = 0-63|
 675 *	--------------------------------
 676 *
 677 *	This gives us 64*1024 = 65536 possible unique values. Similarly, if
 678 *	the TXQ descriptor ring size is 8160 (the maximum size supported),
 679 *	the completion tag will be formatted as below:
 680 *	15 13 12                       0
 681 *	--------------------------------
 682 *	|GEN |       IDX = 0-8159      |
 683 *	--------------------------------
 684 *
 685 *	This gives us 8*8160 = 65280 possible unique values (see the sketch after this struct).
 686 * @compl_tag_cur_gen: Used to keep track of current completion tag generation
 687 * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
 688 * @sched_buf_hash: Hash table to store buffers
 689 */
 690struct idpf_queue {
 691	struct device *dev;
 692	struct idpf_vport *vport;
 693	union {
 694		struct idpf_txq_group *txq_grp;
 695		struct idpf_rxq_group *rxq_grp;
 696	};
 697	u16 idx;
 698	void __iomem *tail;
 699	union {
 700		struct idpf_tx_buf *tx_buf;
 701		struct {
 702			struct idpf_rx_buf *buf;
 703			dma_addr_t hdr_buf_pa;
 704			void *hdr_buf_va;
 705		} rx_buf;
 706	};
 707	struct page_pool *pp;
 708	struct sk_buff *skb;
 709	u16 q_type;
 710	u32 q_id;
 711	u16 desc_count;
 712
 713	u16 next_to_use;
 714	u16 next_to_clean;
 715	u16 next_to_alloc;
 716	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 717
 718	union idpf_queue_stats q_stats;
 719	struct u64_stats_sync stats_sync;
 720
 721	u32 cleaned_bytes;
 722	u16 cleaned_pkts;
 723
 724	bool rx_hsplit_en;
 725	u16 rx_hbuf_size;
 726	u16 rx_buf_size;
 727	u16 rx_max_pkt_size;
 728	u16 rx_buf_stride;
 729	u8 rx_buffer_low_watermark;
 730	u64 rxdids;
 731	struct idpf_q_vector *q_vector;
 732	unsigned int size;
 733	dma_addr_t dma;
 734	void *desc_ring;
 735
 736	u16 tx_max_bufs;
 737	u8 tx_min_pkt_len;
 738
 739	u32 num_completions;
 740
 741	struct idpf_buf_lifo buf_stack;
 742
 743	u16 compl_tag_bufid_m;
 744	u16 compl_tag_gen_s;
 745
 746	u16 compl_tag_cur_gen;
 747	u16 compl_tag_gen_max;
 748
 749	DECLARE_HASHTABLE(sched_buf_hash, 12);
 750} ____cacheline_internodealigned_in_smp;
 751
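/* Illustrative sketch of the completion tag layout documented in the
 * struct idpf_queue kernel-doc above: generation in the upper bits,
 * buffer index in the lower bits.  The helpers are hypothetical; the
 * real splitq TX path builds and parses the tag in idpf_txrx.c.
 */
static inline u16 idpf_example_build_compl_tag(const struct idpf_queue *txq,
					       u16 buf_idx)
{
	return (txq->compl_tag_cur_gen << txq->compl_tag_gen_s) |
	       (buf_idx & txq->compl_tag_bufid_m);
}

static inline u16 idpf_example_compl_tag_to_bufid(const struct idpf_queue *txq,
						  u16 compl_tag)
{
	return compl_tag & txq->compl_tag_bufid_m;
}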
 752/**
 753 * struct idpf_sw_queue
 754 * @next_to_clean: Next descriptor to clean
 755 * @next_to_alloc: Buffer to allocate at
 756 * @flags: See enum idpf_queue_flags_t
 757 * @ring: Pointer to the ring
 758 * @desc_count: Descriptor count
 759 * @dev: Device back pointer for DMA mapping
 760 *
 761 * Software queues are used in splitq mode to manage buffers between rxq
 762 * producer and the bufq consumer.  These are required in order to maintain a
 763 * lockless buffer management system and are strictly software only constructs.
 764 */
 765struct idpf_sw_queue {
 766	u16 next_to_clean;
 767	u16 next_to_alloc;
 768	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 769	u16 *ring;
 770	u16 desc_count;
 771	struct device *dev;
 772} ____cacheline_internodealigned_in_smp;
 773
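/* Very rough sketch of the producer side of the lockless scheme
 * described above: the rxq returns a used buffer id by writing it at
 * next_to_alloc and flipping its SW generation flag on wrap.  The real
 * encoding of buffer id and generation in the ring entries is defined
 * elsewhere in the driver; this only illustrates the single-producer
 * bookkeeping, and the helper name is hypothetical.
 */
static inline void idpf_example_refillq_post(struct idpf_sw_queue *refillq,
					     u16 buf_id)
{
	u16 nta = refillq->next_to_alloc;

	refillq->ring[nta] = buf_id;

	if (unlikely(++nta == refillq->desc_count)) {
		nta = 0;
		change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
	}

	refillq->next_to_alloc = nta;
}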
 774/**
 775 * struct idpf_rxq_set
 776 * @rxq: RX queue
 777 * @refillq0: Pointer to refill queue 0
 778 * @refillq1: Pointer to refill queue 1
 779 *
 780 * Splitq only.  idpf_rxq_set associates an rxq with an array of refillqs.
 781 * Each rxq needs a refillq to return used buffers back to the respective bufq.
 782 * Bufqs then clean these refillqs for buffers to give to hardware.
 783 */
 784struct idpf_rxq_set {
 785	struct idpf_queue rxq;
 786	struct idpf_sw_queue *refillq0;
 787	struct idpf_sw_queue *refillq1;
 788};
 789
 790/**
 791 * struct idpf_bufq_set
 792 * @bufq: Buffer queue
 793 * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
 794 *		  in idpf_rxq_group.
 795 * @refillqs: Pointer to refill queues array.
 796 *
 797 * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
 798 * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
 799 * Used buffers received by rxqs will be put on refillqs which bufqs will
 800 * clean to return new buffers back to hardware.
 801 *
 802 * Buffers needed by some number of rxqs associated in this rxq_group are
 803 * managed by at most two bufqs (depending on performance configuration).
 804 */
 805struct idpf_bufq_set {
 806	struct idpf_queue bufq;
 807	int num_refillqs;
 808	struct idpf_sw_queue *refillqs;
 809};
 810
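/* Buffer flow in splitq, as described above:
 *	HW fills an rxq buffer -> the rxq posts the used buffer id to its
 *	refillq -> the bufq cleans the refillq and reposts the buffer to HW.
 *
 * Since num_refillqs equals the group's num_rxq_sets, the refillq that
 * rxq number @rxq_idx drains into can be looked up directly.  The helper
 * below is purely illustrative.
 */
static inline struct idpf_sw_queue *
idpf_example_bufq_refillq(struct idpf_bufq_set *bufq_set, int rxq_idx)
{
	return &bufq_set->refillqs[rxq_idx];
}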
 811/**
 812 * struct idpf_rxq_group
 813 * @vport: Vport back pointer
 814 * @singleq: Struct with single queue related members
 815 * @singleq.num_rxq: Number of RX queues associated
 816 * @singleq.rxqs: Array of RX queue pointers
 817 * @splitq: Struct with split queue related members
 818 * @splitq.num_rxq_sets: Number of RX queue sets
 819 * @splitq.rxq_sets: Array of RX queue sets
 820 * @splitq.bufq_sets: Buffer queue set pointer
 821 *
 822 * In singleq mode, an rxq_group is simply an array of rxqs.  In splitq, a
 823 * rxq_group contains all the rxqs, bufqs and refillqs needed to
 824 * manage buffers in splitq mode.
 825 */
 826struct idpf_rxq_group {
 827	struct idpf_vport *vport;
 828
 829	union {
 830		struct {
 831			u16 num_rxq;
 832			struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
 833		} singleq;
 834		struct {
 835			u16 num_rxq_sets;
 836			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
 837			struct idpf_bufq_set *bufq_sets;
 838		} splitq;
 839	};
 840};
 841
 842/**
 843 * struct idpf_txq_group
 844 * @vport: Vport back pointer
 845 * @num_txq: Number of TX queues associated
 846 * @txqs: Array of TX queue pointers
 847 * @complq: Associated completion queue pointer, split queue only
 848 * @num_completions_pending: Total number of completions pending for the
 849 *			     completion queue, accumulated for all TX queues
 850 *			     associated with that completion queue.
 851 *
 852 * Between singleq and splitq, a txq_group is largely the same except for the
 853 * complq. In splitq a single complq is responsible for handling completions
 854 * for some number of txqs associated in this txq_group.
 855 */
 856struct idpf_txq_group {
 857	struct idpf_vport *vport;
 858
 859	u16 num_txq;
 860	struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
 861
 862	struct idpf_queue *complq;
 863
 864	u32 num_completions_pending;
 865};
 866
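/* Sketch of the bookkeeping described above (illustrative only): the
 * completion queue has caught up with its TX queues once the number of
 * completions it has received matches the group's pending count.
 */
static inline bool idpf_example_complq_caught_up(const struct idpf_txq_group *grp)
{
	return grp->complq->num_completions == grp->num_completions_pending;
}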
 867/**
 868 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 869 * @size: transmit request size in bytes
 870 *
 871 * In the case where a large frag (>= 16K) needs to be split across multiple
 872 * descriptors, we need to assume that we can have no more than 12K of data
 873 * per descriptor due to hardware alignment restrictions (4K alignment).
 874 */
 875static inline u32 idpf_size_to_txd_count(unsigned int size)
 876{
 877	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
 878}
 879
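/* Worked example: assuming IDPF_TX_MAX_DESC_DATA_ALIGNED is the 12K
 * limit mentioned above, a 32KB frag needs
 * DIV_ROUND_UP(32768, 12288) = 3 data descriptors, and a 64KB frag
 * needs DIV_ROUND_UP(65536, 12288) = 6.
 */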
 880/**
 881 * idpf_tx_singleq_build_ctob - populate command tag offset and size
 882 * @td_cmd: Command to be filled in desc
 883 * @td_offset: Offset to be filled in desc
 884 * @size: Size of the buffer
 885 * @td_tag: td tag to be filled
 886 *
 887 * Returns the 64 bit value populated with the input parameters
 888 */
 889static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
 890						unsigned int size, u64 td_tag)
 891{
 892	return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
 893			   (td_cmd << IDPF_TXD_QW1_CMD_S) |
 894			   (td_offset << IDPF_TXD_QW1_OFFSET_S) |
 895			   ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
 896			   (td_tag << IDPF_TXD_QW1_L2TAG1_S));
 897}
 898
 899void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
 900			      struct idpf_tx_splitq_params *params,
 901			      u16 td_cmd, u16 size);
 902void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
 903				    struct idpf_tx_splitq_params *params,
 904				    u16 td_cmd, u16 size);
 905/**
 906 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 907 * @desc: descriptor to populate
 908 * @params: pointer to tx params struct
 909 * @td_cmd: command to be filled in desc
 910 * @size: size of buffer
 911 */
 912static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
 913					     struct idpf_tx_splitq_params *params,
 914					     u16 td_cmd, u16 size)
 915{
 916	if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
 917		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
 918	else
 919		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
 920}
 921
 922/**
 923 * idpf_alloc_page - Allocate a new RX buffer from the page pool
 924 * @pool: page_pool to allocate from
 925 * @buf: metadata struct to populate with page info
 926 * @buf_size: 2K or 4K
 927 *
 928 * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
 929 */
 930static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
 931					 struct idpf_rx_buf *buf,
 932					 unsigned int buf_size)
 933{
 934	if (buf_size == IDPF_RX_BUF_2048)
 935		buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
 936						     buf_size);
 937	else
 938		buf->page = page_pool_dev_alloc_pages(pool);
 939
 940	if (!buf->page)
 941		return DMA_MAPPING_ERROR;
 942
 943	buf->truesize = buf_size;
 944
 945	return page_pool_get_dma_addr(buf->page) + buf->page_offset +
 946	       pool->p.offset;
 947}
 948
 949/**
 950 * idpf_rx_put_page - Return RX buffer page to pool
 951 * @rx_buf: RX buffer metadata struct
 952 */
 953static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
 954{
 955	page_pool_put_page(rx_buf->page->pp, rx_buf->page,
 956			   rx_buf->truesize, true);
 957	rx_buf->page = NULL;
 958}
 959
 960/**
 961 * idpf_rx_sync_for_cpu - Synchronize DMA buffer
 962 * @rx_buf: RX buffer metadata struct
 963 * @len: frame length from descriptor
 964 */
 965static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
 966{
 967	struct page *page = rx_buf->page;
 968	struct page_pool *pp = page->pp;
 969
 970	dma_sync_single_range_for_cpu(pp->p.dev,
 971				      page_pool_get_dma_addr(page),
 972				      rx_buf->page_offset + pp->p.offset, len,
 973				      page_pool_get_dma_dir(pp));
 974}
 975
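/* Sketch of the RX buffer life cycle using the three helpers above.
 * Error handling, descriptor programming and skb construction are
 * omitted; the function is illustrative, not part of the driver.
 */
static inline void idpf_example_rx_buf_cycle(struct page_pool *pool,
					     struct idpf_rx_buf *buf,
					     unsigned int buf_size,
					     u32 pkt_len)
{
	dma_addr_t dma;

	/* 1) allocate a buffer and hand its DMA address to HW */
	dma = idpf_alloc_page(pool, buf, buf_size);
	if (dma == DMA_MAPPING_ERROR)
		return;

	/* 2) after HW writes a packet, make it visible to the CPU */
	idpf_rx_sync_for_cpu(buf, pkt_len);

	/* 3) if the buffer is not handed off to an skb, return it */
	idpf_rx_put_page(buf);
}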
 976int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
 977void idpf_vport_init_num_qs(struct idpf_vport *vport,
 978			    struct virtchnl2_create_vport *vport_msg);
 979void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
 980int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
 981			     struct virtchnl2_create_vport *vport_msg,
 982			     struct idpf_vport_max_q *max_q);
 983void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
 984int idpf_vport_queues_alloc(struct idpf_vport *vport);
 985void idpf_vport_queues_rel(struct idpf_vport *vport);
 986void idpf_vport_intr_rel(struct idpf_vport *vport);
 987int idpf_vport_intr_alloc(struct idpf_vport *vport);
 988void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
 989void idpf_vport_intr_deinit(struct idpf_vport *vport);
 990int idpf_vport_intr_init(struct idpf_vport *vport);
 991void idpf_vport_intr_ena(struct idpf_vport *vport);
 992enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
 993int idpf_config_rss(struct idpf_vport *vport);
 994int idpf_init_rss(struct idpf_vport *vport);
 995void idpf_deinit_rss(struct idpf_vport *vport);
 996int idpf_rx_bufs_init_all(struct idpf_vport *vport);
 997void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
 998		      unsigned int size);
 999struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
1000				      struct idpf_rx_buf *rx_buf,
1001				      unsigned int size);
1002bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
1003void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
1004void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
1005			   bool xmit_more);
1007netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
1008void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
1009			   struct idpf_tx_buf *first, u16 ring_idx);
1010unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
1011					 struct sk_buff *skb);
1012bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
1013			unsigned int count);
1014int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
1015void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
1016netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
1017				 struct net_device *netdev);
1018netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
1019				  struct net_device *netdev);
1020bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
1021				      u16 cleaned_count);
1022int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
1023
1024#endif /* !_IDPF_TXRX_H_ */