Linux v6.13.7: drivers/net/ethernet/google/gve/gve.h
   1/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
   2 * Google virtual Ethernet (gve) driver
   3 *
   4 * Copyright (C) 2015-2024 Google LLC
   5 */
   6
   7#ifndef _GVE_H_
   8#define _GVE_H_
   9
  10#include <linux/dma-mapping.h>
  11#include <linux/dmapool.h>
  12#include <linux/ethtool_netlink.h>
  13#include <linux/netdevice.h>
  14#include <linux/pci.h>
  15#include <linux/u64_stats_sync.h>
  16#include <net/page_pool/helpers.h>
  17#include <net/xdp.h>
  18
  19#include "gve_desc.h"
  20#include "gve_desc_dqo.h"
  21
  22#ifndef PCI_VENDOR_ID_GOOGLE
  23#define PCI_VENDOR_ID_GOOGLE	0x1ae0
  24#endif
  25
  26#define PCI_DEV_ID_GVNIC	0x0042
  27
  28#define GVE_REGISTER_BAR	0
  29#define GVE_DOORBELL_BAR	2
  30
  31/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
  32#define GVE_TX_MAX_IOVEC	4
  33/* 1 for management, 1 for rx, 1 for tx */
  34#define GVE_MIN_MSIX 3
  35
  36/* Numbers of gve tx/rx stats in stats report. */
  37#define GVE_TX_STATS_REPORT_NUM	6
  38#define GVE_RX_STATS_REPORT_NUM	2
  39
  40/* Interval to schedule a stats report update, 20000ms. */
  41#define GVE_STATS_REPORT_TIMER_PERIOD	20000
  42
  43/* Numbers of NIC tx/rx stats in stats report. */
  44#define NIC_TX_STATS_REPORT_NUM	0
  45#define NIC_RX_STATS_REPORT_NUM	4
  46
  47#define GVE_ADMINQ_BUFFER_SIZE 4096
  48
  49#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
  50
  51/* PTYPEs are always 10 bits. */
  52#define GVE_NUM_PTYPES	1024
  53
  54/* Default minimum ring size */
  55#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
  56#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
  57
  58#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
  59
  60#define GVE_MAX_RX_BUFFER_SIZE 4096
  61
  62#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
  63
  64#define GVE_PAGE_POOL_SIZE_MULTIPLIER 4
  65
  66#define GVE_FLOW_RULES_CACHE_SIZE \
  67	(GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
  68#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
  69	(GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
  70
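/* Editorial note: the second macro above uses the "sizeof a member through a
 * NULL pointer" idiom to size the cache by the width of the location field
 * alone.  As a hedged worked example, if location is a 32-bit field in
 * gve_adminq.h, the id cache holds 4096 / 4 = 1024 rule ids per admin queue
 * buffer, while the rule cache holds
 * 4096 / sizeof(struct gve_adminq_queried_flow_rule) entries.
 */
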
  71#define GVE_XDP_ACTIONS 5
  72
  73#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
  74
  75#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
  76
  77#define DQO_QPL_DEFAULT_TX_PAGES 512
  78
  79/* Maximum TSO size supported on DQO */
  80#define GVE_DQO_TX_MAX	0x3FFFF
  81
  82#define GVE_TX_BUF_SHIFT_DQO 11
  83
  84/* 2K buffers for DQO-QPL */
  85#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
  86#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
  87#define GVE_MAX_TX_BUFS_PER_PKT (DIV_ROUND_UP(GVE_DQO_TX_MAX, GVE_TX_BUF_SIZE_DQO))
  88
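/* Editorial note: a worked example of the macros above, assuming a 4 KiB
 * PAGE_SIZE (the shift and the TSO limit come from this file):
 *
 *   GVE_TX_BUF_SIZE_DQO      = 1 << 11                     = 2048 bytes
 *   GVE_TX_BUFS_PER_PAGE_DQO = 4096 >> 11                  = 2 buffers per QPL page
 *   GVE_MAX_TX_BUFS_PER_PKT  = DIV_ROUND_UP(0x3FFFF, 2048) = 128 buffers
 */
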
   89/* If the number of free/recyclable buffers is less than this threshold, the
   90 * driver allocs and uses a non-qpl page on the receive path of DQO QPL to
   91 * free up buffers.
   92 * Value is set big enough to post at least three 64K LRO packets via 2K buffers to the NIC.
   93 */
  94#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
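/* Editorial note: the arithmetic behind the value above, assuming the 2K
 * buffer size used elsewhere in this file: one 64K LRO packet spans
 * 65536 / 2048 = 32 buffers, so three of them need 3 * 32 = 96 buffers.
 */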
  95
  96/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
  97struct gve_rx_desc_queue {
  98	struct gve_rx_desc *desc_ring; /* the descriptor ring */
  99	dma_addr_t bus; /* the bus for the desc_ring */
  100	u8 seqno; /* the next expected seqno for this desc */
 101};
 102
 103/* The page info for a single slot in the RX data queue */
 104struct gve_rx_slot_page_info {
 105	struct page *page;
 106	void *page_address;
 107	u32 page_offset; /* offset to write to in page */
 108	unsigned int buf_size;
 109	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
 110	u16 pad; /* adjustment for rx padding */
 111	u8 can_flip; /* tracks if the networking stack is using the page */
 112};
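
/* Editorial note: a hedged sketch of how pagecnt_bias and can_flip fit
 * together (hypothetical helper, not the driver's code): a buffer can only
 * be flipped/reused once the networking stack has dropped all of its page
 * references, i.e. the live refcount is back down to the bias.
 */
#if 0	/* illustrative only */
static inline bool example_can_recycle(const struct gve_rx_slot_page_info *pi)
{
	return page_count(pi->page) == pi->pagecnt_bias;
}
#endif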
 113
 114/* A list of pages registered with the device during setup and used by a queue
 115 * as buffers
 116 */
 117struct gve_queue_page_list {
 118	u32 id; /* unique id */
 119	u32 num_entries;
 120	struct page **pages; /* list of num_entries pages */
 121	dma_addr_t *page_buses; /* the dma addrs of the pages */
 122};
 123
 124/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
 125struct gve_rx_data_queue {
 126	union gve_rx_data_slot *data_ring; /* read by NIC */
 127	dma_addr_t data_bus; /* dma mapping of the slots */
 128	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
 129	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
 130	u8 raw_addressing; /* use raw_addressing? */
 131};
 132
 133struct gve_priv;
 134
 135/* RX buffer queue for posting buffers to HW.
 136 * Each RX (completion) queue has a corresponding buffer queue.
 137 */
 138struct gve_rx_buf_queue_dqo {
 139	struct gve_rx_desc_dqo *desc_ring;
 140	dma_addr_t bus;
 141	u32 head; /* Pointer to start cleaning buffers at. */
 142	u32 tail; /* Last posted buffer index + 1 */
 143	u32 mask; /* Mask for indices to the size of the ring */
 144};
 145
 146/* RX completion queue to receive packets from HW. */
 147struct gve_rx_compl_queue_dqo {
 148	struct gve_rx_compl_desc_dqo *desc_ring;
 149	dma_addr_t bus;
 150
 151	/* Number of slots which did not have a buffer posted yet. We should not
 152	 * post more buffers than the queue size to avoid HW overrunning the
 153	 * queue.
 154	 */
 155	int num_free_slots;
 156
 157	/* HW uses a "generation bit" to notify SW of new descriptors. When a
 158	 * descriptor's generation bit is different from the current generation,
 159	 * that descriptor is ready to be consumed by SW.
 160	 */
 161	u8 cur_gen_bit;
 162
 163	/* Pointer into desc_ring where the next completion descriptor will be
 164	 * received.
 165	 */
 166	u32 head;
 167	u32 mask; /* Mask for indices to the size of the ring */
 168};
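
/* Editorial note: a hedged sketch of how a generation-bit completion ring is
 * typically drained.  The driver's real loop lives in the DQO RX code and the
 * descriptor field name below is an assumption; this only illustrates the
 * comment above.
 */
#if 0	/* illustrative only */
static inline bool example_compl_ready(const struct gve_rx_compl_queue_dqo *q)
{
	/* Ready once HW has written a generation bit that differs from the
	 * one SW currently expects.
	 */
	return q->desc_ring[q->head].generation != q->cur_gen_bit;
}

static inline void example_compl_advance(struct gve_rx_compl_queue_dqo *q)
{
	q->head = (q->head + 1) & q->mask;
	if (q->head == 0)	/* wrapped: expect the other generation now */
		q->cur_gen_bit ^= 1;
}
#endif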
 169
 170struct gve_header_buf {
 171	u8 *data;
 172	dma_addr_t addr;
 173};
 174
 175/* Stores state for tracking buffers posted to HW */
 176struct gve_rx_buf_state_dqo {
 177	/* The page posted to HW. */
 178	struct gve_rx_slot_page_info page_info;
 179
 180	/* The DMA address corresponding to `page_info`. */
 181	dma_addr_t addr;
 182
 183	/* Last offset into the page when it only had a single reference, at
 184	 * which point every other offset is free to be reused.
 185	 */
 186	u32 last_single_ref_offset;
 187
 188	/* Linked list index to next element in the list, or -1 if none */
 189	s16 next;
 190};
 191
 192/* `head` and `tail` are indices into an array, or -1 if empty. */
 193struct gve_index_list {
 194	s16 head;
 195	s16 tail;
 196};
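
/* Editorial note: these lists chain elements through an embedded s16 "next"
 * field instead of pointers, with -1 as the NULL sentinel.  A hedged sketch
 * of a pop-from-head, using the RX buf_states array purely as an example
 * backing store:
 */
#if 0	/* illustrative only */
static inline s16 example_index_list_pop(struct gve_index_list *list,
					 struct gve_rx_buf_state_dqo *states)
{
	s16 idx = list->head;

	if (idx == -1)		/* list is empty */
		return -1;
	list->head = states[idx].next;
	if (list->head == -1)	/* popped the last element */
		list->tail = -1;
	states[idx].next = -1;
	return idx;
}
#endif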
 197
 198/* A single received packet split across multiple buffers may be
 199 * reconstructed using the information in this structure.
 200 */
 201struct gve_rx_ctx {
 202	/* head and tail of skb chain for the current packet or NULL if none */
 203	struct sk_buff *skb_head;
 204	struct sk_buff *skb_tail;
 205	u32 total_size;
 206	u8 frag_cnt;
 207	bool drop_pkt;
 208};
 209
 210struct gve_rx_cnts {
 211	u32 ok_pkt_bytes;
 212	u16 ok_pkt_cnt;
 213	u16 total_pkt_cnt;
 214	u16 cont_pkt_cnt;
 215	u16 desc_err_pkt_cnt;
 216};
 217
 218/* Contains datapath state used to represent an RX queue. */
 219struct gve_rx_ring {
 220	struct gve_priv *gve;
 221	union {
 222		/* GQI fields */
 223		struct {
 224			struct gve_rx_desc_queue desc;
 225			struct gve_rx_data_queue data;
 226
 227			/* threshold for posting new buffs and descs */
 228			u32 db_threshold;
 229			u16 packet_buffer_size;
 230
 231			u32 qpl_copy_pool_mask;
 232			u32 qpl_copy_pool_head;
 233			struct gve_rx_slot_page_info *qpl_copy_pool;
 234		};
 235
 236		/* DQO fields. */
 237		struct {
 238			struct gve_rx_buf_queue_dqo bufq;
 239			struct gve_rx_compl_queue_dqo complq;
 240
 241			struct gve_rx_buf_state_dqo *buf_states;
 242			u16 num_buf_states;
 243
 244			/* Linked list of gve_rx_buf_state_dqo. Index into
 245			 * buf_states, or -1 if empty.
 246			 */
 247			s16 free_buf_states;
 248
 249			/* Linked list of gve_rx_buf_state_dqo. Indexes into
 250			 * buf_states, or -1 if empty.
 251			 *
 252			 * This list contains buf_states which are pointing to
 253			 * valid buffers.
 254			 *
 255			 * We use a FIFO here in order to increase the
 256			 * probability that buffers can be reused by increasing
 257			 * the time between usages.
 258			 */
 259			struct gve_index_list recycled_buf_states;
 260
 261			/* Linked list of gve_rx_buf_state_dqo. Indexes into
 262			 * buf_states, or -1 if empty.
 263			 *
 264			 * This list contains buf_states which have buffers
 265			 * which cannot be reused yet.
 266			 */
 267			struct gve_index_list used_buf_states;
 268
 269			/* qpl assigned to this queue */
 270			struct gve_queue_page_list *qpl;
 271
 272			/* index into queue page list */
 273			u32 next_qpl_page_idx;
 274
 275			/* track number of used buffers */
 276			u16 used_buf_states_cnt;
 277
 278			/* Address info of the buffers for header-split */
 279			struct gve_header_buf hdr_bufs;
 280
 281			struct page_pool *page_pool;
 282		} dqo;
 283	};
 284
 285	u64 rbytes; /* free-running bytes received */
 286	u64 rx_hsplit_bytes; /* free-running header bytes received */
 287	u64 rpackets; /* free-running packets received */
 288	u32 cnt; /* free-running total number of completed packets */
 289	u32 fill_cnt; /* free-running total number of descs and buffs posted */
 290	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
 291	u64 rx_hsplit_pkt; /* free-running packets with headers split */
 292	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
 293	u64 rx_copied_pkt; /* free-running total number of copied packets */
 294	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
 295	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
 296	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
  297	/* free-running count of unsplit packets due to header buffer overflow or hdr_len being 0 */
 298	u64 rx_hsplit_unsplit_pkt;
 299	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
 300	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
 301	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
 302	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
 303	u64 xdp_tx_errors;
 304	u64 xdp_redirect_errors;
 305	u64 xdp_alloc_fails;
 306	u64 xdp_actions[GVE_XDP_ACTIONS];
 307	u32 q_num; /* queue index */
 308	u32 ntfy_id; /* notification block index */
 309	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
 310	dma_addr_t q_resources_bus; /* dma address for the queue resources */
 311	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 312
 313	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
 314
 315	/* XDP stuff */
 316	struct xdp_rxq_info xdp_rxq;
 317	struct xdp_rxq_info xsk_rxq;
 318	struct xsk_buff_pool *xsk_pool;
 319	struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
 320};
 321
 322/* A TX desc ring entry */
 323union gve_tx_desc {
 324	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
 325	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
 326	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
 327};
 328
 329/* Tracks the memory in the fifo occupied by a segment of a packet */
 330struct gve_tx_iovec {
 331	u32 iov_offset; /* offset into this segment */
 332	u32 iov_len; /* length */
 333	u32 iov_padding; /* padding associated with this segment */
 334};
 335
 336/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 337 * ring entry but only used for a pkt_desc not a seg_desc
 338 */
 339struct gve_tx_buffer_state {
 340	union {
 341		struct sk_buff *skb; /* skb for this pkt */
 342		struct xdp_frame *xdp_frame; /* xdp_frame */
 343	};
 344	struct {
 345		u16 size; /* size of xmitted xdp pkt */
 346		u8 is_xsk; /* xsk buff */
 347	} xdp;
 348	union {
 349		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
 350		struct {
 351			DEFINE_DMA_UNMAP_ADDR(dma);
 352			DEFINE_DMA_UNMAP_LEN(len);
 353		};
 354	};
 355};
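
/* Editorial note: DEFINE_DMA_UNMAP_ADDR()/DEFINE_DMA_UNMAP_LEN() only reserve
 * space on architectures that need it and are read back with
 * dma_unmap_addr()/dma_unmap_len().  A hedged usage sketch (hypothetical
 * helper, not the driver's TX completion code):
 */
#if 0	/* illustrative only */
static inline void example_unmap_tx_buf(struct device *dev,
					struct gve_tx_buffer_state *info)
{
	dma_unmap_single(dev, dma_unmap_addr(info, dma),
			 dma_unmap_len(info, len), DMA_TO_DEVICE);
}
#endif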
 356
 357/* A TX buffer - each queue has one */
 358struct gve_tx_fifo {
 359	void *base; /* address of base of FIFO */
 360	u32 size; /* total size */
 361	atomic_t available; /* how much space is still available */
 362	u32 head; /* offset to write at */
 363	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
 364};
 365
 366/* TX descriptor for DQO format */
 367union gve_tx_desc_dqo {
 368	struct gve_tx_pkt_desc_dqo pkt;
 369	struct gve_tx_tso_context_desc_dqo tso_ctx;
 370	struct gve_tx_general_context_desc_dqo general_ctx;
 371};
 372
 373enum gve_packet_state {
 374	/* Packet is in free list, available to be allocated.
 375	 * This should always be zero since state is not explicitly initialized.
 376	 */
 377	GVE_PACKET_STATE_UNALLOCATED,
 378	/* Packet is expecting a regular data completion or miss completion */
 379	GVE_PACKET_STATE_PENDING_DATA_COMPL,
 380	/* Packet has received a miss completion and is expecting a
 381	 * re-injection completion.
 382	 */
 383	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
 384	/* No valid completion received within the specified timeout. */
 385	GVE_PACKET_STATE_TIMED_OUT_COMPL,
 386};
 387
 388struct gve_tx_pending_packet_dqo {
 389	struct sk_buff *skb; /* skb for this packet */
 390
 391	/* 0th element corresponds to the linear portion of `skb`, should be
 392	 * unmapped with `dma_unmap_single`.
 393	 *
 394	 * All others correspond to `skb`'s frags and should be unmapped with
 395	 * `dma_unmap_page`.
 396	 */
 397	union {
 398		struct {
 399			DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
 400			DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
 401		};
 402		s16 tx_qpl_buf_ids[GVE_MAX_TX_BUFS_PER_PKT];
 403	};
 404
 405	u16 num_bufs;
 406
 407	/* Linked list index to next element in the list, or -1 if none */
 408	s16 next;
 409
 410	/* Linked list index to prev element in the list, or -1 if none.
 411	 * Used for tracking either outstanding miss completions or prematurely
 412	 * freed packets.
 413	 */
 414	s16 prev;
 415
 416	/* Identifies the current state of the packet as defined in
 417	 * `enum gve_packet_state`.
 418	 */
 419	u8 state;
 420
 421	/* If packet is an outstanding miss completion, then the packet is
 422	 * freed if the corresponding re-injection completion is not received
 423	 * before kernel jiffies exceeds timeout_jiffies.
 424	 */
 425	unsigned long timeout_jiffies;
 426};
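
/* Editorial note: a hedged sketch of the miss-completion timeout described
 * above.  The real handling and the timeout length live in the DQO TX
 * completion path; this only shows how state and timeout_jiffies fit
 * together.
 */
#if 0	/* illustrative only */
static inline bool example_reinject_timed_out(const struct gve_tx_pending_packet_dqo *pkt)
{
	return pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
	       time_after(jiffies, pkt->timeout_jiffies);
}
#endif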
 427
 428/* Contains datapath state used to represent a TX queue. */
 429struct gve_tx_ring {
 430	/* Cacheline 0 -- Accessed & dirtied during transmit */
 431	union {
 432		/* GQI fields */
 433		struct {
 434			struct gve_tx_fifo tx_fifo;
 435			u32 req; /* driver tracked head pointer */
 436			u32 done; /* driver tracked tail pointer */
 437		};
 438
 439		/* DQO fields. */
 440		struct {
 441			/* Linked list of gve_tx_pending_packet_dqo. Index into
 442			 * pending_packets, or -1 if empty.
 443			 *
 444			 * This is a consumer list owned by the TX path. When it
 445			 * runs out, the producer list is stolen from the
 446			 * completion handling path
 447			 * (dqo_compl.free_pending_packets).
 448			 */
 449			s16 free_pending_packets;
 450
 451			/* Cached value of `dqo_compl.hw_tx_head` */
 452			u32 head;
 453			u32 tail; /* Last posted buffer index + 1 */
 454
 455			/* Index of the last descriptor with "report event" bit
 456			 * set.
 457			 */
 458			u32 last_re_idx;
 459
 460			/* free running number of packet buf descriptors posted */
 461			u16 posted_packet_desc_cnt;
 462			/* free running number of packet buf descriptors completed */
 463			u16 completed_packet_desc_cnt;
 464
 465			/* QPL fields */
 466			struct {
 467			       /* Linked list of gve_tx_buf_dqo. Index into
 468				* tx_qpl_buf_next, or -1 if empty.
 469				*
 470				* This is a consumer list owned by the TX path. When it
 471				* runs out, the producer list is stolen from the
 472				* completion handling path
 473				* (dqo_compl.free_tx_qpl_buf_head).
 474				*/
 475				s16 free_tx_qpl_buf_head;
 476
 477			       /* Free running count of the number of QPL tx buffers
 478				* allocated
 479				*/
 480				u32 alloc_tx_qpl_buf_cnt;
 481
 482				/* Cached value of `dqo_compl.free_tx_qpl_buf_cnt` */
 483				u32 free_tx_qpl_buf_cnt;
 484			};
 485		} dqo_tx;
 486	};
 487
 488	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
 489	union {
 490		/* GQI fields */
 491		struct {
  492			/* Spinlock for when cleanup is in progress */
 493			spinlock_t clean_lock;
 494			/* Spinlock for XDP tx traffic */
 495			spinlock_t xdp_lock;
 496		};
 497
 498		/* DQO fields. */
 499		struct {
 500			u32 head; /* Last read on compl_desc */
 501
 502			/* Tracks the current gen bit of compl_q */
 503			u8 cur_gen_bit;
 504
 505			/* Linked list of gve_tx_pending_packet_dqo. Index into
 506			 * pending_packets, or -1 if empty.
 507			 *
 508			 * This is the producer list, owned by the completion
 509			 * handling path. When the consumer list
  510			 * (dqo_tx.free_pending_packets) runs out, this list
 511			 * will be stolen.
 512			 */
 513			atomic_t free_pending_packets;
 514
 515			/* Last TX ring index fetched by HW */
 516			atomic_t hw_tx_head;
 517
 518			/* List to track pending packets which received a miss
 519			 * completion but not a corresponding reinjection.
 520			 */
 521			struct gve_index_list miss_completions;
 522
 523			/* List to track pending packets that were completed
 524			 * before receiving a valid completion because they
 525			 * reached a specified timeout.
 526			 */
 527			struct gve_index_list timed_out_completions;
 528
 529			/* QPL fields */
 530			struct {
 531				/* Linked list of gve_tx_buf_dqo. Index into
 532				 * tx_qpl_buf_next, or -1 if empty.
 533				 *
 534				 * This is the producer list, owned by the completion
 535				 * handling path. When the consumer list
  536				 * (dqo_tx.free_tx_qpl_buf_head) runs out, this list
 537				 * will be stolen.
 538				 */
 539				atomic_t free_tx_qpl_buf_head;
 540
 541				/* Free running count of the number of tx buffers
 542				 * freed
 543				 */
 544				atomic_t free_tx_qpl_buf_cnt;
 545			};
 546		} dqo_compl;
 547	} ____cacheline_aligned;
 548	u64 pkt_done; /* free-running - total packets completed */
 549	u64 bytes_done; /* free-running - total bytes completed */
 550	u64 dropped_pkt; /* free-running - total packets dropped */
 551	u64 dma_mapping_error; /* count of dma mapping errors */
 552
 553	/* Cacheline 2 -- Read-mostly fields */
 554	union {
 555		/* GQI fields */
 556		struct {
 557			union gve_tx_desc *desc;
 558
 559			/* Maps 1:1 to a desc */
 560			struct gve_tx_buffer_state *info;
 561		};
 562
 563		/* DQO fields. */
 564		struct {
 565			union gve_tx_desc_dqo *tx_ring;
 566			struct gve_tx_compl_desc *compl_ring;
 567
 568			struct gve_tx_pending_packet_dqo *pending_packets;
 569			s16 num_pending_packets;
 570
 571			u32 complq_mask; /* complq size is complq_mask + 1 */
 572
 573			/* QPL fields */
 574			struct {
 575				/* qpl assigned to this queue */
 576				struct gve_queue_page_list *qpl;
 577
 578				/* Each QPL page is divided into TX bounce buffers
 579				 * of size GVE_TX_BUF_SIZE_DQO. tx_qpl_buf_next is
 580				 * an array to manage linked lists of TX buffers.
 581				 * An entry j at index i implies that j'th buffer
 582				 * is next on the list after i
 583				 */
 584				s16 *tx_qpl_buf_next;
 585				u32 num_tx_qpl_bufs;
 586			};
 587		} dqo;
 588	} ____cacheline_aligned;
 589	struct netdev_queue *netdev_txq;
 590	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
 591	struct device *dev;
 592	u32 mask; /* masks req and done down to queue size */
 593	u8 raw_addressing; /* use raw_addressing? */
 594
 595	/* Slow-path fields */
 596	u32 q_num ____cacheline_aligned; /* queue idx */
 597	u32 stop_queue; /* count of queue stops */
 598	u32 wake_queue; /* count of queue wakes */
 599	u32 queue_timeout; /* count of queue timeouts */
 600	u32 ntfy_id; /* notification block index */
 601	u32 last_kick_msec; /* Last time the queue was kicked */
 602	dma_addr_t bus; /* dma address of the descr ring */
 603	dma_addr_t q_resources_bus; /* dma address of the queue resources */
 604	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
 605	struct u64_stats_sync statss; /* sync stats for 32bit archs */
 606	struct xsk_buff_pool *xsk_pool;
 607	u32 xdp_xsk_wakeup;
 608	u32 xdp_xsk_done;
 609	u64 xdp_xsk_sent;
 610	u64 xdp_xmit;
 611	u64 xdp_xmit_errors;
 612} ____cacheline_aligned;
 613
 614/* Wraps the info for one irq including the napi struct and the queues
 615 * associated with that irq.
 616 */
 617struct gve_notify_block {
 618	__be32 *irq_db_index; /* pointer to idx into Bar2 */
 619	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
 620	struct napi_struct napi; /* kernel napi struct for this block */
 621	struct gve_priv *priv;
 622	struct gve_tx_ring *tx; /* tx rings on this block */
 623	struct gve_rx_ring *rx; /* rx rings on this block */
 624	u32 irq;
 625};
 626
 627/* Tracks allowed and current queue settings */
 628struct gve_queue_config {
 629	u16 max_queues;
 630	u16 num_queues; /* current */
 631};
 632
 633/* Tracks the available and used qpl IDs */
 634struct gve_qpl_config {
 635	u32 qpl_map_size; /* map memory size */
 636	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
 637};
 638
 639struct gve_irq_db {
 640	__be32 index;
 641} ____cacheline_aligned;
 642
 643struct gve_ptype {
 644	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
 645	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
 646};
 647
 648struct gve_ptype_lut {
 649	struct gve_ptype ptypes[GVE_NUM_PTYPES];
 650};
 651
 652/* Parameters for allocating resources for tx queues */
 653struct gve_tx_alloc_rings_cfg {
 654	struct gve_queue_config *qcfg;
 655
 656	u16 ring_size;
 657	u16 start_idx;
 658	u16 num_rings;
 659	bool raw_addressing;
 660
 661	/* Allocated resources are returned here */
 662	struct gve_tx_ring *tx;
 663};
 664
 665/* Parameters for allocating resources for rx queues */
 666struct gve_rx_alloc_rings_cfg {
 667	/* tx config is also needed to determine QPL ids */
 668	struct gve_queue_config *qcfg;
 669	struct gve_queue_config *qcfg_tx;
 670
 671	u16 ring_size;
 672	u16 packet_buffer_size;
 673	bool raw_addressing;
 674	bool enable_header_split;
 675
 676	/* Allocated resources are returned here */
 677	struct gve_rx_ring *rx;
 678};
 679
 680/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 681 * when the entire configure_device_resources command is zeroed out and the
 682 * queue_format is not specified.
 683 */
 684enum gve_queue_format {
 685	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
 686	GVE_GQI_RDA_FORMAT		= 0x1,
 687	GVE_GQI_QPL_FORMAT		= 0x2,
 688	GVE_DQO_RDA_FORMAT		= 0x3,
 689	GVE_DQO_QPL_FORMAT		= 0x4,
 690};
 691
 692struct gve_flow_spec {
 693	__be32 src_ip[4];
 694	__be32 dst_ip[4];
 695	union {
 696		struct {
 697			__be16 src_port;
 698			__be16 dst_port;
 699		};
 700		__be32 spi;
 701	};
 702	union {
 703		u8 tos;
 704		u8 tclass;
 705	};
 706};
 707
 708struct gve_flow_rule {
 709	u32 location;
 710	u16 flow_type;
 711	u16 action;
 712	struct gve_flow_spec key;
 713	struct gve_flow_spec mask;
 714};
 715
 716struct gve_flow_rules_cache {
 717	bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
 718	struct gve_adminq_queried_flow_rule *rules_cache;
 719	__be32 *rule_ids_cache;
  720	/* The total number of queried rules that are stored in the caches */
 721	u32 rules_cache_num;
 722	u32 rule_ids_cache_num;
 723};
 724
 725struct gve_priv {
 726	struct net_device *dev;
 727	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
 728	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
 729	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
 730	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
 731	dma_addr_t irq_db_indices_bus;
 732	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
 733	char mgmt_msix_name[IFNAMSIZ + 16];
 734	u32 mgmt_msix_idx;
 735	__be32 *counter_array; /* array of num_event_counters */
 736	dma_addr_t counter_array_bus;
 737
 738	u16 num_event_counters;
 739	u16 tx_desc_cnt; /* num desc per ring */
 740	u16 rx_desc_cnt; /* num desc per ring */
 741	u16 max_tx_desc_cnt;
 742	u16 max_rx_desc_cnt;
 743	u16 min_tx_desc_cnt;
 744	u16 min_rx_desc_cnt;
 745	bool modify_ring_size_enabled;
 746	bool default_min_ring_size;
  747	u16 tx_pages_per_qpl; /* Number of pages per qpl for TX queues, as suggested by the NIC */
 748	u64 max_registered_pages;
 749	u64 num_registered_pages; /* num pages registered with NIC */
 750	struct bpf_prog *xdp_prog; /* XDP BPF program */
 751	u32 rx_copybreak; /* copy packets smaller than this */
 752	u16 default_num_queues; /* default num queues to set up */
 753
 754	u16 num_xdp_queues;
 755	struct gve_queue_config tx_cfg;
 756	struct gve_queue_config rx_cfg;
  757	u32 num_ntfy_blks; /* split between TX and RX so must be even */
 758
 759	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
 760	__be32 __iomem *db_bar2; /* "array" of doorbells */
 761	u32 msg_enable;	/* level for netif* netdev print macros	*/
 762	struct pci_dev *pdev;
 763
 764	/* metrics */
 765	u32 tx_timeo_cnt;
 766
  767	/* Admin queue - see gve_adminq.h */
 768	union gve_adminq_command *adminq;
 769	dma_addr_t adminq_bus_addr;
 770	struct dma_pool *adminq_pool;
 771	struct mutex adminq_lock; /* Protects adminq command execution */
 772	u32 adminq_mask; /* masks prod_cnt to adminq size */
 773	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
 774	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
 775	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
  776	/* free-running counts of each AQ cmd executed */
 777	u32 adminq_describe_device_cnt;
 778	u32 adminq_cfg_device_resources_cnt;
 779	u32 adminq_register_page_list_cnt;
 780	u32 adminq_unregister_page_list_cnt;
 781	u32 adminq_create_tx_queue_cnt;
 782	u32 adminq_create_rx_queue_cnt;
 783	u32 adminq_destroy_tx_queue_cnt;
 784	u32 adminq_destroy_rx_queue_cnt;
 785	u32 adminq_dcfg_device_resources_cnt;
 786	u32 adminq_set_driver_parameter_cnt;
 787	u32 adminq_report_stats_cnt;
 788	u32 adminq_report_link_speed_cnt;
 789	u32 adminq_get_ptype_map_cnt;
 790	u32 adminq_verify_driver_compatibility_cnt;
 791	u32 adminq_query_flow_rules_cnt;
 792	u32 adminq_cfg_flow_rule_cnt;
 793	u32 adminq_cfg_rss_cnt;
 794	u32 adminq_query_rss_cnt;
 795
 796	/* Global stats */
 797	u32 interface_up_cnt; /* count of times interface turned up since last reset */
 798	u32 interface_down_cnt; /* count of times interface turned down since last reset */
 799	u32 reset_cnt; /* count of reset */
 800	u32 page_alloc_fail; /* count of page alloc fails */
 801	u32 dma_mapping_error; /* count of dma mapping errors */
 802	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
 803	u32 suspend_cnt; /* count of times suspended */
 804	u32 resume_cnt; /* count of times resumed */
 805	struct workqueue_struct *gve_wq;
 806	struct work_struct service_task;
 807	struct work_struct stats_report_task;
 808	unsigned long service_task_flags;
 809	unsigned long state_flags;
 810
 811	struct gve_stats_report *stats_report;
 812	u64 stats_report_len;
 813	dma_addr_t stats_report_bus; /* dma address for the stats report */
 814	unsigned long ethtool_flags;
 815
 816	unsigned long stats_report_timer_period;
 817	struct timer_list stats_report_timer;
 818
 819	/* Gvnic device link speed from hypervisor. */
 820	u64 link_speed;
 821	bool up_before_suspend; /* True if dev was up before suspend */
 822
 823	struct gve_ptype_lut *ptype_lut_dqo;
 824
 825	/* Must be a power of two. */
 826	u16 data_buffer_size_dqo;
 827	u16 max_rx_buffer_size; /* device limit */
 828
 829	enum gve_queue_format queue_format;
 830
 831	/* Interrupt coalescing settings */
 832	u32 tx_coalesce_usecs;
 833	u32 rx_coalesce_usecs;
 834
 835	u16 header_buf_size; /* device configured, header-split supported if non-zero */
 836	bool header_split_enabled; /* True if the header split is enabled by the user */
 837
 838	u32 max_flow_rules;
 839	u32 num_flow_rules;
 840
 841	struct gve_flow_rules_cache flow_rules_cache;
 842
 843	u16 rss_key_size;
 844	u16 rss_lut_size;
 845};
 846
 847enum gve_service_task_flags_bit {
 848	GVE_PRIV_FLAGS_DO_RESET			= 1,
 849	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
 850	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
 851	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
 852};
 853
 854enum gve_state_flags_bit {
 855	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
 856	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
 857	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
 858	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
 859};
 860
 861enum gve_ethtool_flags_bit {
 862	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
 863};
 864
 865static inline bool gve_get_do_reset(struct gve_priv *priv)
 866{
 867	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
 868}
 869
 870static inline void gve_set_do_reset(struct gve_priv *priv)
 871{
 872	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
 873}
 874
 875static inline void gve_clear_do_reset(struct gve_priv *priv)
 876{
 877	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
 878}
 879
 880static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
 881{
 882	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
 883			&priv->service_task_flags);
 884}
 885
 886static inline void gve_set_reset_in_progress(struct gve_priv *priv)
 887{
 888	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
 889}
 890
 891static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
 892{
 893	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
 894}
 895
 896static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
 897{
 898	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
 899			&priv->service_task_flags);
 900}
 901
 902static inline void gve_set_probe_in_progress(struct gve_priv *priv)
 903{
 904	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
 905}
 906
 907static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
 908{
 909	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
 910}
 911
 912static inline bool gve_get_do_report_stats(struct gve_priv *priv)
 913{
 914	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
 915			&priv->service_task_flags);
 916}
 917
 918static inline void gve_set_do_report_stats(struct gve_priv *priv)
 919{
 920	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
 921}
 922
 923static inline void gve_clear_do_report_stats(struct gve_priv *priv)
 924{
 925	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
 926}
 927
 928static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
 929{
 930	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
 931}
 932
 933static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
 934{
 935	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
 936}
 937
 938static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
 939{
 940	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
 941}
 942
 943static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
 944{
 945	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
 946}
 947
 948static inline void gve_set_device_resources_ok(struct gve_priv *priv)
 949{
 950	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
 951}
 952
 953static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
 954{
 955	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
 956}
 957
 958static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
 959{
 960	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
 961}
 962
 963static inline void gve_set_device_rings_ok(struct gve_priv *priv)
 964{
 965	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
 966}
 967
 968static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
 969{
 970	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
 971}
 972
 973static inline bool gve_get_napi_enabled(struct gve_priv *priv)
 974{
 975	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
 976}
 977
 978static inline void gve_set_napi_enabled(struct gve_priv *priv)
 979{
 980	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
 981}
 982
 983static inline void gve_clear_napi_enabled(struct gve_priv *priv)
 984{
 985	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
 986}
 987
 988static inline bool gve_get_report_stats(struct gve_priv *priv)
 989{
 990	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
 991}
 992
 993static inline void gve_clear_report_stats(struct gve_priv *priv)
 994{
 995	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
 996}
 997
 998/* Returns the address of the ntfy_blocks irq doorbell
 999 */
1000static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
1001					       struct gve_notify_block *block)
1002{
1003	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
1004}
1005
1006/* Returns the index into ntfy_blocks of the given tx ring's block
1007 */
1008static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1009{
1010	return queue_idx;
1011}
1012
1013/* Returns the index into ntfy_blocks of the given rx ring's block
1014 */
1015static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
1016{
1017	return (priv->num_ntfy_blks / 2) + queue_idx;
1018}
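
/* Editorial note: a worked example of the mapping above.  num_ntfy_blks is
 * split evenly between TX and RX (see gve_priv below), so with, say, 16
 * notification blocks, TX queue 3 uses block 3 while RX queue 3 uses block
 * 16 / 2 + 3 = 11.
 */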
1019
1020static inline bool gve_is_qpl(struct gve_priv *priv)
1021{
1022	return priv->queue_format == GVE_GQI_QPL_FORMAT ||
1023		priv->queue_format == GVE_DQO_QPL_FORMAT;
1024}
1025
1026/* Returns the number of tx queue page lists */
1027static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
1028				  int num_xdp_queues,
1029				  bool is_qpl)
1030{
1031	if (!is_qpl)
1032		return 0;
1033	return tx_cfg->num_queues + num_xdp_queues;
1034}
1035
1036/* Returns the number of XDP tx queue page lists
1037 */
1038static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
1039{
1040	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
1041		return 0;
1042
1043	return priv->num_xdp_queues;
1044}
1045
1046/* Returns the number of rx queue page lists */
1047static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
1048				  bool is_qpl)
1049{
1050	if (!is_qpl)
1051		return 0;
1052	return rx_cfg->num_queues;
1053}
1054
1055static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
1056{
1057	return tx_qid;
1058}
1059
1060static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
1061{
1062	return priv->tx_cfg.max_queues + rx_qid;
1063}
1064
1065static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
1066{
1067	return tx_cfg->max_queues + rx_qid;
1068}
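
/* Editorial note: with the helpers above, QPL ids form one flat namespace:
 * TX queues take ids [0, tx_cfg.max_queues) and RX queues follow.  For
 * example, with tx_cfg.max_queues = 16, RX queue 2 uses QPL id 16 + 2 = 18.
 * gve_qpl_dma_dir() below relies on this split to pick the DMA direction.
 */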
1069
1070static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
1071{
1072	return gve_tx_qpl_id(priv, 0);
1073}
1074
1075static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
1076{
1077	return gve_get_rx_qpl_id(tx_cfg, 0);
1078}
1079
1080static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
1081{
 1082	/* For DQO, the page count should be larger than the ring size to allow
 1083	 * for out-of-order completions. Set it to two times the ring size.
 1084	 */
1085	return 2 * rx_desc_cnt;
1086}
1087
1088/* Returns the correct dma direction for tx and rx qpls */
1089static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
1090						      int id)
1091{
1092	if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
1093		return DMA_TO_DEVICE;
1094	else
1095		return DMA_FROM_DEVICE;
1096}
1097
1098static inline bool gve_is_gqi(struct gve_priv *priv)
1099{
1100	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
1101		priv->queue_format == GVE_GQI_QPL_FORMAT;
1102}
1103
1104static inline u32 gve_num_tx_queues(struct gve_priv *priv)
1105{
1106	return priv->tx_cfg.num_queues + priv->num_xdp_queues;
1107}
1108
1109static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
1110{
1111	return priv->tx_cfg.num_queues + queue_id;
1112}
1113
1114static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
1115{
1116	return gve_xdp_tx_queue_id(priv, 0);
1117}
1118
1119static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
1120{
1121	switch (priv->queue_format) {
1122	case GVE_GQI_QPL_FORMAT:
1123		return true;
1124	default:
1125		return false;
1126	}
1127}
1128
1129/* gqi napi handler defined in gve_main.c */
1130int gve_napi_poll(struct napi_struct *napi, int budget);
1131
1132/* buffers */
1133int gve_alloc_page(struct gve_priv *priv, struct device *dev,
1134		   struct page **page, dma_addr_t *dma,
1135		   enum dma_data_direction, gfp_t gfp_flags);
1136void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
1137		   enum dma_data_direction);
1138/* qpls */
1139struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
1140						      u32 id, int pages);
1141void gve_free_queue_page_list(struct gve_priv *priv,
1142			      struct gve_queue_page_list *qpl,
1143			      u32 id);
1144/* tx handling */
1145netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
1146int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
1147		 u32 flags);
1148int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
1149		     void *data, int len, void *frame_p);
1150void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
1151bool gve_tx_poll(struct gve_notify_block *block, int budget);
1152bool gve_xdp_poll(struct gve_notify_block *block, int budget);
1153int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
1154int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
1155			   struct gve_tx_alloc_rings_cfg *cfg);
1156void gve_tx_free_rings_gqi(struct gve_priv *priv,
1157			   struct gve_tx_alloc_rings_cfg *cfg);
1158void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
1159void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
1160u32 gve_tx_load_event_counter(struct gve_priv *priv,
1161			      struct gve_tx_ring *tx);
1162bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
1163/* rx handling */
1164void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
1165int gve_rx_poll(struct gve_notify_block *block, int budget);
1166bool gve_rx_work_pending(struct gve_rx_ring *rx);
1167int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
1168			  struct gve_rx_alloc_rings_cfg *cfg,
1169			  struct gve_rx_ring *rx,
1170			  int idx);
1171void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
1172			  struct gve_rx_alloc_rings_cfg *cfg);
1173int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
1174			   struct gve_rx_alloc_rings_cfg *cfg);
1175void gve_rx_free_rings_gqi(struct gve_priv *priv,
1176			   struct gve_rx_alloc_rings_cfg *cfg);
1177void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
1178void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
1179u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
1180bool gve_header_split_supported(const struct gve_priv *priv);
1181int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
1182/* rx buffer handling */
1183int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
1184void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
1185		       bool free_page);
1186struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx);
1187bool gve_buf_state_is_allocated(struct gve_rx_ring *rx,
1188				struct gve_rx_buf_state_dqo *buf_state);
1189void gve_free_buf_state(struct gve_rx_ring *rx,
1190			struct gve_rx_buf_state_dqo *buf_state);
1191struct gve_rx_buf_state_dqo *gve_dequeue_buf_state(struct gve_rx_ring *rx,
1192						   struct gve_index_list *list);
1193void gve_enqueue_buf_state(struct gve_rx_ring *rx, struct gve_index_list *list,
1194			   struct gve_rx_buf_state_dqo *buf_state);
1195struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx);
1196void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
1197			 struct gve_rx_buf_state_dqo *buf_state);
1198void gve_free_to_page_pool(struct gve_rx_ring *rx,
1199			   struct gve_rx_buf_state_dqo *buf_state,
1200			   bool allow_direct);
1201int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
1202			   struct gve_rx_buf_state_dqo *buf_state);
1203void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state);
1204void gve_reuse_buffer(struct gve_rx_ring *rx,
1205		      struct gve_rx_buf_state_dqo *buf_state);
1206void gve_free_buffer(struct gve_rx_ring *rx,
1207		     struct gve_rx_buf_state_dqo *buf_state);
1208int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc);
1209struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
1210					  struct gve_rx_ring *rx);
1211
1212/* Reset */
1213void gve_schedule_reset(struct gve_priv *priv);
1214int gve_reset(struct gve_priv *priv, bool attempt_teardown);
1215void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
1216			     struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1217			     struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1218int gve_adjust_config(struct gve_priv *priv,
1219		      struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
1220		      struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
1221int gve_adjust_queues(struct gve_priv *priv,
1222		      struct gve_queue_config new_rx_config,
1223		      struct gve_queue_config new_tx_config);
1224/* flow steering rule */
1225int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1226int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
1227int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1228int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
1229int gve_flow_rules_reset(struct gve_priv *priv);
1230/* report stats handling */
1231void gve_handle_report_stats(struct gve_priv *priv);
1232/* exported by ethtool.c */
1233extern const struct ethtool_ops gve_ethtool_ops;
1234/* needed by ethtool */
1235extern char gve_driver_name[];
1236extern const char gve_version_str[];
1237#endif /* _GVE_H_ */
v6.2
  1/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  2 * Google virtual Ethernet (gve) driver
  3 *
  4 * Copyright (C) 2015-2021 Google, Inc.
  5 */
  6
  7#ifndef _GVE_H_
  8#define _GVE_H_
  9
 10#include <linux/dma-mapping.h>
 
 
 11#include <linux/netdevice.h>
 12#include <linux/pci.h>
 13#include <linux/u64_stats_sync.h>
 
 
 14
 15#include "gve_desc.h"
 16#include "gve_desc_dqo.h"
 17
 18#ifndef PCI_VENDOR_ID_GOOGLE
 19#define PCI_VENDOR_ID_GOOGLE	0x1ae0
 20#endif
 21
 22#define PCI_DEV_ID_GVNIC	0x0042
 23
 24#define GVE_REGISTER_BAR	0
 25#define GVE_DOORBELL_BAR	2
 26
 27/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
 28#define GVE_TX_MAX_IOVEC	4
 29/* 1 for management, 1 for rx, 1 for tx */
 30#define GVE_MIN_MSIX 3
 31
 32/* Numbers of gve tx/rx stats in stats report. */
 33#define GVE_TX_STATS_REPORT_NUM	6
 34#define GVE_RX_STATS_REPORT_NUM	2
 35
 36/* Interval to schedule a stats report update, 20000ms. */
 37#define GVE_STATS_REPORT_TIMER_PERIOD	20000
 38
 39/* Numbers of NIC tx/rx stats in stats report. */
 40#define NIC_TX_STATS_REPORT_NUM	0
 41#define NIC_RX_STATS_REPORT_NUM	4
 42
 
 
 43#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
 44
 45/* PTYPEs are always 10 bits. */
 46#define GVE_NUM_PTYPES	1024
 47
 48#define GVE_RX_BUFFER_SIZE_DQO 2048
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 49
 50/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
 51struct gve_rx_desc_queue {
 52	struct gve_rx_desc *desc_ring; /* the descriptor ring */
 53	dma_addr_t bus; /* the bus for the desc_ring */
 54	u8 seqno; /* the next expected seqno for this desc*/
 55};
 56
 57/* The page info for a single slot in the RX data queue */
 58struct gve_rx_slot_page_info {
 59	struct page *page;
 60	void *page_address;
 61	u32 page_offset; /* offset to write to in page */
 
 62	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
 63	u16 pad; /* adjustment for rx padding */
 64	u8 can_flip; /* tracks if the networking stack is using the page */
 65};
 66
 67/* A list of pages registered with the device during setup and used by a queue
 68 * as buffers
 69 */
 70struct gve_queue_page_list {
 71	u32 id; /* unique id */
 72	u32 num_entries;
 73	struct page **pages; /* list of num_entries pages */
 74	dma_addr_t *page_buses; /* the dma addrs of the pages */
 75};
 76
 77/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
 78struct gve_rx_data_queue {
 79	union gve_rx_data_slot *data_ring; /* read by NIC */
 80	dma_addr_t data_bus; /* dma mapping of the slots */
 81	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
 82	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
 83	u8 raw_addressing; /* use raw_addressing? */
 84};
 85
 86struct gve_priv;
 87
 88/* RX buffer queue for posting buffers to HW.
 89 * Each RX (completion) queue has a corresponding buffer queue.
 90 */
 91struct gve_rx_buf_queue_dqo {
 92	struct gve_rx_desc_dqo *desc_ring;
 93	dma_addr_t bus;
 94	u32 head; /* Pointer to start cleaning buffers at. */
 95	u32 tail; /* Last posted buffer index + 1 */
 96	u32 mask; /* Mask for indices to the size of the ring */
 97};
 98
 99/* RX completion queue to receive packets from HW. */
100struct gve_rx_compl_queue_dqo {
101	struct gve_rx_compl_desc_dqo *desc_ring;
102	dma_addr_t bus;
103
104	/* Number of slots which did not have a buffer posted yet. We should not
105	 * post more buffers than the queue size to avoid HW overrunning the
106	 * queue.
107	 */
108	int num_free_slots;
109
110	/* HW uses a "generation bit" to notify SW of new descriptors. When a
111	 * descriptor's generation bit is different from the current generation,
112	 * that descriptor is ready to be consumed by SW.
113	 */
114	u8 cur_gen_bit;
115
116	/* Pointer into desc_ring where the next completion descriptor will be
117	 * received.
118	 */
119	u32 head;
120	u32 mask; /* Mask for indices to the size of the ring */
121};
122
 
 
 
 
 
123/* Stores state for tracking buffers posted to HW */
124struct gve_rx_buf_state_dqo {
125	/* The page posted to HW. */
126	struct gve_rx_slot_page_info page_info;
127
128	/* The DMA address corresponding to `page_info`. */
129	dma_addr_t addr;
130
131	/* Last offset into the page when it only had a single reference, at
132	 * which point every other offset is free to be reused.
133	 */
134	u32 last_single_ref_offset;
135
136	/* Linked list index to next element in the list, or -1 if none */
137	s16 next;
138};
139
140/* `head` and `tail` are indices into an array, or -1 if empty. */
141struct gve_index_list {
142	s16 head;
143	s16 tail;
144};
145
146/* A single received packet split across multiple buffers may be
147 * reconstructed using the information in this structure.
148 */
149struct gve_rx_ctx {
150	/* head and tail of skb chain for the current packet or NULL if none */
151	struct sk_buff *skb_head;
152	struct sk_buff *skb_tail;
153	u32 total_size;
154	u8 frag_cnt;
155	bool drop_pkt;
156};
157
158struct gve_rx_cnts {
159	u32 ok_pkt_bytes;
160	u16 ok_pkt_cnt;
161	u16 total_pkt_cnt;
162	u16 cont_pkt_cnt;
163	u16 desc_err_pkt_cnt;
164};
165
166/* Contains datapath state used to represent an RX queue. */
167struct gve_rx_ring {
168	struct gve_priv *gve;
169	union {
170		/* GQI fields */
171		struct {
172			struct gve_rx_desc_queue desc;
173			struct gve_rx_data_queue data;
174
175			/* threshold for posting new buffs and descs */
176			u32 db_threshold;
177			u16 packet_buffer_size;
178
179			u32 qpl_copy_pool_mask;
180			u32 qpl_copy_pool_head;
181			struct gve_rx_slot_page_info *qpl_copy_pool;
182		};
183
184		/* DQO fields. */
185		struct {
186			struct gve_rx_buf_queue_dqo bufq;
187			struct gve_rx_compl_queue_dqo complq;
188
189			struct gve_rx_buf_state_dqo *buf_states;
190			u16 num_buf_states;
191
192			/* Linked list of gve_rx_buf_state_dqo. Index into
193			 * buf_states, or -1 if empty.
194			 */
195			s16 free_buf_states;
196
197			/* Linked list of gve_rx_buf_state_dqo. Indexes into
198			 * buf_states, or -1 if empty.
199			 *
200			 * This list contains buf_states which are pointing to
201			 * valid buffers.
202			 *
203			 * We use a FIFO here in order to increase the
204			 * probability that buffers can be reused by increasing
205			 * the time between usages.
206			 */
207			struct gve_index_list recycled_buf_states;
208
209			/* Linked list of gve_rx_buf_state_dqo. Indexes into
210			 * buf_states, or -1 if empty.
211			 *
212			 * This list contains buf_states which have buffers
213			 * which cannot be reused yet.
214			 */
215			struct gve_index_list used_buf_states;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216		} dqo;
217	};
218
219	u64 rbytes; /* free-running bytes received */
 
220	u64 rpackets; /* free-running packets received */
221	u32 cnt; /* free-running total number of completed packets */
222	u32 fill_cnt; /* free-running total number of descs and buffs posted */
223	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
 
224	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
225	u64 rx_copied_pkt; /* free-running total number of copied packets */
226	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
227	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
228	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
 
 
229	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
230	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
231	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
232	u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
233
 
 
 
234	u32 q_num; /* queue index */
235	u32 ntfy_id; /* notification block index */
236	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
237	dma_addr_t q_resources_bus; /* dma address for the queue resources */
238	struct u64_stats_sync statss; /* sync stats for 32bit archs */
239
240	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
 
 
 
 
 
 
241};
242
243/* A TX desc ring entry */
244union gve_tx_desc {
245	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
246	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
247	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
248};
249
250/* Tracks the memory in the fifo occupied by a segment of a packet */
251struct gve_tx_iovec {
252	u32 iov_offset; /* offset into this segment */
253	u32 iov_len; /* length */
254	u32 iov_padding; /* padding associated with this segment */
255};
256
257/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
258 * ring entry but only used for a pkt_desc not a seg_desc
259 */
260struct gve_tx_buffer_state {
261	struct sk_buff *skb; /* skb for this pkt */
 
 
 
 
 
 
 
262	union {
263		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
264		struct {
265			DEFINE_DMA_UNMAP_ADDR(dma);
266			DEFINE_DMA_UNMAP_LEN(len);
267		};
268	};
269};
270
271/* A TX buffer - each queue has one */
272struct gve_tx_fifo {
273	void *base; /* address of base of FIFO */
274	u32 size; /* total size */
275	atomic_t available; /* how much space is still available */
276	u32 head; /* offset to write at */
277	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
278};
279
280/* TX descriptor for DQO format */
281union gve_tx_desc_dqo {
282	struct gve_tx_pkt_desc_dqo pkt;
283	struct gve_tx_tso_context_desc_dqo tso_ctx;
284	struct gve_tx_general_context_desc_dqo general_ctx;
285};
286
287enum gve_packet_state {
288	/* Packet is in free list, available to be allocated.
289	 * This should always be zero since state is not explicitly initialized.
290	 */
291	GVE_PACKET_STATE_UNALLOCATED,
292	/* Packet is expecting a regular data completion or miss completion */
293	GVE_PACKET_STATE_PENDING_DATA_COMPL,
294	/* Packet has received a miss completion and is expecting a
295	 * re-injection completion.
296	 */
297	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
298	/* No valid completion received within the specified timeout. */
299	GVE_PACKET_STATE_TIMED_OUT_COMPL,
300};
301
302struct gve_tx_pending_packet_dqo {
303	struct sk_buff *skb; /* skb for this packet */
304
305	/* 0th element corresponds to the linear portion of `skb`, should be
306	 * unmapped with `dma_unmap_single`.
307	 *
308	 * All others correspond to `skb`'s frags and should be unmapped with
309	 * `dma_unmap_page`.
310	 */
311	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
312	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
 
 
 
 
 
 
313	u16 num_bufs;
314
315	/* Linked list index to next element in the list, or -1 if none */
316	s16 next;
317
318	/* Linked list index to prev element in the list, or -1 if none.
319	 * Used for tracking either outstanding miss completions or prematurely
320	 * freed packets.
321	 */
322	s16 prev;
323
324	/* Identifies the current state of the packet as defined in
325	 * `enum gve_packet_state`.
326	 */
327	u8 state;
328
329	/* If packet is an outstanding miss completion, then the packet is
330	 * freed if the corresponding re-injection completion is not received
331	 * before kernel jiffies exceeds timeout_jiffies.
332	 */
333	unsigned long timeout_jiffies;
334};
335
336/* Contains datapath state used to represent a TX queue. */
337struct gve_tx_ring {
338	/* Cacheline 0 -- Accessed & dirtied during transmit */
339	union {
340		/* GQI fields */
341		struct {
342			struct gve_tx_fifo tx_fifo;
343			u32 req; /* driver tracked head pointer */
344			u32 done; /* driver tracked tail pointer */
345		};
346
347		/* DQO fields. */
348		struct {
349			/* Linked list of gve_tx_pending_packet_dqo. Index into
350			 * pending_packets, or -1 if empty.
351			 *
352			 * This is a consumer list owned by the TX path. When it
353			 * runs out, the producer list is stolen from the
354			 * completion handling path
355			 * (dqo_compl.free_pending_packets).
356			 */
357			s16 free_pending_packets;
358
359			/* Cached value of `dqo_compl.hw_tx_head` */
360			u32 head;
361			u32 tail; /* Last posted buffer index + 1 */
362
363			/* Index of the last descriptor with "report event" bit
364			 * set.
365			 */
366			u32 last_re_idx;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367		} dqo_tx;
368	};
369
370	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
371	union {
372		/* GQI fields */
373		struct {
374			/* Spinlock for when cleanup in progress */
375			spinlock_t clean_lock;
 
 
376		};
377
378		/* DQO fields. */
379		struct {
380			u32 head; /* Last read on compl_desc */
381
382			/* Tracks the current gen bit of compl_q */
383			u8 cur_gen_bit;
384
385			/* Linked list of gve_tx_pending_packet_dqo. Index into
386			 * pending_packets, or -1 if empty.
387			 *
388			 * This is the producer list, owned by the completion
389			 * handling path. When the consumer list
390			 * (dqo_tx.free_pending_packets) is runs out, this list
391			 * will be stolen.
392			 */
393			atomic_t free_pending_packets;
394
395			/* Last TX ring index fetched by HW */
396			atomic_t hw_tx_head;
397
398			/* List to track pending packets which received a miss
399			 * completion but not a corresponding reinjection.
400			 */
401			struct gve_index_list miss_completions;
402
403			/* List to track pending packets that were completed
404			 * before receiving a valid completion because they
405			 * reached a specified timeout.
406			 */
407			struct gve_index_list timed_out_completions;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
408		} dqo_compl;
409	} ____cacheline_aligned;
410	u64 pkt_done; /* free-running - total packets completed */
411	u64 bytes_done; /* free-running - total bytes completed */
412	u64 dropped_pkt; /* free-running - total packets dropped */
413	u64 dma_mapping_error; /* count of dma mapping errors */
414
415	/* Cacheline 2 -- Read-mostly fields */
416	union {
417		/* GQI fields */
418		struct {
419			union gve_tx_desc *desc;
420
421			/* Maps 1:1 to a desc */
422			struct gve_tx_buffer_state *info;
423		};
424
425		/* DQO fields. */
426		struct {
427			union gve_tx_desc_dqo *tx_ring;
428			struct gve_tx_compl_desc *compl_ring;
429
430			struct gve_tx_pending_packet_dqo *pending_packets;
431			s16 num_pending_packets;
432
433			u32 complq_mask; /* complq size is complq_mask + 1 */
434		} dqo;
435	} ____cacheline_aligned;
436	struct netdev_queue *netdev_txq;
437	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
438	struct device *dev;
439	u32 mask; /* masks req and done down to queue size */
440	u8 raw_addressing; /* use raw_addressing? */
441
442	/* Slow-path fields */
443	u32 q_num ____cacheline_aligned; /* queue idx */
444	u32 stop_queue; /* count of queue stops */
445	u32 wake_queue; /* count of queue wakes */
446	u32 queue_timeout; /* count of queue timeouts */
447	u32 ntfy_id; /* notification block index */
448	u32 last_kick_msec; /* Last time the queue was kicked */
449	dma_addr_t bus; /* dma address of the descr ring */
450	dma_addr_t q_resources_bus; /* dma address of the queue resources */
451	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
452	struct u64_stats_sync statss; /* sync stats for 32bit archs */
453} ____cacheline_aligned;
454
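/* Illustrative sketch (hypothetical helper, not the driver's exact code):
 * how the TX path can refill its consumer free list by stealing the producer
 * list that the completion path maintains, as described in the dqo_tx and
 * dqo_compl comments above.
 */
static inline s16 gve_example_get_free_pending_packet(struct gve_tx_ring *tx)
{
	s16 index = tx->dqo_tx.free_pending_packets;

	if (index == -1) {
		/* Consumer list is empty: take the whole producer list in one
		 * atomic exchange, leaving -1 (empty) behind for the
		 * completion path.
		 */
		index = (s16)atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
		if (index == -1)
			return -1;	/* no pending-packet slots available */
	}

	/* Pop the head of the (now non-empty) consumer list. */
	tx->dqo_tx.free_pending_packets = tx->dqo.pending_packets[index].next;
	return index;
}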
455/* Wraps the info for one irq including the napi struct and the queues
456 * associated with that irq.
457 */
458struct gve_notify_block {
459	__be32 *irq_db_index; /* pointer to idx into Bar2 */
460	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
461	struct napi_struct napi; /* kernel napi struct for this block */
462	struct gve_priv *priv;
463	struct gve_tx_ring *tx; /* tx rings on this block */
464	struct gve_rx_ring *rx; /* rx rings on this block */
 
465};
466
467/* Tracks allowed and current queue settings */
468struct gve_queue_config {
469	u16 max_queues;
470	u16 num_queues; /* current */
471};
472
473/* Tracks the available and used qpl IDs */
474struct gve_qpl_config {
475	u32 qpl_map_size; /* map memory size */
476	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
477};
478
479struct gve_options_dqo_rda {
480	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
481	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
482};
483
484struct gve_irq_db {
485	__be32 index;
486} ____cacheline_aligned;
487
488struct gve_ptype {
489	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
490	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
491};
492
493struct gve_ptype_lut {
494	struct gve_ptype ptypes[GVE_NUM_PTYPES];
495};
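/* Illustrative sketch (hypothetical helper): the DQO RX path resolves a
 * descriptor's 10-bit packet type through this lookup table, e.g. to pick
 * checksum/hash handling based on the resolved L3/L4 type.
 */
static inline struct gve_ptype gve_example_ptype_lookup(const struct gve_ptype_lut *lut,
							u16 packet_type)
{
	/* PTYPEs are 10 bits wide, so mask to the table size. */
	return lut->ptypes[packet_type & (GVE_NUM_PTYPES - 1)];
}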
496
497/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
498 * when the entire configure_device_resources command is zeroed out and the
499 * queue_format is not specified.
500 */
501enum gve_queue_format {
502	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
503	GVE_GQI_RDA_FORMAT		= 0x1,
504	GVE_GQI_QPL_FORMAT		= 0x2,
505	GVE_DQO_RDA_FORMAT		= 0x3,
506};
507
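/* Illustrative sketch (hypothetical helper): the property the comment above
 * relies on - a zeroed command meaning "no queue format chosen" - can be
 * asserted at compile time wherever that assumption is made.
 */
static inline void gve_example_check_default_queue_format(void)
{
	BUILD_BUG_ON(GVE_QUEUE_FORMAT_UNSPECIFIED != 0);
}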
508struct gve_priv {
509	struct net_device *dev;
510	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
511	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
512	struct gve_queue_page_list *qpls; /* array of num qpls */
513	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
514	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
515	dma_addr_t irq_db_indices_bus;
516	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
517	char mgmt_msix_name[IFNAMSIZ + 16];
518	u32 mgmt_msix_idx;
519	__be32 *counter_array; /* array of num_event_counters */
520	dma_addr_t counter_array_bus;
521
522	u16 num_event_counters;
523	u16 tx_desc_cnt; /* num desc per ring */
524	u16 rx_desc_cnt; /* num desc per ring */
525	u16 tx_pages_per_qpl; /* num pages per tx qpl */
526	u16 rx_data_slot_cnt; /* num data slots per rx ring */
527	u64 max_registered_pages;
528	u64 num_registered_pages; /* num pages registered with NIC */
 
529	u32 rx_copybreak; /* copy packets smaller than this */
530	u16 default_num_queues; /* default num queues to set up */
531
 
532	struct gve_queue_config tx_cfg;
533	struct gve_queue_config rx_cfg;
534	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
535	u32 num_ntfy_blks; /* split between TX and RX so must be even */
536
537	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
538	__be32 __iomem *db_bar2; /* "array" of doorbells */
539	u32 msg_enable;	/* level for netif* netdev print macros	*/
540	struct pci_dev *pdev;
541
542	/* metrics */
543	u32 tx_timeo_cnt;
544
545	/* Admin queue - see gve_adminq.h */
546	union gve_adminq_command *adminq;
547	dma_addr_t adminq_bus_addr;
 
 
548	u32 adminq_mask; /* masks prod_cnt to adminq size */
549	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
550	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
551	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
552	/* free-running count of each AQ cmd executed */
553	u32 adminq_describe_device_cnt;
554	u32 adminq_cfg_device_resources_cnt;
555	u32 adminq_register_page_list_cnt;
556	u32 adminq_unregister_page_list_cnt;
557	u32 adminq_create_tx_queue_cnt;
558	u32 adminq_create_rx_queue_cnt;
559	u32 adminq_destroy_tx_queue_cnt;
560	u32 adminq_destroy_rx_queue_cnt;
561	u32 adminq_dcfg_device_resources_cnt;
562	u32 adminq_set_driver_parameter_cnt;
563	u32 adminq_report_stats_cnt;
564	u32 adminq_report_link_speed_cnt;
565	u32 adminq_get_ptype_map_cnt;
566	u32 adminq_verify_driver_compatibility_cnt;
567
568	/* Global stats */
569	u32 interface_up_cnt; /* count of times interface turned up since last reset */
570	u32 interface_down_cnt; /* count of times interface turned down since last reset */
571	u32 reset_cnt; /* count of reset */
572	u32 page_alloc_fail; /* count of page alloc fails */
573	u32 dma_mapping_error; /* count of dma mapping errors */
574	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
575	u32 suspend_cnt; /* count of times suspended */
576	u32 resume_cnt; /* count of times resumed */
577	struct workqueue_struct *gve_wq;
578	struct work_struct service_task;
579	struct work_struct stats_report_task;
580	unsigned long service_task_flags;
581	unsigned long state_flags;
582
583	struct gve_stats_report *stats_report;
584	u64 stats_report_len;
585	dma_addr_t stats_report_bus; /* dma address for the stats report */
586	unsigned long ethtool_flags;
587
588	unsigned long stats_report_timer_period;
589	struct timer_list stats_report_timer;
590
591	/* Gvnic device link speed from hypervisor. */
592	u64 link_speed;
593	bool up_before_suspend; /* True if dev was up before suspend */
594
595	struct gve_options_dqo_rda options_dqo_rda;
596	struct gve_ptype_lut *ptype_lut_dqo;
597
598	/* Must be a power of two. */
599	int data_buffer_size_dqo;
 
600
601	enum gve_queue_format queue_format;
602
603	/* Interrupt coalescing settings */
604	u32 tx_coalesce_usecs;
605	u32 rx_coalesce_usecs;
606};
607
608enum gve_service_task_flags_bit {
609	GVE_PRIV_FLAGS_DO_RESET			= 1,
610	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
611	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
612	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
613};
614
615enum gve_state_flags_bit {
616	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
617	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
618	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
619	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
620};
621
622enum gve_ethtool_flags_bit {
623	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
624};
625
626static inline bool gve_get_do_reset(struct gve_priv *priv)
627{
628	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
629}
630
631static inline void gve_set_do_reset(struct gve_priv *priv)
632{
633	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
634}
635
636static inline void gve_clear_do_reset(struct gve_priv *priv)
637{
638	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
639}
640
641static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
642{
643	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
644			&priv->service_task_flags);
645}
646
647static inline void gve_set_reset_in_progress(struct gve_priv *priv)
648{
649	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
650}
651
652static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
653{
654	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
655}
656
657static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
658{
659	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
660			&priv->service_task_flags);
661}
662
663static inline void gve_set_probe_in_progress(struct gve_priv *priv)
664{
665	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
666}
667
668static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
669{
670	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
671}
672
673static inline bool gve_get_do_report_stats(struct gve_priv *priv)
674{
675	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
676			&priv->service_task_flags);
677}
678
679static inline void gve_set_do_report_stats(struct gve_priv *priv)
680{
681	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
682}
683
684static inline void gve_clear_do_report_stats(struct gve_priv *priv)
685{
686	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
687}
688
689static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
690{
691	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
692}
693
694static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
695{
696	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
697}
698
699static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
700{
701	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
702}
703
704static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
705{
706	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
707}
708
709static inline void gve_set_device_resources_ok(struct gve_priv *priv)
710{
711	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
712}
713
714static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
715{
716	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
717}
718
719static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
720{
721	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
722}
723
724static inline void gve_set_device_rings_ok(struct gve_priv *priv)
725{
726	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
727}
728
729static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
730{
731	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
732}
733
734static inline bool gve_get_napi_enabled(struct gve_priv *priv)
735{
736	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
737}
738
739static inline void gve_set_napi_enabled(struct gve_priv *priv)
740{
741	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
742}
743
744static inline void gve_clear_napi_enabled(struct gve_priv *priv)
745{
746	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
747}
748
749static inline bool gve_get_report_stats(struct gve_priv *priv)
750{
751	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
752}
753
754static inline void gve_clear_report_stats(struct gve_priv *priv)
755{
756	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
757}
758
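/* Illustrative sketch (hypothetical helper): these flag bits are set by a
 * producer and then handed to the service task, which tests and clears them
 * before acting. Requesting a reset might look like this, using the gve_wq
 * and service_task members declared above (compare gve_schedule_reset(),
 * declared near the end of this header).
 */
static inline void gve_example_request_reset(struct gve_priv *priv)
{
	gve_set_do_reset(priv);
	queue_work(priv->gve_wq, &priv->service_task);
}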
759/* Returns the address of the given ntfy_block's irq doorbell
760 */
761static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
762					       struct gve_notify_block *block)
763{
764	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
765}
766
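/* Illustrative sketch (hypothetical helper): interrupt ack/mask writes go
 * through the block's doorbell returned above; the value written is
 * device-defined and not spelled out in this header.
 */
static inline void gve_example_write_irq_doorbell(struct gve_priv *priv,
						  struct gve_notify_block *block,
						  u32 val)
{
	iowrite32be(val, gve_irq_doorbell(priv, block));
}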
767/* Returns the index into ntfy_blocks of the given tx ring's block
768 */
769static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
770{
771	return queue_idx;
772}
773
774/* Returns the index into ntfy_blocks of the given rx ring's block
775 */
776static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
777{
778	return (priv->num_ntfy_blks / 2) + queue_idx;
779}
780
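/* Illustrative sketch: TX rings map onto the first half of the notification
 * blocks and RX rings onto the second half, so looking up an RX ring's block
 * (hypothetical helper) combines the index helper above with the ntfy_blocks
 * array in gve_priv.
 */
static inline struct gve_notify_block *gve_example_rx_ntfy_block(struct gve_priv *priv,
								 u32 rx_qid)
{
	return &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, rx_qid)];
}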
781/* Returns the number of tx queue page lists
782 */
783static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
784{
785	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
786		return 0;
787
788	return priv->tx_cfg.num_queues;
789}
790
791/* Returns the number of rx queue page lists
792 */
793static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
794{
795	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
796		return 0;
797
798	return priv->rx_cfg.num_queues;
799}
800
801/* Returns a pointer to the next available tx qpl in the list of qpls
802 */
803static inline
804struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
805{
806	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
807				     priv->qpl_cfg.qpl_map_size);
808
809	/* we are out of tx qpls */
810	if (id >= gve_num_tx_qpls(priv))
811		return NULL;
812
813	set_bit(id, priv->qpl_cfg.qpl_id_map);
814	return &priv->qpls[id];
815}
816
817/* Returns a pointer to the next available rx qpl in the list of qpls
818 */
819static inline
820struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
821{
822	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
823				    priv->qpl_cfg.qpl_map_size,
824				    gve_num_tx_qpls(priv));
825
826	/* we are out of rx qpls */
827	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
828		return NULL;
829
830	set_bit(id, priv->qpl_cfg.qpl_id_map);
831	return &priv->qpls[id];
832}
833
834/* Unassigns the qpl with the given id
835 */
836static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
837{
838	clear_bit(id, priv->qpl_cfg.qpl_id_map);
839}
840
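/* Illustrative sketch (hypothetical helper): a qpl obtained from
 * gve_assign_tx_qpl()/gve_assign_rx_qpl() is returned to the bitmap by id,
 * and the id is simply the qpl's index in priv->qpls.
 */
static inline void gve_example_release_qpl(struct gve_priv *priv,
					   struct gve_queue_page_list *qpl)
{
	gve_unassign_qpl(priv, qpl - priv->qpls);
}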
841/* Returns the correct dma direction for tx and rx qpls
842 */
843static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
844						      int id)
845{
846	if (id < gve_num_tx_qpls(priv))
847		return DMA_TO_DEVICE;
848	else
849		return DMA_FROM_DEVICE;
850}
851
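/* Illustrative sketch (hypothetical helper): pages that back a qpl are
 * DMA-mapped with the direction matching the qpl's TX/RX role as returned
 * above.
 */
static inline dma_addr_t gve_example_map_qpl_page(struct gve_priv *priv, int id,
						  struct page *page)
{
	return dma_map_page(&priv->pdev->dev, page, 0, PAGE_SIZE,
			    gve_qpl_dma_dir(priv, id));
}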
852static inline bool gve_is_gqi(struct gve_priv *priv)
853{
854	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
855		priv->queue_format == GVE_GQI_QPL_FORMAT;
856}
857
858/* buffers */
859int gve_alloc_page(struct gve_priv *priv, struct device *dev,
860		   struct page **page, dma_addr_t *dma,
861		   enum dma_data_direction, gfp_t gfp_flags);
862void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
863		   enum dma_data_direction);
864/* tx handling */
865netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
866bool gve_tx_poll(struct gve_notify_block *block, int budget);
867int gve_tx_alloc_rings(struct gve_priv *priv);
868void gve_tx_free_rings_gqi(struct gve_priv *priv);
869u32 gve_tx_load_event_counter(struct gve_priv *priv,
870			      struct gve_tx_ring *tx);
871bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
872/* rx handling */
873void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
874int gve_rx_poll(struct gve_notify_block *block, int budget);
875bool gve_rx_work_pending(struct gve_rx_ring *rx);
876int gve_rx_alloc_rings(struct gve_priv *priv);
877void gve_rx_free_rings_gqi(struct gve_priv *priv);
878/* Reset */
879void gve_schedule_reset(struct gve_priv *priv);
880int gve_reset(struct gve_priv *priv, bool attempt_teardown);
881int gve_adjust_queues(struct gve_priv *priv,
882		      struct gve_queue_config new_rx_config,
883		      struct gve_queue_config new_tx_config);
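/* Illustrative sketch (hypothetical helper): an ethtool-style channel change
 * copies the current configs, updates num_queues within the advertised
 * maximums and hands both to gve_adjust_queues() (RX config first, matching
 * the declaration above).
 */
static inline int gve_example_set_num_queues(struct gve_priv *priv,
					     u16 new_tx, u16 new_rx)
{
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;

	if (new_tx > new_tx_cfg.max_queues || new_rx > new_rx_cfg.max_queues)
		return -EINVAL;

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;
	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}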
 
 
 
 
 
 
884/* report stats handling */
885void gve_handle_report_stats(struct gve_priv *priv);
886/* exported by ethtool.c */
887extern const struct ethtool_ops gve_ethtool_ops;
888/* needed by ethtool */
889extern const char gve_version_str[];
890#endif /* _GVE_H_ */