1/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 * Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2021 Google, Inc.
5 */
6
7#ifndef _GVE_ADMINQ_H
8#define _GVE_ADMINQ_H
9
10#include <linux/build_bug.h>
11
/* Admin queue opcodes
 *
 * Opcode values are part of the device ABI: do not renumber. Values absent
 * from the list (e.g. 0x11) are simply not used by this driver.
 */
enum gve_adminq_opcodes {
	GVE_ADMINQ_DESCRIBE_DEVICE = 0x1,
	GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES = 0x2,
	GVE_ADMINQ_REGISTER_PAGE_LIST = 0x3,
	GVE_ADMINQ_UNREGISTER_PAGE_LIST = 0x4,
	GVE_ADMINQ_CREATE_TX_QUEUE = 0x5,
	GVE_ADMINQ_CREATE_RX_QUEUE = 0x6,
	GVE_ADMINQ_DESTROY_TX_QUEUE = 0x7,
	GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
	GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
	GVE_ADMINQ_CONFIGURE_RSS = 0xA,
	GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
	GVE_ADMINQ_REPORT_STATS = 0xC,
	GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
	GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
	GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
	GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
	GVE_ADMINQ_QUERY_RSS = 0x12,

	/* For commands that are larger than 56 bytes */
	GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
};
35
/* The normal adminq command is restricted to be 56 bytes at maximum. For the
 * longer adminq command, it is wrapped by GVE_ADMINQ_EXTENDED_COMMAND with
 * inner opcode of gve_adminq_extended_cmd_opcodes specified. The inner command
 * is written in the dma memory allocated by GVE_ADMINQ_EXTENDED_COMMAND.
 */
enum gve_adminq_extended_cmd_opcodes {
	/* Inner command is a struct gve_adminq_configure_flow_rule */
	GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101,
};
44
/* Admin queue status codes
 *
 * Written by the device into the command's status field. UNSET means the
 * device has not completed the command yet; PASSED is success; the 0xFFFFFFFx
 * values are errors (names appear to mirror the canonical gRPC status codes —
 * the exact device semantics of each are not defined here).
 */
enum gve_adminq_statuses {
	GVE_ADMINQ_COMMAND_UNSET = 0x0,
	GVE_ADMINQ_COMMAND_PASSED = 0x1,
	GVE_ADMINQ_COMMAND_ERROR_ABORTED = 0xFFFFFFF0,
	GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS = 0xFFFFFFF1,
	GVE_ADMINQ_COMMAND_ERROR_CANCELLED = 0xFFFFFFF2,
	GVE_ADMINQ_COMMAND_ERROR_DATALOSS = 0xFFFFFFF3,
	GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED = 0xFFFFFFF4,
	GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION = 0xFFFFFFF5,
	GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR = 0xFFFFFFF6,
	GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT = 0xFFFFFFF7,
	GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND = 0xFFFFFFF8,
	GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE = 0xFFFFFFF9,
	GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED = 0xFFFFFFFA,
	GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED = 0xFFFFFFFB,
	GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED = 0xFFFFFFFC,
	GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE = 0xFFFFFFFD,
	GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED = 0xFFFFFFFE,
	GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR = 0xFFFFFFFF,
};
66
/* Descriptor-format version the driver requests in DESCRIBE_DEVICE */
#define GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION 1

/* All AdminQ command structs should be naturally packed. The static_assert
 * calls make sure this is the case at compile time.
 */

/* DESCRIBE_DEVICE command: the device writes a struct gve_device_descriptor
 * (followed by its device options) into the driver-allocated DMA buffer at
 * device_descriptor_addr, which is available_length bytes long.
 */
struct gve_adminq_describe_device {
	__be64 device_descriptor_addr;
	__be32 device_descriptor_version;
	__be32 available_length;
};

static_assert(sizeof(struct gve_adminq_describe_device) == 16);
80
/* Device-written response to DESCRIBE_DEVICE: device limits and defaults.
 * total_length covers the descriptor plus the num_device_options
 * struct gve_device_option entries that follow it in the DMA buffer.
 */
struct gve_device_descriptor {
	__be64 max_registered_pages;
	__be16 reserved1;
	__be16 tx_queue_entries;	/* default TX ring size */
	__be16 rx_queue_entries;	/* default RX ring size */
	__be16 default_num_queues;
	__be16 mtu;
	__be16 counters;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
	u8 mac[ETH_ALEN];
	__be16 num_device_options;
	__be16 total_length;
	u8 reserved2[6];
};

static_assert(sizeof(struct gve_device_descriptor) == 40);
98
/* Header of one device option in the DESCRIBE_DEVICE response; followed by
 * option_length bytes of option-specific payload (one of the
 * gve_device_option_* structs below, selected by option_id).
 */
struct gve_device_option {
	__be16 option_id;
	__be16 option_length;
	__be32 required_features_mask;
};

static_assert(sizeof(struct gve_device_option) == 8);
106
/* Payload for GVE_DEV_OPT_ID_GQI_RDA */
struct gve_device_option_gqi_rda {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);

/* Payload for GVE_DEV_OPT_ID_GQI_QPL */
struct gve_device_option_gqi_qpl {
	__be32 supported_features_mask;
};

static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
118
/* Payload for GVE_DEV_OPT_ID_DQO_RDA */
struct gve_device_option_dqo_rda {
	__be32 supported_features_mask;
	__be32 reserved;
};

static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);

/* Payload for GVE_DEV_OPT_ID_DQO_QPL */
struct gve_device_option_dqo_qpl {
	__be32 supported_features_mask;
	__be16 tx_pages_per_qpl;
	__be16 rx_pages_per_qpl;
};

static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8);
133
/* Payload for GVE_DEV_OPT_ID_JUMBO_FRAMES */
struct gve_device_option_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;
	u8 padding[2];
};

static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);

/* Payload for GVE_DEV_OPT_ID_BUFFER_SIZES */
struct gve_device_option_buffer_sizes {
	/* GVE_SUP_BUFFER_SIZES_MASK bit should be set */
	__be32 supported_features_mask;
	__be16 packet_buffer_size;
	__be16 header_buffer_size;
};

static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
150
/* Payload for GVE_DEV_OPT_ID_MODIFY_RING: device-supported ring-size range */
struct gve_device_option_modify_ring {
	/* NOTE(review): "featured" is a typo for "features", but this field
	 * name is device-option ABI surface referenced by the driver source;
	 * renaming would have to be coordinated with all users.
	 */
	__be32 supported_featured_mask;
	__be16 max_rx_ring_size;
	__be16 max_tx_ring_size;
	__be16 min_rx_ring_size;
	__be16 min_tx_ring_size;
};

static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
160
/* Payload for GVE_DEV_OPT_ID_FLOW_STEERING */
struct gve_device_option_flow_steering {
	__be32 supported_features_mask;
	__be32 reserved;
	__be32 max_flow_rules;
};

static_assert(sizeof(struct gve_device_option_flow_steering) == 12);

/* Payload for GVE_DEV_OPT_ID_RSS_CONFIG */
struct gve_device_option_rss_config {
	__be32 supported_features_mask;
	__be16 hash_key_size;
	__be16 hash_lut_size;
};

static_assert(sizeof(struct gve_device_option_rss_config) == 8);
176
/* Terminology:
 *
 * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
 * mapped and read/updated by the device.
 *
 * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
 * the device for read/write and data is copied from/to SKBs.
 */

/* option_id values for struct gve_device_option. Values are device ABI;
 * gaps are ids this driver does not consume.
 */
enum gve_dev_opt_id {
	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
	GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
	GVE_DEV_OPT_ID_DQO_QPL = 0x7,
	GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
	GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
	GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
	GVE_DEV_OPT_ID_RSS_CONFIG = 0xe,
};
197
/* Expected required_features_mask per option; currently no option requires
 * any feature bits from the driver (all masks are 0).
 */
enum gve_dev_opt_req_feat_mask {
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
	GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG = 0x0,
};
210
/* Bits tested against an option's supported_features_mask */
enum gve_sup_feature_mask {
	GVE_SUP_MODIFY_RING_MASK = 1 << 0,
	GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
	GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
	GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
	GVE_SUP_RSS_CONFIG_MASK = 1 << 7,
};
218
/* GQI raw addressing option carries no payload */
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0

/* Size of the os_version_str1/2 buffers in struct gve_driver_info */
#define GVE_VERSION_STR_LEN 128

/* Bit numbers within struct gve_driver_info's 256-bit
 * driver_capability_flags[] (see GVE_CAP1..GVE_CAP4 below).
 * NOTE(review): "capbility" is a typo for "capability"; kept because the
 * enum tag may be referenced outside this header.
 */
enum gve_driver_capbility {
	gve_driver_capability_gqi_qpl = 0,
	gve_driver_capability_gqi_rda = 1,
	gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
	gve_driver_capability_dqo_rda = 3,
	gve_driver_capability_alt_miss_compl = 4,
	gve_driver_capability_flexible_buffer_size = 5,
	gve_driver_capability_flexible_rss_size = 6,
};
232
/* Map a gve_driver_capbility bit number to a bit mask within the matching
 * 64-bit word of driver_capability_flags[]: GVE_CAP1 covers bits 0-63,
 * GVE_CAP2 bits 64-127, GVE_CAP3 bits 128-191, GVE_CAP4 bits 192-255.
 *
 * The argument is fully parenthesized so that an expression argument
 * (e.g. "base + off") is cast and offset as a whole, instead of the cast
 * binding to only its first operand.
 */
#define GVE_CAP1(a) BIT((int)(a))
#define GVE_CAP2(a) BIT(((int)(a)) - 64)
#define GVE_CAP3(a) BIT(((int)(a)) - 128)
#define GVE_CAP4(a) BIT(((int)(a)) - 192)
237
/* Capability words advertised to the device via struct gve_driver_info.
 * All currently-claimed capabilities live in the first 64-bit word.
 */
#define GVE_DRIVER_CAPABILITY_FLAGS1 \
	(GVE_CAP1(gve_driver_capability_gqi_qpl) | \
	 GVE_CAP1(gve_driver_capability_gqi_rda) | \
	 GVE_CAP1(gve_driver_capability_dqo_rda) | \
	 GVE_CAP1(gve_driver_capability_alt_miss_compl) | \
	 GVE_CAP1(gve_driver_capability_flexible_buffer_size) | \
	 GVE_CAP1(gve_driver_capability_flexible_rss_size))

#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
249
/* GVE_ADMINQ_EXTENDED_COMMAND wrapper: the real command (inner_opcode from
 * enum gve_adminq_extended_cmd_opcodes, inner_length bytes) lives in DMA
 * memory at inner_command_addr.
 */
struct gve_adminq_extended_command {
	__be32 inner_opcode;
	__be32 inner_length;
	__be64 inner_command_addr;
};

static_assert(sizeof(struct gve_adminq_extended_command) == 16);
257
258struct gve_driver_info {
259 u8 os_type; /* 0x01 = Linux */
260 u8 driver_major;
261 u8 driver_minor;
262 u8 driver_sub;
263 __be32 os_version_major;
264 __be32 os_version_minor;
265 __be32 os_version_sub;
266 __be64 driver_capability_flags[4];
267 u8 os_version_str1[GVE_VERSION_STR_LEN];
268 u8 os_version_str2[GVE_VERSION_STR_LEN];
269};
270
/* VERIFY_DRIVER_COMPATIBILITY command: points the device at a DMA'd
 * struct gve_driver_info of driver_info_len bytes.
 */
struct gve_adminq_verify_driver_compatibility {
	__be64 driver_info_len;
	__be64 driver_info_addr;
};

static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
277
/* CONFIGURE_DEVICE_RESOURCES command: hands the device the DMA addresses of
 * the counter array and irq doorbell array, plus their geometry.
 */
struct gve_adminq_configure_device_resources {
	__be64 counter_array;
	__be64 irq_db_addr;
	__be32 num_counters;
	__be32 num_irq_dbs;
	__be32 irq_db_stride;	/* bytes between consecutive irq doorbells */
	__be32 ntfy_blk_msix_base_idx;
	u8 queue_format;
	u8 padding[7];
};

static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
290
/* REGISTER_PAGE_LIST command: registers a queue page list (QPL) whose page
 * addresses are in the DMA array at page_address_list_addr.
 */
struct gve_adminq_register_page_list {
	__be32 page_list_id;
	__be32 num_pages;
	__be64 page_address_list_addr;
	__be64 page_size;
};

static_assert(sizeof(struct gve_adminq_register_page_list) == 24);

/* UNREGISTER_PAGE_LIST command */
struct gve_adminq_unregister_page_list {
	__be32 page_list_id;
};

static_assert(sizeof(struct gve_adminq_unregister_page_list) == 4);
305
/* Sentinel queue_page_list_id meaning "no QPL, raw DMA addressing" */
#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF

/* CREATE_TX_QUEUE command */
struct gve_adminq_create_tx_queue {
	__be32 queue_id;
	__be32 reserved;
	__be64 queue_resources_addr;	/* DMA addr of struct gve_queue_resources */
	__be64 tx_ring_addr;
	__be32 queue_page_list_id;	/* or GVE_RAW_ADDRESSING_QPL_ID */
	__be32 ntfy_id;
	__be64 tx_comp_ring_addr;	/* DQO completion ring */
	__be16 tx_ring_size;
	__be16 tx_comp_ring_size;
	u8 padding[4];
};

static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
322
/* CREATE_RX_QUEUE command */
struct gve_adminq_create_rx_queue {
	__be32 queue_id;
	__be32 index;
	__be32 reserved;
	__be32 ntfy_id;
	__be64 queue_resources_addr;	/* DMA addr of struct gve_queue_resources */
	__be64 rx_desc_ring_addr;
	__be64 rx_data_ring_addr;
	__be32 queue_page_list_id;	/* or GVE_RAW_ADDRESSING_QPL_ID */
	__be16 rx_ring_size;
	__be16 packet_buffer_size;
	__be16 rx_buff_ring_size;	/* DQO buffer ring */
	u8 enable_rsc;
	u8 padding1;
	__be16 header_buffer_size;	/* for header-split */
	u8 padding2[2];
};

static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
342
/* Queue resources that are shared with the device; padded to a full
 * 64-byte block by the anonymous union.
 */
struct gve_queue_resources {
	union {
		struct {
			__be32 db_index;	/* Device -> Guest */
			__be32 counter_index;	/* Device -> Guest */
		};
		u8 reserved[64];
	};
};

static_assert(sizeof(struct gve_queue_resources) == 64);
355
/* DESTROY_TX_QUEUE command */
struct gve_adminq_destroy_tx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_tx_queue) == 4);

/* DESTROY_RX_QUEUE command */
struct gve_adminq_destroy_rx_queue {
	__be32 queue_id;
};

static_assert(sizeof(struct gve_adminq_destroy_rx_queue) == 4);
367
/* GVE Set Driver Parameter Types */
enum gve_set_driver_param_types {
	GVE_SET_PARAM_MTU = 0x1,
};

/* SET_DRIVER_PARAMETER command: sets one parameter (selected by
 * parameter_type, from enum gve_set_driver_param_types) to parameter_value.
 */
struct gve_adminq_set_driver_parameter {
	__be32 parameter_type;
	u8 reserved[4];
	__be64 parameter_value;
};

static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
380
/* REPORT_STATS command: device writes a struct gve_stats_report into the
 * DMA buffer at stats_report_addr, repeating every "interval".
 */
struct gve_adminq_report_stats {
	__be64 stats_report_len;
	__be64 stats_report_addr;
	__be64 interval;
};

static_assert(sizeof(struct gve_adminq_report_stats) == 24);

/* REPORT_LINK_SPEED command: device writes the link speed to this address */
struct gve_adminq_report_link_speed {
	__be64 link_speed_address;
};

static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
394
/* One stat entry in the stats report (stat_name from enum gve_stat_names) */
struct stats {
	__be32 stat_name;
	__be32 queue_id;
	__be64 value;
};

static_assert(sizeof(struct stats) == 16);

/* Device-written stats report: written_count entries follow the header */
struct gve_stats_report {
	__be64 written_count;
	struct stats stats[];
};

static_assert(sizeof(struct gve_stats_report) == 8);
409
/* stat_name values in struct stats; driver-side stats start at 1,
 * NIC-side stats start at 65.
 */
enum gve_stat_names {
	// stats from gve
	TX_WAKE_CNT = 1,
	TX_STOP_CNT = 2,
	TX_FRAMES_SENT = 3,
	TX_BYTES_SENT = 4,
	TX_LAST_COMPLETION_PROCESSED = 5,
	RX_NEXT_EXPECTED_SEQUENCE = 6,
	RX_BUFFERS_POSTED = 7,
	TX_TIMEOUT_CNT = 8,
	// stats from NIC
	RX_QUEUE_DROP_CNT = 65,
	RX_NO_BUFFERS_POSTED = 66,
	RX_DROPS_PACKET_OVER_MRU = 67,
	RX_DROPS_INVALID_CHECKSUM = 68,
};
426
/* L3 protocol classification used in the ptype map */
enum gve_l3_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L3_TYPE_UNKNOWN = 0,
	GVE_L3_TYPE_OTHER,
	GVE_L3_TYPE_IPV4,
	GVE_L3_TYPE_IPV6,
};

/* L4 protocol classification used in the ptype map */
enum gve_l4_type {
	/* Must be zero so zero initialized LUT is unknown. */
	GVE_L4_TYPE_UNKNOWN = 0,
	GVE_L4_TYPE_OTHER,
	GVE_L4_TYPE_TCP,
	GVE_L4_TYPE_UDP,
	GVE_L4_TYPE_ICMP,
	GVE_L4_TYPE_SCTP,
};
444
/* These are control path types for PTYPE which are the same as the data path
 * types.
 */
struct gve_ptype_entry {
	u8 l3_type;	/* enum gve_l3_type */
	u8 l4_type;	/* enum gve_l4_type */
};

/* Device-written table mapping every 10-bit packet type to L3/L4 classes */
struct gve_ptype_map {
	struct gve_ptype_entry ptypes[1 << 10]; /* PTYPES are always 10 bits. */
};

/* GET_PTYPE_MAP command: device writes a struct gve_ptype_map into the DMA
 * buffer at ptype_map_addr (ptype_map_len bytes).
 */
struct gve_adminq_get_ptype_map {
	__be64 ptype_map_len;
	__be64 ptype_map_addr;
};
461
/* Flow-steering related definitions */

/* Sub-opcodes for GVE_ADMINQ_CONFIGURE_FLOW_RULE */
enum gve_adminq_flow_rule_cfg_opcode {
	GVE_FLOW_RULE_CFG_ADD	= 0,
	GVE_FLOW_RULE_CFG_DEL	= 1,
	GVE_FLOW_RULE_CFG_RESET	= 2,
};

/* Sub-opcodes for GVE_ADMINQ_QUERY_FLOW_RULES */
enum gve_adminq_flow_rule_query_opcode {
	GVE_FLOW_RULE_QUERY_RULES	= 0,
	GVE_FLOW_RULE_QUERY_IDS		= 1,
	GVE_FLOW_RULE_QUERY_STATS	= 2,
};

/* Flow classifications a rule can match on */
enum gve_adminq_flow_type {
	GVE_FLOW_TYPE_TCPV4,
	GVE_FLOW_TYPE_UDPV4,
	GVE_FLOW_TYPE_SCTPV4,
	GVE_FLOW_TYPE_AHV4,
	GVE_FLOW_TYPE_ESPV4,
	GVE_FLOW_TYPE_TCPV6,
	GVE_FLOW_TYPE_UDPV6,
	GVE_FLOW_TYPE_SCTPV6,
	GVE_FLOW_TYPE_AHV6,
	GVE_FLOW_TYPE_ESPV6,
};
487
/* Flow-steering command */

/* One flow rule: packets matching (key & mask) are steered to "action".
 * struct gve_flow_spec is declared elsewhere (gve.h).
 */
struct gve_adminq_flow_rule {
	__be16 flow_type;	/* enum gve_adminq_flow_type */
	__be16 action; /* RX queue id */
	struct gve_flow_spec key;
	struct gve_flow_spec mask;
};

/* Extended (inner) command GVE_ADMINQ_CONFIGURE_FLOW_RULE; opcode is from
 * enum gve_adminq_flow_rule_cfg_opcode, location identifies the rule slot.
 */
struct gve_adminq_configure_flow_rule {
	__be16 opcode;
	u8 padding[2];
	struct gve_adminq_flow_rule rule;
	__be32 location;
};

static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
504
/* Device-written header of a QUERY_FLOW_RULES response; queried rules
 * follow within total_length bytes.
 */
struct gve_query_flow_rules_descriptor {
	__be32 num_flow_rules;
	__be32 max_flow_rules;
	__be32 num_queried_rules;
	__be32 total_length;
};

/* One rule in a QUERY_FLOW_RULES response, tagged with its slot */
struct gve_adminq_queried_flow_rule {
	__be32 location;
	struct gve_adminq_flow_rule flow_rule;
};

/* QUERY_FLOW_RULES command; opcode is from
 * enum gve_adminq_flow_rule_query_opcode.
 */
struct gve_adminq_query_flow_rules {
	__be16 opcode;
	u8 padding[2];
	__be32 starting_rule_id;
	__be64 available_length; /* The dma memory length that the driver allocated */
	__be64 rule_descriptor_addr; /* The dma memory address */
};

static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
526
/* Bit numbers for the hash_types field of the RSS commands below */
enum gve_rss_hash_type {
	GVE_RSS_HASH_IPV4,
	GVE_RSS_HASH_TCPV4,
	GVE_RSS_HASH_IPV6,
	GVE_RSS_HASH_IPV6_EX,
	GVE_RSS_HASH_TCPV6,
	GVE_RSS_HASH_TCPV6_EX,
	GVE_RSS_HASH_UDPV4,
	GVE_RSS_HASH_UDPV6,
	GVE_RSS_HASH_UDPV6_EX,
};

/* CONFIGURE_RSS command: hash key and indirection table are DMA'd from the
 * given addresses with the given sizes.
 */
struct gve_adminq_configure_rss {
	__be16 hash_types;	/* bitmask of enum gve_rss_hash_type bits */
	u8 hash_alg;
	u8 reserved;
	__be16 hash_key_size;
	__be16 hash_lut_size;
	__be64 hash_key_addr;
	__be64 hash_lut_addr;
};

static_assert(sizeof(struct gve_adminq_configure_rss) == 24);
550
/* Device-written header of a QUERY_RSS response; the remaining
 * total_length bytes carry the key and indirection table.
 */
struct gve_query_rss_descriptor {
	__be32 total_length;
	__be16 hash_types;
	u8 hash_alg;
	u8 reserved;
};

/* QUERY_RSS command: device fills the DMA buffer at rss_descriptor_addr */
struct gve_adminq_query_rss {
	__be64 available_length;
	__be64 rss_descriptor_addr;
};

static_assert(sizeof(struct gve_adminq_query_rss) == 16);
564
/* One 64-byte admin queue entry. The opcode/status header takes 8 bytes,
 * leaving at most 56 bytes of payload per command — larger commands go
 * through GVE_ADMINQ_EXTENDED_COMMAND.
 */
union gve_adminq_command {
	struct {
		__be32 opcode;	/* enum gve_adminq_opcodes */
		__be32 status;	/* enum gve_adminq_statuses, device-written */
		union {
			struct gve_adminq_configure_device_resources
						configure_device_resources;
			struct gve_adminq_create_tx_queue create_tx_queue;
			struct gve_adminq_create_rx_queue create_rx_queue;
			struct gve_adminq_destroy_tx_queue destroy_tx_queue;
			struct gve_adminq_destroy_rx_queue destroy_rx_queue;
			struct gve_adminq_describe_device describe_device;
			struct gve_adminq_register_page_list reg_page_list;
			struct gve_adminq_unregister_page_list unreg_page_list;
			struct gve_adminq_set_driver_parameter set_driver_param;
			struct gve_adminq_report_stats report_stats;
			struct gve_adminq_report_link_speed report_link_speed;
			struct gve_adminq_get_ptype_map get_ptype_map;
			struct gve_adminq_verify_driver_compatibility
						verify_driver_compatibility;
			struct gve_adminq_query_flow_rules query_flow_rules;
			struct gve_adminq_configure_rss configure_rss;
			struct gve_adminq_query_rss query_rss;
			struct gve_adminq_extended_command extended_command;
		};
	};
	u8 reserved[64];
};

static_assert(sizeof(union gve_adminq_command) == 64);
595
/* Admin queue API implemented in gve_adminq.c. Unless noted, functions
 * return 0 on success or a negative errno.
 */

/* Admin queue lifecycle */
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);

/* Device description and resource configuration */
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);

/* Queue create/destroy (batched variants act on a contiguous id range) */
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);

/* Queue page lists */
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);

/* Driver parameters, stats and compatibility */
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);

/* Flow steering */
int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
int gve_adminq_reset_flow_rules(struct gve_priv *priv);
int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);

/* RSS */
int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);
int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh);

/* Packet-type map (DQO) */
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut);
632
633#endif /* _GVE_ADMINQ_H */