/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION	0
#define MANA_MINOR_VERSION	1
#define MANA_MICRO_VERSION	1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64
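
/* Illustrative sketch (not part of this header): a power-of-2 table size
 * lets an RSS indirection lookup fold the packet hash with a mask instead
 * of a modulo, e.g.:
 *
 *	u32 slot = pkt_hash & (apc->indir_table_sz - 1);
 *	u32 rxq_idx = apc->indir_table[slot];
 */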

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. RX
 * buffer sizes beyond this value are rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers was determined through testing as the
 * maximum number of allocatable pages the host supports per guest. TX
 * buffer sizes beyond this value are rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1	: 10;
			u32 vsq_frame	: 14;
			u32 reserved2	: 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* SKBs that have been sent to the HW, for which we are awaiting CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};
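
/* Illustrative sketch (assumes the LSB-first bitfield layout of the
 * little-endian architectures MANA runs on): the union in mana_txq means the
 * VSQ frame number can be read straight out of the GDMA TX queue id:
 *
 *	vsq_frame == (gdma_txq_id >> 10) & GENMASK(13, 0)
 */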

/* SKB data and frags DMA mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
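
/* Illustrative sketch (assumption, not the driver's exact code): the TX path
 * reserves MANA_HEADROOM bytes of skb headroom so the per-skb DMA mappings
 * can be parked at skb->head, e.g.:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
 *					    DMA_TO_DEVICE);
 *	ash->size[0] = skb_headlen(skb);
 */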

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT	= 0,
	MANA_LONG_PKT_FMT	= 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt		: 2;
	u32 is_outer_ipv4	: 1;
	u32 is_outer_ipv6	: 1;
	u32 comp_iphdr_csum	: 1;
	u32 comp_tcp_csum	: 1;
	u32 comp_udp_csum	: 1;
	u32 supress_txcqe_gen	: 1;
	u32 vcq_num		: 24;

	u32 trans_off		: 10; /* Transport header offset */
	u32 vsq_frame		: 14;
	u32 short_vp_offset	: 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap		: 1;
	u32 inner_is_ipv6	: 1;
	u32 inner_tcp_opt	: 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1		: 12;
	u32 pcp			: 3;  /* 802.1Q */
	u32 dei			: 1;  /* 802.1Q */
	u32 vlan_id		: 12; /* 802.1Q */

	u32 inner_frame_offset	: 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset	: 12;
	u32 reserved2		: 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
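
/* Illustrative sketch (not in the driver): because "HW DATA" structures rely
 * on natural alignment rather than __packed, their sizes can be checked
 * against the wire layout at compile time, e.g.:
 *
 *	static_assert(sizeof(struct mana_tx_short_oob) == 8);
 *	static_assert(sizeof(struct mana_tx_oob) == 24);
 */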

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID			= 0,
	CQE_RX_OKAY			= 1,
	CQE_RX_COALESCED_4		= 2,
	CQE_RX_OBJECT_FENCE		= 3,
	CQE_RX_TRUNCATED		= 4,

	CQE_TX_OKAY			= 32,
	CQE_TX_SA_DROP			= 33,
	CQE_TX_MTU_DROP			= 34,
	CQE_TX_INVALID_OOB		= 35,
	CQE_TX_INVALID_ETH_TYPE		= 36,
	CQE_TX_HDR_PROCESSING_ERROR	= 37,
	CQE_TX_VF_DISABLED		= 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE	= 39,
	CQE_TX_VPORT_DISABLED		= 40,
	CQE_TX_VLAN_TAGGING_VIOLATION	= 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type	: 6;
	u32 client_type	: 2;
	u32 vendor_err	: 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4		BIT(0)
#define NDIS_HASH_TCP_IPV4	BIT(1)
#define NDIS_HASH_UDP_IPV4	BIT(2)
#define NDIS_HASH_IPV6		BIT(3)
#define NDIS_HASH_TCP_IPV6	BIT(4)
#define NDIS_HASH_UDP_IPV6	BIT(5)
#define NDIS_HASH_IPV6_EX	BIT(6)
#define NDIS_HASH_TCP_IPV6_EX	BIT(7)
#define NDIS_HASH_UDP_IPV6_EX	BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4                                                         \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 |      \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
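
/* Illustrative sketch (assumption): the RX path can test the completion's
 * rx_hashtype against these masks to pick the skb hash type, e.g.:
 *
 *	if (cqe->rx_hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
 *	else if (cqe->rx_hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
 */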

struct mana_rxcomp_perpkt_info {
	u32 pkt_len	: 16;
	u32 reserved1	: 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id			: 12;
	u32 rx_vlantag_present		: 1;
	u32 rx_outer_iphdr_csum_succeed	: 1;
	u32 rx_outer_iphdr_csum_fail	: 1;
	u32 reserved1			: 1;
	u32 rx_hashtype			: 9;
	u32 rx_iphdr_csum_succeed	: 1;
	u32 rx_iphdr_csum_fail		: 1;
	u32 rx_tcp_csum_succeed		: 1;
	u32 rx_tcp_csum_fail		: 1;
	u32 rx_udp_csum_succeed		: 1;
	u32 rx_udp_csum_fail		: 1;
	u32 reserved2			: 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset	: 5;
	u32 tx_wqe_offset	: 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify that each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer into which the CQ handler can copy the CQEs. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
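
/* Illustrative sketch (assumption, details may differ from the driver): for
 * a given MTU, the mana_rxq sizing fields below relate roughly as:
 *
 *	headroom   = (mtu > MANA_XDP_MTU_MAX) ? 0 : XDP_PACKET_HEADROOM;
 *	datasize   = mtu + ETH_HLEN;
 *	alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + headroom);
 */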

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reuse */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
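
/* Illustrative sketch: with rx_oobs[] as a __counted_by flexible array, an
 * RX queue would be allocated with the OOB array appended in one shot, e.g.:
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
 *	rxq->num_rx_buf = num_rx_buf;
 */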

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject Handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex protecting vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ  0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG	= 0x20001,
	MANA_QUERY_GF_STAT	= 0x20002,
	MANA_CONFIG_VPORT_TX	= 0x20003,
	MANA_CREATE_WQ_OBJ	= 0x20004,
	MANA_DESTROY_WQ_OBJ	= 0x20005,
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};
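
/* Illustrative sketch (assumes mana_gd_init_req_hdr() from gdma.h): a
 * command's request/response pair is typically prepared as:
 *
 *	struct mana_query_device_cfg_req req = {};
 *	struct mana_query_device_cfg_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
 *			     sizeof(req), sizeof(resp));
 */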

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
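
/* Illustrative sketch: since indir_tab[] is a __counted_by flexible array,
 * the steering request can be sized and allocated in one step, e.g.:
 *
 *	req_buf_size = struct_size(req, indir_tab, num_indir_entries);
 *	req = kzalloc(req_buf_size, GFP_KERNEL);
 */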

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats Flags */
/* Rx discards/Errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS		\
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT			\
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
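
/* Illustrative sketch: mana_query_gf_stats() would request a subset of the
 * counters by OR-ing these flags into mana_query_gf_stat_req.req_stats, e.g.:
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES |
 *			STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
 */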

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
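
/* Illustrative sketch (assumption): a vport offset that fits in the 8-bit
 * short_vp_offset field can use the short OOB; larger offsets need the long
 * packet format:
 *
 *	pkt_fmt = (vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) ?
 *		  MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT;
 */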

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index);
#endif /* _MANA_H */