/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)
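
/* Illustrative sketch (hypothetical helper, not part of the driver API):
 * handles returned by the hardware are opaque u64 values, so the only
 * portable validity check is a comparison against INVALID_MANA_HANDLE.
 */
static inline bool mana_handle_is_valid(mana_handle_t handle)
{
	return handle != INVALID_MANA_HANDLE;
}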

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
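
/* Illustrative sketch (hypothetical helper): because the table size is a
 * power of 2, an RSS packet hash selects an indirection entry with a
 * simple mask instead of a modulo.
 */
static inline u32 mana_hash_to_indir_idx(u32 pkt_hash)
{
	return pkt_hash & MANA_INDIRECT_TABLE_MASK;
}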

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* SKBs that have been sent to the HW and are awaiting their CQEs */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
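
/* Illustrative sketch (hypothetical helper, not taken from the driver):
 * a minimal short-format TX OOB for a plain, non-encapsulated IPv4 packet.
 * The caller is expected to have zeroed *oob; the real OOB setup happens
 * in mana_start_xmit().
 */
static inline void mana_tx_oob_example(struct mana_tx_oob *oob,
				       u32 vcq_num, u32 vsq_frame)
{
	oob->s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
	oob->s_oob.is_outer_ipv4 = 1;
	oob->s_oob.vcq_num = vcq_num;	/* CQ that receives the TX CQE */
	oob->s_oob.vsq_frame = vsq_frame;
}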

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
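
/* Illustrative sketch (hypothetical helper): the rx_hashtype reported in
 * the RX completion OOB below is a bitmask of the NDIS_HASH_* values, so
 * an L4 (4-tuple) hash can be told apart from an L3-only hash like this.
 */
static inline bool mana_rx_hash_is_l4(u32 rx_hashtype)
{
	return (rx_hashtype & MANA_HASH_L4) != 0;
}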

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */
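
/* Illustrative sketch (hypothetical helper): completions are dispatched
 * on cqe_hdr.cqe_type; for a TX completion, any value other than
 * CQE_TX_OKAY indicates an error reported by the hardware.
 */
static inline bool mana_tx_cqe_is_err(const struct mana_tx_comp_oob *oob)
{
	return oob->cqe_hdr.cqe_type != CQE_TX_OKAY;
}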

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ) */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
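
/* Worked example (assuming 4K pages and the default XDP_PACKET_HEADROOM
 * of 256): MANA_RXBUF_PAD is the aligned skb_shared_info size plus
 * ETH_HLEN (14), so MANA_XDP_MTU_MAX is 4096 - MANA_RXBUF_PAD - 256.
 * The exact value depends on the architecture's skb_shared_info layout.
 */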

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
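
/* Illustrative sketch (hypothetical helper): buf_index walks rx_oobs[]
 * as a ring of num_rx_buf entries, wrapping to 0 past the last entry.
 */
static inline u32 mana_rxq_next_buf_idx(const struct mana_rxq *rxq, u32 idx)
{
	return (idx + 1 == rxq->num_rx_buf) ? 0 : idx + 1;
}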

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* The port creates num_queues of each: EQs, SQs, SQ-CQs, RQs and RQ-CQs */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats Flags */
/* Rx discards/Errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
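
/* Illustrative sketch: req_stats in struct mana_query_gf_stat_req is a
 * bitmask of the STATISTICS_FLAGS_* values above; e.g. to request only
 * the RX host-counter byte/packet statistics:
 *
 *	u64 req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
 *			STATISTICS_FLAGS_HC_RX_UCAST_BYTES;
 */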

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */