/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_USER_H
#define IRDMA_USER_H

#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
#define irdma_cq_handle irdma_handle
#define irdma_pd_id irdma_handle
#define irdma_stag_handle irdma_handle
#define irdma_stag_index u32
#define irdma_stag u32
#define irdma_stag_key u8
#define irdma_tagged_offset u64
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *

#define IRDMA_MAX_MR_SIZE 0x200000000000ULL

#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
#define IRDMA_ACCESS_FLAGS_ALL 0x3f

#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
#define IRDMA_OP_TYPE_RDMA_READ 0x01
#define IRDMA_OP_TYPE_SEND 0x03
#define IRDMA_OP_TYPE_SEND_INV 0x04
#define IRDMA_OP_TYPE_SEND_SOL 0x05
#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
#define IRDMA_OP_TYPE_BIND_MW 0x08
#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f

#define IRDMA_FLUSH_MAJOR_ERR 1

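/*
 * Device limits and object sizes shared by the kernel driver and the
 * user-space provider.  WQE/CQE sizes are expressed in 64-bit words
 * (see struct irdma_qp_quanta and struct irdma_cqe below).
 */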
enum irdma_device_caps_const {
	IRDMA_WQE_SIZE = 4,
	IRDMA_CQP_WQE_SIZE = 8,
	IRDMA_CQE_SIZE = 4,
	IRDMA_EXTENDED_CQE_SIZE = 8,
	IRDMA_AEQE_SIZE = 2,
	IRDMA_CEQE_SIZE = 1,
	IRDMA_CQP_CTX_SIZE = 8,
	IRDMA_SHADOW_AREA_SIZE = 8,
	IRDMA_QUERY_FPM_BUF_SIZE = 176,
	IRDMA_COMMIT_FPM_BUF_SIZE = 176,
	IRDMA_GATHER_STATS_BUF_SIZE = 1024,
	IRDMA_MIN_IW_QP_ID = 0,
	IRDMA_MAX_IW_QP_ID = 262143,
	IRDMA_MIN_CEQID = 0,
	IRDMA_MAX_CEQID = 1023,
	IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID = 0,
	IRDMA_MAX_CQID = 524287,
	IRDMA_MIN_AEQ_ENTRIES = 1,
	IRDMA_MAX_AEQ_ENTRIES = 524287,
	IRDMA_MIN_CEQ_ENTRIES = 1,
	IRDMA_MAX_CEQ_ENTRIES = 262143,
	IRDMA_MIN_CQ_SIZE = 1,
	IRDMA_MAX_CQ_SIZE = 1048575,
	IRDMA_DB_ID_ZERO = 0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
	IRDMA_MAX_SGE_RD = 13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
	IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
	IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
	IRDMA_MAX_PE_ENA_VF_COUNT = 32,
	IRDMA_MAX_VF_FPM_ID = 47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE = 101,
	IRDMA_MAX_WQ_ENTRIES = 32768,
	IRDMA_Q2_BUF_SIZE = 256,
	IRDMA_QP_CTX_SIZE = 256,
	IRDMA_MAX_PDS = 262144,
	IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};

enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED = 1,
};

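/* Flush error codes reported for work requests completed in error after a QP flush */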
enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
	FLUSH_REM_INV_REQ_ERR,
};

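/* Completion status values returned in irdma_cq_poll_info.comp_status */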
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};

enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};

enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM = 2,
	IRDMA_ROCE = 4,
	IRDMA_PUSH_MODE = 8,
};

struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;

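/* Head/tail/size bookkeeping for SQ, RQ and CQ rings */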
struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;
};

struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};

struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};

struct irdma_post_send {
	struct ib_sge *sg_list;
	u32 num_sges;
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};

struct irdma_post_rq_info {
	u64 wr_id;
	struct ib_sge *sg_list;
	u32 num_sges;
};

struct irdma_rdma_write {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_rdma_read {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};

struct irdma_bind_window {
	irdma_stag mr_stag;
	u64 bind_len;
	void *va;
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;
	bool ena_writes:1;
	irdma_stag mw_stag;
	bool mem_window_type_1:1;
};

struct irdma_inv_local_stag {
	irdma_stag target_stag;
};

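/* Send-queue work request descriptor; op_type selects which member of the op union is valid */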
struct irdma_post_sq_info {
	u64 wr_id;
	u8 op_type;
	u8 l4len;
	bool signaled:1;
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;
	bool imm_data_valid:1;
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	u32 imm_data;
	u32 stag_to_inv;
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
	} op;
};

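/* Completion details filled in by irdma_uk_cq_poll_cmpl() */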
struct irdma_cq_poll_info {
	u64 wr_id;
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;
	u32 imm_data;
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;
	u8 ud_smac[6];
	u8 op_type;
	u8 q_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool error:1;
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};

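/*
 * Work request posting helpers shared by the kernel and user-space fast paths.
 * When post_sq is true, the SQ doorbell is rung as part of the call;
 * otherwise the caller rings it later with irdma_uk_qp_post_wr().
 */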
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
			       struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
			 struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
		      bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
			  struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		       bool inv_stag, bool post_sq);
int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
			bool post_sq);
int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
		  bool post_sq);
int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
				   struct irdma_post_sq_info *info,
				   bool post_sq);

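/* WQE build callbacks; implementations differ by hardware generation */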
struct irdma_wqe_uk_ops {
	void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
				    u32 num_sges, u8 polarity);
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
				u8 valid);
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};

int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
			  struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
				      enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq,
		      struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
		     struct irdma_qp_uk_init_info *info);
void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
			    u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
				 u32 *rq_depth, u8 *rq_shift);

struct irdma_sq_uk_wr_trk_info {
	u64 wrid;
	u32 wr_len;
	u16 quanta;
	u8 reserved[2];
};

struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};

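/* QP fast-path context: WQE rings, doorbell pointer and posting/flush bookkeeping */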
struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;
	struct irdma_qp_quanta *rq_base;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	__le64 *shadow_area;
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	struct irdma_wqe_uk_ops wqe_ops;
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;
	u8 dbg_rq_flushed;
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};

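/* CQ fast-path context: CQE ring, doorbells and polling state */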
struct irdma_cq_uk {
	struct irdma_cqe *cq_base;
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;
	bool avoid_mem_cflct:1;
};

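/* Initialization parameters consumed by irdma_uk_qp_init() */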
struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u32 sq_depth;
	u32 rq_depth;
	u8 first_sq_wq;
	u8 type;
	u8 sq_shift;
	u8 rq_shift;
	int abi_ver;
	bool legacy_mode;
};

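/* Initialization parameters consumed by irdma_uk_cq_init() */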
struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};

__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
				   u16 quanta, u32 total_size,
				   struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
			 u32 inline_data, u8 *shift);
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
		      u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
		      u32 *wqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */