/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}
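
/* Worked example (illustrative, not used by the driver): under the matching
 * rule coded above, the low 15 bits must be non-zero and equal and at least
 * one key must have the full-membership bit (0x8000) set, so:
 *
 *	pkey_match(0xffff, 0x7fff) -> 1  (default key vs. limited member)
 *	pkey_match(0x7fff, 0x7fff) -> 0  (both keys are limited members)
 *	pkey_match(0x0000, 0x8000) -> 0  (low 15 bits are zero)
 */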

/* Return >0 if psn_a > psn_b
 *         0 if psn_a == psn_b
 *        <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}
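
/* Worked example (illustrative): PSNs are 24-bit sequence numbers that wrap,
 * so the difference is shifted into the top 24 bits of a signed 32-bit value
 * and its sign gives the ordering, e.g.:
 *
 *	psn_compare(5, 10)               -> negative (5 is behind 10)
 *	psn_compare(0x000000, 0xffffff)  -> positive (0 follows 0xffffff
 *	                                    after the 24-bit wrap)
 */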

struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_elem elem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_elem elem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_elem elem;
	struct rxe_av av;
	bool is_user;
	int ah_num;
};

struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_elem elem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_dying;
	bool is_user;
	struct tasklet_struct comp_task;
	atomic_t num_wq;
};

enum wqe_state {
	wqe_state_posted,
	wqe_state_processing,
	wqe_state_pending,
	wqe_state_done,
	wqe_state_error,
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_elem elem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;

	int limit;
	int error;
};

enum rxe_qp_state {
	QP_STATE_RESET,
	QP_STATE_INIT,
	QP_STATE_READY,
	QP_STATE_DRAIN, /* req only */
	QP_STATE_DRAINED, /* req only */
	QP_STATE_ERROR
};

struct rxe_req_info {
	enum rxe_qp_state state;
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int wait_for_rnr_timer;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	enum rxe_qp_state state;
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;

	union {
		struct {
			u64 orig_val;
		} atomic;
		struct {
			u64 va_org;
			u32 rkey;
			u32 length;
			u64 va;
			u32 resid;
		} read;
		struct {
			u32 length;
			u64 va;
			u8 type;
			u8 level;
		} flush;
	};
};

struct rxe_resp_info {
	enum rxe_qp_state state;
	u32 msn;
	u32 psn;
	u32 ack_psn;
	int opcode;
	int drop_msg;
	int goto_error;
	int sent_psn_nak;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	u64 offset;
	struct rxe_mr *mr;
	u32 resid;
	u32 rkey;
	u32 length;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};
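
/* Sketch of the circular-list bookkeeping described above (assumed from that
 * comment, not a verbatim copy of rxe_resp.c): the responder claims the slot
 * at res_head and advances it, wrapping at the allocated depth, so the oldest
 * entry is the first to be overwritten:
 *
 *	struct resp_res *res = &qp->resp.resources[qp->resp.res_head];
 *
 *	if (++qp->resp.res_head == qp->attr.max_dest_rd_atomic)
 *		qp->resp.res_head = 0;
 */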

struct rxe_qp {
	struct ib_qp ibqp;
	struct rxe_pool_elem elem;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	bool is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct socket *sk;
	u32 dst_cookie;
	u16 src_port;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	atomic_t mcg_num;

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	atomic_t ssn;
	atomic_t skb_out;
	int need_req_skb;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};
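
/* Sketch of how the retransmit timer above is typically armed on the RC
 * completer path while ACKs are outstanding (illustrative only; the
 * authoritative logic is in rxe_comp.c):
 *
 *	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
 *		mod_timer(&qp->retrans_timer,
 *			  jiffies + qp->qp_timeout_jiffies);
 */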

enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))

struct rxe_phys_buf {
	u64 addr;
	u64 size;
};

struct rxe_map {
	struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
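
/* Example of the two-level layout above: struct rxe_phys_buf is 16 bytes, so
 * with 4 KiB pages RXE_BUF_PER_MAP is 256 and physical buffer i of an MR is
 * found at map[i / 256]->buf[i % 256] (256 assumes PAGE_SIZE == 4096; other
 * page sizes scale accordingly).
 */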

static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}
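
/* Example: the upper 24 bits of an rkey are the pool index tested above and
 * the low 8 bits are the variable key byte, so rkey 0x00012345 yields index
 * 0x000123; the helper reports whether that index lies in the memory-window
 * range rather than the memory-region range.
 */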

struct rxe_mr {
	struct rxe_pool_elem elem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	u32 lkey;
	u32 rkey;
	enum rxe_mr_state state;
	u32 offset;
	int access;

	int page_shift;
	int page_mask;
	int map_shift;
	int map_mask;

	u32 num_buf;
	u32 nbuf;

	u32 max_buf;
	u32 num_map;

	atomic_t num_mw;

	struct rxe_map **map;
};

enum rxe_mw_state {
	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_elem elem;
	spinlock_t lock;
	enum rxe_mw_state state;
	struct rxe_qp *qp; /* Type 2 only */
	struct rxe_mr *mr;
	u32 rkey;
	int access;
	u64 addr;
	u64 length;
};

struct rxe_mcg {
	struct rb_node node;
	struct kref ref_cnt;
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	atomic_t qp_num;
	u32 qkey;
	u16 pkey;
};

struct rxe_mca {
	struct list_head qp_list;
	struct rxe_qp *qp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;

	/* multicast support */
	spinlock_t mcg_lock;
	struct rb_root mcg_tree;
	atomic_t mcg_num;
	atomic_t mcg_attach;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}
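
/* Typical use, e.g. when accounting a transmitted packet (assuming a counter
 * id such as RXE_CNT_SENT_PKTS from rxe_hw_counters.h):
 *
 *	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 */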

static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}
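
/* The converters above recover the rxe object that embeds the core ib_*
 * handle, e.g. (illustrative):
 *
 *	struct rxe_qp *qp = to_rqp(ibqp);
 *	struct rxe_dev *rxe = to_rdev(ibqp->device);
 */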

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */