1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * RDMA Transport Layer
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10#undef pr_fmt
11#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13#include <linux/module.h>
14
15#include "rtrs-srv.h"
16#include "rtrs-log.h"
17#include <rdma/ib_cm.h>
18#include <rdma/ib_verbs.h>
19#include "rtrs-srv-trace.h"
20
21MODULE_DESCRIPTION("RDMA Transport Server");
22MODULE_LICENSE("GPL");
23
24/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
25#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
26#define DEFAULT_SESS_QUEUE_DEPTH 512
27#define MAX_HDR_SIZE PAGE_SIZE
28
29static struct rtrs_rdma_dev_pd dev_pd;
30const struct class rtrs_dev_class = {
31 .name = "rtrs-server",
32};
33static struct rtrs_srv_ib_ctx ib_ctx;
34
35static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
36static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
37
38static bool always_invalidate = true;
39module_param(always_invalidate, bool, 0444);
40MODULE_PARM_DESC(always_invalidate,
41 "Invalidate memory registration for contiguous memory regions before accessing.");
42
43module_param_named(max_chunk_size, max_chunk_size, int, 0444);
44MODULE_PARM_DESC(max_chunk_size,
		 "Max size for each IO request in bytes (default: "
		 __stringify(DEFAULT_MAX_CHUNK_SIZE) " bytes)");
47
48module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
49MODULE_PARM_DESC(sess_queue_depth,
50 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
51 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
52 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
53
54static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
55
56static struct workqueue_struct *rtrs_wq;
57
58static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
59{
60 return container_of(c, struct rtrs_srv_con, c);
61}
62
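/*
 * Validate and apply a path state transition under state_lock.
 * Allowed transitions: CONNECTING -> CONNECTED, CONNECTING/CONNECTED ->
 * CLOSING, CLOSING -> CLOSED. Returns true if the state was changed.
 */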
63static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
64 enum rtrs_srv_state new_state)
65{
66 enum rtrs_srv_state old_state;
67 bool changed = false;
68 unsigned long flags;
69
70 spin_lock_irqsave(&srv_path->state_lock, flags);
71 old_state = srv_path->state;
72 switch (new_state) {
73 case RTRS_SRV_CONNECTED:
74 if (old_state == RTRS_SRV_CONNECTING)
75 changed = true;
76 break;
77 case RTRS_SRV_CLOSING:
78 if (old_state == RTRS_SRV_CONNECTING ||
79 old_state == RTRS_SRV_CONNECTED)
80 changed = true;
81 break;
82 case RTRS_SRV_CLOSED:
83 if (old_state == RTRS_SRV_CLOSING)
84 changed = true;
85 break;
86 default:
87 break;
88 }
89 if (changed)
90 srv_path->state = new_state;
91 spin_unlock_irqrestore(&srv_path->state_lock, flags);
92
93 return changed;
94}
95
96static void free_id(struct rtrs_srv_op *id)
97{
98 if (!id)
99 return;
100 kfree(id);
101}
102
103static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
104{
105 struct rtrs_srv_sess *srv = srv_path->srv;
106 int i;
107
108 if (srv_path->ops_ids) {
109 for (i = 0; i < srv->queue_depth; i++)
110 free_id(srv_path->ops_ids[i]);
111 kfree(srv_path->ops_ids);
112 srv_path->ops_ids = NULL;
113 }
114}
115
116static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
117
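/* Shared CQE used for all data-path RDMA and heartbeat completions */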
118static struct ib_cqe io_comp_cqe = {
119 .done = rtrs_srv_rdma_done
120};
121
122static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
123{
124 struct rtrs_srv_path *srv_path = container_of(ref,
125 struct rtrs_srv_path,
126 ids_inflight_ref);
127
128 percpu_ref_exit(&srv_path->ids_inflight_ref);
129 complete(&srv_path->complete_done);
130}
131
132static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
133{
134 struct rtrs_srv_sess *srv = srv_path->srv;
135 struct rtrs_srv_op *id;
136 int i, ret;
137
138 srv_path->ops_ids = kcalloc(srv->queue_depth,
139 sizeof(*srv_path->ops_ids),
140 GFP_KERNEL);
141 if (!srv_path->ops_ids)
142 goto err;
143
144 for (i = 0; i < srv->queue_depth; ++i) {
145 id = kzalloc(sizeof(*id), GFP_KERNEL);
146 if (!id)
147 goto err;
148
149 srv_path->ops_ids[i] = id;
150 }
151
152 ret = percpu_ref_init(&srv_path->ids_inflight_ref,
153 rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
154 if (ret) {
155 pr_err("Percpu reference init failed\n");
156 goto err;
157 }
158 init_completion(&srv_path->complete_done);
159
160 return 0;
161
162err:
163 rtrs_srv_free_ops_ids(srv_path);
164 return -ENOMEM;
165}
166
167static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
168{
169 percpu_ref_get(&srv_path->ids_inflight_ref);
170}
171
172static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
173{
174 percpu_ref_put(&srv_path->ids_inflight_ref);
175}
176
177static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
178{
179 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
180 struct rtrs_path *s = con->c.path;
181 struct rtrs_srv_path *srv_path = to_srv_path(s);
182
183 if (wc->status != IB_WC_SUCCESS) {
184 rtrs_err(s, "REG MR failed: %s\n",
185 ib_wc_status_msg(wc->status));
186 close_path(srv_path);
187 return;
188 }
189}
190
191static struct ib_cqe local_reg_cqe = {
192 .done = rtrs_srv_reg_mr_done
193};
194
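/*
 * Reply to an RTRS read request by RDMA-writing the data into the single
 * remote buffer described by the request. Depending on the invalidation
 * mode, an MR re-registration (always_invalidate) and/or a SEND_WITH_INV
 * of the client's rkey is chained in; the chain always ends with an
 * immediate message carrying the msg_id.
 */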
195static int rdma_write_sg(struct rtrs_srv_op *id)
196{
197 struct rtrs_path *s = id->con->c.path;
198 struct rtrs_srv_path *srv_path = to_srv_path(s);
199 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
200 struct rtrs_srv_mr *srv_mr;
201 struct ib_send_wr inv_wr;
202 struct ib_rdma_wr imm_wr;
203 struct ib_rdma_wr *wr = NULL;
204 enum ib_send_flags flags;
205 size_t sg_cnt;
206 int err, offset;
207 bool need_inval;
208 u32 rkey = 0;
209 struct ib_reg_wr rwr;
210 struct ib_sge *plist;
211 struct ib_sge list;
212
213 sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
214 need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
215 if (sg_cnt != 1)
216 return -EINVAL;
217
218 offset = 0;
219
220 wr = &id->tx_wr;
221 plist = &id->tx_sg;
222 plist->addr = dma_addr + offset;
223 plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
224
225 /* WR will fail with length error
226 * if this is 0
227 */
228 if (plist->length == 0) {
229 rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
230 return -EINVAL;
231 }
232
233 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
234 offset += plist->length;
235
236 wr->wr.sg_list = plist;
237 wr->wr.num_sge = 1;
238 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
239 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
240 if (rkey == 0)
241 rkey = wr->rkey;
242 else
243 /* Only one key is actually used */
244 WARN_ON_ONCE(rkey != wr->rkey);
245
246 wr->wr.opcode = IB_WR_RDMA_WRITE;
247 wr->wr.wr_cqe = &io_comp_cqe;
248 wr->wr.ex.imm_data = 0;
249 wr->wr.send_flags = 0;
250
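	/*
	 * Build the WR chain: RDMA_WRITE [-> REG_MR] [-> SEND_WITH_INV]
	 * -> final immediate send, depending on the invalidation mode.
	 */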
251 if (need_inval && always_invalidate) {
252 wr->wr.next = &rwr.wr;
253 rwr.wr.next = &inv_wr;
254 inv_wr.next = &imm_wr.wr;
255 } else if (always_invalidate) {
256 wr->wr.next = &rwr.wr;
257 rwr.wr.next = &imm_wr.wr;
258 } else if (need_inval) {
259 wr->wr.next = &inv_wr;
260 inv_wr.next = &imm_wr.wr;
261 } else {
262 wr->wr.next = &imm_wr.wr;
263 }
	/*
	 * From time to time we have to post signaled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
268 flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
269 0 : IB_SEND_SIGNALED;
270
271 if (need_inval) {
272 inv_wr.sg_list = NULL;
273 inv_wr.num_sge = 0;
274 inv_wr.opcode = IB_WR_SEND_WITH_INV;
275 inv_wr.wr_cqe = &io_comp_cqe;
276 inv_wr.send_flags = 0;
277 inv_wr.ex.invalidate_rkey = rkey;
278 }
279
280 imm_wr.wr.next = NULL;
281 if (always_invalidate) {
282 struct rtrs_msg_rkey_rsp *msg;
283
284 srv_mr = &srv_path->mrs[id->msg_id];
285 rwr.wr.opcode = IB_WR_REG_MR;
286 rwr.wr.wr_cqe = &local_reg_cqe;
287 rwr.wr.num_sge = 0;
288 rwr.mr = srv_mr->mr;
289 rwr.wr.send_flags = 0;
290 rwr.key = srv_mr->mr->rkey;
291 rwr.access = (IB_ACCESS_LOCAL_WRITE |
292 IB_ACCESS_REMOTE_WRITE);
293 msg = srv_mr->iu->buf;
294 msg->buf_id = cpu_to_le16(id->msg_id);
295 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
296 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
297
298 list.addr = srv_mr->iu->dma_addr;
299 list.length = sizeof(*msg);
300 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
301 imm_wr.wr.sg_list = &list;
302 imm_wr.wr.num_sge = 1;
303 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
304 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
305 srv_mr->iu->dma_addr,
306 srv_mr->iu->size, DMA_TO_DEVICE);
307 } else {
308 imm_wr.wr.sg_list = NULL;
309 imm_wr.wr.num_sge = 0;
310 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
311 }
312 imm_wr.wr.send_flags = flags;
313 imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
314 0, need_inval));
315
316 imm_wr.wr.wr_cqe = &io_comp_cqe;
317 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
318 offset, DMA_BIDIRECTIONAL);
319
320 err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
321 if (err)
322 rtrs_err(s,
323 "Posting RDMA-Write-Request to QP failed, err: %d\n",
324 err);
325
326 return err;
327}
328
329/**
330 * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
331 * requests or on successful WRITE request.
332 * @con: the connection to send back result
333 * @id: the id associated with the IO
334 * @errno: the error number of the IO.
335 *
 * Return 0 on success, a negative error code otherwise.
337 */
338static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
339 int errno)
340{
341 struct rtrs_path *s = con->c.path;
342 struct rtrs_srv_path *srv_path = to_srv_path(s);
343 struct ib_send_wr inv_wr, *wr = NULL;
344 struct ib_rdma_wr imm_wr;
345 struct ib_reg_wr rwr;
346 struct rtrs_srv_mr *srv_mr;
347 bool need_inval = false;
348 enum ib_send_flags flags;
349 u32 imm;
350 int err;
351
352 if (id->dir == READ) {
353 struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
354 size_t sg_cnt;
355
356 need_inval = le16_to_cpu(rd_msg->flags) &
357 RTRS_MSG_NEED_INVAL_F;
358 sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
359
360 if (need_inval) {
361 if (sg_cnt) {
362 inv_wr.wr_cqe = &io_comp_cqe;
363 inv_wr.sg_list = NULL;
364 inv_wr.num_sge = 0;
365 inv_wr.opcode = IB_WR_SEND_WITH_INV;
366 inv_wr.send_flags = 0;
367 /* Only one key is actually used */
368 inv_wr.ex.invalidate_rkey =
369 le32_to_cpu(rd_msg->desc[0].key);
370 } else {
371 WARN_ON_ONCE(1);
372 need_inval = false;
373 }
374 }
375 }
376
377 trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
378
379 if (need_inval && always_invalidate) {
380 wr = &inv_wr;
381 inv_wr.next = &rwr.wr;
382 rwr.wr.next = &imm_wr.wr;
383 } else if (always_invalidate) {
384 wr = &rwr.wr;
385 rwr.wr.next = &imm_wr.wr;
386 } else if (need_inval) {
387 wr = &inv_wr;
388 inv_wr.next = &imm_wr.wr;
389 } else {
390 wr = &imm_wr.wr;
391 }
	/*
	 * From time to time we have to post signaled sends,
	 * or the send queue will fill up and only a QP reset can help.
	 */
396 flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
397 0 : IB_SEND_SIGNALED;
398 imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
399 imm_wr.wr.next = NULL;
400 if (always_invalidate) {
401 struct ib_sge list;
402 struct rtrs_msg_rkey_rsp *msg;
403
404 srv_mr = &srv_path->mrs[id->msg_id];
405 rwr.wr.next = &imm_wr.wr;
406 rwr.wr.opcode = IB_WR_REG_MR;
407 rwr.wr.wr_cqe = &local_reg_cqe;
408 rwr.wr.num_sge = 0;
409 rwr.wr.send_flags = 0;
410 rwr.mr = srv_mr->mr;
411 rwr.key = srv_mr->mr->rkey;
412 rwr.access = (IB_ACCESS_LOCAL_WRITE |
413 IB_ACCESS_REMOTE_WRITE);
414 msg = srv_mr->iu->buf;
415 msg->buf_id = cpu_to_le16(id->msg_id);
416 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
417 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
418
419 list.addr = srv_mr->iu->dma_addr;
420 list.length = sizeof(*msg);
421 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
422 imm_wr.wr.sg_list = &list;
423 imm_wr.wr.num_sge = 1;
424 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
425 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
426 srv_mr->iu->dma_addr,
427 srv_mr->iu->size, DMA_TO_DEVICE);
428 } else {
429 imm_wr.wr.sg_list = NULL;
430 imm_wr.wr.num_sge = 0;
431 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
432 }
433 imm_wr.wr.send_flags = flags;
434 imm_wr.wr.wr_cqe = &io_comp_cqe;
435
436 imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
437
438 err = ib_post_send(id->con->c.qp, wr, NULL);
439 if (err)
440 rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
441 err);
442
443 return err;
444}
445
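/* Move the path to CLOSING state and queue the teardown work */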
446void close_path(struct rtrs_srv_path *srv_path)
447{
448 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
449 queue_work(rtrs_wq, &srv_path->close_work);
450 WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
451}
452
453static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
454{
455 switch (state) {
456 case RTRS_SRV_CONNECTING:
457 return "RTRS_SRV_CONNECTING";
458 case RTRS_SRV_CONNECTED:
459 return "RTRS_SRV_CONNECTED";
460 case RTRS_SRV_CLOSING:
461 return "RTRS_SRV_CLOSING";
462 case RTRS_SRV_CLOSED:
463 return "RTRS_SRV_CLOSED";
464 default:
465 return "UNKNOWN";
466 }
467}
468
469/**
470 * rtrs_srv_resp_rdma() - Finish an RDMA request
471 *
472 * @id: Internal RTRS operation identifier
 * @status: Response code sent to the other side for this operation.
 *          0 = success, < 0 error
 * Context: any
 *
 * Finish an RDMA operation. A message is sent to the client and the
 * corresponding memory areas will be released.
 *
 * Return: false if the send queue is full and the response was queued to
 * be sent later, true otherwise.
479 */
480bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
481{
482 struct rtrs_srv_path *srv_path;
483 struct rtrs_srv_con *con;
484 struct rtrs_path *s;
485 int err;
486
487 if (WARN_ON(!id))
488 return true;
489
490 con = id->con;
491 s = con->c.path;
492 srv_path = to_srv_path(s);
493
494 id->status = status;
495
496 if (srv_path->state != RTRS_SRV_CONNECTED) {
497 rtrs_err_rl(s,
498 "Sending I/O response failed, server path %s is disconnected, path state %s\n",
499 kobject_name(&srv_path->kobj),
500 rtrs_srv_state_str(srv_path->state));
501 goto out;
502 }
503 if (always_invalidate) {
504 struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
505
506 ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
507 }
508 if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
509 rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
510 kobject_name(&srv_path->kobj),
511 con->c.cid);
512 atomic_add(1, &con->c.sq_wr_avail);
513 spin_lock(&con->rsp_wr_wait_lock);
514 list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
515 spin_unlock(&con->rsp_wr_wait_lock);
516 return false;
517 }
518
519 if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
520 err = send_io_resp_imm(con, id, status);
521 else
522 err = rdma_write_sg(id);
523
524 if (err) {
525 rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
526 kobject_name(&srv_path->kobj));
527 close_path(srv_path);
528 }
529out:
530 rtrs_srv_put_ops_ids(srv_path);
531 return true;
532}
533EXPORT_SYMBOL(rtrs_srv_resp_rdma);
534
535/**
536 * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
537 * @srv: Session pointer
538 * @priv: The private pointer that is associated with the session.
539 */
540void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
541{
542 srv->priv = priv;
543}
544EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
545
546static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
547{
548 int i;
549
550 for (i = 0; i < srv_path->mrs_num; i++) {
551 struct rtrs_srv_mr *srv_mr;
552
553 srv_mr = &srv_path->mrs[i];
554
555 if (always_invalidate)
556 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
557
558 ib_dereg_mr(srv_mr->mr);
559 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
560 srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
561 sg_free_table(&srv_mr->sgt);
562 }
563 kfree(srv_path->mrs);
564}
565
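/*
 * Allocate and DMA-map memory regions covering all RDMA buffer chunks of
 * a path. With always_invalidate every chunk gets its own MR (plus an IU
 * for the rkey response), otherwise chunks are packed into as few MRs as
 * max_fast_reg_page_list_len allows.
 */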
566static int map_cont_bufs(struct rtrs_srv_path *srv_path)
567{
568 struct rtrs_srv_sess *srv = srv_path->srv;
569 struct rtrs_path *ss = &srv_path->s;
570 int i, err, mrs_num;
571 unsigned int chunk_bits;
572 int chunks_per_mr = 1;
573 struct ib_mr *mr;
574 struct sg_table *sgt;
575
	/*
	 * Here we map queue_depth chunks to MRs. First we have to
	 * figure out how many chunks we can map per MR.
	 */
580 if (always_invalidate) {
		/*
		 * In order to invalidate each chunk of memory, we need
		 * more memory regions.
		 */
585 mrs_num = srv->queue_depth;
586 } else {
587 chunks_per_mr =
588 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
589 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
590 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
591 }
592
593 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
594 if (!srv_path->mrs)
595 return -ENOMEM;
596
597 for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
598 srv_path->mrs_num++) {
599 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
600 struct scatterlist *s;
601 int nr, nr_sgt, chunks;
602
603 sgt = &srv_mr->sgt;
604 chunks = chunks_per_mr * srv_path->mrs_num;
605 if (!always_invalidate)
606 chunks_per_mr = min_t(int, chunks_per_mr,
607 srv->queue_depth - chunks);
608
609 err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
610 if (err)
611 goto err;
612
613 for_each_sg(sgt->sgl, s, chunks_per_mr, i)
614 sg_set_page(s, srv->chunks[chunks + i],
615 max_chunk_size, 0);
616
617 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
618 sgt->nents, DMA_BIDIRECTIONAL);
619 if (!nr_sgt) {
620 err = -EINVAL;
621 goto free_sg;
622 }
623 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
624 nr_sgt);
625 if (IS_ERR(mr)) {
626 err = PTR_ERR(mr);
627 goto unmap_sg;
628 }
629 nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
630 NULL, max_chunk_size);
631 if (nr != nr_sgt) {
632 err = nr < 0 ? nr : -EINVAL;
633 goto dereg_mr;
634 }
635
636 if (always_invalidate) {
637 srv_mr->iu = rtrs_iu_alloc(1,
638 sizeof(struct rtrs_msg_rkey_rsp),
639 GFP_KERNEL, srv_path->s.dev->ib_dev,
640 DMA_TO_DEVICE, rtrs_srv_rdma_done);
641 if (!srv_mr->iu) {
642 err = -ENOMEM;
643 rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
644 goto dereg_mr;
645 }
646 }
647 /* Eventually dma addr for each chunk can be cached */
648 for_each_sg(sgt->sgl, s, nr_sgt, i)
649 srv_path->dma_addr[chunks + i] = sg_dma_address(s);
650
651 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
652 srv_mr->mr = mr;
653 }
654
655 chunk_bits = ilog2(srv->queue_depth - 1) + 1;
656 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
657
658 return 0;
659
660dereg_mr:
661 ib_dereg_mr(mr);
662unmap_sg:
663 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
664 sgt->nents, DMA_BIDIRECTIONAL);
665free_sg:
666 sg_free_table(sgt);
667err:
668 unmap_cont_bufs(srv_path);
669
670 return err;
671}
672
673static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
674{
675 close_path(to_srv_path(c->path));
676}
677
678static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
679{
680 rtrs_init_hb(&srv_path->s, &io_comp_cqe,
681 RTRS_HB_INTERVAL_MS,
682 RTRS_HB_MISSED_MAX,
683 rtrs_srv_hb_err_handler,
684 rtrs_wq);
685}
686
687static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
688{
689 rtrs_start_hb(&srv_path->s);
690}
691
692static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
693{
694 rtrs_stop_hb(&srv_path->s);
695}
696
697static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
698{
699 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
700 struct rtrs_path *s = con->c.path;
701 struct rtrs_srv_path *srv_path = to_srv_path(s);
702 struct rtrs_iu *iu;
703
704 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
705 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
706
707 if (wc->status != IB_WC_SUCCESS) {
708 rtrs_err(s, "Sess info response send failed: %s\n",
709 ib_wc_status_msg(wc->status));
710 close_path(srv_path);
711 return;
712 }
713 WARN_ON(wc->opcode != IB_WC_SEND);
714}
715
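/* Notify the upper layer when the first path of a session comes up */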
716static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
717{
718 struct rtrs_srv_sess *srv = srv_path->srv;
719 struct rtrs_srv_ctx *ctx = srv->ctx;
720 int up, ret = 0;
721
722 mutex_lock(&srv->paths_ev_mutex);
723 up = ++srv->paths_up;
724 if (up == 1)
725 ret = ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
726 mutex_unlock(&srv->paths_ev_mutex);
727
728 /* Mark session as established */
729 if (!ret)
730 srv_path->established = true;
731
732 return ret;
733}
734
735static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
736{
737 struct rtrs_srv_sess *srv = srv_path->srv;
738 struct rtrs_srv_ctx *ctx = srv->ctx;
739
740 if (!srv_path->established)
741 return;
742
743 srv_path->established = false;
744 mutex_lock(&srv->paths_ev_mutex);
745 WARN_ON(!srv->paths_up);
746 if (--srv->paths_up == 0)
747 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
748 mutex_unlock(&srv->paths_ev_mutex);
749}
750
751static bool exist_pathname(struct rtrs_srv_ctx *ctx,
752 const char *pathname, const uuid_t *path_uuid)
753{
754 struct rtrs_srv_sess *srv;
755 struct rtrs_srv_path *srv_path;
756 bool found = false;
757
758 mutex_lock(&ctx->srv_mutex);
759 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
760 mutex_lock(&srv->paths_mutex);
761
		/* Skip our own session (same uuid): a client may add another path to it */
763 if (uuid_equal(&srv->paths_uuid, path_uuid)) {
764 mutex_unlock(&srv->paths_mutex);
765 continue;
766 }
767
768 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
769 if (strlen(srv_path->s.sessname) == strlen(pathname) &&
770 !strcmp(srv_path->s.sessname, pathname)) {
771 found = true;
772 break;
773 }
774 }
775 mutex_unlock(&srv->paths_mutex);
776 if (found)
777 break;
778 }
779 mutex_unlock(&ctx->srv_mutex);
780 return found;
781}
782
783static int post_recv_path(struct rtrs_srv_path *srv_path);
784static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
785
786static int process_info_req(struct rtrs_srv_con *con,
787 struct rtrs_msg_info_req *msg)
788{
789 struct rtrs_path *s = con->c.path;
790 struct rtrs_srv_path *srv_path = to_srv_path(s);
791 struct ib_send_wr *reg_wr = NULL;
792 struct rtrs_msg_info_rsp *rsp;
793 struct rtrs_iu *tx_iu;
794 struct ib_reg_wr *rwr;
795 int mri, err;
796 size_t tx_sz;
797
798 err = post_recv_path(srv_path);
799 if (err) {
800 rtrs_err(s, "post_recv_path(), err: %d\n", err);
801 return err;
802 }
803
804 if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
		rtrs_err(s, "pathname must not contain '/' or '.'\n");
806 return -EINVAL;
807 }
808
809 if (exist_pathname(srv_path->srv->ctx,
810 msg->pathname, &srv_path->srv->paths_uuid)) {
811 rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
812 return -EPERM;
813 }
814 strscpy(srv_path->s.sessname, msg->pathname,
815 sizeof(srv_path->s.sessname));
816
817 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
818 if (!rwr)
819 return -ENOMEM;
820
821 tx_sz = sizeof(*rsp);
822 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
823 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
824 DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
825 if (!tx_iu) {
826 err = -ENOMEM;
827 goto rwr_free;
828 }
829
830 rsp = tx_iu->buf;
831 rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
832 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
833
834 for (mri = 0; mri < srv_path->mrs_num; mri++) {
835 struct ib_mr *mr = srv_path->mrs[mri].mr;
836
837 rsp->desc[mri].addr = cpu_to_le64(mr->iova);
838 rsp->desc[mri].key = cpu_to_le32(mr->rkey);
839 rsp->desc[mri].len = cpu_to_le32(mr->length);
840
841 /*
842 * Fill in reg MR request and chain them *backwards*
843 */
844 rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
845 rwr[mri].wr.opcode = IB_WR_REG_MR;
846 rwr[mri].wr.wr_cqe = &local_reg_cqe;
847 rwr[mri].wr.num_sge = 0;
848 rwr[mri].wr.send_flags = 0;
849 rwr[mri].mr = mr;
850 rwr[mri].key = mr->rkey;
851 rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
852 IB_ACCESS_REMOTE_WRITE);
853 reg_wr = &rwr[mri].wr;
854 }
855
856 err = rtrs_srv_create_path_files(srv_path);
857 if (err)
858 goto iu_free;
859 kobject_get(&srv_path->kobj);
860 get_device(&srv_path->srv->dev);
861 err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
862 if (!err) {
863 rtrs_err(s, "rtrs_srv_change_state(), err: %d\n", err);
864 goto iu_free;
865 }
866
867 rtrs_srv_start_hb(srv_path);
868
	/*
	 * We do not account for the number of established connections at the
	 * moment, we rely on the client, which sends the info request only
	 * when all connections have been successfully established. Thus,
	 * simply notify the listener with a proper event if we are the first
	 * path.
	 */
875 err = rtrs_srv_path_up(srv_path);
876 if (err) {
877 rtrs_err(s, "rtrs_srv_path_up(), err: %d\n", err);
878 goto iu_free;
879 }
880
881 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
882 tx_iu->dma_addr,
883 tx_iu->size, DMA_TO_DEVICE);
884
885 /* Send info response */
886 err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
887 if (err) {
888 rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
889iu_free:
890 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
891 }
892rwr_free:
893 kfree(rwr);
894
895 return err;
896}
897
898static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
899{
900 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
901 struct rtrs_path *s = con->c.path;
902 struct rtrs_srv_path *srv_path = to_srv_path(s);
903 struct rtrs_msg_info_req *msg;
904 struct rtrs_iu *iu;
905 int err;
906
907 WARN_ON(con->c.cid);
908
909 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
910 if (wc->status != IB_WC_SUCCESS) {
911 rtrs_err(s, "Sess info request receive failed: %s\n",
912 ib_wc_status_msg(wc->status));
913 goto close;
914 }
915 WARN_ON(wc->opcode != IB_WC_RECV);
916
917 if (wc->byte_len < sizeof(*msg)) {
918 rtrs_err(s, "Sess info request is malformed: size %d\n",
919 wc->byte_len);
920 goto close;
921 }
922 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
923 iu->size, DMA_FROM_DEVICE);
924 msg = iu->buf;
925 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
926 rtrs_err(s, "Sess info request is malformed: type %d\n",
927 le16_to_cpu(msg->type));
928 goto close;
929 }
930 err = process_info_req(con, msg);
931 if (err)
932 goto close;
933
934out:
935 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
936 return;
937close:
938 close_path(srv_path);
939 goto out;
940}
941
942static int post_recv_info_req(struct rtrs_srv_con *con)
943{
944 struct rtrs_path *s = con->c.path;
945 struct rtrs_srv_path *srv_path = to_srv_path(s);
946 struct rtrs_iu *rx_iu;
947 int err;
948
949 rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
950 GFP_KERNEL, srv_path->s.dev->ib_dev,
951 DMA_FROM_DEVICE, rtrs_srv_info_req_done);
952 if (!rx_iu)
953 return -ENOMEM;
954 /* Prepare for getting info response */
955 err = rtrs_iu_post_recv(&con->c, rx_iu);
956 if (err) {
957 rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
958 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
959 return err;
960 }
961
962 return 0;
963}
964
965static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
966{
967 int i, err;
968
969 for (i = 0; i < q_size; i++) {
970 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
971 if (err)
972 return err;
973 }
974
975 return 0;
976}
977
978static int post_recv_path(struct rtrs_srv_path *srv_path)
979{
980 struct rtrs_srv_sess *srv = srv_path->srv;
981 struct rtrs_path *s = &srv_path->s;
982 size_t q_size;
983 int err, cid;
984
985 for (cid = 0; cid < srv_path->s.con_num; cid++) {
986 if (cid == 0)
987 q_size = SERVICE_CON_QUEUE_DEPTH;
988 else
989 q_size = srv->queue_depth;
990
991 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
992 if (err) {
993 rtrs_err(s, "post_recv_io(), err: %d\n", err);
994 return err;
995 }
996 }
997
998 return 0;
999}
1000
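/*
 * Handle an RTRS read request: hand the received chunk to the upper
 * layer via the rdma_ev() callback; on error reply immediately with an
 * error IMM message.
 */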
1001static void process_read(struct rtrs_srv_con *con,
1002 struct rtrs_msg_rdma_read *msg,
1003 u32 buf_id, u32 off)
1004{
1005 struct rtrs_path *s = con->c.path;
1006 struct rtrs_srv_path *srv_path = to_srv_path(s);
1007 struct rtrs_srv_sess *srv = srv_path->srv;
1008 struct rtrs_srv_ctx *ctx = srv->ctx;
1009 struct rtrs_srv_op *id;
1010
1011 size_t usr_len, data_len;
1012 void *data;
1013 int ret;
1014
1015 if (srv_path->state != RTRS_SRV_CONNECTED) {
1016 rtrs_err_rl(s,
1017 "Processing read request failed, session is disconnected, sess state %s\n",
1018 rtrs_srv_state_str(srv_path->state));
1019 return;
1020 }
1021 if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
1022 rtrs_err_rl(s,
1023 "Processing read request failed, invalid message\n");
1024 return;
1025 }
1026 rtrs_srv_get_ops_ids(srv_path);
1027 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
1028 id = srv_path->ops_ids[buf_id];
1029 id->con = con;
1030 id->dir = READ;
1031 id->msg_id = buf_id;
1032 id->rd_msg = msg;
1033 usr_len = le16_to_cpu(msg->usr_len);
1034 data_len = off - usr_len;
1035 data = page_address(srv->chunks[buf_id]);
1036 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1037 data + data_len, usr_len);
1038
1039 if (ret) {
1040 rtrs_err_rl(s,
1041 "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
1042 buf_id, ret);
1043 goto send_err_msg;
1044 }
1045
1046 return;
1047
1048send_err_msg:
1049 ret = send_io_resp_imm(con, id, ret);
1050 if (ret < 0) {
1051 rtrs_err_rl(s,
1052 "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
1053 buf_id, ret);
1054 close_path(srv_path);
1055 }
1056 rtrs_srv_put_ops_ids(srv_path);
1057}
1058
1059static void process_write(struct rtrs_srv_con *con,
1060 struct rtrs_msg_rdma_write *req,
1061 u32 buf_id, u32 off)
1062{
1063 struct rtrs_path *s = con->c.path;
1064 struct rtrs_srv_path *srv_path = to_srv_path(s);
1065 struct rtrs_srv_sess *srv = srv_path->srv;
1066 struct rtrs_srv_ctx *ctx = srv->ctx;
1067 struct rtrs_srv_op *id;
1068
1069 size_t data_len, usr_len;
1070 void *data;
1071 int ret;
1072
1073 if (srv_path->state != RTRS_SRV_CONNECTED) {
1074 rtrs_err_rl(s,
1075 "Processing write request failed, session is disconnected, sess state %s\n",
1076 rtrs_srv_state_str(srv_path->state));
1077 return;
1078 }
1079 rtrs_srv_get_ops_ids(srv_path);
1080 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
1081 id = srv_path->ops_ids[buf_id];
1082 id->con = con;
1083 id->dir = WRITE;
1084 id->msg_id = buf_id;
1085
1086 usr_len = le16_to_cpu(req->usr_len);
1087 data_len = off - usr_len;
1088 data = page_address(srv->chunks[buf_id]);
1089 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1090 data + data_len, usr_len);
1091 if (ret) {
1092 rtrs_err_rl(s,
1093 "Processing write request failed, user module callback reports err: %d\n",
1094 ret);
1095 goto send_err_msg;
1096 }
1097
1098 return;
1099
1100send_err_msg:
1101 ret = send_io_resp_imm(con, id, ret);
1102 if (ret < 0) {
1103 rtrs_err_rl(s,
1104 "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
1105 buf_id, ret);
1106 close_path(srv_path);
1107 }
1108 rtrs_srv_put_ops_ids(srv_path);
1109}
1110
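/*
 * Dispatch an incoming I/O request: sync the chunk for CPU access and
 * hand it to process_read()/process_write() based on the message type.
 */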
1111static void process_io_req(struct rtrs_srv_con *con, void *msg,
1112 u32 id, u32 off)
1113{
1114 struct rtrs_path *s = con->c.path;
1115 struct rtrs_srv_path *srv_path = to_srv_path(s);
1116 struct rtrs_msg_rdma_hdr *hdr;
1117 unsigned int type;
1118
1119 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
1120 srv_path->dma_addr[id],
1121 max_chunk_size, DMA_BIDIRECTIONAL);
1122 hdr = msg;
1123 type = le16_to_cpu(hdr->type);
1124
1125 switch (type) {
1126 case RTRS_MSG_WRITE:
1127 process_write(con, msg, id, off);
1128 break;
1129 case RTRS_MSG_READ:
1130 process_read(con, msg, id, off);
1131 break;
1132 default:
1133 rtrs_err(s,
1134 "Processing I/O request failed, unknown message type received: 0x%02x\n",
1135 type);
1136 goto err;
1137 }
1138
1139 return;
1140
1141err:
1142 close_path(srv_path);
1143}
1144
1145static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1146{
1147 struct rtrs_srv_mr *mr =
1148 container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
1149 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1150 struct rtrs_path *s = con->c.path;
1151 struct rtrs_srv_path *srv_path = to_srv_path(s);
1152 struct rtrs_srv_sess *srv = srv_path->srv;
1153 u32 msg_id, off;
1154 void *data;
1155
1156 if (wc->status != IB_WC_SUCCESS) {
1157 rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
1158 ib_wc_status_msg(wc->status));
1159 close_path(srv_path);
1160 }
1161 msg_id = mr->msg_id;
1162 off = mr->msg_off;
1163 data = page_address(srv->chunks[msg_id]) + off;
1164 process_io_req(con, data, msg_id, off);
1165}
1166
1167static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
1168 struct rtrs_srv_mr *mr)
1169{
1170 struct ib_send_wr wr = {
1171 .opcode = IB_WR_LOCAL_INV,
1172 .wr_cqe = &mr->inv_cqe,
1173 .send_flags = IB_SEND_SIGNALED,
1174 .ex.invalidate_rkey = mr->mr->rkey,
1175 };
1176 mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
1177
1178 return ib_post_send(con->c.qp, &wr, NULL);
1179}
1180
1181static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
1182{
1183 spin_lock(&con->rsp_wr_wait_lock);
1184 while (!list_empty(&con->rsp_wr_wait_list)) {
1185 struct rtrs_srv_op *id;
1186 int ret;
1187
1188 id = list_entry(con->rsp_wr_wait_list.next,
1189 struct rtrs_srv_op, wait_list);
1190 list_del(&id->wait_list);
1191
1192 spin_unlock(&con->rsp_wr_wait_lock);
1193 ret = rtrs_srv_resp_rdma(id, id->status);
1194 spin_lock(&con->rsp_wr_wait_lock);
1195
1196 if (!ret) {
1197 list_add(&id->wait_list, &con->rsp_wr_wait_list);
1198 break;
1199 }
1200 }
1201 spin_unlock(&con->rsp_wr_wait_lock);
1202}
1203
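/*
 * Completion handler for the data path: incoming immediate messages
 * (I/O requests and heartbeats) and our own RDMA write/send completions,
 * which replenish the send queue budget.
 */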
1204static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
1205{
1206 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1207 struct rtrs_path *s = con->c.path;
1208 struct rtrs_srv_path *srv_path = to_srv_path(s);
1209 struct rtrs_srv_sess *srv = srv_path->srv;
1210 u32 imm_type, imm_payload;
1211 int err;
1212
1213 if (wc->status != IB_WC_SUCCESS) {
1214 if (wc->status != IB_WC_WR_FLUSH_ERR) {
1215 rtrs_err(s,
1216 "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
1217 ib_wc_status_msg(wc->status), wc->wr_cqe,
1218 wc->opcode, wc->vendor_err, wc->byte_len);
1219 close_path(srv_path);
1220 }
1221 return;
1222 }
1223
1224 switch (wc->opcode) {
1225 case IB_WC_RECV_RDMA_WITH_IMM:
1226 /*
1227 * post_recv() RDMA write completions of IO reqs (read/write)
1228 * and hb
1229 */
1230 if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
1231 return;
1232 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
1233 if (err) {
1234 rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
1235 close_path(srv_path);
1236 break;
1237 }
1238 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
1239 &imm_type, &imm_payload);
1240 if (imm_type == RTRS_IO_REQ_IMM) {
1241 u32 msg_id, off;
1242 void *data;
1243
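			/*
			 * The immediate payload packs the chunk id in the
			 * high bits and the offset within the chunk in the
			 * low mem_bits bits.
			 */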
1244 msg_id = imm_payload >> srv_path->mem_bits;
1245 off = imm_payload & ((1 << srv_path->mem_bits) - 1);
1246 if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
1247 rtrs_err(s, "Wrong msg_id %u, off %u\n",
1248 msg_id, off);
1249 close_path(srv_path);
1250 return;
1251 }
1252 if (always_invalidate) {
1253 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
1254
1255 mr->msg_off = off;
1256 mr->msg_id = msg_id;
1257 err = rtrs_srv_inv_rkey(con, mr);
1258 if (err) {
					rtrs_err(s, "rtrs_srv_inv_rkey(), err: %d\n",
						 err);
1261 close_path(srv_path);
1262 break;
1263 }
1264 } else {
1265 data = page_address(srv->chunks[msg_id]) + off;
1266 process_io_req(con, data, msg_id, off);
1267 }
1268 } else if (imm_type == RTRS_HB_MSG_IMM) {
1269 WARN_ON(con->c.cid);
1270 rtrs_send_hb_ack(&srv_path->s);
1271 } else if (imm_type == RTRS_HB_ACK_IMM) {
1272 WARN_ON(con->c.cid);
1273 srv_path->s.hb_missed_cnt = 0;
1274 } else {
1275 rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
1276 }
1277 break;
1278 case IB_WC_RDMA_WRITE:
1279 case IB_WC_SEND:
1280 /*
1281 * post_send() RDMA write completions of IO reqs (read/write)
1282 * and hb.
1283 */
1284 atomic_add(s->signal_interval, &con->c.sq_wr_avail);
1285
1286 if (!list_empty_careful(&con->rsp_wr_wait_list))
1287 rtrs_rdma_process_wr_wait_list(con);
1288
1289 break;
1290 default:
1291 rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
1292 return;
1293 }
1294}
1295
1296/**
 * rtrs_srv_get_path_name() - Get the name of the first connected path.
 * @srv: Session
 * @pathname: Pathname buffer
 * @len: Length of pathname buffer
 *
 * Return: 0 on success, -ENOTCONN if no connected path exists.
1301 */
1302int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
1303 size_t len)
1304{
1305 struct rtrs_srv_path *srv_path;
1306 int err = -ENOTCONN;
1307
1308 mutex_lock(&srv->paths_mutex);
1309 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1310 if (srv_path->state != RTRS_SRV_CONNECTED)
1311 continue;
1312 strscpy(pathname, srv_path->s.sessname,
1313 min_t(size_t, sizeof(srv_path->s.sessname), len));
1314 err = 0;
1315 break;
1316 }
1317 mutex_unlock(&srv->paths_mutex);
1318
1319 return err;
1320}
1321EXPORT_SYMBOL(rtrs_srv_get_path_name);
1322
1323/**
1324 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
1325 * @srv: Session
1326 */
1327int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
1328{
1329 return srv->queue_depth;
1330}
1331EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
1332
1333static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
1334{
1335 struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
1336 int v;
1337
1338 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
1339 if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
1340 v = cpumask_first(&cq_affinity_mask);
1341 return v;
1342}
1343
1344static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
1345{
1346 srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
1347
1348 return srv_path->cur_cq_vector;
1349}
1350
1351static void rtrs_srv_dev_release(struct device *dev)
1352{
1353 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
1354 dev);
1355
1356 kfree(srv);
1357}
1358
1359static void free_srv(struct rtrs_srv_sess *srv)
1360{
1361 int i;
1362
1363 WARN_ON(refcount_read(&srv->refcount));
1364 for (i = 0; i < srv->queue_depth; i++)
1365 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1366 kfree(srv->chunks);
1367 mutex_destroy(&srv->paths_mutex);
1368 mutex_destroy(&srv->paths_ev_mutex);
1369 /* last put to release the srv structure */
1370 put_device(&srv->dev);
1371}
1372
1373static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
1374 const uuid_t *paths_uuid,
1375 bool first_conn)
1376{
1377 struct rtrs_srv_sess *srv;
1378 int i;
1379
1380 mutex_lock(&ctx->srv_mutex);
1381 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
1382 if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
1383 refcount_inc_not_zero(&srv->refcount)) {
1384 mutex_unlock(&ctx->srv_mutex);
1385 return srv;
1386 }
1387 }
1388 mutex_unlock(&ctx->srv_mutex);
1389 /*
1390 * If this request is not the first connection request from the
1391 * client for this session then fail and return error.
1392 */
1393 if (!first_conn) {
1394 pr_err_ratelimited("Error: Not the first connection request for this session\n");
1395 return ERR_PTR(-ENXIO);
1396 }
1397
1398 /* need to allocate a new srv */
1399 srv = kzalloc(sizeof(*srv), GFP_KERNEL);
1400 if (!srv)
1401 return ERR_PTR(-ENOMEM);
1402
1403 INIT_LIST_HEAD(&srv->paths_list);
1404 mutex_init(&srv->paths_mutex);
1405 mutex_init(&srv->paths_ev_mutex);
1406 uuid_copy(&srv->paths_uuid, paths_uuid);
1407 srv->queue_depth = sess_queue_depth;
1408 srv->ctx = ctx;
1409 device_initialize(&srv->dev);
1410 srv->dev.release = rtrs_srv_dev_release;
1411
1412 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
1413 GFP_KERNEL);
1414 if (!srv->chunks)
1415 goto err_free_srv;
1416
1417 for (i = 0; i < srv->queue_depth; i++) {
1418 srv->chunks[i] = alloc_pages(GFP_KERNEL,
1419 get_order(max_chunk_size));
1420 if (!srv->chunks[i])
1421 goto err_free_chunks;
1422 }
1423 refcount_set(&srv->refcount, 1);
1424 mutex_lock(&ctx->srv_mutex);
1425 list_add(&srv->ctx_list, &ctx->srv_list);
1426 mutex_unlock(&ctx->srv_mutex);
1427
1428 return srv;
1429
1430err_free_chunks:
1431 while (i--)
1432 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1433 kfree(srv->chunks);
1434
1435err_free_srv:
1436 kfree(srv);
1437 return ERR_PTR(-ENOMEM);
1438}
1439
1440static void put_srv(struct rtrs_srv_sess *srv)
1441{
1442 if (refcount_dec_and_test(&srv->refcount)) {
1443 struct rtrs_srv_ctx *ctx = srv->ctx;
1444
1445 WARN_ON(srv->dev.kobj.state_in_sysfs);
1446
1447 mutex_lock(&ctx->srv_mutex);
1448 list_del(&srv->ctx_list);
1449 mutex_unlock(&ctx->srv_mutex);
1450 free_srv(srv);
1451 }
1452}
1453
1454static void __add_path_to_srv(struct rtrs_srv_sess *srv,
1455 struct rtrs_srv_path *srv_path)
1456{
1457 list_add_tail(&srv_path->s.entry, &srv->paths_list);
1458 srv->paths_num++;
1459 WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
1460}
1461
1462static void del_path_from_srv(struct rtrs_srv_path *srv_path)
1463{
1464 struct rtrs_srv_sess *srv = srv_path->srv;
1465
1466 if (WARN_ON(!srv))
1467 return;
1468
1469 mutex_lock(&srv->paths_mutex);
1470 list_del(&srv_path->s.entry);
1471 WARN_ON(!srv->paths_num);
1472 srv->paths_num--;
1473 mutex_unlock(&srv->paths_mutex);
1474}
1475
/* Return 0 if the addresses are the same, non-zero otherwise */
1477static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
1478{
1479 switch (a->sa_family) {
1480 case AF_IB:
1481 return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
1482 &((struct sockaddr_ib *)b)->sib_addr,
1483 sizeof(struct ib_addr)) &&
1484 (b->sa_family == AF_IB);
1485 case AF_INET:
1486 return memcmp(&((struct sockaddr_in *)a)->sin_addr,
1487 &((struct sockaddr_in *)b)->sin_addr,
1488 sizeof(struct in_addr)) &&
1489 (b->sa_family == AF_INET);
1490 case AF_INET6:
1491 return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
1492 &((struct sockaddr_in6 *)b)->sin6_addr,
1493 sizeof(struct in6_addr)) &&
1494 (b->sa_family == AF_INET6);
1495 default:
1496 return -ENOENT;
1497 }
1498}
1499
1500static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
1501 struct rdma_addr *addr)
1502{
1503 struct rtrs_srv_path *srv_path;
1504
1505 list_for_each_entry(srv_path, &srv->paths_list, s.entry)
1506 if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
1507 (struct sockaddr *)&addr->dst_addr) &&
1508 !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
1509 (struct sockaddr *)&addr->src_addr))
1510 return true;
1511
1512 return false;
1513}
1514
1515static void free_path(struct rtrs_srv_path *srv_path)
1516{
1517 if (srv_path->kobj.state_in_sysfs) {
1518 kobject_del(&srv_path->kobj);
1519 kobject_put(&srv_path->kobj);
1520 } else {
1521 free_percpu(srv_path->stats->rdma_stats);
1522 kfree(srv_path->stats);
1523 kfree(srv_path);
1524 }
1525}
1526
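/*
 * Path teardown, run from rtrs_wq: disconnect and drain all connections,
 * wait for inflight operations to finish, notify the upper layer and
 * free all path resources.
 */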
1527static void rtrs_srv_close_work(struct work_struct *work)
1528{
1529 struct rtrs_srv_path *srv_path;
1530 struct rtrs_srv_con *con;
1531 int i;
1532
1533 srv_path = container_of(work, typeof(*srv_path), close_work);
1534
1535 rtrs_srv_stop_hb(srv_path);
1536
1537 for (i = 0; i < srv_path->s.con_num; i++) {
1538 if (!srv_path->s.con[i])
1539 continue;
1540 con = to_srv_con(srv_path->s.con[i]);
1541 rdma_disconnect(con->c.cm_id);
1542 ib_drain_qp(con->c.qp);
1543 }
1544
1545 /*
1546 * Degrade ref count to the usual model with a single shared
1547 * atomic_t counter
1548 */
1549 percpu_ref_kill(&srv_path->ids_inflight_ref);
1550
	/* Wait for all inflight operations to complete */
1552 wait_for_completion(&srv_path->complete_done);
1553
1554 rtrs_srv_destroy_path_files(srv_path);
1555
1556 /* Notify upper layer if we are the last path */
1557 rtrs_srv_path_down(srv_path);
1558
1559 unmap_cont_bufs(srv_path);
1560 rtrs_srv_free_ops_ids(srv_path);
1561
1562 for (i = 0; i < srv_path->s.con_num; i++) {
1563 if (!srv_path->s.con[i])
1564 continue;
1565 con = to_srv_con(srv_path->s.con[i]);
1566 rtrs_cq_qp_destroy(&con->c);
1567 rdma_destroy_id(con->c.cm_id);
1568 kfree(con);
1569 }
1570 rtrs_ib_dev_put(srv_path->s.dev);
1571
1572 del_path_from_srv(srv_path);
1573 put_srv(srv_path->srv);
1574 srv_path->srv = NULL;
1575 rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
1576
1577 kfree(srv_path->dma_addr);
1578 kfree(srv_path->s.con);
1579 free_path(srv_path);
1580}
1581
1582static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
1583 struct rdma_cm_id *cm_id)
1584{
1585 struct rtrs_srv_sess *srv = srv_path->srv;
1586 struct rtrs_msg_conn_rsp msg;
1587 struct rdma_conn_param param;
1588 int err;
1589
1590 param = (struct rdma_conn_param) {
1591 .rnr_retry_count = 7,
1592 .private_data = &msg,
1593 .private_data_len = sizeof(msg),
1594 };
1595
1596 msg = (struct rtrs_msg_conn_rsp) {
1597 .magic = cpu_to_le16(RTRS_MAGIC),
1598 .version = cpu_to_le16(RTRS_PROTO_VER),
1599 .queue_depth = cpu_to_le16(srv->queue_depth),
1600 .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
1601 .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
1602 };
1603
1604 if (always_invalidate)
1605 msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
1606
	err = rdma_accept(cm_id, &param);
1608 if (err)
1609 pr_err("rdma_accept(), err: %d\n", err);
1610
1611 return err;
1612}
1613
1614static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
1615{
1616 struct rtrs_msg_conn_rsp msg;
1617 int err;
1618
1619 msg = (struct rtrs_msg_conn_rsp) {
1620 .magic = cpu_to_le16(RTRS_MAGIC),
1621 .version = cpu_to_le16(RTRS_PROTO_VER),
1622 .errno = cpu_to_le16(errno),
1623 };
1624
1625 err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
1626 if (err)
1627 pr_err("rdma_reject(), err: %d\n", err);
1628
1629 /* Bounce errno back */
1630 return errno;
1631}
1632
1633static struct rtrs_srv_path *
1634__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
1635{
1636 struct rtrs_srv_path *srv_path;
1637
1638 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1639 if (uuid_equal(&srv_path->s.uuid, sess_uuid))
1640 return srv_path;
1641 }
1642
1643 return NULL;
1644}
1645
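/*
 * Create one RDMA connection of a path: size the send/receive queues
 * (the service connection, cid 0, uses SERVICE_CON_QUEUE_DEPTH; I/O
 * connections are sized from the session queue depth), create CQ/QP and,
 * for the service connection, post the receive for the info request.
 */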
1646static int create_con(struct rtrs_srv_path *srv_path,
1647 struct rdma_cm_id *cm_id,
1648 unsigned int cid)
1649{
1650 struct rtrs_srv_sess *srv = srv_path->srv;
1651 struct rtrs_path *s = &srv_path->s;
1652 struct rtrs_srv_con *con;
1653
1654 u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
1655 int err, cq_vector;
1656
1657 con = kzalloc(sizeof(*con), GFP_KERNEL);
1658 if (!con) {
1659 err = -ENOMEM;
1660 goto err;
1661 }
1662
1663 spin_lock_init(&con->rsp_wr_wait_lock);
1664 INIT_LIST_HEAD(&con->rsp_wr_wait_list);
1665 con->c.cm_id = cm_id;
1666 con->c.path = &srv_path->s;
1667 con->c.cid = cid;
1668 atomic_set(&con->c.wr_cnt, 1);
1669 wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
1670
1671 if (con->c.cid == 0) {
1672 /*
1673 * All receive and all send (each requiring invalidate)
1674 * + 2 for drain and heartbeat
1675 */
1676 max_send_wr = min_t(int, wr_limit,
1677 SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1678 max_recv_wr = max_send_wr;
1679 s->signal_interval = min_not_zero(srv->queue_depth,
1680 (size_t)SERVICE_CON_QUEUE_DEPTH);
1681 } else {
		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
1683 if (always_invalidate)
1684 max_send_wr =
1685 min_t(int, wr_limit,
1686 srv->queue_depth * (1 + 4) + 1);
1687 else
1688 max_send_wr =
1689 min_t(int, wr_limit,
1690 srv->queue_depth * (1 + 2) + 1);
1691
1692 max_recv_wr = srv->queue_depth + 1;
1693 }
1694 cq_num = max_send_wr + max_recv_wr;
1695 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1696 cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
1697
1698 /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
1699 err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
1700 max_send_wr, max_recv_wr,
1701 IB_POLL_WORKQUEUE);
1702 if (err) {
1703 rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
1704 goto free_con;
1705 }
1706 if (con->c.cid == 0) {
1707 err = post_recv_info_req(con);
1708 if (err)
1709 goto free_cqqp;
1710 }
1711 WARN_ON(srv_path->s.con[cid]);
1712 srv_path->s.con[cid] = &con->c;
1713
1714 /*
1715 * Change context from server to current connection. The other
1716 * way is to use cm_id->qp->qp_context, which does not work on OFED.
1717 */
1718 cm_id->context = &con->c;
1719
1720 return 0;
1721
1722free_cqqp:
1723 rtrs_cq_qp_destroy(&con->c);
1724free_con:
1725 kfree(con);
1726
1727err:
1728 return err;
1729}
1730
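/* Allocate and initialize a new path; called with srv->paths_mutex held */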
1731static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
1732 struct rdma_cm_id *cm_id,
1733 unsigned int con_num,
1734 unsigned int recon_cnt,
1735 const uuid_t *uuid)
1736{
1737 struct rtrs_srv_path *srv_path;
1738 int err = -ENOMEM;
1739 char str[NAME_MAX];
1740 struct rtrs_addr path;
1741
1742 if (srv->paths_num >= MAX_PATHS_NUM) {
1743 err = -ECONNRESET;
1744 goto err;
1745 }
1746 if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
1747 err = -EEXIST;
1748 pr_err("Path with same addr exists\n");
1749 goto err;
1750 }
1751 srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
1752 if (!srv_path)
1753 goto err;
1754
1755 srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
1756 if (!srv_path->stats)
1757 goto err_free_sess;
1758
1759 srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
1760 if (!srv_path->stats->rdma_stats)
1761 goto err_free_stats;
1762
1763 srv_path->stats->srv_path = srv_path;
1764
1765 srv_path->dma_addr = kcalloc(srv->queue_depth,
1766 sizeof(*srv_path->dma_addr),
1767 GFP_KERNEL);
1768 if (!srv_path->dma_addr)
1769 goto err_free_percpu;
1770
1771 srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
1772 GFP_KERNEL);
1773 if (!srv_path->s.con)
1774 goto err_free_dma_addr;
1775
1776 srv_path->state = RTRS_SRV_CONNECTING;
1777 srv_path->srv = srv;
1778 srv_path->cur_cq_vector = -1;
1779 srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
1780 srv_path->s.src_addr = cm_id->route.addr.src_addr;
1781
1782 /* temporary until receiving session-name from client */
1783 path.src = &srv_path->s.src_addr;
1784 path.dst = &srv_path->s.dst_addr;
1785 rtrs_addr_to_str(&path, str, sizeof(str));
1786 strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
1787
1788 srv_path->s.con_num = con_num;
1789 srv_path->s.irq_con_num = con_num;
1790 srv_path->s.recon_cnt = recon_cnt;
1791 uuid_copy(&srv_path->s.uuid, uuid);
1792 spin_lock_init(&srv_path->state_lock);
1793 INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
1794 rtrs_srv_init_hb(srv_path);
1795
1796 srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
1797 if (!srv_path->s.dev) {
1798 err = -ENOMEM;
1799 goto err_free_con;
1800 }
1801 err = map_cont_bufs(srv_path);
1802 if (err)
1803 goto err_put_dev;
1804
1805 err = rtrs_srv_alloc_ops_ids(srv_path);
1806 if (err)
1807 goto err_unmap_bufs;
1808
1809 __add_path_to_srv(srv, srv_path);
1810
1811 return srv_path;
1812
1813err_unmap_bufs:
1814 unmap_cont_bufs(srv_path);
1815err_put_dev:
1816 rtrs_ib_dev_put(srv_path->s.dev);
1817err_free_con:
1818 kfree(srv_path->s.con);
1819err_free_dma_addr:
1820 kfree(srv_path->dma_addr);
1821err_free_percpu:
1822 free_percpu(srv_path->stats->rdma_stats);
1823err_free_stats:
1824 kfree(srv_path->stats);
1825err_free_sess:
1826 kfree(srv_path);
1827err:
1828 return ERR_PTR(err);
1829}
1830
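/*
 * Handle an RDMA_CM_EVENT_CONNECT_REQUEST: validate the request, find or
 * create the server session and path, create the connection and accept
 * it; on any failure the request is rejected.
 */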
1831static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
1832 const struct rtrs_msg_conn_req *msg,
1833 size_t len)
1834{
1835 struct rtrs_srv_ctx *ctx = cm_id->context;
1836 struct rtrs_srv_path *srv_path;
1837 struct rtrs_srv_sess *srv;
1838
1839 u16 version, con_num, cid;
1840 u16 recon_cnt;
1841 int err = -ECONNRESET;
1842
1843 if (len < sizeof(*msg)) {
1844 pr_err("Invalid RTRS connection request\n");
1845 goto reject_w_err;
1846 }
1847 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1848 pr_err("Invalid RTRS magic\n");
1849 goto reject_w_err;
1850 }
1851 version = le16_to_cpu(msg->version);
1852 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1853 pr_err("Unsupported major RTRS version: %d, expected %d\n",
1854 version >> 8, RTRS_PROTO_VER_MAJOR);
1855 goto reject_w_err;
1856 }
1857 con_num = le16_to_cpu(msg->cid_num);
1858 if (con_num > 4096) {
1859 /* Sanity check */
1860 pr_err("Too many connections requested: %d\n", con_num);
1861 goto reject_w_err;
1862 }
1863 cid = le16_to_cpu(msg->cid);
1864 if (cid >= con_num) {
1865 /* Sanity check */
1866 pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
1867 goto reject_w_err;
1868 }
1869 recon_cnt = le16_to_cpu(msg->recon_cnt);
1870 srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
1871 if (IS_ERR(srv)) {
1872 err = PTR_ERR(srv);
1873 pr_err("get_or_create_srv(), error %d\n", err);
1874 goto reject_w_err;
1875 }
1876 mutex_lock(&srv->paths_mutex);
1877 srv_path = __find_path(srv, &msg->sess_uuid);
1878 if (srv_path) {
1879 struct rtrs_path *s = &srv_path->s;
1880
1881 /* Session already holds a reference */
1882 put_srv(srv);
1883
1884 if (srv_path->state != RTRS_SRV_CONNECTING) {
1885 rtrs_err(s, "Session in wrong state: %s\n",
1886 rtrs_srv_state_str(srv_path->state));
1887 mutex_unlock(&srv->paths_mutex);
1888 goto reject_w_err;
1889 }
1890 /*
1891 * Sanity checks
1892 */
1893 if (con_num != s->con_num || cid >= s->con_num) {
1894 rtrs_err(s, "Incorrect request: %d, %d\n",
1895 cid, con_num);
1896 mutex_unlock(&srv->paths_mutex);
1897 goto reject_w_err;
1898 }
1899 if (s->con[cid]) {
1900 rtrs_err(s, "Connection already exists: %d\n",
1901 cid);
1902 mutex_unlock(&srv->paths_mutex);
1903 goto reject_w_err;
1904 }
1905 } else {
1906 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
1907 &msg->sess_uuid);
1908 if (IS_ERR(srv_path)) {
1909 mutex_unlock(&srv->paths_mutex);
1910 put_srv(srv);
1911 err = PTR_ERR(srv_path);
1912 pr_err("RTRS server session allocation failed: %d\n", err);
1913 goto reject_w_err;
1914 }
1915 }
1916 err = create_con(srv_path, cm_id, cid);
1917 if (err) {
1918 rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
1919 rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the session has other connections we follow the normal
		 * way through the workqueue, but still return an error to tell
		 * cma.c to call rdma_destroy_id() for the current connection.
		 */
1925 goto close_and_return_err;
1926 }
1927 err = rtrs_rdma_do_accept(srv_path, cm_id);
1928 if (err) {
1929 rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
1930 rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the current connection was successfully added to the
		 * session we follow the normal way through the workqueue to
		 * close the session, thus return 0 to tell cma.c we will call
		 * rdma_destroy_id() ourselves.
		 */
1937 err = 0;
1938 goto close_and_return_err;
1939 }
1940 mutex_unlock(&srv->paths_mutex);
1941
1942 return 0;
1943
1944reject_w_err:
1945 return rtrs_rdma_do_reject(cm_id, err);
1946
1947close_and_return_err:
1948 mutex_unlock(&srv->paths_mutex);
1949 close_path(srv_path);
1950
1951 return err;
1952}
1953
1954static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
1955 struct rdma_cm_event *ev)
1956{
1957 struct rtrs_srv_path *srv_path = NULL;
1958 struct rtrs_path *s = NULL;
1959 struct rtrs_con *c = NULL;
1960
1961 if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
1962 /*
1963 * In case of error cma.c will destroy cm_id,
1964 * see cma_process_remove()
1965 */
1966 return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
1967 ev->param.conn.private_data_len);
1968
1969 c = cm_id->context;
1970 s = c->path;
1971 srv_path = to_srv_path(s);
1972
1973 switch (ev->event) {
1974 case RDMA_CM_EVENT_ESTABLISHED:
1975 /* Nothing here */
1976 break;
1977 case RDMA_CM_EVENT_REJECTED:
1978 case RDMA_CM_EVENT_CONNECT_ERROR:
1979 case RDMA_CM_EVENT_UNREACHABLE:
1980 rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
1981 rdma_event_msg(ev->event), ev->status);
1982 fallthrough;
1983 case RDMA_CM_EVENT_DISCONNECTED:
1984 case RDMA_CM_EVENT_ADDR_CHANGE:
1985 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1986 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1987 close_path(srv_path);
1988 break;
1989 default:
1990 pr_err("Ignoring unexpected CM event %s, err %d\n",
1991 rdma_event_msg(ev->event), ev->status);
1992 break;
1993 }
1994
1995 return 0;
1996}
1997
1998static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
1999 struct sockaddr *addr,
2000 enum rdma_ucm_port_space ps)
2001{
2002 struct rdma_cm_id *cm_id;
2003 int ret;
2004
2005 cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
2006 ctx, ps, IB_QPT_RC);
2007 if (IS_ERR(cm_id)) {
2008 ret = PTR_ERR(cm_id);
2009 pr_err("Creating id for RDMA connection failed, err: %d\n",
2010 ret);
2011 goto err_out;
2012 }
2013 ret = rdma_bind_addr(cm_id, addr);
2014 if (ret) {
2015 pr_err("Binding RDMA address failed, err: %d\n", ret);
2016 goto err_cm;
2017 }
2018 ret = rdma_listen(cm_id, 64);
2019 if (ret) {
2020 pr_err("Listening on RDMA connection failed, err: %d\n",
2021 ret);
2022 goto err_cm;
2023 }
2024
2025 return cm_id;
2026
2027err_cm:
2028 rdma_destroy_id(cm_id);
2029err_out:
2030
2031 return ERR_PTR(ret);
2032}
2033
2034static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
2035{
2036 struct sockaddr_in6 sin = {
2037 .sin6_family = AF_INET6,
2038 .sin6_addr = IN6ADDR_ANY_INIT,
2039 .sin6_port = htons(port),
2040 };
2041 struct sockaddr_ib sib = {
2042 .sib_family = AF_IB,
2043 .sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
2044 .sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
2045 .sib_pkey = cpu_to_be16(0xffff),
2046 };
2047 struct rdma_cm_id *cm_ip, *cm_ib;
2048 int ret;
2049
2050 /*
2051 * We accept both IPoIB and IB connections, so we need to keep
2052 * two CM IDs, one for each socket type and port space.
2053 * If the CM initialization of one of the IDs fails, we abort
2054 * everything.
2055 */
2056 cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
2057 if (IS_ERR(cm_ip))
2058 return PTR_ERR(cm_ip);
2059
2060 cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
2061 if (IS_ERR(cm_ib)) {
2062 ret = PTR_ERR(cm_ib);
2063 goto free_cm_ip;
2064 }
2065
2066 ctx->cm_id_ip = cm_ip;
2067 ctx->cm_id_ib = cm_ib;
2068
2069 return 0;
2070
2071free_cm_ip:
2072 rdma_destroy_id(cm_ip);
2073
2074 return ret;
2075}
2076
2077static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
2078{
2079 struct rtrs_srv_ctx *ctx;
2080
2081 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2082 if (!ctx)
2083 return NULL;
2084
2085 ctx->ops = *ops;
2086 mutex_init(&ctx->srv_mutex);
2087 INIT_LIST_HEAD(&ctx->srv_list);
2088
2089 return ctx;
2090}
2091
2092static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
2093{
2094 WARN_ON(!list_empty(&ctx->srv_list));
2095 mutex_destroy(&ctx->srv_mutex);
2096 kfree(ctx);
2097}
2098
2099static int rtrs_srv_add_one(struct ib_device *device)
2100{
2101 struct rtrs_srv_ctx *ctx;
2102 int ret = 0;
2103
2104 mutex_lock(&ib_ctx.ib_dev_mutex);
2105 if (ib_ctx.ib_dev_count)
2106 goto out;
2107
2108 /*
2109 * Since our CM IDs are NOT bound to any ib device we will create them
2110 * only once
2111 */
2112 ctx = ib_ctx.srv_ctx;
2113 ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
2114 if (ret) {
2115 /*
2116 * We errored out here.
2117 * According to the IB client API, the error code we return here is
2118 * ignored and no further calls to our ops are made for this device.
2119 */
2120 pr_err("Failed to initialize RDMA connection");
2121 goto err_out;
2122 }
2123
2124out:
2125 /*
2126 * Keep track of the number of IB devices added
2127 */
2128 ib_ctx.ib_dev_count++;
2129
2130err_out:
2131 mutex_unlock(&ib_ctx.ib_dev_mutex);
2132 return ret;
2133}
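/*
 * Illustrative sequence, assuming a host with two HCAs: the first
 * rtrs_srv_add_one() call creates both listening CM IDs and bumps
 * ib_dev_count to 1, the second call only increments the counter, and the
 * listeners are destroyed in rtrs_srv_remove_one() once the counter drops
 * back to zero.
 */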
2134
2135static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
2136{
2137 struct rtrs_srv_ctx *ctx;
2138
2139 mutex_lock(&ib_ctx.ib_dev_mutex);
2140 ib_ctx.ib_dev_count--;
2141
2142 if (ib_ctx.ib_dev_count)
2143 goto out;
2144
2145 /*
2146 * Since our CM IDs are NOT bound to any ib device we will remove them
2147 * only once, when the last device is removed
2148 */
2149 ctx = ib_ctx.srv_ctx;
2150 rdma_destroy_id(ctx->cm_id_ip);
2151 rdma_destroy_id(ctx->cm_id_ib);
2152
2153out:
2154 mutex_unlock(&ib_ctx.ib_dev_mutex);
2155}
2156
2157static struct ib_client rtrs_srv_client = {
2158 .name = "rtrs_server",
2159 .add = rtrs_srv_add_one,
2160 .remove = rtrs_srv_remove_one
2161};
2162
2163/**
2164 * rtrs_srv_open() - open RTRS server context
2165 * @ops: callback functions
2166 * @port: port to listen on
2167 *
2168 * Creates server context with specified callbacks.
2169 *
2170 * Return: a valid pointer on success, otherwise an ERR_PTR() encoded error.
2171 */
2172struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
2173{
2174 struct rtrs_srv_ctx *ctx;
2175 int err;
2176
2177 ctx = alloc_srv_ctx(ops);
2178 if (!ctx)
2179 return ERR_PTR(-ENOMEM);
2180
2181 mutex_init(&ib_ctx.ib_dev_mutex);
2182 ib_ctx.srv_ctx = ctx;
2183 ib_ctx.port = port;
2184
2185 err = ib_register_client(&rtrs_srv_client);
2186 if (err) {
2187 free_srv_ctx(ctx);
2188 return ERR_PTR(err);
2189 }
2190
2191 return ctx;
2192}
2193EXPORT_SYMBOL(rtrs_srv_open);
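/*
 * Illustrative usage sketch (the callback and variable names are made up;
 * only the rtrs_srv_ops members and the rtrs_srv_open()/rtrs_srv_close()
 * calls are real):
 *
 *	static struct rtrs_srv_ops my_ops = {
 *		.rdma_ev = my_rdma_ev,
 *		.link_ev = my_link_ev,
 *	};
 *	struct rtrs_srv_ctx *ctx;
 *
 *	ctx = rtrs_srv_open(&my_ops, 1234);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	rtrs_srv_close(ctx);
 */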
2194
2195static void close_paths(struct rtrs_srv_sess *srv)
2196{
2197 struct rtrs_srv_path *srv_path;
2198
2199 mutex_lock(&srv->paths_mutex);
2200 list_for_each_entry(srv_path, &srv->paths_list, s.entry)
2201 close_path(srv_path);
2202 mutex_unlock(&srv->paths_mutex);
2203}
2204
2205static void close_ctx(struct rtrs_srv_ctx *ctx)
2206{
2207 struct rtrs_srv_sess *srv;
2208
2209 mutex_lock(&ctx->srv_mutex);
2210 list_for_each_entry(srv, &ctx->srv_list, ctx_list)
2211 close_paths(srv);
2212 mutex_unlock(&ctx->srv_mutex);
2213 flush_workqueue(rtrs_wq);
2214}
2215
2216/**
2217 * rtrs_srv_close() - close RTRS server context
2218 * @ctx: pointer to server context
2219 *
2220 * Closes RTRS server context with all client sessions.
2221 */
2222void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
2223{
2224 ib_unregister_client(&rtrs_srv_client);
2225 mutex_destroy(&ib_ctx.ib_dev_mutex);
2226 close_ctx(ctx);
2227 free_srv_ctx(ctx);
2228}
2229EXPORT_SYMBOL(rtrs_srv_close);
2230
2231static int check_module_params(void)
2232{
2233 if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
2234 pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
2235 sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
2236 return -EINVAL;
2237 }
2238 if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
2239 pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
2240 max_chunk_size, MIN_CHUNK_SIZE);
2241 return -EINVAL;
2242 }
2243
2244 /*
2245 * Check if IB immediate data size is enough to hold the mem_id and the
2246 * offset inside the memory chunk
2247 */
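	/*
	 * For example, with the defaults (illustrative arithmetic only):
	 * sess_queue_depth = 512 needs ilog2(511) + 1 = 9 bits for the buffer
	 * id, max_chunk_size = 128 KiB needs ilog2(131071) + 1 = 17 bits for
	 * the offset, so 9 + 17 = 26 bits must fit into MAX_IMM_PAYL_BITS.
	 */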
2248 if ((ilog2(sess_queue_depth - 1) + 1) +
2249 (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
2250 pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
2251 MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
2252 return -EINVAL;
2253 }
2254
2255 return 0;
2256}
2257
2258static int __init rtrs_server_init(void)
2259{
2260 int err;
2261
2262 pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld), sess_queue_depth: %d, always_invalidate: %d)\n",
2263 KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
2264 max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
2265 sess_queue_depth, always_invalidate);
2266
2267 rtrs_rdma_dev_pd_init(0, &dev_pd);
2268
2269 err = check_module_params();
2270 if (err) {
2271 pr_err("Failed to load module, invalid module parameters, err: %d\n",
2272 err);
2273 return err;
2274 }
2275 err = class_register(&rtrs_dev_class);
2276 if (err)
2277 goto out_err;
2278
2279 rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
2280 if (!rtrs_wq) {
2281 err = -ENOMEM;
2282 goto out_dev_class;
2283 }
2284
2285 return 0;
2286
2287out_dev_class:
2288 class_unregister(&rtrs_dev_class);
2289out_err:
2290 return err;
2291}
2292
2293static void __exit rtrs_server_exit(void)
2294{
2295 destroy_workqueue(rtrs_wq);
2296 class_unregister(&rtrs_dev_class);
2297 rtrs_rdma_dev_pd_deinit(&dev_pd);
2298}
2299
2300module_init(rtrs_server_init);
2301module_exit(rtrs_server_exit);
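/*
 * Illustrative module load example (assuming the module is built as
 * rtrs-server.ko; the parameter names come from the module_param definitions
 * at the top of this file):
 *
 *	modprobe rtrs-server max_chunk_size=65536 sess_queue_depth=256
 */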
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * RDMA Transport Layer
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10#undef pr_fmt
11#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13#include <linux/module.h>
14
15#include "rtrs-srv.h"
16#include "rtrs-log.h"
17#include <rdma/ib_cm.h>
18#include <rdma/ib_verbs.h>
19#include "rtrs-srv-trace.h"
20
21MODULE_DESCRIPTION("RDMA Transport Server");
22MODULE_LICENSE("GPL");
23
24/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
25#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
26#define DEFAULT_SESS_QUEUE_DEPTH 512
27#define MAX_HDR_SIZE PAGE_SIZE
28
29static struct rtrs_rdma_dev_pd dev_pd;
30struct class *rtrs_dev_class;
31static struct rtrs_srv_ib_ctx ib_ctx;
32
33static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
34static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
35
36static bool always_invalidate = true;
37module_param(always_invalidate, bool, 0444);
38MODULE_PARM_DESC(always_invalidate,
39 "Invalidate memory registration for contiguous memory regions before accessing.");
40
41module_param_named(max_chunk_size, max_chunk_size, int, 0444);
42MODULE_PARM_DESC(max_chunk_size,
43 "Max size for each IO request, when change the unit is in byte (default: "
44 __stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
45
46module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
47MODULE_PARM_DESC(sess_queue_depth,
48 "Number of buffers for pending I/O requests to allocate per session. Maximum: "
49 __stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
50 __stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
51
52static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
53
54static struct workqueue_struct *rtrs_wq;
55
56static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
57{
58 return container_of(c, struct rtrs_srv_con, c);
59}
60
61static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
62 enum rtrs_srv_state new_state)
63{
64 enum rtrs_srv_state old_state;
65 bool changed = false;
66
67 spin_lock_irq(&srv_path->state_lock);
68 old_state = srv_path->state;
69 switch (new_state) {
70 case RTRS_SRV_CONNECTED:
71 if (old_state == RTRS_SRV_CONNECTING)
72 changed = true;
73 break;
74 case RTRS_SRV_CLOSING:
75 if (old_state == RTRS_SRV_CONNECTING ||
76 old_state == RTRS_SRV_CONNECTED)
77 changed = true;
78 break;
79 case RTRS_SRV_CLOSED:
80 if (old_state == RTRS_SRV_CLOSING)
81 changed = true;
82 break;
83 default:
84 break;
85 }
86 if (changed)
87 srv_path->state = new_state;
88 spin_unlock_irq(&srv_path->state_lock);
89
90 return changed;
91}
92
93static void free_id(struct rtrs_srv_op *id)
94{
95 if (!id)
96 return;
97 kfree(id);
98}
99
100static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
101{
102 struct rtrs_srv_sess *srv = srv_path->srv;
103 int i;
104
105 if (srv_path->ops_ids) {
106 for (i = 0; i < srv->queue_depth; i++)
107 free_id(srv_path->ops_ids[i]);
108 kfree(srv_path->ops_ids);
109 srv_path->ops_ids = NULL;
110 }
111}
112
113static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
114
115static struct ib_cqe io_comp_cqe = {
116 .done = rtrs_srv_rdma_done
117};
118
119static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
120{
121 struct rtrs_srv_path *srv_path = container_of(ref,
122 struct rtrs_srv_path,
123 ids_inflight_ref);
124
125 percpu_ref_exit(&srv_path->ids_inflight_ref);
126 complete(&srv_path->complete_done);
127}
128
129static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
130{
131 struct rtrs_srv_sess *srv = srv_path->srv;
132 struct rtrs_srv_op *id;
133 int i, ret;
134
135 srv_path->ops_ids = kcalloc(srv->queue_depth,
136 sizeof(*srv_path->ops_ids),
137 GFP_KERNEL);
138 if (!srv_path->ops_ids)
139 goto err;
140
141 for (i = 0; i < srv->queue_depth; ++i) {
142 id = kzalloc(sizeof(*id), GFP_KERNEL);
143 if (!id)
144 goto err;
145
146 srv_path->ops_ids[i] = id;
147 }
148
149 ret = percpu_ref_init(&srv_path->ids_inflight_ref,
150 rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
151 if (ret) {
152 pr_err("Percpu reference init failed\n");
153 goto err;
154 }
155 init_completion(&srv_path->complete_done);
156
157 return 0;
158
159err:
160 rtrs_srv_free_ops_ids(srv_path);
161 return -ENOMEM;
162}
163
164static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
165{
166 percpu_ref_get(&srv_path->ids_inflight_ref);
167}
168
169static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
170{
171 percpu_ref_put(&srv_path->ids_inflight_ref);
172}
173
174static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
175{
176 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
177 struct rtrs_path *s = con->c.path;
178 struct rtrs_srv_path *srv_path = to_srv_path(s);
179
180 if (wc->status != IB_WC_SUCCESS) {
181 rtrs_err(s, "REG MR failed: %s\n",
182 ib_wc_status_msg(wc->status));
183 close_path(srv_path);
184 return;
185 }
186}
187
188static struct ib_cqe local_reg_cqe = {
189 .done = rtrs_srv_reg_mr_done
190};
191
192static int rdma_write_sg(struct rtrs_srv_op *id)
193{
194 struct rtrs_path *s = id->con->c.path;
195 struct rtrs_srv_path *srv_path = to_srv_path(s);
196 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
197 struct rtrs_srv_mr *srv_mr;
198 struct ib_send_wr inv_wr;
199 struct ib_rdma_wr imm_wr;
200 struct ib_rdma_wr *wr = NULL;
201 enum ib_send_flags flags;
202 size_t sg_cnt;
203 int err, offset;
204 bool need_inval;
205 u32 rkey = 0;
206 struct ib_reg_wr rwr;
207 struct ib_sge *plist;
208 struct ib_sge list;
209
210 sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
211 need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
212 if (sg_cnt != 1)
213 return -EINVAL;
214
215 offset = 0;
216
217 wr = &id->tx_wr;
218 plist = &id->tx_sg;
219 plist->addr = dma_addr + offset;
220 plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
221
222 /* WR will fail with length error
223 * if this is 0
224 */
225 if (plist->length == 0) {
226 rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
227 return -EINVAL;
228 }
229
230 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
231 offset += plist->length;
232
233 wr->wr.sg_list = plist;
234 wr->wr.num_sge = 1;
235 wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
236 wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
237 if (rkey == 0)
238 rkey = wr->rkey;
239 else
240 /* Only one key is actually used */
241 WARN_ON_ONCE(rkey != wr->rkey);
242
243 wr->wr.opcode = IB_WR_RDMA_WRITE;
244 wr->wr.wr_cqe = &io_comp_cqe;
245 wr->wr.ex.imm_data = 0;
246 wr->wr.send_flags = 0;
247
248 if (need_inval && always_invalidate) {
249 wr->wr.next = &rwr.wr;
250 rwr.wr.next = &inv_wr;
251 inv_wr.next = &imm_wr.wr;
252 } else if (always_invalidate) {
253 wr->wr.next = &rwr.wr;
254 rwr.wr.next = &imm_wr.wr;
255 } else if (need_inval) {
256 wr->wr.next = &inv_wr;
257 inv_wr.next = &imm_wr.wr;
258 } else {
259 wr->wr.next = &imm_wr.wr;
260 }
261 /*
262 * From time to time we have to post signaled sends,
263 * or send queue will fill up and only QP reset can help.
264 */
265 flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
266 0 : IB_SEND_SIGNALED;
267
268 if (need_inval) {
269 inv_wr.sg_list = NULL;
270 inv_wr.num_sge = 0;
271 inv_wr.opcode = IB_WR_SEND_WITH_INV;
272 inv_wr.wr_cqe = &io_comp_cqe;
273 inv_wr.send_flags = 0;
274 inv_wr.ex.invalidate_rkey = rkey;
275 }
276
277 imm_wr.wr.next = NULL;
278 if (always_invalidate) {
279 struct rtrs_msg_rkey_rsp *msg;
280
281 srv_mr = &srv_path->mrs[id->msg_id];
282 rwr.wr.opcode = IB_WR_REG_MR;
283 rwr.wr.wr_cqe = &local_reg_cqe;
284 rwr.wr.num_sge = 0;
285 rwr.mr = srv_mr->mr;
286 rwr.wr.send_flags = 0;
287 rwr.key = srv_mr->mr->rkey;
288 rwr.access = (IB_ACCESS_LOCAL_WRITE |
289 IB_ACCESS_REMOTE_WRITE);
290 msg = srv_mr->iu->buf;
291 msg->buf_id = cpu_to_le16(id->msg_id);
292 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
293 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
294
295 list.addr = srv_mr->iu->dma_addr;
296 list.length = sizeof(*msg);
297 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
298 imm_wr.wr.sg_list = &list;
299 imm_wr.wr.num_sge = 1;
300 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
301 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
302 srv_mr->iu->dma_addr,
303 srv_mr->iu->size, DMA_TO_DEVICE);
304 } else {
305 imm_wr.wr.sg_list = NULL;
306 imm_wr.wr.num_sge = 0;
307 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
308 }
309 imm_wr.wr.send_flags = flags;
310 imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
311 0, need_inval));
312
313 imm_wr.wr.wr_cqe = &io_comp_cqe;
314 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
315 offset, DMA_BIDIRECTIONAL);
316
317 err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
318 if (err)
319 rtrs_err(s,
320 "Posting RDMA-Write-Request to QP failed, err: %d\n",
321 err);
322
323 return err;
324}
325
326/**
327 * send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
328 * requests or on successful WRITE request.
329 * @con: the connection to send back result
330 * @id: the id associated with the IO
331 * @errno: the error number of the IO.
332 *
333 * Return 0 on success, errno otherwise.
334 */
335static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
336 int errno)
337{
338 struct rtrs_path *s = con->c.path;
339 struct rtrs_srv_path *srv_path = to_srv_path(s);
340 struct ib_send_wr inv_wr, *wr = NULL;
341 struct ib_rdma_wr imm_wr;
342 struct ib_reg_wr rwr;
343 struct rtrs_srv_mr *srv_mr;
344 bool need_inval = false;
345 enum ib_send_flags flags;
346 u32 imm;
347 int err;
348
349 if (id->dir == READ) {
350 struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
351 size_t sg_cnt;
352
353 need_inval = le16_to_cpu(rd_msg->flags) &
354 RTRS_MSG_NEED_INVAL_F;
355 sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
356
357 if (need_inval) {
358 if (sg_cnt) {
359 inv_wr.wr_cqe = &io_comp_cqe;
360 inv_wr.sg_list = NULL;
361 inv_wr.num_sge = 0;
362 inv_wr.opcode = IB_WR_SEND_WITH_INV;
363 inv_wr.send_flags = 0;
364 /* Only one key is actually used */
365 inv_wr.ex.invalidate_rkey =
366 le32_to_cpu(rd_msg->desc[0].key);
367 } else {
368 WARN_ON_ONCE(1);
369 need_inval = false;
370 }
371 }
372 }
373
374 trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
375
376 if (need_inval && always_invalidate) {
377 wr = &inv_wr;
378 inv_wr.next = &rwr.wr;
379 rwr.wr.next = &imm_wr.wr;
380 } else if (always_invalidate) {
381 wr = &rwr.wr;
382 rwr.wr.next = &imm_wr.wr;
383 } else if (need_inval) {
384 wr = &inv_wr;
385 inv_wr.next = &imm_wr.wr;
386 } else {
387 wr = &imm_wr.wr;
388 }
389 /*
390 * From time to time we have to post signalled sends,
391 * or send queue will fill up and only QP reset can help.
392 */
393 flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
394 0 : IB_SEND_SIGNALED;
395 imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
396 imm_wr.wr.next = NULL;
397 if (always_invalidate) {
398 struct ib_sge list;
399 struct rtrs_msg_rkey_rsp *msg;
400
401 srv_mr = &srv_path->mrs[id->msg_id];
402 rwr.wr.next = &imm_wr.wr;
403 rwr.wr.opcode = IB_WR_REG_MR;
404 rwr.wr.wr_cqe = &local_reg_cqe;
405 rwr.wr.num_sge = 0;
406 rwr.wr.send_flags = 0;
407 rwr.mr = srv_mr->mr;
408 rwr.key = srv_mr->mr->rkey;
409 rwr.access = (IB_ACCESS_LOCAL_WRITE |
410 IB_ACCESS_REMOTE_WRITE);
411 msg = srv_mr->iu->buf;
412 msg->buf_id = cpu_to_le16(id->msg_id);
413 msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
414 msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
415
416 list.addr = srv_mr->iu->dma_addr;
417 list.length = sizeof(*msg);
418 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
419 imm_wr.wr.sg_list = &list;
420 imm_wr.wr.num_sge = 1;
421 imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
422 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
423 srv_mr->iu->dma_addr,
424 srv_mr->iu->size, DMA_TO_DEVICE);
425 } else {
426 imm_wr.wr.sg_list = NULL;
427 imm_wr.wr.num_sge = 0;
428 imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
429 }
430 imm_wr.wr.send_flags = flags;
431 imm_wr.wr.wr_cqe = &io_comp_cqe;
432
433 imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
434
435 err = ib_post_send(id->con->c.qp, wr, NULL);
436 if (err)
437 rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
438 err);
439
440 return err;
441}
442
443void close_path(struct rtrs_srv_path *srv_path)
444{
445 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
446 queue_work(rtrs_wq, &srv_path->close_work);
447 WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
448}
449
450static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
451{
452 switch (state) {
453 case RTRS_SRV_CONNECTING:
454 return "RTRS_SRV_CONNECTING";
455 case RTRS_SRV_CONNECTED:
456 return "RTRS_SRV_CONNECTED";
457 case RTRS_SRV_CLOSING:
458 return "RTRS_SRV_CLOSING";
459 case RTRS_SRV_CLOSED:
460 return "RTRS_SRV_CLOSED";
461 default:
462 return "UNKNOWN";
463 }
464}
465
466/**
467 * rtrs_srv_resp_rdma() - Finish an RDMA request
468 *
469 * @id: Internal RTRS operation identifier
470 * @status: Response Code sent to the other side for this operation.
471 * 0 = success, <=0 error
472 * Context: any
473 *
474 * Finish a RDMA operation. A message is sent to the client and the
475 * corresponding memory areas will be released.
476 */
477bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
478{
479 struct rtrs_srv_path *srv_path;
480 struct rtrs_srv_con *con;
481 struct rtrs_path *s;
482 int err;
483
484 if (WARN_ON(!id))
485 return true;
486
487 con = id->con;
488 s = con->c.path;
489 srv_path = to_srv_path(s);
490
491 id->status = status;
492
493 if (srv_path->state != RTRS_SRV_CONNECTED) {
494 rtrs_err_rl(s,
495 "Sending I/O response failed, server path %s is disconnected, path state %s\n",
496 kobject_name(&srv_path->kobj),
497 rtrs_srv_state_str(srv_path->state));
498 goto out;
499 }
500 if (always_invalidate) {
501 struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
502
503 ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
504 }
505 if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
506 rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
507 kobject_name(&srv_path->kobj),
508 con->c.cid);
509 atomic_add(1, &con->c.sq_wr_avail);
510 spin_lock(&con->rsp_wr_wait_lock);
511 list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
512 spin_unlock(&con->rsp_wr_wait_lock);
513 return false;
514 }
515
516 if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
517 err = send_io_resp_imm(con, id, status);
518 else
519 err = rdma_write_sg(id);
520
521 if (err) {
522 rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
523 kobject_name(&srv_path->kobj));
524 close_path(srv_path);
525 }
526out:
527 rtrs_srv_put_ops_ids(srv_path);
528 return true;
529}
530EXPORT_SYMBOL(rtrs_srv_resp_rdma);
531
532/**
533 * rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
534 * @srv: Session pointer
535 * @priv: The private pointer that is associated with the session.
536 */
537void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
538{
539 srv->priv = priv;
540}
541EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
542
543static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
544{
545 int i;
546
547 for (i = 0; i < srv_path->mrs_num; i++) {
548 struct rtrs_srv_mr *srv_mr;
549
550 srv_mr = &srv_path->mrs[i];
551 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
552 ib_dereg_mr(srv_mr->mr);
553 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
554 srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
555 sg_free_table(&srv_mr->sgt);
556 }
557 kfree(srv_path->mrs);
558}
559
560static int map_cont_bufs(struct rtrs_srv_path *srv_path)
561{
562 struct rtrs_srv_sess *srv = srv_path->srv;
563 struct rtrs_path *ss = &srv_path->s;
564 int i, err, mrs_num;
565 unsigned int chunk_bits;
566 int chunks_per_mr = 1;
567 struct ib_mr *mr;
568 struct sg_table *sgt;
569
570 /*
571 * Here we map queue_depth chunks to MR. Firstly we have to
572 * figure out how many chunks can we map per MR.
573 */
574 if (always_invalidate) {
575 /*
576 * in order to do invalidate for each chunks of memory, we needs
577 * more memory regions.
578 */
579 mrs_num = srv->queue_depth;
580 } else {
581 chunks_per_mr =
582 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
583 mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
584 chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
585 }
586
587 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
588 if (!srv_path->mrs)
589 return -ENOMEM;
590
591 for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
592 srv_path->mrs_num++) {
593 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
594 struct scatterlist *s;
595 int nr, nr_sgt, chunks;
596
597 sgt = &srv_mr->sgt;
598 chunks = chunks_per_mr * srv_path->mrs_num;
599 if (!always_invalidate)
600 chunks_per_mr = min_t(int, chunks_per_mr,
601 srv->queue_depth - chunks);
602
603 err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
604 if (err)
605 goto err;
606
607 for_each_sg(sgt->sgl, s, chunks_per_mr, i)
608 sg_set_page(s, srv->chunks[chunks + i],
609 max_chunk_size, 0);
610
611 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
612 sgt->nents, DMA_BIDIRECTIONAL);
613 if (!nr_sgt) {
614 err = -EINVAL;
615 goto free_sg;
616 }
617 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
618 nr_sgt);
619 if (IS_ERR(mr)) {
620 err = PTR_ERR(mr);
621 goto unmap_sg;
622 }
623 nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
624 NULL, max_chunk_size);
625 if (nr != nr_sgt) {
626 err = nr < 0 ? nr : -EINVAL;
627 goto dereg_mr;
628 }
629
630 if (always_invalidate) {
631 srv_mr->iu = rtrs_iu_alloc(1,
632 sizeof(struct rtrs_msg_rkey_rsp),
633 GFP_KERNEL, srv_path->s.dev->ib_dev,
634 DMA_TO_DEVICE, rtrs_srv_rdma_done);
635 if (!srv_mr->iu) {
636 err = -ENOMEM;
637 rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
638 goto dereg_mr;
639 }
640 }
641 /* Eventually dma addr for each chunk can be cached */
642 for_each_sg(sgt->sgl, s, nr_sgt, i)
643 srv_path->dma_addr[chunks + i] = sg_dma_address(s);
644
645 ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
646 srv_mr->mr = mr;
647 }
648
649 chunk_bits = ilog2(srv->queue_depth - 1) + 1;
650 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
651
652 return 0;
653
654dereg_mr:
655 ib_dereg_mr(mr);
656unmap_sg:
657 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
658 sgt->nents, DMA_BIDIRECTIONAL);
659free_sg:
660 sg_free_table(sgt);
661err:
662 unmap_cont_bufs(srv_path);
663
664 return err;
665}
666
667static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
668{
669 close_path(to_srv_path(c->path));
670}
671
672static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
673{
674 rtrs_init_hb(&srv_path->s, &io_comp_cqe,
675 RTRS_HB_INTERVAL_MS,
676 RTRS_HB_MISSED_MAX,
677 rtrs_srv_hb_err_handler,
678 rtrs_wq);
679}
680
681static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
682{
683 rtrs_start_hb(&srv_path->s);
684}
685
686static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
687{
688 rtrs_stop_hb(&srv_path->s);
689}
690
691static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
692{
693 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
694 struct rtrs_path *s = con->c.path;
695 struct rtrs_srv_path *srv_path = to_srv_path(s);
696 struct rtrs_iu *iu;
697
698 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
699 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
700
701 if (wc->status != IB_WC_SUCCESS) {
702 rtrs_err(s, "Sess info response send failed: %s\n",
703 ib_wc_status_msg(wc->status));
704 close_path(srv_path);
705 return;
706 }
707 WARN_ON(wc->opcode != IB_WC_SEND);
708}
709
710static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
711{
712 struct rtrs_srv_sess *srv = srv_path->srv;
713 struct rtrs_srv_ctx *ctx = srv->ctx;
714 int up;
715
716 mutex_lock(&srv->paths_ev_mutex);
717 up = ++srv->paths_up;
718 if (up == 1)
719 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
720 mutex_unlock(&srv->paths_ev_mutex);
721
722 /* Mark session as established */
723 srv_path->established = true;
724}
725
726static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
727{
728 struct rtrs_srv_sess *srv = srv_path->srv;
729 struct rtrs_srv_ctx *ctx = srv->ctx;
730
731 if (!srv_path->established)
732 return;
733
734 srv_path->established = false;
735 mutex_lock(&srv->paths_ev_mutex);
736 WARN_ON(!srv->paths_up);
737 if (--srv->paths_up == 0)
738 ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
739 mutex_unlock(&srv->paths_ev_mutex);
740}
741
742static bool exist_pathname(struct rtrs_srv_ctx *ctx,
743 const char *pathname, const uuid_t *path_uuid)
744{
745 struct rtrs_srv_sess *srv;
746 struct rtrs_srv_path *srv_path;
747 bool found = false;
748
749 mutex_lock(&ctx->srv_mutex);
750 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
751 mutex_lock(&srv->paths_mutex);
752
753 /* when a client with same uuid and same sessname tried to add a path */
754 if (uuid_equal(&srv->paths_uuid, path_uuid)) {
755 mutex_unlock(&srv->paths_mutex);
756 continue;
757 }
758
759 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
760 if (strlen(srv_path->s.sessname) == strlen(pathname) &&
761 !strcmp(srv_path->s.sessname, pathname)) {
762 found = true;
763 break;
764 }
765 }
766 mutex_unlock(&srv->paths_mutex);
767 if (found)
768 break;
769 }
770 mutex_unlock(&ctx->srv_mutex);
771 return found;
772}
773
774static int post_recv_path(struct rtrs_srv_path *srv_path);
775static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
776
777static int process_info_req(struct rtrs_srv_con *con,
778 struct rtrs_msg_info_req *msg)
779{
780 struct rtrs_path *s = con->c.path;
781 struct rtrs_srv_path *srv_path = to_srv_path(s);
782 struct ib_send_wr *reg_wr = NULL;
783 struct rtrs_msg_info_rsp *rsp;
784 struct rtrs_iu *tx_iu;
785 struct ib_reg_wr *rwr;
786 int mri, err;
787 size_t tx_sz;
788
789 err = post_recv_path(srv_path);
790 if (err) {
791 rtrs_err(s, "post_recv_path(), err: %d\n", err);
792 return err;
793 }
794
795 if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
796 rtrs_err(s, "pathname cannot contain / and .\n");
797 return -EINVAL;
798 }
799
800 if (exist_pathname(srv_path->srv->ctx,
801 msg->pathname, &srv_path->srv->paths_uuid)) {
802 rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
803 return -EPERM;
804 }
805 strscpy(srv_path->s.sessname, msg->pathname,
806 sizeof(srv_path->s.sessname));
807
808 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
809 if (!rwr)
810 return -ENOMEM;
811
812 tx_sz = sizeof(*rsp);
813 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
814 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
815 DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
816 if (!tx_iu) {
817 err = -ENOMEM;
818 goto rwr_free;
819 }
820
821 rsp = tx_iu->buf;
822 rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
823 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
824
825 for (mri = 0; mri < srv_path->mrs_num; mri++) {
826 struct ib_mr *mr = srv_path->mrs[mri].mr;
827
828 rsp->desc[mri].addr = cpu_to_le64(mr->iova);
829 rsp->desc[mri].key = cpu_to_le32(mr->rkey);
830 rsp->desc[mri].len = cpu_to_le32(mr->length);
831
832 /*
833 * Fill in reg MR request and chain them *backwards*
834 */
835 rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
836 rwr[mri].wr.opcode = IB_WR_REG_MR;
837 rwr[mri].wr.wr_cqe = &local_reg_cqe;
838 rwr[mri].wr.num_sge = 0;
839 rwr[mri].wr.send_flags = 0;
840 rwr[mri].mr = mr;
841 rwr[mri].key = mr->rkey;
842 rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
843 IB_ACCESS_REMOTE_WRITE);
844 reg_wr = &rwr[mri].wr;
845 }
846
847 err = rtrs_srv_create_path_files(srv_path);
848 if (err)
849 goto iu_free;
850 kobject_get(&srv_path->kobj);
851 get_device(&srv_path->srv->dev);
852 rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
853 rtrs_srv_start_hb(srv_path);
854
855 /*
856 * We do not account number of established connections at the current
857 * moment, we rely on the client, which should send info request when
858 * all connections are successfully established. Thus, simply notify
859 * listener with a proper event if we are the first path.
860 */
861 rtrs_srv_path_up(srv_path);
862
863 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
864 tx_iu->dma_addr,
865 tx_iu->size, DMA_TO_DEVICE);
866
867 /* Send info response */
868 err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
869 if (err) {
870 rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
871iu_free:
872 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
873 }
874rwr_free:
875 kfree(rwr);
876
877 return err;
878}
879
880static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
881{
882 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
883 struct rtrs_path *s = con->c.path;
884 struct rtrs_srv_path *srv_path = to_srv_path(s);
885 struct rtrs_msg_info_req *msg;
886 struct rtrs_iu *iu;
887 int err;
888
889 WARN_ON(con->c.cid);
890
891 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
892 if (wc->status != IB_WC_SUCCESS) {
893 rtrs_err(s, "Sess info request receive failed: %s\n",
894 ib_wc_status_msg(wc->status));
895 goto close;
896 }
897 WARN_ON(wc->opcode != IB_WC_RECV);
898
899 if (wc->byte_len < sizeof(*msg)) {
900 rtrs_err(s, "Sess info request is malformed: size %d\n",
901 wc->byte_len);
902 goto close;
903 }
904 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
905 iu->size, DMA_FROM_DEVICE);
906 msg = iu->buf;
907 if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
908 rtrs_err(s, "Sess info request is malformed: type %d\n",
909 le16_to_cpu(msg->type));
910 goto close;
911 }
912 err = process_info_req(con, msg);
913 if (err)
914 goto close;
915
916out:
917 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
918 return;
919close:
920 close_path(srv_path);
921 goto out;
922}
923
924static int post_recv_info_req(struct rtrs_srv_con *con)
925{
926 struct rtrs_path *s = con->c.path;
927 struct rtrs_srv_path *srv_path = to_srv_path(s);
928 struct rtrs_iu *rx_iu;
929 int err;
930
931 rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
932 GFP_KERNEL, srv_path->s.dev->ib_dev,
933 DMA_FROM_DEVICE, rtrs_srv_info_req_done);
934 if (!rx_iu)
935 return -ENOMEM;
936 /* Prepare for getting info response */
937 err = rtrs_iu_post_recv(&con->c, rx_iu);
938 if (err) {
939 rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
940 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
941 return err;
942 }
943
944 return 0;
945}
946
947static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
948{
949 int i, err;
950
951 for (i = 0; i < q_size; i++) {
952 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
953 if (err)
954 return err;
955 }
956
957 return 0;
958}
959
960static int post_recv_path(struct rtrs_srv_path *srv_path)
961{
962 struct rtrs_srv_sess *srv = srv_path->srv;
963 struct rtrs_path *s = &srv_path->s;
964 size_t q_size;
965 int err, cid;
966
967 for (cid = 0; cid < srv_path->s.con_num; cid++) {
968 if (cid == 0)
969 q_size = SERVICE_CON_QUEUE_DEPTH;
970 else
971 q_size = srv->queue_depth;
972
973 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
974 if (err) {
975 rtrs_err(s, "post_recv_io(), err: %d\n", err);
976 return err;
977 }
978 }
979
980 return 0;
981}
982
983static void process_read(struct rtrs_srv_con *con,
984 struct rtrs_msg_rdma_read *msg,
985 u32 buf_id, u32 off)
986{
987 struct rtrs_path *s = con->c.path;
988 struct rtrs_srv_path *srv_path = to_srv_path(s);
989 struct rtrs_srv_sess *srv = srv_path->srv;
990 struct rtrs_srv_ctx *ctx = srv->ctx;
991 struct rtrs_srv_op *id;
992
993 size_t usr_len, data_len;
994 void *data;
995 int ret;
996
997 if (srv_path->state != RTRS_SRV_CONNECTED) {
998 rtrs_err_rl(s,
999 "Processing read request failed, session is disconnected, sess state %s\n",
1000 rtrs_srv_state_str(srv_path->state));
1001 return;
1002 }
1003 if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
1004 rtrs_err_rl(s,
1005 "Processing read request failed, invalid message\n");
1006 return;
1007 }
1008 rtrs_srv_get_ops_ids(srv_path);
1009 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
1010 id = srv_path->ops_ids[buf_id];
1011 id->con = con;
1012 id->dir = READ;
1013 id->msg_id = buf_id;
1014 id->rd_msg = msg;
1015 usr_len = le16_to_cpu(msg->usr_len);
1016 data_len = off - usr_len;
1017 data = page_address(srv->chunks[buf_id]);
1018 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1019 data + data_len, usr_len);
1020
1021 if (ret) {
1022 rtrs_err_rl(s,
1023 "Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
1024 buf_id, ret);
1025 goto send_err_msg;
1026 }
1027
1028 return;
1029
1030send_err_msg:
1031 ret = send_io_resp_imm(con, id, ret);
1032 if (ret < 0) {
1033 rtrs_err_rl(s,
1034 "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
1035 buf_id, ret);
1036 close_path(srv_path);
1037 }
1038 rtrs_srv_put_ops_ids(srv_path);
1039}
1040
1041static void process_write(struct rtrs_srv_con *con,
1042 struct rtrs_msg_rdma_write *req,
1043 u32 buf_id, u32 off)
1044{
1045 struct rtrs_path *s = con->c.path;
1046 struct rtrs_srv_path *srv_path = to_srv_path(s);
1047 struct rtrs_srv_sess *srv = srv_path->srv;
1048 struct rtrs_srv_ctx *ctx = srv->ctx;
1049 struct rtrs_srv_op *id;
1050
1051 size_t data_len, usr_len;
1052 void *data;
1053 int ret;
1054
1055 if (srv_path->state != RTRS_SRV_CONNECTED) {
1056 rtrs_err_rl(s,
1057 "Processing write request failed, session is disconnected, sess state %s\n",
1058 rtrs_srv_state_str(srv_path->state));
1059 return;
1060 }
1061 rtrs_srv_get_ops_ids(srv_path);
1062 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
1063 id = srv_path->ops_ids[buf_id];
1064 id->con = con;
1065 id->dir = WRITE;
1066 id->msg_id = buf_id;
1067
1068 usr_len = le16_to_cpu(req->usr_len);
1069 data_len = off - usr_len;
1070 data = page_address(srv->chunks[buf_id]);
1071 ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
1072 data + data_len, usr_len);
1073 if (ret) {
1074 rtrs_err_rl(s,
1075 "Processing write request failed, user module callback reports err: %d\n",
1076 ret);
1077 goto send_err_msg;
1078 }
1079
1080 return;
1081
1082send_err_msg:
1083 ret = send_io_resp_imm(con, id, ret);
1084 if (ret < 0) {
1085 rtrs_err_rl(s,
1086 "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
1087 buf_id, ret);
1088 close_path(srv_path);
1089 }
1090 rtrs_srv_put_ops_ids(srv_path);
1091}
1092
1093static void process_io_req(struct rtrs_srv_con *con, void *msg,
1094 u32 id, u32 off)
1095{
1096 struct rtrs_path *s = con->c.path;
1097 struct rtrs_srv_path *srv_path = to_srv_path(s);
1098 struct rtrs_msg_rdma_hdr *hdr;
1099 unsigned int type;
1100
1101 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
1102 srv_path->dma_addr[id],
1103 max_chunk_size, DMA_BIDIRECTIONAL);
1104 hdr = msg;
1105 type = le16_to_cpu(hdr->type);
1106
1107 switch (type) {
1108 case RTRS_MSG_WRITE:
1109 process_write(con, msg, id, off);
1110 break;
1111 case RTRS_MSG_READ:
1112 process_read(con, msg, id, off);
1113 break;
1114 default:
1115 rtrs_err(s,
1116 "Processing I/O request failed, unknown message type received: 0x%02x\n",
1117 type);
1118 goto err;
1119 }
1120
1121 return;
1122
1123err:
1124 close_path(srv_path);
1125}
1126
1127static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1128{
1129 struct rtrs_srv_mr *mr =
1130 container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
1131 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1132 struct rtrs_path *s = con->c.path;
1133 struct rtrs_srv_path *srv_path = to_srv_path(s);
1134 struct rtrs_srv_sess *srv = srv_path->srv;
1135 u32 msg_id, off;
1136 void *data;
1137
1138 if (wc->status != IB_WC_SUCCESS) {
1139 rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
1140 ib_wc_status_msg(wc->status));
1141 close_path(srv_path);
1142 }
1143 msg_id = mr->msg_id;
1144 off = mr->msg_off;
1145 data = page_address(srv->chunks[msg_id]) + off;
1146 process_io_req(con, data, msg_id, off);
1147}
1148
1149static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
1150 struct rtrs_srv_mr *mr)
1151{
1152 struct ib_send_wr wr = {
1153 .opcode = IB_WR_LOCAL_INV,
1154 .wr_cqe = &mr->inv_cqe,
1155 .send_flags = IB_SEND_SIGNALED,
1156 .ex.invalidate_rkey = mr->mr->rkey,
1157 };
1158 mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
1159
1160 return ib_post_send(con->c.qp, &wr, NULL);
1161}
1162
1163static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
1164{
1165 spin_lock(&con->rsp_wr_wait_lock);
1166 while (!list_empty(&con->rsp_wr_wait_list)) {
1167 struct rtrs_srv_op *id;
1168 int ret;
1169
1170 id = list_entry(con->rsp_wr_wait_list.next,
1171 struct rtrs_srv_op, wait_list);
1172 list_del(&id->wait_list);
1173
1174 spin_unlock(&con->rsp_wr_wait_lock);
1175 ret = rtrs_srv_resp_rdma(id, id->status);
1176 spin_lock(&con->rsp_wr_wait_lock);
1177
1178 if (!ret) {
1179 list_add(&id->wait_list, &con->rsp_wr_wait_list);
1180 break;
1181 }
1182 }
1183 spin_unlock(&con->rsp_wr_wait_lock);
1184}
1185
1186static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
1187{
1188 struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
1189 struct rtrs_path *s = con->c.path;
1190 struct rtrs_srv_path *srv_path = to_srv_path(s);
1191 struct rtrs_srv_sess *srv = srv_path->srv;
1192 u32 imm_type, imm_payload;
1193 int err;
1194
1195 if (wc->status != IB_WC_SUCCESS) {
1196 if (wc->status != IB_WC_WR_FLUSH_ERR) {
1197 rtrs_err(s,
1198 "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
1199 ib_wc_status_msg(wc->status), wc->wr_cqe,
1200 wc->opcode, wc->vendor_err, wc->byte_len);
1201 close_path(srv_path);
1202 }
1203 return;
1204 }
1205
1206 switch (wc->opcode) {
1207 case IB_WC_RECV_RDMA_WITH_IMM:
1208 /*
1209 * post_recv() RDMA write completions of IO reqs (read/write)
1210 * and hb
1211 */
1212 if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
1213 return;
1214 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
1215 if (err) {
1216 rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
1217 close_path(srv_path);
1218 break;
1219 }
1220 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
1221 &imm_type, &imm_payload);
1222 if (imm_type == RTRS_IO_REQ_IMM) {
1223 u32 msg_id, off;
1224 void *data;
1225
1226 msg_id = imm_payload >> srv_path->mem_bits;
1227 off = imm_payload & ((1 << srv_path->mem_bits) - 1);
1228 if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
1229 rtrs_err(s, "Wrong msg_id %u, off %u\n",
1230 msg_id, off);
1231 close_path(srv_path);
1232 return;
1233 }
1234 if (always_invalidate) {
1235 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
1236
1237 mr->msg_off = off;
1238 mr->msg_id = msg_id;
1239 err = rtrs_srv_inv_rkey(con, mr);
1240 if (err) {
1241 rtrs_err(s, "rtrs_post_recv(), err: %d\n",
1242 err);
1243 close_path(srv_path);
1244 break;
1245 }
1246 } else {
1247 data = page_address(srv->chunks[msg_id]) + off;
1248 process_io_req(con, data, msg_id, off);
1249 }
1250 } else if (imm_type == RTRS_HB_MSG_IMM) {
1251 WARN_ON(con->c.cid);
1252 rtrs_send_hb_ack(&srv_path->s);
1253 } else if (imm_type == RTRS_HB_ACK_IMM) {
1254 WARN_ON(con->c.cid);
1255 srv_path->s.hb_missed_cnt = 0;
1256 } else {
1257 rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
1258 }
1259 break;
1260 case IB_WC_RDMA_WRITE:
1261 case IB_WC_SEND:
1262 /*
1263 * post_send() RDMA write completions of IO reqs (read/write)
1264 * and hb.
1265 */
1266 atomic_add(s->signal_interval, &con->c.sq_wr_avail);
1267
1268 if (!list_empty_careful(&con->rsp_wr_wait_list))
1269 rtrs_rdma_process_wr_wait_list(con);
1270
1271 break;
1272 default:
1273 rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
1274 return;
1275 }
1276}
1277
1278/**
1279 * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname.
1280 * @srv: Session
1281 * @pathname: Pathname buffer
1282 * @len: Length of sessname buffer
1283 */
1284int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
1285 size_t len)
1286{
1287 struct rtrs_srv_path *srv_path;
1288 int err = -ENOTCONN;
1289
1290 mutex_lock(&srv->paths_mutex);
1291 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1292 if (srv_path->state != RTRS_SRV_CONNECTED)
1293 continue;
1294 strscpy(pathname, srv_path->s.sessname,
1295 min_t(size_t, sizeof(srv_path->s.sessname), len));
1296 err = 0;
1297 break;
1298 }
1299 mutex_unlock(&srv->paths_mutex);
1300
1301 return err;
1302}
1303EXPORT_SYMBOL(rtrs_srv_get_path_name);
1304
1305/**
1306 * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
1307 * @srv: Session
1308 */
1309int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
1310{
1311 return srv->queue_depth;
1312}
1313EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
1314
1315static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
1316{
1317 struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
1318 int v;
1319
1320 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
1321 if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
1322 v = cpumask_first(&cq_affinity_mask);
1323 return v;
1324}
1325
1326static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
1327{
1328 srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
1329
1330 return srv_path->cur_cq_vector;
1331}
1332
1333static void rtrs_srv_dev_release(struct device *dev)
1334{
1335 struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
1336 dev);
1337
1338 kfree(srv);
1339}
1340
1341static void free_srv(struct rtrs_srv_sess *srv)
1342{
1343 int i;
1344
1345 WARN_ON(refcount_read(&srv->refcount));
1346 for (i = 0; i < srv->queue_depth; i++)
1347 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1348 kfree(srv->chunks);
1349 mutex_destroy(&srv->paths_mutex);
1350 mutex_destroy(&srv->paths_ev_mutex);
1351 /* last put to release the srv structure */
1352 put_device(&srv->dev);
1353}
1354
1355static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
1356 const uuid_t *paths_uuid,
1357 bool first_conn)
1358{
1359 struct rtrs_srv_sess *srv;
1360 int i;
1361
1362 mutex_lock(&ctx->srv_mutex);
1363 list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
1364 if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
1365 refcount_inc_not_zero(&srv->refcount)) {
1366 mutex_unlock(&ctx->srv_mutex);
1367 return srv;
1368 }
1369 }
1370 mutex_unlock(&ctx->srv_mutex);
1371 /*
1372 * If this request is not the first connection request from the
1373 * client for this session then fail and return error.
1374 */
1375 if (!first_conn) {
1376 pr_err_ratelimited("Error: Not the first connection request for this session\n");
1377 return ERR_PTR(-ENXIO);
1378 }
1379
1380 /* need to allocate a new srv */
1381 srv = kzalloc(sizeof(*srv), GFP_KERNEL);
1382 if (!srv)
1383 return ERR_PTR(-ENOMEM);
1384
1385 INIT_LIST_HEAD(&srv->paths_list);
1386 mutex_init(&srv->paths_mutex);
1387 mutex_init(&srv->paths_ev_mutex);
1388 uuid_copy(&srv->paths_uuid, paths_uuid);
1389 srv->queue_depth = sess_queue_depth;
1390 srv->ctx = ctx;
1391 device_initialize(&srv->dev);
1392 srv->dev.release = rtrs_srv_dev_release;
1393
1394 srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
1395 GFP_KERNEL);
1396 if (!srv->chunks)
1397 goto err_free_srv;
1398
1399 for (i = 0; i < srv->queue_depth; i++) {
1400 srv->chunks[i] = alloc_pages(GFP_KERNEL,
1401 get_order(max_chunk_size));
1402 if (!srv->chunks[i])
1403 goto err_free_chunks;
1404 }
1405 refcount_set(&srv->refcount, 1);
1406 mutex_lock(&ctx->srv_mutex);
1407 list_add(&srv->ctx_list, &ctx->srv_list);
1408 mutex_unlock(&ctx->srv_mutex);
1409
1410 return srv;
1411
1412err_free_chunks:
1413 while (i--)
1414 __free_pages(srv->chunks[i], get_order(max_chunk_size));
1415 kfree(srv->chunks);
1416
1417err_free_srv:
1418 kfree(srv);
1419 return ERR_PTR(-ENOMEM);
1420}
1421
1422static void put_srv(struct rtrs_srv_sess *srv)
1423{
1424 if (refcount_dec_and_test(&srv->refcount)) {
1425 struct rtrs_srv_ctx *ctx = srv->ctx;
1426
1427 WARN_ON(srv->dev.kobj.state_in_sysfs);
1428
1429 mutex_lock(&ctx->srv_mutex);
1430 list_del(&srv->ctx_list);
1431 mutex_unlock(&ctx->srv_mutex);
1432 free_srv(srv);
1433 }
1434}
1435
1436static void __add_path_to_srv(struct rtrs_srv_sess *srv,
1437 struct rtrs_srv_path *srv_path)
1438{
1439 list_add_tail(&srv_path->s.entry, &srv->paths_list);
1440 srv->paths_num++;
1441 WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
1442}
1443
1444static void del_path_from_srv(struct rtrs_srv_path *srv_path)
1445{
1446 struct rtrs_srv_sess *srv = srv_path->srv;
1447
1448 if (WARN_ON(!srv))
1449 return;
1450
1451 mutex_lock(&srv->paths_mutex);
1452 list_del(&srv_path->s.entry);
1453 WARN_ON(!srv->paths_num);
1454 srv->paths_num--;
1455 mutex_unlock(&srv->paths_mutex);
1456}
1457
1458/* return true if addresses are the same, error other wise */
1459static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
1460{
1461 switch (a->sa_family) {
1462 case AF_IB:
1463 return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
1464 &((struct sockaddr_ib *)b)->sib_addr,
1465 sizeof(struct ib_addr)) &&
1466 (b->sa_family == AF_IB);
1467 case AF_INET:
1468 return memcmp(&((struct sockaddr_in *)a)->sin_addr,
1469 &((struct sockaddr_in *)b)->sin_addr,
1470 sizeof(struct in_addr)) &&
1471 (b->sa_family == AF_INET);
1472 case AF_INET6:
1473 return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
1474 &((struct sockaddr_in6 *)b)->sin6_addr,
1475 sizeof(struct in6_addr)) &&
1476 (b->sa_family == AF_INET6);
1477 default:
1478 return -ENOENT;
1479 }
1480}
1481
1482static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
1483 struct rdma_addr *addr)
1484{
1485 struct rtrs_srv_path *srv_path;
1486
1487 list_for_each_entry(srv_path, &srv->paths_list, s.entry)
1488 if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
1489 (struct sockaddr *)&addr->dst_addr) &&
1490 !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
1491 (struct sockaddr *)&addr->src_addr))
1492 return true;
1493
1494 return false;
1495}
1496
1497static void free_path(struct rtrs_srv_path *srv_path)
1498{
1499 if (srv_path->kobj.state_in_sysfs) {
1500 kobject_del(&srv_path->kobj);
1501 kobject_put(&srv_path->kobj);
1502 } else {
1503 free_percpu(srv_path->stats->rdma_stats);
1504 kfree(srv_path->stats);
1505 kfree(srv_path);
1506 }
1507}
1508
1509static void rtrs_srv_close_work(struct work_struct *work)
1510{
1511 struct rtrs_srv_path *srv_path;
1512 struct rtrs_srv_con *con;
1513 int i;
1514
1515 srv_path = container_of(work, typeof(*srv_path), close_work);
1516
1517 rtrs_srv_destroy_path_files(srv_path);
1518 rtrs_srv_stop_hb(srv_path);
1519
1520 for (i = 0; i < srv_path->s.con_num; i++) {
1521 if (!srv_path->s.con[i])
1522 continue;
1523 con = to_srv_con(srv_path->s.con[i]);
1524 rdma_disconnect(con->c.cm_id);
1525 ib_drain_qp(con->c.qp);
1526 }
1527
1528 /*
1529 * Degrade ref count to the usual model with a single shared
1530 * atomic_t counter
1531 */
1532 percpu_ref_kill(&srv_path->ids_inflight_ref);
1533
1534 /* Wait for all completion */
1535 wait_for_completion(&srv_path->complete_done);
1536
1537 /* Notify upper layer if we are the last path */
1538 rtrs_srv_path_down(srv_path);
1539
1540 unmap_cont_bufs(srv_path);
1541 rtrs_srv_free_ops_ids(srv_path);
1542
1543 for (i = 0; i < srv_path->s.con_num; i++) {
1544 if (!srv_path->s.con[i])
1545 continue;
1546 con = to_srv_con(srv_path->s.con[i]);
1547 rtrs_cq_qp_destroy(&con->c);
1548 rdma_destroy_id(con->c.cm_id);
1549 kfree(con);
1550 }
1551 rtrs_ib_dev_put(srv_path->s.dev);
1552
1553 del_path_from_srv(srv_path);
1554 put_srv(srv_path->srv);
1555 srv_path->srv = NULL;
1556 rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
1557
1558 kfree(srv_path->dma_addr);
1559 kfree(srv_path->s.con);
1560 free_path(srv_path);
1561}
1562
1563static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
1564 struct rdma_cm_id *cm_id)
1565{
1566 struct rtrs_srv_sess *srv = srv_path->srv;
1567 struct rtrs_msg_conn_rsp msg;
1568 struct rdma_conn_param param;
1569 int err;
1570
1571 param = (struct rdma_conn_param) {
1572 .rnr_retry_count = 7,
1573 .private_data = &msg,
1574 .private_data_len = sizeof(msg),
1575 };
1576
1577 msg = (struct rtrs_msg_conn_rsp) {
1578 .magic = cpu_to_le16(RTRS_MAGIC),
1579 .version = cpu_to_le16(RTRS_PROTO_VER),
1580 .queue_depth = cpu_to_le16(srv->queue_depth),
1581 .max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
1582 .max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
1583 };
1584
1585 if (always_invalidate)
1586 msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
1587
1588 err = rdma_accept(cm_id, ¶m);
1589 if (err)
1590 pr_err("rdma_accept(), err: %d\n", err);
1591
1592 return err;
1593}
1594
1595static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
1596{
1597 struct rtrs_msg_conn_rsp msg;
1598 int err;
1599
1600 msg = (struct rtrs_msg_conn_rsp) {
1601 .magic = cpu_to_le16(RTRS_MAGIC),
1602 .version = cpu_to_le16(RTRS_PROTO_VER),
1603 .errno = cpu_to_le16(errno),
1604 };
1605
1606 err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
1607 if (err)
1608 pr_err("rdma_reject(), err: %d\n", err);
1609
1610 /* Bounce errno back */
1611 return errno;
1612}
1613
1614static struct rtrs_srv_path *
1615__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
1616{
1617 struct rtrs_srv_path *srv_path;
1618
1619 list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
1620 if (uuid_equal(&srv_path->s.uuid, sess_uuid))
1621 return srv_path;
1622 }
1623
1624 return NULL;
1625}
1626
1627static int create_con(struct rtrs_srv_path *srv_path,
1628 struct rdma_cm_id *cm_id,
1629 unsigned int cid)
1630{
1631 struct rtrs_srv_sess *srv = srv_path->srv;
1632 struct rtrs_path *s = &srv_path->s;
1633 struct rtrs_srv_con *con;
1634
1635 u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
1636 int err, cq_vector;
1637
1638 con = kzalloc(sizeof(*con), GFP_KERNEL);
1639 if (!con) {
1640 err = -ENOMEM;
1641 goto err;
1642 }
1643
1644 spin_lock_init(&con->rsp_wr_wait_lock);
1645 INIT_LIST_HEAD(&con->rsp_wr_wait_list);
1646 con->c.cm_id = cm_id;
1647 con->c.path = &srv_path->s;
1648 con->c.cid = cid;
1649 atomic_set(&con->c.wr_cnt, 1);
1650 wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
1651
1652 if (con->c.cid == 0) {
1653 /*
1654 * All receive and all send (each requiring invalidate)
1655 * + 2 for drain and heartbeat
1656 */
1657 max_send_wr = min_t(int, wr_limit,
1658 SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1659 max_recv_wr = max_send_wr;
1660 s->signal_interval = min_not_zero(srv->queue_depth,
1661 (size_t)SERVICE_CON_QUEUE_DEPTH);
1662 } else {
1663 /* when always_invlaidate enalbed, we need linv+rinv+mr+imm */
1664 if (always_invalidate)
1665 max_send_wr =
1666 min_t(int, wr_limit,
1667 srv->queue_depth * (1 + 4) + 1);
1668 else
1669 max_send_wr =
1670 min_t(int, wr_limit,
1671 srv->queue_depth * (1 + 2) + 1);
1672
1673 max_recv_wr = srv->queue_depth + 1;
1674 }
1675 cq_num = max_send_wr + max_recv_wr;
1676 atomic_set(&con->c.sq_wr_avail, max_send_wr);
1677 cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
1678
1679 /* TODO: SOFTIRQ can be faster, but be careful with softirq context */
1680 err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
1681 max_send_wr, max_recv_wr,
1682 IB_POLL_WORKQUEUE);
1683 if (err) {
1684 rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
1685 goto free_con;
1686 }
	if (con->c.cid == 0) {
		err = post_recv_info_req(con);
		if (err)
			goto free_cqqp;
	}
	WARN_ON(srv_path->s.con[cid]);
	srv_path->s.con[cid] = &con->c;

	/*
	 * Change context from server to current connection. The other
	 * way is to use cm_id->qp->qp_context, which does not work on OFED.
	 */
	cm_id->context = &con->c;

	return 0;

free_cqqp:
	rtrs_cq_qp_destroy(&con->c);
free_con:
	kfree(con);

err:
	return err;
}

static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
					  struct rdma_cm_id *cm_id,
					  unsigned int con_num,
					  unsigned int recon_cnt,
					  const uuid_t *uuid)
{
	struct rtrs_srv_path *srv_path;
	int err = -ENOMEM;
	char str[NAME_MAX];
	struct rtrs_addr path;

	if (srv->paths_num >= MAX_PATHS_NUM) {
		err = -ECONNRESET;
		goto err;
	}
	if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
		err = -EEXIST;
		pr_err("Path with same addr exists\n");
		goto err;
	}
	srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
	if (!srv_path)
		goto err;

	srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
	if (!srv_path->stats)
		goto err_free_sess;

	srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
	if (!srv_path->stats->rdma_stats)
		goto err_free_stats;

	srv_path->stats->srv_path = srv_path;

	srv_path->dma_addr = kcalloc(srv->queue_depth,
				     sizeof(*srv_path->dma_addr),
				     GFP_KERNEL);
	if (!srv_path->dma_addr)
		goto err_free_percpu;

	srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
				  GFP_KERNEL);
	if (!srv_path->s.con)
		goto err_free_dma_addr;

	srv_path->state = RTRS_SRV_CONNECTING;
	srv_path->srv = srv;
	srv_path->cur_cq_vector = -1;
	srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
	srv_path->s.src_addr = cm_id->route.addr.src_addr;

	/* temporary until receiving session-name from client */
	path.src = &srv_path->s.src_addr;
	path.dst = &srv_path->s.dst_addr;
	rtrs_addr_to_str(&path, str, sizeof(str));
	strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));

	srv_path->s.con_num = con_num;
	srv_path->s.irq_con_num = con_num;
	srv_path->s.recon_cnt = recon_cnt;
	uuid_copy(&srv_path->s.uuid, uuid);
	spin_lock_init(&srv_path->state_lock);
	INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
	rtrs_srv_init_hb(srv_path);

	srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
	if (!srv_path->s.dev) {
		err = -ENOMEM;
		goto err_free_con;
	}
	err = map_cont_bufs(srv_path);
	if (err)
		goto err_put_dev;

	err = rtrs_srv_alloc_ops_ids(srv_path);
	if (err)
		goto err_unmap_bufs;

	__add_path_to_srv(srv, srv_path);

	return srv_path;

err_unmap_bufs:
	unmap_cont_bufs(srv_path);
err_put_dev:
	rtrs_ib_dev_put(srv_path->s.dev);
err_free_con:
	kfree(srv_path->s.con);
err_free_dma_addr:
	kfree(srv_path->dma_addr);
err_free_percpu:
	free_percpu(srv_path->stats->rdma_stats);
err_free_stats:
	kfree(srv_path->stats);
err_free_sess:
	kfree(srv_path);
err:
	return ERR_PTR(err);
}

static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
			     const struct rtrs_msg_conn_req *msg,
			     size_t len)
{
	struct rtrs_srv_ctx *ctx = cm_id->context;
	struct rtrs_srv_path *srv_path;
	struct rtrs_srv_sess *srv;

	u16 version, con_num, cid;
	u16 recon_cnt;
	int err = -ECONNRESET;

	if (len < sizeof(*msg)) {
		pr_err("Invalid RTRS connection request\n");
		goto reject_w_err;
	}
	if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
		pr_err("Invalid RTRS magic\n");
		goto reject_w_err;
	}
	version = le16_to_cpu(msg->version);
	if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
		pr_err("Unsupported major RTRS version: %d, expected %d\n",
		       version >> 8, RTRS_PROTO_VER_MAJOR);
		goto reject_w_err;
	}
	con_num = le16_to_cpu(msg->cid_num);
	if (con_num > 4096) {
		/* Sanity check */
		pr_err("Too many connections requested: %d\n", con_num);
		goto reject_w_err;
	}
	cid = le16_to_cpu(msg->cid);
	if (cid >= con_num) {
		/* Sanity check */
		pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
		goto reject_w_err;
	}
	recon_cnt = le16_to_cpu(msg->recon_cnt);
	srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
	if (IS_ERR(srv)) {
		err = PTR_ERR(srv);
		pr_err("get_or_create_srv(), error %d\n", err);
		goto reject_w_err;
	}
	mutex_lock(&srv->paths_mutex);
	srv_path = __find_path(srv, &msg->sess_uuid);
	if (srv_path) {
		struct rtrs_path *s = &srv_path->s;

		/* Session already holds a reference */
		put_srv(srv);

		if (srv_path->state != RTRS_SRV_CONNECTING) {
			rtrs_err(s, "Session in wrong state: %s\n",
				 rtrs_srv_state_str(srv_path->state));
			mutex_unlock(&srv->paths_mutex);
			goto reject_w_err;
		}
		/*
		 * Sanity checks
		 */
		if (con_num != s->con_num || cid >= s->con_num) {
			rtrs_err(s, "Incorrect request: %d, %d\n",
				 cid, con_num);
			mutex_unlock(&srv->paths_mutex);
			goto reject_w_err;
		}
		if (s->con[cid]) {
			rtrs_err(s, "Connection already exists: %d\n",
				 cid);
			mutex_unlock(&srv->paths_mutex);
			goto reject_w_err;
		}
	} else {
		srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
					&msg->sess_uuid);
		if (IS_ERR(srv_path)) {
			mutex_unlock(&srv->paths_mutex);
			put_srv(srv);
			err = PTR_ERR(srv_path);
			pr_err("RTRS server session allocation failed: %d\n", err);
			goto reject_w_err;
		}
	}
	err = create_con(srv_path, cm_id, cid);
	if (err) {
		rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
		rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the session has other connections we follow the
		 * normal way through the workqueue, but still return an
		 * error to tell cma.c to call rdma_destroy_id() for the
		 * current connection.
		 */
		goto close_and_return_err;
	}
	err = rtrs_rdma_do_accept(srv_path, cm_id);
	if (err) {
		rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
		rtrs_rdma_do_reject(cm_id, err);
		/*
		 * Since the current connection was successfully added to the
		 * session we follow the normal way through the workqueue to
		 * close the session, thus return 0 to tell cma.c that we call
		 * rdma_destroy_id() ourselves.
		 */
		err = 0;
		goto close_and_return_err;
	}
	mutex_unlock(&srv->paths_mutex);

	return 0;

reject_w_err:
	return rtrs_rdma_do_reject(cm_id, err);

close_and_return_err:
	mutex_unlock(&srv->paths_mutex);
	close_path(srv_path);

	return err;
}

static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *ev)
{
	struct rtrs_srv_path *srv_path = NULL;
	struct rtrs_path *s = NULL;
	struct rtrs_con *c = NULL;

	if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
		/*
		 * In case of error cma.c will destroy cm_id,
		 * see cma_process_remove()
		 */
		return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
					 ev->param.conn.private_data_len);

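	/*
	 * All other events arrive on per-connection cm_ids, whose context
	 * was set to the rtrs_con in create_con().
	 */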
	c = cm_id->context;
	s = c->path;
	srv_path = to_srv_path(s);

	switch (ev->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Nothing here */
		break;
	case RDMA_CM_EVENT_REJECTED:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
			 rdma_event_msg(ev->event), ev->status);
		fallthrough;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		close_path(srv_path);
		break;
	default:
		pr_err("Ignoring unexpected CM event %s, err %d\n",
		       rdma_event_msg(ev->event), ev->status);
		break;
	}

	return 0;
}

static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
					   struct sockaddr *addr,
					   enum rdma_ucm_port_space ps)
{
	struct rdma_cm_id *cm_id;
	int ret;

	cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
			       ctx, ps, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		pr_err("Creating id for RDMA connection failed, err: %d\n",
		       ret);
		goto err_out;
	}
	ret = rdma_bind_addr(cm_id, addr);
	if (ret) {
		pr_err("Binding RDMA address failed, err: %d\n", ret);
		goto err_cm;
	}
	ret = rdma_listen(cm_id, 64);
	if (ret) {
		pr_err("Listening on RDMA connection failed, err: %d\n",
		       ret);
		goto err_cm;
	}

	return cm_id;

err_cm:
	rdma_destroy_id(cm_id);
err_out:

	return ERR_PTR(ret);
}

static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
{
	struct sockaddr_in6 sin = {
		.sin6_family = AF_INET6,
		.sin6_addr = IN6ADDR_ANY_INIT,
		.sin6_port = htons(port),
	};
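	/*
	 * For the native IB listener the service ID is formed by OR-ing the
	 * port number into the IB port-space prefix; the all-ones SID mask
	 * requests an exact match on that service ID.
	 */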
	struct sockaddr_ib sib = {
		.sib_family = AF_IB,
		.sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
		.sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
		.sib_pkey = cpu_to_be16(0xffff),
	};
	struct rdma_cm_id *cm_ip, *cm_ib;
	int ret;

	/*
	 * We accept both IPoIB and IB connections, so we need to keep
	 * two cm id's, one for each socket type and port space.
	 * If the cm initialization of one of the id's fails, we abort
	 * everything.
	 */
	cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
	if (IS_ERR(cm_ip))
		return PTR_ERR(cm_ip);

	cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
	if (IS_ERR(cm_ib)) {
		ret = PTR_ERR(cm_ib);
		goto free_cm_ip;
	}

	ctx->cm_id_ip = cm_ip;
	ctx->cm_id_ib = cm_ib;

	return 0;

free_cm_ip:
	rdma_destroy_id(cm_ip);

	return ret;
}

static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
{
	struct rtrs_srv_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->ops = *ops;
	mutex_init(&ctx->srv_mutex);
	INIT_LIST_HEAD(&ctx->srv_list);

	return ctx;
}

static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
{
	WARN_ON(!list_empty(&ctx->srv_list));
	mutex_destroy(&ctx->srv_mutex);
	kfree(ctx);
}

static int rtrs_srv_add_one(struct ib_device *device)
{
	struct rtrs_srv_ctx *ctx;
	int ret = 0;

	mutex_lock(&ib_ctx.ib_dev_mutex);
	if (ib_ctx.ib_dev_count)
		goto out;

	/*
	 * Since our CM IDs are NOT bound to any ib device, we create them
	 * only once.
	 */
	ctx = ib_ctx.srv_ctx;
	ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
	if (ret) {
		/*
		 * If we error out here the ib core ignores the error code
		 * and makes no further calls to our ops.
		 */
		pr_err("Failed to initialize RDMA connection\n");
		goto err_out;
	}

out:
	/* Keep track of the number of ib devices added. */
	ib_ctx.ib_dev_count++;

err_out:
	mutex_unlock(&ib_ctx.ib_dev_mutex);
	return ret;
}

static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
{
	struct rtrs_srv_ctx *ctx;

	mutex_lock(&ib_ctx.ib_dev_mutex);
	ib_ctx.ib_dev_count--;

	if (ib_ctx.ib_dev_count)
		goto out;

	/*
	 * Since our CM IDs are NOT bound to any ib device we will remove them
	 * only once, when the last device is removed
	 */
	ctx = ib_ctx.srv_ctx;
	rdma_destroy_id(ctx->cm_id_ip);
	rdma_destroy_id(ctx->cm_id_ib);

out:
	mutex_unlock(&ib_ctx.ib_dev_mutex);
}

static struct ib_client rtrs_srv_client = {
	.name = "rtrs_server",
	.add = rtrs_srv_add_one,
	.remove = rtrs_srv_remove_one
};

/**
 * rtrs_srv_open() - open RTRS server context
 * @ops: callback functions
 * @port: port to listen on
 *
 * Creates server context with specified callbacks.
 *
 * Return: a valid pointer on success, an ERR_PTR otherwise.
 */
struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
{
	struct rtrs_srv_ctx *ctx;
	int err;

	ctx = alloc_srv_ctx(ops);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ib_ctx.ib_dev_mutex);
	ib_ctx.srv_ctx = ctx;
	ib_ctx.port = port;

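	/*
	 * The CM listeners themselves are created lazily from the client's
	 * ->add() callback, once the first RDMA device is registered.
	 */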
	err = ib_register_client(&rtrs_srv_client);
	if (err) {
		free_srv_ctx(ctx);
		return ERR_PTR(err);
	}

	return ctx;
}
EXPORT_SYMBOL(rtrs_srv_open);
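
/*
 * Typical usage from an upper-layer driver (a sketch only; the handler
 * names below are hypothetical, the members are the rdma_ev/link_ev
 * callbacks declared for struct rtrs_srv_ops in rtrs.h):
 *
 *	static struct rtrs_srv_ops srv_ops = {
 *		.rdma_ev = my_rdma_ev,
 *		.link_ev = my_link_ev,
 *	};
 *	struct rtrs_srv_ctx *ctx;
 *
 *	ctx = rtrs_srv_open(&srv_ops, port);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	rtrs_srv_close(ctx);
 */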

static void close_paths(struct rtrs_srv_sess *srv)
{
	struct rtrs_srv_path *srv_path;

	mutex_lock(&srv->paths_mutex);
	list_for_each_entry(srv_path, &srv->paths_list, s.entry)
		close_path(srv_path);
	mutex_unlock(&srv->paths_mutex);
}

static void close_ctx(struct rtrs_srv_ctx *ctx)
{
	struct rtrs_srv_sess *srv;

	mutex_lock(&ctx->srv_mutex);
	list_for_each_entry(srv, &ctx->srv_list, ctx_list)
		close_paths(srv);
	mutex_unlock(&ctx->srv_mutex);
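	/* Wait for the queued path close works to finish tearing paths down. */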
	flush_workqueue(rtrs_wq);
}

/**
 * rtrs_srv_close() - close RTRS server context
 * @ctx: pointer to server context
 *
 * Closes RTRS server context with all client sessions.
 */
void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
{
	ib_unregister_client(&rtrs_srv_client);
	mutex_destroy(&ib_ctx.ib_dev_mutex);
	close_ctx(ctx);
	free_srv_ctx(ctx);
}
EXPORT_SYMBOL(rtrs_srv_close);

static int check_module_params(void)
{
	if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
		pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
		       sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
		return -EINVAL;
	}
	if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
		pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
		       max_chunk_size, MIN_CHUNK_SIZE);
		return -EINVAL;
	}

	/*
	 * Check if IB immediate data size is enough to hold the mem_id and the
	 * offset inside the memory chunk
	 */
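	/*
	 * With the defaults (sess_queue_depth = 512, max_chunk_size = 128 KiB)
	 * this amounts to (ilog2(511) + 1) + (ilog2(131071) + 1) = 9 + 17 =
	 * 26 bits of immediate payload.
	 */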
	if ((ilog2(sess_queue_depth - 1) + 1) +
	    (ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
		pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
		       MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
		return -EINVAL;
	}

	return 0;
}

static int __init rtrs_server_init(void)
{
	int err;

	pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld), sess_queue_depth: %d, always_invalidate: %d)\n",
		KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
		max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
		sess_queue_depth, always_invalidate);

	rtrs_rdma_dev_pd_init(0, &dev_pd);

	err = check_module_params();
	if (err) {
		pr_err("Failed to load module, invalid module parameters, err: %d\n",
		       err);
		return err;
	}
	err = class_register(&rtrs_dev_class);
	if (err)
		goto out_err;

	rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
	if (!rtrs_wq) {
		err = -ENOMEM;
		goto out_dev_class;
	}

	return 0;

out_dev_class:
	class_unregister(&rtrs_dev_class);
out_err:
	return err;
}

static void __exit rtrs_server_exit(void)
{
	destroy_workqueue(rtrs_wq);
	class_unregister(&rtrs_dev_class);
	rtrs_rdma_dev_pd_deinit(&dev_pd);
}

module_init(rtrs_server_init);
module_exit(rtrs_server_exit);