// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

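/**
 * mana_ib_create_cq() - Create a completion queue backed by a user buffer.
 * @ibcq: uverbs-allocated CQ to initialize
 * @attr: CQ attributes (depth in CQEs, completion vector)
 * @udata: user command carrying the CQ buffer address
 *
 * Pins the user-space CQ buffer and registers it as a GDMA DMA region.
 * The hardware queue ID is not assigned here; it is filled in later at
 * create_qp time, so cq->id starts out as INVALID_QUEUE_ID.
 */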
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

	if (udata->inlen < sizeof(ucmd))
		return -EINVAL;

	if (attr->comp_vector > gc->max_num_queues)
		return -EINVAL;

	cq->comp_vector = attr->comp_vector;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

	cq->cqe = attr->cqe;
	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
			       IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->umem)) {
		err = PTR_ERR(cq->umem);
		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
			  err);
		return err;
	}

	err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to create dma region for create cq, %d\n",
			  err);
		goto err_release_umem;
	}

	ibdev_dbg(ibdev,
		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
		  err, cq->gdma_region);

	/*
	 * The CQ ID is not known at this time. The ID is generated at create_qp
	 */
	cq->id = INVALID_QUEUE_ID;

	return 0;

err_release_umem:
	ib_umem_release(cq->umem);
	return err;
}

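/**
 * mana_ib_destroy_cq() - Tear down a completion queue.
 * @ibcq: CQ being destroyed
 * @udata: unused user data
 *
 * Destroys the GDMA DMA region, drops the gdma_context cq_table entry if a
 * queue ID was ever assigned, and releases the pinned user memory.
 */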
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;
	struct gdma_context *gc;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
	gc = mdev->gdma_dev->gdma_context;

	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to destroy dma region, %d\n", err);
		return err;
	}

	if (cq->id != INVALID_QUEUE_ID) {
		kfree(gc->cq_table[cq->id]);
		gc->cq_table[cq->id] = NULL;
	}

	ib_umem_release(cq->umem);

	return 0;
}

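/**
 * mana_ib_cq_handler() - GDMA completion callback.
 * @ctx: the mana_ib_cq registered in the cq_table
 * @gdma_cq: hardware queue raising the completion (unused)
 *
 * Forwards hardware completions to the consumer's comp_handler, if one is
 * armed on the ib_cq.
 */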
void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

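/**
 * mana_ib_create_cq() - Create a completion queue on a user-provided buffer.
 * @ibcq: uverbs-allocated CQ to initialize
 * @attr: CQ attributes (depth in CQEs, completion vector)
 * @attrs: uverbs attribute bundle carrying the driver udata
 *
 * Builds the backing queue from the user buffer. When userspace sets
 * MANA_IB_CREATE_RNIC_CQ, the CQ is also created on the RNIC adapter and a
 * completion callback is installed; the resulting queue ID is reported back
 * through the uverbs response.
 */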
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct mana_ib_create_cq_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	bool is_rnic_cq;
	u32 doorbell;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
	cq->cq_handle = INVALID_MANA_HANDLE;

	if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);

	if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

	cq->cqe = attr->cqe;
	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
		return err;
	}

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
						  ibucontext);
	doorbell = mana_ucontext->doorbell;

	if (is_rnic_cq) {
		err = mana_ib_gd_create_cq(mdev, cq, doorbell);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
			goto err_destroy_queue;
		}

		err = mana_ib_install_cq_cb(mdev, cq);
		if (err) {
			ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
			goto err_destroy_rnic_cq;
		}
	}

	resp.cqid = cq->queue.id;
	err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
	mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
	mana_ib_destroy_queue(mdev, &cq->queue);

	return err;
}

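/**
 * mana_ib_destroy_cq() - Tear down a completion queue.
 * @ibcq: CQ being destroyed
 * @udata: unused user data
 *
 * Removes the completion callback, destroys the RNIC CQ (errors are logged
 * by the callee and ignored), and frees the backing queue.
 */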
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	mana_ib_remove_cq_cb(mdev, cq);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_cq(mdev, cq);

	mana_ib_destroy_queue(mdev, &cq->queue);

	return 0;
}

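/* GDMA completion callback: forward completions to the consumer's handler. */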
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

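/**
 * mana_ib_install_cq_cb() - Register the CQ in the GDMA cq_table.
 * @mdev: mana IB device
 * @cq: CQ whose queue ID has been assigned
 *
 * Allocates a gdma_queue shadow entry keyed by the queue ID so that
 * completions for this CQ can be dispatched to mana_ib_cq_handler().
 */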
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue *gdma_cq;

	if (cq->queue.id >= gc->max_num_cqs)
		return -EINVAL;
	/* Create CQ table entry */
	WARN_ON(gc->cq_table[cq->queue.id]);
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->queue.id;
	gc->cq_table[cq->queue.id] = gdma_cq;
	return 0;
}

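/**
 * mana_ib_remove_cq_cb() - Drop the CQ's gdma_context cq_table entry, if any.
 * @mdev: mana IB device
 * @cq: CQ being torn down
 */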
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);

	if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
		return;

	kfree(gc->cq_table[cq->queue.id]);
	gc->cq_table[cq->queue.id] = NULL;
}