// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

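/*
 * Program RX steering for the vPort backing an RSS QP: send a
 * MANA_CONFIG_VPORT_RX request carrying the default RX object, the
 * indirection table and the Toeplitz hash key to the hardware.
 */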
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
				      struct net_device *ndev,
				      mana_handle_t default_rxobj,
				      mana_handle_t ind_table[],
				      u32 log_ind_tbl_size, u32 rx_hash_key_len,
				      u8 *rx_hash_key)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct gdma_context *gc;
	u32 req_buf_size;
	int i, err;

	gc = mdev_to_gc(dev);

	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE);
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->hdr.req.msg_version = GDMA_MESSAGE_V2;

	req->vport = mpc->port_handle;
	req->rx_enable = 1;
	req->update_default_rxobj = 1;
	req->default_rxobj = default_rxobj;
	req->hdr.dev_id = gc->mana.dev_id;

	/* If there is more than one entry in the indirection table, enable RSS */
	if (log_ind_tbl_size)
		req->rss_enable = true;

	req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE;
	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
					 indir_tab);
	req->update_indir_tab = true;
	req->cqe_coalescing_enable = 1;

	/* The ind table passed to the hardware must have
	 * MANA_INDIRECT_TABLE_DEF_SIZE entries. Adjust the verb
	 * ind_table to MANA_INDIRECT_TABLE_DEF_SIZE if required
	 */
	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
	for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) {
		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
			  req->indir_tab[i]);
	}

	req->update_hashkey = true;
	if (rx_hash_key_len)
		memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
	else
		netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

	ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
		  req->vport, default_rxobj);

	err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
		    mpc->port_handle, log_ind_tbl_size);

out:
	kfree(req);
	return err;
}

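/*
 * Create an RSS "QP": each WQ in the user-supplied indirection table is
 * turned into a hardware RX object, its CQ is hooked into the CQ table,
 * and vPort steering is then configured over the resulting RX objects.
 */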
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(pd->device, struct mana_ib_dev, ib_dev);
	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
	struct mana_ib_create_qp_rss_resp resp = {};
	struct mana_ib_create_qp_rss ucmd = {};
	mana_handle_t *mana_ind_table;
	struct mana_port_context *mpc;
	unsigned int ind_tbl_size;
	struct net_device *ndev;
	struct mana_ib_cq *cq;
	struct mana_ib_wq *wq;
	struct mana_eq *eq;
	struct ib_cq *ibcq;
	struct ib_wq *ibwq;
	int i = 0;
	u32 port;
	int ret;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy from udata for create rss-qp, err %d\n",
			  ret);
		return ret;
	}

	if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_wr %d exceeding limit\n",
			  attr->cap.max_recv_wr);
		return -EINVAL;
	}

	if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_recv_sge %d exceeding limit\n",
			  attr->cap.max_recv_sge);
		return -EINVAL;
	}

	ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
	if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) {
		ibdev_dbg(&mdev->ib_dev,
			  "Indirect table size %d exceeding limit\n",
			  ind_tbl_size);
		return -EINVAL;
	}

	if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
		ibdev_dbg(&mdev->ib_dev,
			  "RX Hash function is not supported, %d\n",
			  ucmd.rx_hash_function);
		return -EINVAL;
	}

	/* IB ports start with 1, MANA starts with 0 */
	port = ucmd.port;
	ndev = mana_ib_get_netdev(pd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);

	ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
		  ucmd.rx_hash_function, port);

	mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
				 GFP_KERNEL);
	if (!mana_ind_table) {
		ret = -ENOMEM;
		goto fail;
	}

	qp->port = port;

	for (i = 0; i < ind_tbl_size; i++) {
		struct mana_obj_spec wq_spec = {};
		struct mana_obj_spec cq_spec = {};

		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);

		ibcq = ibwq->cq;
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		wq_spec.gdma_region = wq->queue.gdma_region;
		wq_spec.queue_size = wq->wq_buf_size;

		cq_spec.gdma_region = cq->queue.gdma_region;
		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
		cq_spec.modr_ctx_id = 0;
		eq = &mpc->ac->eqs[cq->comp_vector];
		cq_spec.attached_eq = eq->eq->id;

		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
					 &wq_spec, &cq_spec, &wq->rx_object);
		if (ret) {
			/* Do cleanup starting with index i-1 */
			i--;
			goto fail;
		}

		/* The GDMA regions are now owned by the WQ object */
		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

		wq->queue.id = wq_spec.queue_index;
		cq->queue.id = cq_spec.queue_index;

		ibdev_dbg(&mdev->ib_dev,
			  "rx_object 0x%llx wq id %llu cq id %llu\n",
			  wq->rx_object, wq->queue.id, cq->queue.id);

		resp.entries[i].cqid = cq->queue.id;
		resp.entries[i].wqid = wq->queue.id;

		mana_ind_table[i] = wq->rx_object;

		/* Create CQ table entry */
		ret = mana_ib_install_cq_cb(mdev, cq);
		if (ret)
			goto fail;
	}
	resp.num_entries = i;

	ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
					 mana_ind_table,
					 ind_tbl->log_ind_tbl_size,
					 ucmd.rx_hash_key_len,
					 ucmd.rx_hash_key);
	if (ret)
		goto fail;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy to udata create rss-qp, %d\n",
			  ret);
		goto fail;
	}

	kfree(mana_ind_table);

	return 0;

fail:
	while (i-- > 0) {
		ibwq = ind_tbl->ind_tbl[i];
		ibcq = ibwq->cq;
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		cq = container_of(ibcq, struct mana_ib_cq, ibcq);

		mana_ib_remove_cq_cb(mdev, cq);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	kfree(mana_ind_table);

	return ret;
}

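/*
 * Create a raw-packet send QP: configure the vPort for the user doorbell,
 * build the SQ from the user-provided buffer, and attach it together with
 * the send CQ to a hardware SQ object on the Ethernet port handle.
 */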
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
				 struct ib_qp_init_attr *attr,
				 struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_dev *mdev =
		container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_cq *send_cq =
		container_of(attr->send_cq, struct mana_ib_cq, ibcq);
	struct mana_ib_ucontext *mana_ucontext =
		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
					  ibucontext);
	struct mana_ib_create_qp_resp resp = {};
	struct mana_ib_create_qp ucmd = {};
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_eq *eq;
	int eq_vec;
	u32 port;
	int err;

	if (!mana_ucontext || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to copy from udata create qp-raw, %d\n", err);
		return err;
	}

	if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_wr %d exceeding limit\n",
			  attr->cap.max_send_wr);
		return -EINVAL;
	}

	if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
		ibdev_dbg(&mdev->ib_dev,
			  "Requested max_send_sge %d exceeding limit\n",
			  attr->cap.max_send_sge);
		return -EINVAL;
	}

	port = ucmd.port;
	ndev = mana_ib_get_netdev(ibpd->device, port);
	if (!ndev) {
		ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
			  port);
		return -EINVAL;
	}
	mpc = netdev_priv(ndev);
	ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

	err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
	if (err)
		return -ENODEV;

	qp->port = port;

	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
		  ucmd.sq_buf_addr, ucmd.port);

	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create queue for create qp-raw, err %d\n", err);
		goto err_free_vport;
	}

	/* Create a WQ on the same port handle used by the Ethernet */
	wq_spec.gdma_region = qp->raw_sq.gdma_region;
	wq_spec.queue_size = ucmd.sq_buf_size;

	cq_spec.gdma_region = send_cq->queue.gdma_region;
	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
	cq_spec.modr_ctx_id = 0;
	eq_vec = send_cq->comp_vector;
	eq = &mpc->ac->eqs[eq_vec];
	cq_spec.attached_eq = eq->eq->id;

	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
				 &cq_spec, &qp->qp_handle);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to create wq for create raw-qp, err %d\n",
			  err);
		goto err_destroy_queue;
	}

	/* The GDMA regions are now owned by the WQ object */
	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;

	qp->raw_sq.id = wq_spec.queue_index;
	send_cq->queue.id = cq_spec.queue_index;

	/* Create CQ table entry */
	err = mana_ib_install_cq_cb(mdev, send_cq);
	if (err)
		goto err_destroy_wq_obj;

	ibdev_dbg(&mdev->ib_dev,
		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);

	resp.sqid = qp->raw_sq.id;
	resp.cqid = send_cq->queue.id;
	resp.tx_vp_offset = pd->tx_vp_offset;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed copy udata for create qp-raw, %d\n",
			  err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, send_cq);

err_destroy_wq_obj:
	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

err_destroy_queue:
	mana_ib_destroy_queue(mdev, &qp->raw_sq);

err_free_vport:
	mana_ib_uncfg_vport(mdev, pd, port);

	return err;
}

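/*
 * RC QPs are tracked in the qp_table_wq xarray, keyed by QP number.
 * Removal drops the initial reference and waits for qp->free, so the QP
 * cannot go away while another path still holds a reference.
 */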
static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
{
	refcount_set(&qp->refcount, 1);
	init_completion(&qp->free);
	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
			     GFP_KERNEL);
}

static void mana_table_remove_qp(struct mana_ib_dev *mdev,
				 struct mana_ib_qp *qp)
{
	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
	mana_put_qp_ref(qp);
	wait_for_completion(&qp->free);
}

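/*
 * Create an RC QP: allocate the per-queue-type buffers provided by user
 * space (the FMR send queue is skipped for user-level QPs), issue the GDMA
 * create request, and publish the QP in the QP table.
 */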
static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
				struct ib_qp_init_attr *attr, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_ib_create_rc_qp_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct mana_ib_create_rc_qp ucmd = {};
	int i, err, j;
	u64 flags = 0;
	u32 doorbell;

	if (!udata || udata->inlen < sizeof(ucmd))
		return -EINVAL;

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext, ibucontext);
	doorbell = mana_ucontext->doorbell;
	flags = MANA_RC_FLAG_NO_FMR;
	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy from udata, %d\n", err);
		return err;
	}

	for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
		/* skip FMR for user-level RC QPs */
		if (i == MANA_RC_SEND_QUEUE_FMR) {
			qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;
			qp->rc_qp.queues[i].gdma_region = GDMA_INVALID_DMA_REGION;
			continue;
		}
		err = mana_ib_create_queue(mdev, ucmd.queue_buf[j], ucmd.queue_size[j],
					   &qp->rc_qp.queues[i]);
		if (err) {
			ibdev_err(&mdev->ib_dev, "Failed to create queue %d, err %d\n", i, err);
			goto destroy_queues;
		}
		j++;
	}

	err = mana_ib_gd_create_rc_qp(mdev, qp, attr, doorbell, flags);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to create rc qp %d\n", err);
		goto destroy_queues;
	}
	qp->ibqp.qp_num = qp->rc_qp.queues[MANA_RC_RECV_QUEUE_RESPONDER].id;
	qp->port = attr->port_num;

	if (udata) {
		for (i = 0, j = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i) {
			if (i == MANA_RC_SEND_QUEUE_FMR)
				continue;
			resp.queue_id[j] = qp->rc_qp.queues[i].id;
			j++;
		}
		err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
			goto destroy_qp;
		}
	}

	err = mana_table_store_qp(mdev, qp);
	if (err)
		goto destroy_qp;

	return 0;

destroy_qp:
	mana_ib_gd_destroy_rc_qp(mdev, qp);
destroy_queues:
	while (i-- > 0)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);
	return err;
}

int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
		      struct ib_udata *udata)
{
	switch (attr->qp_type) {
	case IB_QPT_RAW_PACKET:
		/* When rwq_ind_tbl is used, it's for creating WQs for RSS */
		if (attr->rwq_ind_tbl)
			return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
						     udata);

		return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
	case IB_QPT_RC:
		return mana_ib_create_rc_qp(ibqp, ibqp->pd, attr, udata);
	default:
		ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
			  attr->qp_type);
	}

	return -EINVAL;
}

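/*
 * Translate ib_qp_attr into a MANA_IB_SET_QP_STATE request and send it to
 * the adapter. For IB_QP_AV, the MAC and GID fields are byte-reversed with
 * copy_in_reverse() before being placed in the request.
 */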
static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev = container_of(ibqp->device, struct mana_ib_dev, ib_dev);
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
	struct mana_rnic_set_qp_state_resp resp = {};
	struct mana_rnic_set_qp_state_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
	req.hdr.dev_id = gc->mana_ib.dev_id;
	req.adapter = mdev->adapter_handle;
	req.qp_handle = qp->qp_handle;
	req.qp_state = attr->qp_state;
	req.attr_mask = attr_mask;
	req.path_mtu = attr->path_mtu;
	req.rq_psn = attr->rq_psn;
	req.sq_psn = attr->sq_psn;
	req.dest_qpn = attr->dest_qp_num;
	req.max_dest_rd_atomic = attr->max_dest_rd_atomic;
	req.retry_cnt = attr->retry_cnt;
	req.rnr_retry = attr->rnr_retry;
	req.min_rnr_timer = attr->min_rnr_timer;
	if (attr_mask & IB_QP_AV) {
		ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
		if (!ndev) {
			ibdev_dbg(&mdev->ib_dev, "Invalid port %u in QP %u\n",
				  ibqp->port, ibqp->qp_num);
			return -EINVAL;
		}
		mpc = netdev_priv(ndev);
		copy_in_reverse(req.ah_attr.src_mac, mpc->mac_addr, ETH_ALEN);
		copy_in_reverse(req.ah_attr.dest_mac, attr->ah_attr.roce.dmac, ETH_ALEN);
		copy_in_reverse(req.ah_attr.src_addr, attr->ah_attr.grh.sgid_attr->gid.raw,
				sizeof(union ib_gid));
		copy_in_reverse(req.ah_attr.dest_addr, attr->ah_attr.grh.dgid.raw,
				sizeof(union ib_gid));
		if (rdma_gid_attr_network_type(attr->ah_attr.grh.sgid_attr) == RDMA_NETWORK_IPV4) {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV4;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV4;
		} else {
			req.ah_attr.src_addr_type = SGID_TYPE_IPV6;
			req.ah_attr.dest_addr_type = SGID_TYPE_IPV6;
		}
		req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
		req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
							  ibqp->qp_num, attr->dest_qp_num);
		req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
		req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d", err);
		return err;
	}

	return 0;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	switch (ibqp->qp_type) {
	case IB_QPT_RC:
		return mana_ib_gd_modify_qp(ibqp, attr, attr_mask, udata);
	default:
		ibdev_dbg(ibqp->device, "Modify QP type %u not supported", ibqp->qp_type);
		return -EOPNOTSUPP;
	}
}

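/* Tear down the RX objects created for each WQ in the indirection table. */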
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
				  struct ib_rwq_ind_table *ind_tbl,
				  struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_wq *wq;
	struct ib_wq *ibwq;
	int i;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);

	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
		ibwq = ind_tbl->ind_tbl[i];
		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
		ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
			  wq->rx_object);
		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
	}

	return 0;
}

static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	struct ib_pd *ibpd = qp->ibqp.pd;
	struct mana_port_context *mpc;
	struct net_device *ndev;
	struct mana_ib_pd *pd;

	ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
	mpc = netdev_priv(ndev);
	pd = container_of(ibpd, struct mana_ib_pd, ibpd);

	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);

	mana_ib_destroy_queue(mdev, &qp->raw_sq);

	mana_ib_uncfg_vport(mdev, pd, qp->port);

	return 0;
}

static int mana_ib_destroy_rc_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
{
	struct mana_ib_dev *mdev =
		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
	int i;

	mana_table_remove_qp(mdev, qp);

	/* Ignore return code as there is not much we can do about it.
	 * The error message is printed inside.
	 */
	mana_ib_gd_destroy_rc_qp(mdev, qp);
	for (i = 0; i < MANA_RC_QUEUE_TYPE_MAX; ++i)
		mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);

	return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

	switch (ibqp->qp_type) {
	case IB_QPT_RAW_PACKET:
		if (ibqp->rwq_ind_tbl)
			return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
						      udata);

		return mana_ib_destroy_qp_raw(qp, udata);
	case IB_QPT_RC:
		return mana_ib_destroy_rc_qp(qp, udata);
	default:
		ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
			  ibqp->qp_type);
	}

	return -ENOENT;
}