// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

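/*
 * mana_ib_cfg_vport_steering() - Configure RX steering for a vPort.
 *
 * Issues a MANA_CONFIG_VPORT_RX request that sets the default RX object,
 * the hardware indirection table and the RSS hash key. The verb-supplied
 * table is replicated to fill the MANA_INDIRECT_TABLE_SIZE entries the
 * hardware expects.
 */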
static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
                                      struct net_device *ndev,
                                      mana_handle_t default_rxobj,
                                      mana_handle_t ind_table[],
                                      u32 log_ind_tbl_size, u32 rx_hash_key_len,
                                      u8 *rx_hash_key)
{
        struct mana_port_context *mpc = netdev_priv(ndev);
        struct mana_cfg_rx_steer_req_v2 *req;
        struct mana_cfg_rx_steer_resp resp = {};
        mana_handle_t *req_indir_tab;
        struct gdma_context *gc;
        struct gdma_dev *mdev;
        u32 req_buf_size;
        int i, err;

        gc = dev->gdma_dev->gdma_context;
        mdev = &gc->mana;

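        /*
         * The indirection table travels inline, immediately after the fixed
         * request header, so the buffer must be sized for both.
         */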
        req_buf_size =
                sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
        req = kzalloc(req_buf_size, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
                             sizeof(resp));

        req->hdr.req.msg_version = GDMA_MESSAGE_V2;

        req->vport = mpc->port_handle;
        req->rx_enable = 1;
        req->update_default_rxobj = 1;
        req->default_rxobj = default_rxobj;
        req->hdr.dev_id = mdev->dev_id;

        /* If there is more than one entry in the indirection table, enable RSS */
        if (log_ind_tbl_size)
                req->rss_enable = true;

        req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
        req->indir_tab_offset = sizeof(*req);
        req->update_indir_tab = true;
        req->cqe_coalescing_enable = 1;

        req_indir_tab = (mana_handle_t *)(req + 1);
        /* The indirection table passed to the hardware must have
         * MANA_INDIRECT_TABLE_SIZE entries; repeat the verb-supplied
         * ind_table to fill it if required.
         */
        ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
        for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
                req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
                ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
                          req_indir_tab[i]);
        }

        req->update_hashkey = true;
        if (rx_hash_key_len)
                memcpy(req->hashkey, rx_hash_key, rx_hash_key_len);
        else
                netdev_rss_key_fill(req->hashkey, MANA_HASH_KEY_SIZE);

        ibdev_dbg(&dev->ib_dev, "vport handle %llu default_rxobj 0x%llx\n",
                  req->vport, default_rxobj);

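        /*
         * Post the request on the GDMA management channel; a non-zero
         * resp.hdr.status means the device rejected the configuration even
         * though the request itself was delivered.
         */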
        err = mana_gd_send_request(gc, req_buf_size, req, sizeof(resp), &resp);
        if (err) {
                netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
                goto out;
        }

        if (resp.hdr.status) {
                netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
                           resp.hdr.status);
                err = -EPROTO;
                goto out;
        }

        netdev_info(ndev, "Configured steering vPort %llu log_entries %u\n",
                    mpc->port_handle, log_ind_tbl_size);

out:
        kfree(req);
        return err;
}

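/*
 * mana_ib_create_qp_rss() - Create a raw-packet QP that receives through an
 * RSS indirection table.
 *
 * Every WQ in the table gets its own hardware RQ object bound to the CQ it
 * was created with; vPort steering is then pointed at the resulting RX
 * objects.
 */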
static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
                                 struct ib_qp_init_attr *attr,
                                 struct ib_udata *udata)
{
        struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
        struct mana_ib_dev *mdev =
                container_of(pd->device, struct mana_ib_dev, ib_dev);
        struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
        struct mana_ib_create_qp_rss_resp resp = {};
        struct mana_ib_create_qp_rss ucmd = {};
        struct gdma_queue **gdma_cq_allocated = NULL;
        mana_handle_t *mana_ind_table;
        struct mana_port_context *mpc;
        struct gdma_queue *gdma_cq;
        unsigned int ind_tbl_size;
        struct mana_context *mc;
        struct net_device *ndev;
        struct gdma_context *gc;
        struct mana_ib_cq *cq;
        struct mana_ib_wq *wq;
        struct gdma_dev *gd;
        struct mana_eq *eq;
        struct ib_cq *ibcq;
        struct ib_wq *ibwq;
        int i = 0;
        u32 port;
        int ret;

        gc = mdev->gdma_dev->gdma_context;
        gd = &gc->mana;
        mc = gd->driver_data;

        if (!udata || udata->inlen < sizeof(ucmd))
                return -EINVAL;

        ret = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
        if (ret) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed copy from udata for create rss-qp, err %d\n",
                          ret);
                return ret;
        }

        if (attr->cap.max_recv_wr > mdev->adapter_caps.max_qp_wr) {
                ibdev_dbg(&mdev->ib_dev,
                          "Requested max_recv_wr %d exceeding limit\n",
                          attr->cap.max_recv_wr);
                return -EINVAL;
        }

        if (attr->cap.max_recv_sge > MAX_RX_WQE_SGL_ENTRIES) {
                ibdev_dbg(&mdev->ib_dev,
                          "Requested max_recv_sge %d exceeding limit\n",
                          attr->cap.max_recv_sge);
                return -EINVAL;
        }

        ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size;
        if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) {
                ibdev_dbg(&mdev->ib_dev,
                          "Indirect table size %d exceeding limit\n",
                          ind_tbl_size);
                return -EINVAL;
        }

        if (ucmd.rx_hash_function != MANA_IB_RX_HASH_FUNC_TOEPLITZ) {
                ibdev_dbg(&mdev->ib_dev,
                          "RX Hash function is not supported, %d\n",
                          ucmd.rx_hash_function);
                return -EINVAL;
        }

        /* IB ports are 1-based, MANA ports are 0-based */
        port = ucmd.port;
        if (port < 1 || port > mc->num_ports) {
                ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
                          port);
                return -EINVAL;
        }
        ndev = mc->ports[port - 1];
        mpc = netdev_priv(ndev);

        ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
                  ucmd.rx_hash_function, port);

        mana_ind_table = kcalloc(ind_tbl_size, sizeof(mana_handle_t),
                                 GFP_KERNEL);
        if (!mana_ind_table) {
                ret = -ENOMEM;
                goto fail;
        }

        gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
                                    GFP_KERNEL);
        if (!gdma_cq_allocated) {
                ret = -ENOMEM;
                goto fail;
        }

        qp->port = port;

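        /*
         * Create one hardware RQ object per indirection-table entry and
         * record its handle so steering can be configured afterwards.
         */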
        for (i = 0; i < ind_tbl_size; i++) {
                struct mana_obj_spec wq_spec = {};
                struct mana_obj_spec cq_spec = {};

                ibwq = ind_tbl->ind_tbl[i];
                wq = container_of(ibwq, struct mana_ib_wq, ibwq);

                ibcq = ibwq->cq;
                cq = container_of(ibcq, struct mana_ib_cq, ibcq);

                wq_spec.gdma_region = wq->gdma_region;
                wq_spec.queue_size = wq->wq_buf_size;

                cq_spec.gdma_region = cq->gdma_region;
                cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
                cq_spec.modr_ctx_id = 0;
                eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
                cq_spec.attached_eq = eq->eq->id;

                ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
                                         &wq_spec, &cq_spec, &wq->rx_object);
                if (ret) {
                        /* Do cleanup starting with index i-1 */
                        i--;
                        goto fail;
                }

                /* The GDMA regions are now owned by the WQ object */
                wq->gdma_region = GDMA_INVALID_DMA_REGION;
                cq->gdma_region = GDMA_INVALID_DMA_REGION;

                wq->id = wq_spec.queue_index;
                cq->id = cq_spec.queue_index;

                ibdev_dbg(&mdev->ib_dev,
                          "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
                          ret, wq->rx_object, wq->id, cq->id);

                resp.entries[i].cqid = cq->id;
                resp.entries[i].wqid = wq->id;

                mana_ind_table[i] = wq->rx_object;

                /* Create CQ table entry */
                WARN_ON(gc->cq_table[cq->id]);
                gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
                if (!gdma_cq) {
                        ret = -ENOMEM;
                        /* The RQ object at index i is not covered by the
                         * unwind loop below; destroy it here.
                         */
                        mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
                        goto fail;
                }
                gdma_cq_allocated[i] = gdma_cq;

                gdma_cq->cq.context = cq;
                gdma_cq->type = GDMA_CQ;
                gdma_cq->cq.callback = mana_ib_cq_handler;
                gdma_cq->id = cq->id;
                gc->cq_table[cq->id] = gdma_cq;
        }
        resp.num_entries = i;

        ret = mana_ib_cfg_vport_steering(mdev, ndev, wq->rx_object,
                                         mana_ind_table,
                                         ind_tbl->log_ind_tbl_size,
                                         ucmd.rx_hash_key_len,
                                         ucmd.rx_hash_key);
        if (ret)
                goto fail;

        ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (ret) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed to copy to udata create rss-qp, %d\n",
                          ret);
                goto fail;
        }

        kfree(gdma_cq_allocated);
        kfree(mana_ind_table);

        return 0;

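/*
 * Unwind in reverse: entries [0, i) hold a live RQ object and a CQ table
 * entry; anything past i was never created.
 */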
fail:
        while (i-- > 0) {
                ibwq = ind_tbl->ind_tbl[i];
                ibcq = ibwq->cq;
                wq = container_of(ibwq, struct mana_ib_wq, ibwq);
                cq = container_of(ibcq, struct mana_ib_cq, ibcq);

                gc->cq_table[cq->id] = NULL;
                kfree(gdma_cq_allocated[i]);

                mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
        }

        kfree(gdma_cq_allocated);
        kfree(mana_ind_table);

        return ret;
}

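/*
 * mana_ib_create_qp_raw() - Create a raw-packet send QP.
 *
 * Pins the user-supplied SQ buffer, wraps it in a GDMA DMA region and
 * creates a hardware SQ object on the vPort handle shared with the
 * Ethernet driver, attaching the send CQ to an EQ picked by completion
 * vector.
 */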
static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
                                 struct ib_qp_init_attr *attr,
                                 struct ib_udata *udata)
{
        struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
        struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
        struct mana_ib_dev *mdev =
                container_of(ibpd->device, struct mana_ib_dev, ib_dev);
        struct mana_ib_cq *send_cq =
                container_of(attr->send_cq, struct mana_ib_cq, ibcq);
        struct mana_ib_ucontext *mana_ucontext =
                rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
                                          ibucontext);
        struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
        struct mana_ib_create_qp_resp resp = {};
        struct mana_ib_create_qp ucmd = {};
        struct gdma_queue *gdma_cq = NULL;
        struct mana_obj_spec wq_spec = {};
        struct mana_obj_spec cq_spec = {};
        struct mana_port_context *mpc;
        struct mana_context *mc;
        struct net_device *ndev;
        struct ib_umem *umem;
        struct mana_eq *eq;
        int eq_vec;
        u32 port;
        int err;

        mc = gd->driver_data;

        if (!mana_ucontext || udata->inlen < sizeof(ucmd))
                return -EINVAL;

        err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
        if (err) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed to copy from udata create qp-raw, %d\n", err);
                return err;
        }

        /* IB ports are 1-based, MANA Ethernet ports are 0-based */
        port = ucmd.port;
        if (port < 1 || port > mc->num_ports)
                return -EINVAL;

        if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
                ibdev_dbg(&mdev->ib_dev,
                          "Requested max_send_wr %d exceeding limit\n",
                          attr->cap.max_send_wr);
                return -EINVAL;
        }

        if (attr->cap.max_send_sge > MAX_TX_WQE_SGL_ENTRIES) {
                ibdev_dbg(&mdev->ib_dev,
                          "Requested max_send_sge %d exceeding limit\n",
                          attr->cap.max_send_sge);
                return -EINVAL;
        }

        ndev = mc->ports[port - 1];
        mpc = netdev_priv(ndev);
        ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);

        err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
        if (err)
                return -ENODEV;

        qp->port = port;

        ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
                  ucmd.sq_buf_addr, ucmd.port);

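        /* Pin the user-mode SQ buffer so the hardware can access it */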
        umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem)) {
                err = PTR_ERR(umem);
                ibdev_dbg(&mdev->ib_dev,
                          "Failed to get umem for create qp-raw, err %d\n",
                          err);
                goto err_free_vport;
        }
        qp->sq_umem = umem;

        err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
                                           &qp->sq_gdma_region);
        if (err) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed to create dma region for create qp-raw, %d\n",
                          err);
                goto err_release_umem;
        }

        ibdev_dbg(&mdev->ib_dev,
                  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
                  err, qp->sq_gdma_region);

        /* Create a WQ on the same port handle used by the Ethernet driver */
        wq_spec.gdma_region = qp->sq_gdma_region;
        wq_spec.queue_size = ucmd.sq_buf_size;

        cq_spec.gdma_region = send_cq->gdma_region;
        cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
        cq_spec.modr_ctx_id = 0;
        eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
        eq = &mc->eqs[eq_vec];
        cq_spec.attached_eq = eq->eq->id;

        err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
                                 &cq_spec, &qp->tx_object);
        if (err) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed to create wq for create raw-qp, err %d\n",
                          err);
                goto err_destroy_dma_region;
        }

        /* The GDMA regions are now owned by the WQ object */
        qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
        send_cq->gdma_region = GDMA_INVALID_DMA_REGION;

        qp->sq_id = wq_spec.queue_index;
        send_cq->id = cq_spec.queue_index;

        /* Create CQ table entry */
        WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
        gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
        if (!gdma_cq) {
                err = -ENOMEM;
                goto err_destroy_wq_obj;
        }

        gdma_cq->cq.context = send_cq;
        gdma_cq->type = GDMA_CQ;
        gdma_cq->cq.callback = mana_ib_cq_handler;
        gdma_cq->id = send_cq->id;
        gd->gdma_context->cq_table[send_cq->id] = gdma_cq;

        ibdev_dbg(&mdev->ib_dev,
                  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
                  qp->tx_object, qp->sq_id, send_cq->id);

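        /* Return the hardware queue ids and TX vPort offset to user space */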
        resp.sqid = qp->sq_id;
        resp.cqid = send_cq->id;
        resp.tx_vp_offset = pd->tx_vp_offset;

        err = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (err) {
                ibdev_dbg(&mdev->ib_dev,
                          "Failed copy udata for create qp-raw, %d\n",
                          err);
                goto err_release_gdma_cq;
        }

        return 0;

err_release_gdma_cq:
        kfree(gdma_cq);
        gd->gdma_context->cq_table[send_cq->id] = NULL;

err_destroy_wq_obj:
        mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

err_destroy_dma_region:
        mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);

err_release_umem:
        ib_umem_release(umem);

err_free_vport:
        mana_ib_uncfg_vport(mdev, pd, port - 1);

        return err;
}

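/* Verb entry point: only raw-packet QPs, with or without RSS, are supported */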
int mana_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
                      struct ib_udata *udata)
{
        switch (attr->qp_type) {
        case IB_QPT_RAW_PACKET:
                /* When rwq_ind_tbl is used, it's for creating WQs for RSS */
                if (attr->rwq_ind_tbl)
                        return mana_ib_create_qp_rss(ibqp, ibqp->pd, attr,
                                                     udata);

                return mana_ib_create_qp_raw(ibqp, ibqp->pd, attr, udata);
        default:
                /* Creating QP other than IB_QPT_RAW_PACKET is not supported */
                ibdev_dbg(ibqp->device, "Creating QP type %u not supported\n",
                          attr->qp_type);
        }

        return -EINVAL;
}

int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
{
        /* modify_qp is not supported by this version of the driver */
        return -EOPNOTSUPP;
}

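/*
 * mana_ib_destroy_qp_rss() - Destroy every RQ object referenced by the QP's
 * receive-WQ indirection table.
 */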
static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
                                  struct ib_rwq_ind_table *ind_tbl,
                                  struct ib_udata *udata)
{
        struct mana_ib_dev *mdev =
                container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
        struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
        struct mana_port_context *mpc;
        struct mana_context *mc;
        struct net_device *ndev;
        struct mana_ib_wq *wq;
        struct ib_wq *ibwq;
        int i;

        mc = gd->driver_data;
        ndev = mc->ports[qp->port - 1];
        mpc = netdev_priv(ndev);

        for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
                ibwq = ind_tbl->ind_tbl[i];
                wq = container_of(ibwq, struct mana_ib_wq, ibwq);
                ibdev_dbg(&mdev->ib_dev, "destroying wq->rx_object %llu\n",
                          wq->rx_object);
                mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
        }

        return 0;
}

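/*
 * mana_ib_destroy_qp_raw() - Destroy the SQ object, release the pinned SQ
 * buffer and undo the vPort configuration taken at create time.
 */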
static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
        struct mana_ib_dev *mdev =
                container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
        struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
        struct ib_pd *ibpd = qp->ibqp.pd;
        struct mana_port_context *mpc;
        struct mana_context *mc;
        struct net_device *ndev;
        struct mana_ib_pd *pd;

        mc = gd->driver_data;
        ndev = mc->ports[qp->port - 1];
        mpc = netdev_priv(ndev);
        pd = container_of(ibpd, struct mana_ib_pd, ibpd);

        mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

        if (qp->sq_umem) {
                mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
                ib_umem_release(qp->sq_umem);
        }

        mana_ib_uncfg_vport(mdev, pd, qp->port - 1);

        return 0;
}

int mana_ib_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);

        switch (ibqp->qp_type) {
        case IB_QPT_RAW_PACKET:
                if (ibqp->rwq_ind_tbl)
                        return mana_ib_destroy_qp_rss(qp, ibqp->rwq_ind_tbl,
                                                      udata);

                return mana_ib_destroy_qp_raw(qp, udata);

        default:
                ibdev_dbg(ibqp->device, "Unexpected QP type %u\n",
                          ibqp->qp_type);
        }

        return -ENOENT;
}