/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: IB Verbs interpreter
 */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/addrconf.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
#include <linux/hashtable.h>

#include "bnxt_ulp.h"

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"

#include "bnxt_re.h"
#include "ib_verbs.h"

#include <rdma/uverbs_types.h>
#include <rdma/uverbs_std_types.h>

#include <rdma/ib_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME bnxt_re
#include <rdma/uverbs_named_ioctl.h>

#include <rdma/bnxt_re-abi.h>

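/* Translate IB verbs access flags into the qplib access-flag bitmask
 * consumed by the firmware command layer.
 */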
static int __from_ib_access_flags(int iflags)
{
	int qflags = 0;

	if (iflags & IB_ACCESS_LOCAL_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
	if (iflags & IB_ACCESS_REMOTE_READ)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
	if (iflags & IB_ACCESS_REMOTE_WRITE)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
	if (iflags & IB_ACCESS_MW_BIND)
		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
	if (iflags & IB_ZERO_BASED)
		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
	if (iflags & IB_ACCESS_ON_DEMAND)
		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
	return qflags;
}

static enum ib_access_flags __to_ib_access_flags(int qflags)
{
	enum ib_access_flags iflags = 0;

	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
		iflags |= IB_ACCESS_LOCAL_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
		iflags |= IB_ACCESS_REMOTE_WRITE;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
		iflags |= IB_ACCESS_REMOTE_READ;
	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
		iflags |= IB_ACCESS_REMOTE_ATOMIC;
	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
		iflags |= IB_ACCESS_MW_BIND;
	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
		iflags |= IB_ZERO_BASED;
	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
		iflags |= IB_ACCESS_ON_DEMAND;
	return iflags;
}

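/* Copy an IB verbs SG list into the qplib SG list format and return the
 * total byte length described by the list.
 */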
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
			     struct bnxt_qplib_sge *sg_list, int num)
{
	int i, total = 0;

	for (i = 0; i < num; i++) {
		sg_list[i].addr = ib_sg_list[i].addr;
		sg_list[i].lkey = ib_sg_list[i].lkey;
		sg_list[i].size = ib_sg_list[i].length;
		total += sg_list[i].size;
	}
	return total;
}

/* Device */
int bnxt_re_query_device(struct ib_device *ibdev,
			 struct ib_device_attr *ib_attr,
			 struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;

	memset(ib_attr, 0, sizeof(*ib_attr));
	memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
	       min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver)));
	addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
			    rdev->netdev->dev_addr);
	ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
	ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;

	ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
	ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
	ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
	ib_attr->max_qp = dev_attr->max_qp;
	ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
	ib_attr->device_cap_flags =
				    IB_DEVICE_CURR_QP_STATE_MOD
				    | IB_DEVICE_RC_RNR_NAK_GEN
				    | IB_DEVICE_SHUTDOWN_PORT
				    | IB_DEVICE_SYS_IMAGE_GUID
				    | IB_DEVICE_RESIZE_MAX_WR
				    | IB_DEVICE_PORT_ACTIVE_EVENT
				    | IB_DEVICE_N_NOTIFY_CQ
				    | IB_DEVICE_MEM_WINDOW
				    | IB_DEVICE_MEM_WINDOW_TYPE_2B
				    | IB_DEVICE_MEM_MGT_EXTENSIONS;
	ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
	ib_attr->max_send_sge = dev_attr->max_qp_sges;
	ib_attr->max_recv_sge = dev_attr->max_qp_sges;
	ib_attr->max_sge_rd = dev_attr->max_qp_sges;
	ib_attr->max_cq = dev_attr->max_cq;
	ib_attr->max_cqe = dev_attr->max_cq_wqes;
	ib_attr->max_mr = dev_attr->max_mr;
	ib_attr->max_pd = dev_attr->max_pd;
	ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
	ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
	ib_attr->atomic_cap = IB_ATOMIC_NONE;
	ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
	if (dev_attr->is_atomic) {
		ib_attr->atomic_cap = IB_ATOMIC_GLOB;
		ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
	}

	ib_attr->max_ee_rd_atom = 0;
	ib_attr->max_res_rd_atom = 0;
	ib_attr->max_ee_init_rd_atom = 0;
	ib_attr->max_ee = 0;
	ib_attr->max_rdd = 0;
	ib_attr->max_mw = dev_attr->max_mw;
	ib_attr->max_raw_ipv6_qp = 0;
	ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
	ib_attr->max_mcast_grp = 0;
	ib_attr->max_mcast_qp_attach = 0;
	ib_attr->max_total_mcast_qp_attach = 0;
	ib_attr->max_ah = dev_attr->max_ah;

	ib_attr->max_srq = dev_attr->max_srq;
	ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
	ib_attr->max_srq_sge = dev_attr->max_srq_sges;

	ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;

	ib_attr->max_pkeys = 1;
	ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
	return 0;
}

/* Port */
int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
		       struct ib_port_attr *port_attr)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	int rc;

	memset(port_attr, 0, sizeof(*port_attr));

	if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
		port_attr->state = IB_PORT_ACTIVE;
		port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	} else {
		port_attr->state = IB_PORT_DOWN;
		port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
	}
	port_attr->max_mtu = IB_MTU_4096;
	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
	port_attr->gid_tbl_len = dev_attr->max_sgid;
	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				    IB_PORT_DEVICE_MGMT_SUP |
				    IB_PORT_VENDOR_CLASS_SUP;
	port_attr->ip_gids = true;

	port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
	port_attr->bad_pkey_cntr = 0;
	port_attr->qkey_viol_cntr = 0;
	port_attr->pkey_tbl_len = dev_attr->max_pkey;
	port_attr->lid = 0;
	port_attr->sm_lid = 0;
	port_attr->lmc = 0;
	port_attr->max_vl_num = 4;
	port_attr->sm_sl = 0;
	port_attr->subnet_timeout = 0;
	port_attr->init_type_reply = 0;
	rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
			      &port_attr->active_width);

	return rc;
}

int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr port_attr;

	if (bnxt_re_query_port(ibdev, port_num, &port_attr))
		return -EINVAL;

	immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
	immutable->gid_tbl_len = port_attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
		 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
		 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
}

int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
		       u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;

	return 0;
}

int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
		      int index, union ib_gid *gid)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	int rc;

	/* Ignore port_num */
	memset(gid, 0, sizeof(*gid));
	rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
				 &rdev->qplib_res.sgid_tbl, index,
				 (struct bnxt_qplib_gid *)gid);
	return rc;
}

int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc = 0;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid *gid_to_del;
	u16 vlan_id = 0xFFFF;

	/* Delete the entry from the hardware */
	ctx = *context;
	if (!ctx)
		return -EINVAL;

	if (sgid_tbl && sgid_tbl->active) {
		if (ctx->idx >= sgid_tbl->max)
			return -EINVAL;
		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
		vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
		/* DEL_GID is called in WQ context (netdevice_event_work_handler)
		 * or via the ib_unregister_device path. In the former case QP1
		 * may not be destroyed yet, in which case just return as FW
		 * needs that entry to be present and will fail its deletion.
		 * We could get invoked again after QP1 is destroyed OR get an
		 * ADD_GID call with a different GID value for the same index
		 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
		 */
		if (ctx->idx == 0 &&
		    rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
		    ctx->refcnt == 1 && rdev->gsi_ctx.gsi_sqp) {
			ibdev_dbg(&rdev->ibdev,
				  "Trying to delete GID0 while QP1 is alive\n");
			return -EFAULT;
		}
		ctx->refcnt--;
		if (!ctx->refcnt) {
			rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
						 vlan_id, true);
			if (rc) {
				ibdev_err(&rdev->ibdev,
					  "Failed to remove GID: %#x", rc);
			} else {
				ctx_tbl = sgid_tbl->ctx;
				ctx_tbl[ctx->idx] = NULL;
				kfree(ctx);
			}
		}
	} else {
		return -EINVAL;
	}
	return rc;
}

int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
{
	int rc;
	u32 tbl_idx = 0;
	u16 vlan_id = 0xFFFF;
	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;

	rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
	if (rc)
		return rc;

	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
				 rdev->qplib_res.netdev->dev_addr,
				 vlan_id, true, &tbl_idx);
	if (rc == -EALREADY) {
		ctx_tbl = sgid_tbl->ctx;
		ctx_tbl[tbl_idx]->refcnt++;
		*context = ctx_tbl[tbl_idx];
		return 0;
	}

	if (rc < 0) {
		ibdev_err(&rdev->ibdev, "Failed to add GID: %#x", rc);
		return rc;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx_tbl = sgid_tbl->ctx;
	ctx->idx = tbl_idx;
	ctx->refcnt = 1;
	ctx_tbl[tbl_idx] = ctx;
	*context = ctx;

	return rc;
}

enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
					    u32 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)

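/* Pre-build the Type-1 bind-MW WQE that the PD's fence logic posts on
 * the send queue; only the rkey changes at (re)bind time. Not needed on
 * Gen P5/P7 chips.
 */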
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct ib_mr *ib_mr = &fence->mr->ib_mr;
	struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
	struct bnxt_re_dev *rdev = pd->rdev;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	memset(wqe, 0, sizeof(*wqe));
	wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
	wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	wqe->bind.zero_based = false;
	wqe->bind.parent_l_key = ib_mr->lkey;
	wqe->bind.va = (u64)(unsigned long)fence->va;
	wqe->bind.length = fence->size;
	wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
	wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;

	/* Save the initial rkey in fence structure for now;
	 * wqe->bind.r_key will be set at (re)bind time.
	 */
	fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}

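/* Post the pre-built bind WQE with a fresh rkey on the QP's send queue
 * and ring the SQ doorbell.
 */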
static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
	struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
					     qplib_qp);
	struct ib_pd *ib_pd = qp->ib_qp.pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
	struct bnxt_qplib_swqe wqe;
	int rc;

	memcpy(&wqe, fence_wqe, sizeof(wqe));
	wqe.bind.r_key = fence->bind_rkey;
	fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);

	ibdev_dbg(&qp->rdev->ibdev,
		  "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
		  wqe.bind.r_key, qp->qplib_qp.id, pd);
	rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
	if (rc) {
		ibdev_err(&qp->rdev->ibdev, "Failed to bind fence-WQE\n");
		return rc;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);

	return rc;
}

static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = fence->mr;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	if (fence->mw) {
		bnxt_re_dealloc_mw(fence->mw);
		fence->mw = NULL;
	}
	if (mr) {
		if (mr->ib_mr.rkey)
			bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
					     true);
		if (mr->ib_mr.lkey)
			bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
		kfree(mr);
		fence->mr = NULL;
	}
	if (fence->dma_addr) {
		dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
				 DMA_BIDIRECTIONAL);
		fence->dma_addr = 0;
	}
}

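/* Set up the fence resources for a kernel PD on chips that need the
 * bind-MW based fence: DMA-map the fence buffer, allocate and register
 * an MR over it, and create the Type-1 MW that the bind WQE targets.
 */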
static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
	int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
	struct bnxt_re_fence_data *fence = &pd->fence;
	struct bnxt_re_dev *rdev = pd->rdev;
	struct device *dev = &rdev->en_dev->pdev->dev;
	struct bnxt_re_mr *mr = NULL;
	dma_addr_t dma_addr = 0;
	struct ib_mw *mw;
	int rc;

	if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return 0;

	dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
				  DMA_BIDIRECTIONAL);
	rc = dma_mapping_error(dev, dma_addr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to dma-map fence-MR-mem\n");
		rc = -EIO;
		fence->dma_addr = 0;
		goto fail;
	}
	fence->dma_addr = dma_addr;

	/* Allocate a MR */
	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		rc = -ENOMEM;
		goto fail;
	}
	fence->mr = mr;
	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to alloc fence-HW-MR\n");
		goto fail;
	}

	/* Register MR */
	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->qplib_mr.va = (u64)(unsigned long)fence->va;
	mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
			       BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
		goto fail;
	}
	mr->ib_mr.rkey = mr->qplib_mr.rkey;

	/* Create a fence MW only for kernel consumers */
	mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
	if (IS_ERR(mw)) {
		ibdev_err(&rdev->ibdev,
			  "Failed to create fence-MW for PD: %p\n", pd);
		rc = PTR_ERR(mw);
		goto fail;
	}
	fence->mw = mw;

	bnxt_re_create_fence_wqe(pd);
	return 0;

fail:
	bnxt_re_destroy_fence_mr(pd);
	return rc;
}

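/* Allocate and insert an rdma_user_mmap entry for the given offset; the
 * shared page gets a fixed (exact) offset of zero, while all other
 * mapping types get a driver-assigned offset returned through @offset.
 */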
static struct bnxt_re_user_mmap_entry*
bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
			  enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
{
	struct bnxt_re_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->mem_offset = mem_offset;
	entry->mmap_flag = mmap_flag;
	entry->uctx = uctx;

	switch (mmap_flag) {
	case BNXT_RE_MMAP_SH_PAGE:
		ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
							&entry->rdma_entry,
							PAGE_SIZE, 0);
		break;
	case BNXT_RE_MMAP_UC_DB:
	case BNXT_RE_MMAP_WC_DB:
	case BNXT_RE_MMAP_DBR_BAR:
	case BNXT_RE_MMAP_DBR_PAGE:
	case BNXT_RE_MMAP_TOGGLE_PAGE:
		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
						  &entry->rdma_entry,
						  PAGE_SIZE);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret) {
		kfree(entry);
		return NULL;
	}
	if (offset)
		*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return entry;
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;

	if (udata) {
		rdma_user_mmap_entry_remove(pd->pd_db_mmap);
		pd->pd_db_mmap = NULL;
	}

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
					   &pd->qplib_pd))
			atomic_dec(&rdev->stats.res.pd_count);
	}
	return 0;
}

int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_user_mmap_entry *entry = NULL;
	u32 active_pds;
	int rc = 0;

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
		rc = -ENOMEM;
		goto fail;
	}

	if (udata) {
		struct bnxt_re_pd_resp resp = {};

		if (!ucntx->dpi.dbr) {
			/* Allocate DPI in alloc_pd so that ibv_devinfo and
			 * similar applications do not fail when DPIs are
			 * depleted.
			 */
			if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
						 &ucntx->dpi, ucntx,
						 BNXT_QPLIB_DPI_TYPE_UC)) {
				rc = -ENOMEM;
				goto dbfail;
			}
		}

		resp.pdid = pd->qplib_pd.id;
		/* Still allow mapping this DBR to the new user PD. */
		resp.dpi = ucntx->dpi.dpi;

		entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
						  BNXT_RE_MMAP_UC_DB, &resp.dbr);

		if (!entry) {
			rc = -ENOMEM;
			goto dbfail;
		}

		pd->pd_db_mmap = &entry->rdma_entry;

		rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
		if (rc) {
			rdma_user_mmap_entry_remove(pd->pd_db_mmap);
			rc = -EFAULT;
			goto dbfail;
		}
	}

	if (!udata)
		if (bnxt_re_create_fence_mr(pd))
			ibdev_warn(&rdev->ibdev,
				   "Failed to create Fence-MR\n");
	active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
	if (active_pds > rdev->stats.res.pd_watermark)
		rdev->stats.res.pd_watermark = active_pds;

	return 0;
dbfail:
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	return rc;
}

/* Address Handles */
int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	struct bnxt_re_dev *rdev = ah->rdev;
	bool block = true;
	int rc;

	block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
	rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
	if (BNXT_RE_CHECK_RC(rc)) {
		if (rc == -ETIMEDOUT)
			rc = 0;
		else
			goto fail;
	}
	atomic_dec(&rdev->stats.res.ah_count);
fail:
	return rc;
}

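/* Map the stack's RDMA network type for a GID onto the AH type expected
 * by the CREATE_AH firmware command (RoCE v1 vs RoCE v2 IPv4/IPv6).
 */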
static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
{
	u8 nw_type;

	switch (ntype) {
	case RDMA_NETWORK_IPV4:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
		break;
	default:
		nw_type = CMDQ_CREATE_AH_TYPE_V1;
		break;
	}
	return nw_type;
}

int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_ah->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	struct bnxt_re_dev *rdev = pd->rdev;
	const struct ib_gid_attr *sgid_attr;
	struct bnxt_re_gid_ctx *ctx;
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
	u32 active_ahs;
	u8 nw_type;
	int rc;

	if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
		ibdev_err(&rdev->ibdev, "Failed to alloc AH: GRH not set");
		return -EINVAL;
	}

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	/* Supply the configuration for the HW */
	memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
	       sizeof(union ib_gid));
	sgid_attr = grh->sgid_attr;
	/* Get the HW context of the GID. The reference
	 * of GID table entry is already taken by the caller.
	 */
	ctx = rdma_read_gid_hw_context(sgid_attr);
	ah->qplib_ah.sgid_index = ctx->idx;
	ah->qplib_ah.host_sgid_index = grh->sgid_index;
	ah->qplib_ah.traffic_class = grh->traffic_class;
	ah->qplib_ah.flow_label = grh->flow_label;
	ah->qplib_ah.hop_limit = grh->hop_limit;
	ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);

	memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
				  !(init_attr->flags &
				    RDMA_CREATE_AH_SLEEPABLE));
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate HW AH");
		return rc;
	}

	/* Write AVID to shared page. */
	if (udata) {
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		unsigned long flag;
		u32 *wrptr;

		spin_lock_irqsave(&uctx->sh_lock, flag);
		wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
		*wrptr = ah->qplib_ah.id;
		wmb(); /* make sure cache is updated. */
		spin_unlock_irqrestore(&uctx->sh_lock, flag);
	}
	active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
	if (active_ahs > rdev->stats.res.ah_watermark)
		rdev->stats.res.ah_watermark = active_ahs;

	return 0;
}

int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
	struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);

	ah_attr->type = ib_ah->type;
	rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
	memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
	rdma_ah_set_grh(ah_attr, NULL, 0,
			ah->qplib_ah.host_sgid_index,
			0, ah->qplib_ah.traffic_class);
	rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
	rdma_ah_set_port_num(ah_attr, 1);
	rdma_ah_set_static_rate(ah_attr, 0);
	return 0;
}

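/* Lock both CQs of a QP, send CQ first; when the two queues share one
 * CQ only the single lock is taken.
 */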
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
	__acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->cq_lock, flags);
	if (qp->rcq != qp->scq)
		spin_lock(&qp->rcq->cq_lock);
	else
		__acquire(&qp->rcq->cq_lock);

	return flags;
}

void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
			unsigned long flags)
	__releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
{
	if (qp->rcq != qp->scq)
		spin_unlock(&qp->rcq->cq_lock);
	else
		__release(&qp->rcq->cq_lock);
	spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}

static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
	struct bnxt_re_qp *gsi_sqp;
	struct bnxt_re_ah *gsi_sah;
	struct bnxt_re_dev *rdev;
	int rc;

	rdev = qp->rdev;
	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
	gsi_sah = rdev->gsi_ctx.gsi_sah;

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
	bnxt_qplib_destroy_ah(&rdev->qplib_res,
			      &gsi_sah->qplib_ah,
			      true);
	atomic_dec(&rdev->stats.res.ah_count);
	bnxt_qplib_clean_qp(&qp->qplib_qp);

	ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
		goto fail;
	}
	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);

	/* remove from active qp list */
	mutex_lock(&rdev->qp_lock);
	list_del(&gsi_sqp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);

	kfree(rdev->gsi_ctx.sqp_tbl);
	kfree(gsi_sah);
	kfree(gsi_sqp);
	rdev->gsi_ctx.gsi_sqp = NULL;
	rdev->gsi_ctx.gsi_sah = NULL;
	rdev->gsi_ctx.sqp_tbl = NULL;

	return 0;
fail:
	return rc;
}

/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_nq *scq_nq = NULL;
	struct bnxt_qplib_nq *rcq_nq = NULL;
	unsigned int flags;
	int rc;

	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);

	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
		return rc;
	}

	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_clean_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);

	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			return rc;
	}

	mutex_lock(&rdev->qp_lock);
	list_del(&qp->list);
	mutex_unlock(&rdev->qp_lock);
	atomic_dec(&rdev->stats.res.qp_count);
	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
		atomic_dec(&rdev->stats.res.rc_qp_count);
	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
		atomic_dec(&rdev->stats.res.ud_qp_count);

	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	/* Flush all entries of the notification queues associated with
	 * the given QP.
	 */
	scq_nq = qplib_qp->scq->nq;
	rcq_nq = qplib_qp->rcq->nq;
	bnxt_re_synchronize_nq(scq_nq);
	if (scq_nq != rcq_nq)
		bnxt_re_synchronize_nq(rcq_nq);

	return 0;
}

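/* Translate an IB verbs QP type to the firmware QP type; IB_QPT_MAX is
 * returned for types the device does not support.
 */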
static u8 __from_ib_qp_type(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_GSI:
		return CMDQ_CREATE_QP1_TYPE_GSI;
	case IB_QPT_RC:
		return CMDQ_CREATE_QP_TYPE_RC;
	case IB_QPT_UD:
		return CMDQ_CREATE_QP_TYPE_UD;
	default:
		return IB_QPT_MAX;
	}
}

static u16 bnxt_re_setup_rwqe_size(struct bnxt_qplib_qp *qplqp,
				   int rsge, int max)
{
	if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		rsge = max;
	return bnxt_re_get_rwqe_size(rsge);
}

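/* Compute the send WQE size: large enough for the requested number of
 * SGEs and, when inline data is used, for the inline payload rounded up
 * to a whole send-header slot.
 */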
static u16 bnxt_re_get_wqe_size(int ilsize, int nsge)
{
	u16 wqe_size, calc_ils;

	wqe_size = bnxt_re_get_swqe_size(nsge);
	if (ilsize) {
		calc_ils = sizeof(struct sq_send_hdr) + ilsize;
		wqe_size = max_t(u16, calc_ils, wqe_size);
		wqe_size = ALIGN(wqe_size, sizeof(struct sq_send_hdr));
	}
	return wqe_size;
}

static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
				   struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int align, ilsize;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	align = sizeof(struct sq_send_hdr);
	ilsize = ALIGN(init_attr->cap.max_inline_data, align);

	sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
	if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
		return -EINVAL;
	/* For gen p4 and gen p5 backward compatibility mode
	 * wqe size is fixed to 128 bytes
	 */
	if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
	    qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
		sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);

	if (init_attr->cap.max_inline_data) {
		qplqp->max_inline_data = sq->wqe_size -
					 sizeof(struct sq_send_hdr);
		init_attr->cap.max_inline_data = qplqp->max_inline_data;
		if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
			sq->max_sge = qplqp->max_inline_data /
				      sizeof(struct sq_sge);
	}

	return 0;
}

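/* Map the user-space SQ/RQ buffers of a QP created through udata and,
 * for RC QPs, account for the PSN search memory appended to the SQ.
 */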
static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
				struct bnxt_re_qp *qp, struct ib_udata *udata)
{
	struct bnxt_qplib_qp *qplib_qp;
	struct bnxt_re_ucontext *cntx;
	struct bnxt_re_qp_req ureq;
	int bytes = 0, psn_sz;
	struct ib_umem *umem;
	int psn_nume;

	qplib_qp = &qp->qplib_qp;
	cntx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
					 ib_uctx);
	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
	/* Consider mapping PSN search memory only for RC QPs. */
	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
			 sizeof(struct sq_psn_search_ext) :
			 sizeof(struct sq_psn_search);
		psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
			   qplib_qp->sq.max_wqe :
			   ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
			    sizeof(struct bnxt_qplib_sge));
		bytes += (psn_nume * psn_sz);
	}

	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.qpsva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	qp->sumem = umem;
	qplib_qp->sq.sg_info.umem = umem;
	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
	qplib_qp->qp_handle = ureq.qp_handle;

	if (!qp->qplib_qp.srq) {
		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
		bytes = PAGE_ALIGN(bytes);
		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
				   IB_ACCESS_LOCAL_WRITE);
		if (IS_ERR(umem))
			goto rqfail;
		qp->rumem = umem;
		qplib_qp->rq.sg_info.umem = umem;
		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
	}

	qplib_qp->dpi = &cntx->dpi;
	return 0;
rqfail:
	ib_umem_release(qp->sumem);
	qp->sumem = NULL;
	memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));

	return PTR_ERR(umem);
}

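/* Create the AH used by the shadow QP1; the DGID is set to the local
 * SGID and the DMAC to the local MAC, since the shadow QP only relays
 * QP1 traffic back to the host.
 */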
static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_ah *ah;
	union ib_gid sgid;
	int rc;

	ah = kzalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return NULL;

	ah->rdev = rdev;
	ah->qplib_ah.pd = &pd->qplib_pd;

	rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
	if (rc)
		goto fail;

	/* supply the dgid data same as sgid */
	memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
	       sizeof(union ib_gid));
	ah->qplib_ah.sgid_index = 0;

	ah->qplib_ah.traffic_class = 0;
	ah->qplib_ah.flow_label = 0;
	ah->qplib_ah.hop_limit = 1;
	ah->qplib_ah.sl = 0;
	/* Have DMAC same as SMAC */
	ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);

	rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW AH for Shadow QP");
		goto fail;
	}
	atomic_inc(&rdev->stats.res.ah_count);

	return ah;

fail:
	kfree(ah);
	return NULL;
}

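/* Create the shadow UD QP that relays QP1 traffic on chips that need
 * it; its SQ/RQ depths mirror the QP1 RQ and it shares the QP1 CQs.
 */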
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;
	qp->qplib_qp.sq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.sq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;
	qp->qplib_qp.rq.sg_info.pgsize = PAGE_SIZE;
	qp->qplib_qp.rq.sg_info.pgshft = PAGE_SHIFT;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->stats.res.qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

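/* Size the receive queue from the init attributes, or attach the SRQ
 * when one is supplied; one extra slot is allocated so that a full
 * queue can be distinguished from an empty one.
 */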
static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *rq;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	rq = &qplqp->rq;
	dev_attr = &rdev->dev_attr;

	if (init_attr->srq) {
		struct bnxt_re_srq *srq;

		srq = container_of(init_attr->srq, struct bnxt_re_srq, ib_srq);
		qplqp->srq = &srq->qplib_srq;
		rq->max_wqe = 0;
	} else {
		rq->max_sge = init_attr->cap.max_recv_sge;
		if (rq->max_sge > dev_attr->max_qp_sges)
			rq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_recv_sge = rq->max_sge;
		rq->wqe_size = bnxt_re_setup_rwqe_size(qplqp, rq->max_sge,
						       dev_attr->max_qp_sges);
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty.
		 */
		entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
		rq->q_full_delta = 0;
		rq->sg_info.pgsize = PAGE_SIZE;
		rq->sg_info.pgshft = PAGE_SHIFT;
	}

	return 0;
}

static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
{
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	/* QP1 RQ WQEs use a fixed six SGEs on pre-Gen-P5 chips */
	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		qplqp->rq.max_sge = 6;
}

static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
				struct ib_qp_init_attr *init_attr,
				struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_q *sq;
	int entries;
	int diff;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	sq = &qplqp->sq;
	dev_attr = &rdev->dev_attr;

	sq->max_sge = init_attr->cap.max_send_sge;
	if (sq->max_sge > dev_attr->max_qp_sges) {
		sq->max_sge = dev_attr->max_qp_sges;
		init_attr->cap.max_send_sge = sq->max_sge;
	}

	rc = bnxt_re_setup_swqe_size(qp, init_attr);
	if (rc)
		return rc;

	entries = init_attr->cap.max_send_wr;
	/* Allocate 128 + 1 more than what's provided */
	diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
	       0 : BNXT_QPLIB_RESERVED_QP_WRS;
	entries = bnxt_re_init_depth(entries + diff + 1, uctx);
	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
	sq->q_full_delta = diff + 1;
	/*
	 * Reserving one slot for Phantom WQE. Application can
	 * post one extra entry in this case. But allowing this to avoid
	 * unexpected Queue full condition
	 */
	qplqp->sq.q_full_delta -= 1;
	qplqp->sq.sg_info.pgsize = PAGE_SIZE;
	qplqp->sq.sg_info.pgshft = PAGE_SHIFT;

	return 0;
}

static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
				       struct ib_qp_init_attr *init_attr,
				       struct bnxt_re_ucontext *uctx)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	int entries;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
		entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
		qplqp->sq.max_wqe = min_t(u32, entries,
					  dev_attr->max_qp_wqes + 1);
		qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
					 init_attr->cap.max_send_wr;
		qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
		if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
			qplqp->sq.max_sge = dev_attr->max_qp_sges;
	}
}

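/* Resolve the firmware QP type for the requested IB QP type; on Gen
 * P5/P7 chips the GSI QP goes through the regular CREATE_QP path
 * rather than CREATE_QP1.
 */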
static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
				struct ib_qp_init_attr *init_attr)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	int qptype;

	chip_ctx = rdev->chip_ctx;

	qptype = __from_ib_qp_type(init_attr->qp_type);
	if (qptype == IB_QPT_MAX) {
		ibdev_err(&rdev->ibdev, "QP type 0x%x not supported", qptype);
		qptype = -EOPNOTSUPP;
		goto out;
	}

	if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
	    init_attr->qp_type == IB_QPT_GSI)
		qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
	return qptype;
}

static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_qplib_qp *qplqp;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	int rc = 0, qptype;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;
	dev_attr = &rdev->dev_attr;

	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
	/* Setup misc params */
	ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
	qplqp->pd = &pd->qplib_pd;
	qplqp->qp_handle = (u64)qplqp;
	qplqp->max_inline_data = init_attr->cap.max_inline_data;
	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
	qptype = bnxt_re_init_qp_type(rdev, init_attr);
	if (qptype < 0) {
		rc = qptype;
		goto out;
	}
	qplqp->type = (u8)qptype;
	qplqp->wqe_mode = rdev->chip_ctx->modes.wqe_mode;

	if (init_attr->qp_type == IB_QPT_RC) {
		qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
		qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
	}
	qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
	if (init_attr->create_flags) {
		ibdev_dbg(&rdev->ibdev,
			  "QP create flags 0x%x not supported",
			  init_attr->create_flags);
		return -EOPNOTSUPP;
	}

	/* Setup CQs */
	if (init_attr->send_cq) {
		cq = container_of(init_attr->send_cq, struct bnxt_re_cq, ib_cq);
		qplqp->scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (init_attr->recv_cq) {
		cq = container_of(init_attr->recv_cq, struct bnxt_re_cq, ib_cq);
		qplqp->rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	/* Setup RQ/SRQ */
	rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_rq_attr(qp);

	/* Setup SQ */
	rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
	if (rc)
		goto out;
	if (init_attr->qp_type == IB_QPT_GSI)
		bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);

	if (udata) /* This will update DPI and qp_handle */
		rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
out:
	return rc;
}

static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
				     struct bnxt_re_pd *pd)
{
	struct bnxt_re_sqp_entries *sqp_tbl;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_qp *sqp;
	struct bnxt_re_ah *sah;
	int rc = 0;

	rdev = qp->rdev;
	/* Create a shadow QP to handle the QP1 traffic */
	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
			  GFP_KERNEL);
	if (!sqp_tbl)
		return -ENOMEM;
	rdev->gsi_ctx.sqp_tbl = sqp_tbl;

	sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
	if (!sqp) {
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev, "Failed to create Shadow QP for QP1");
		goto out;
	}
	rdev->gsi_ctx.gsi_sqp = sqp;

	sqp->rcq = qp->rcq;
	sqp->scq = qp->scq;
	sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
					  &qp->qplib_qp);
	if (!sah) {
		bnxt_qplib_destroy_qp(&rdev->qplib_res,
				      &sqp->qplib_qp);
		rc = -ENODEV;
		ibdev_err(&rdev->ibdev,
			  "Failed to create AH entry for ShadowQP");
		goto out;
	}
	rdev->gsi_ctx.gsi_sah = sah;

	return 0;
out:
	kfree(sqp_tbl);
	return rc;
}

static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
				 struct ib_qp_init_attr *init_attr)
{
	struct bnxt_re_dev *rdev;
	struct bnxt_qplib_qp *qplqp;
	int rc;

	rdev = qp->rdev;
	qplqp = &qp->qplib_qp;

	qplqp->rq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	qplqp->sq_hdr_buf_size = BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;

	rc = bnxt_qplib_create_qp1(&rdev->qplib_res, qplqp);
	if (rc) {
		ibdev_err(&rdev->ibdev, "create HW QP1 failed!");
		goto out;
	}

	rc = bnxt_re_create_shadow_gsi(qp, pd);
out:
	return rc;
}

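/* Validate the requested QP capabilities against the device limits
 * reported by firmware; returns false when any limit is exceeded.
 */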
static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
				   struct ib_qp_init_attr *init_attr,
				   struct bnxt_qplib_dev_attr *dev_attr)
{
	bool rc = true;

	if (init_attr->cap.max_send_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes ||
	    init_attr->cap.max_send_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_recv_sge > dev_attr->max_qp_sges ||
	    init_attr->cap.max_inline_data > dev_attr->max_inline_data) {
		ibdev_err(&rdev->ibdev,
			  "Create QP failed - max exceeded! 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x 0x%x/0x%x",
			  init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
			  init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
			  init_attr->cap.max_inline_data,
			  dev_attr->max_inline_data);
		rc = false;
	}
	return rc;
}

int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_qp->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	u32 active_qps;
	int rc;

	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
	if (!rc) {
		rc = -EINVAL;
		goto fail;
	}

	qp->rdev = rdev;
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
		goto fail;

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
		rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
		if (rc == -ENODEV)
			goto qp_destroy;
		if (rc)
			goto fail;
	} else {
		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Failed to create HW QP");
			goto free_umem;
		}
		if (udata) {
			struct bnxt_re_qp_resp resp;

			resp.qpid = qp->qplib_qp.id;
			resp.rsvd = 0;
			rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
			if (rc) {
				ibdev_err(&rdev->ibdev, "Failed to copy QP udata");
				goto qp_destroy;
			}
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	if (qp_init_attr->qp_type == IB_QPT_GSI)
		rdev->gsi_ctx.gsi_qp = qp;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	mutex_unlock(&rdev->qp_lock);
	active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
	if (active_qps > rdev->stats.res.qp_watermark)
		rdev->stats.res.qp_watermark = active_qps;
	if (qp_init_attr->qp_type == IB_QPT_RC) {
		active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
		if (active_qps > rdev->stats.res.rc_qp_watermark)
			rdev->stats.res.rc_qp_watermark = active_qps;
	} else if (qp_init_attr->qp_type == IB_QPT_UD) {
		active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
		if (active_qps > rdev->stats.res.ud_qp_watermark)
			rdev->stats.res.ud_qp_watermark = active_qps;
	}

	return 0;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
fail:
	return rc;
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->stats.res.srq_count);
	if (nq)
		nq->budget--;
	return 0;
}

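/* Map the user-space SRQ buffer supplied through udata and wire it up
 * to the qplib SRQ along with the context's doorbell page.
 */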
static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_re_srq_req ureq;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct ib_umem *umem;
	int bytes = 0;
	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	srq->umem = umem;
	qplib_srq->sg_info.umem = umem;
	qplib_srq->sg_info.pgsize = PAGE_SIZE;
	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;

	return 0;
}

int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct bnxt_qplib_dev_attr *dev_attr;
	struct bnxt_qplib_nq *nq = NULL;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_srq *srq;
	struct bnxt_re_pd *pd;
	struct ib_pd *ib_pd;
	u32 active_srqs;
	int rc, entries;

	ib_pd = ib_srq->pd;
	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	rdev = pd->rdev;
	dev_attr = &rdev->dev_attr;
	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		ibdev_err(&rdev->ibdev, "Create SRQ failed - max exceeded");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		rc = -EOPNOTSUPP;
		goto exit;
	}

	uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty
	 */
	entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;
	srq->qplib_srq.max_wqe = entries;

	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	/* SRQ WQEs are 128 bytes, so use the maximum SGE count */
	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size(dev_attr->max_srq_sges);
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
	nq = &rdev->nq[0];

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Create HW SRQ failed!");
		goto fail;
	}

	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			ibdev_err(&rdev->ibdev, "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
			goto fail;
		}
	}
	if (nq)
		nq->budget++;
	active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
	if (active_srqs > rdev->stats.res.srq_watermark)
		rdev->stats.res.srq_watermark = active_srqs;
	spin_lock_init(&srq->lock);

	return 0;

fail:
	ib_umem_release(srq->umem);
exit:
	return rc;
}

1818int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
1819 enum ib_srq_attr_mask srq_attr_mask,
1820 struct ib_udata *udata)
1821{
1822 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1823 ib_srq);
1824 struct bnxt_re_dev *rdev = srq->rdev;
1825 int rc;
1826
1827 switch (srq_attr_mask) {
1828 case IB_SRQ_MAX_WR:
1829 /* SRQ resize is not supported */
1830 return -EINVAL;
1831 case IB_SRQ_LIMIT:
1832 /* Change the SRQ threshold */
1833 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1834 return -EINVAL;
1835
1836 srq->qplib_srq.threshold = srq_attr->srq_limit;
1837 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1838 if (rc) {
1839 ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
1840 return rc;
1841 }
1842 /* On success, update the shadow */
1843 srq->srq_limit = srq_attr->srq_limit;
1844 /* No need to Build and send response back to udata */
1845 return 0;
1846 default:
1847 ibdev_err(&rdev->ibdev,
1848 "Unsupported srq_attr_mask 0x%x", srq_attr_mask);
1849 return -EINVAL;
1850 }
1851}
1852
1853int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
1854{
1855 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1856 ib_srq);
1857 struct bnxt_re_srq tsrq;
1858 struct bnxt_re_dev *rdev = srq->rdev;
1859 int rc;
1860
1861 /* Get live SRQ attr */
1862 tsrq.qplib_srq.id = srq->qplib_srq.id;
1863 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
1864 if (rc) {
1865 ibdev_err(&rdev->ibdev, "Query HW SRQ failed!");
1866 return rc;
1867 }
1868 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1869 srq_attr->max_sge = srq->qplib_srq.max_sge;
1870 srq_attr->srq_limit = tsrq.qplib_srq.threshold;
1871
1872 return 0;
1873}
1874
1875int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
1876 const struct ib_recv_wr **bad_wr)
1877{
1878 struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
1879 ib_srq);
1880 struct bnxt_qplib_swqe wqe;
1881 unsigned long flags;
1882 int rc = 0;
1883
1884 spin_lock_irqsave(&srq->lock, flags);
1885 while (wr) {
1886 /* Transcribe each ib_recv_wr to qplib_swqe */
1887 wqe.num_sge = wr->num_sge;
1888 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
1889 wqe.wr_id = wr->wr_id;
1890 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
1891
1892 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1893 if (rc) {
1894 *bad_wr = wr;
1895 break;
1896 }
1897 wr = wr->next;
1898 }
1899 spin_unlock_irqrestore(&srq->lock, flags);
1900
1901 return rc;
}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
1904 struct bnxt_re_qp *qp1_qp,
1905 int qp_attr_mask)
1906{
1907 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
1908 int rc;
1909
1910 if (qp_attr_mask & IB_QP_STATE) {
1911 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1912 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
1913 }
1914 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
1915 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1916 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
1917 }
1918
1919 if (qp_attr_mask & IB_QP_QKEY) {
1920 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Use a fixed QKEY for the shadow QP */
1922 qp->qplib_qp.qkey = 0x81818181;
1923 }
1924 if (qp_attr_mask & IB_QP_SQ_PSN) {
1925 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
1926 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
1927 }
1928
1929 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
1930 if (rc)
1931 ibdev_err(&rdev->ibdev, "Failed to modify Shadow QP for QP1");
1932 return rc;
1933}
1934
1935int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1936 int qp_attr_mask, struct ib_udata *udata)
1937{
1938 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1939 struct bnxt_re_dev *rdev = qp->rdev;
1940 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
1941 enum ib_qp_state curr_qp_state, new_qp_state;
1942 int rc, entries;
1943 unsigned int flags;
1944 u8 nw_type;
1945
1946 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1947 return -EOPNOTSUPP;
1948
1949 qp->qplib_qp.modify_flags = 0;
1950 if (qp_attr_mask & IB_QP_STATE) {
1951 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
1952 new_qp_state = qp_attr->qp_state;
1953 if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
1954 ib_qp->qp_type, qp_attr_mask)) {
1955 ibdev_err(&rdev->ibdev,
1956 "Invalid attribute mask: %#x specified ",
1957 qp_attr_mask);
1958 ibdev_err(&rdev->ibdev,
1959 "for qpn: %#x type: %#x",
1960 ib_qp->qp_num, ib_qp->qp_type);
1961 ibdev_err(&rdev->ibdev,
1962 "curr_qp_state=0x%x, new_qp_state=0x%x\n",
1963 curr_qp_state, new_qp_state);
1964 return -EINVAL;
1965 }
1966 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
1967 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
1968
1969 if (!qp->sumem &&
1970 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1971 ibdev_dbg(&rdev->ibdev,
1972 "Move QP = %p to flush list\n", qp);
1973 flags = bnxt_re_lock_cqs(qp);
1974 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
1975 bnxt_re_unlock_cqs(qp, flags);
1976 }
1977 if (!qp->sumem &&
1978 qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
1979 ibdev_dbg(&rdev->ibdev,
1980 "Move QP = %p out of flush list\n", qp);
1981 flags = bnxt_re_lock_cqs(qp);
1982 bnxt_qplib_clean_qp(&qp->qplib_qp);
1983 bnxt_re_unlock_cqs(qp, flags);
1984 }
1985 }
1986 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
1987 qp->qplib_qp.modify_flags |=
1988 CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
1989 qp->qplib_qp.en_sqd_async_notify = true;
1990 }
1991 if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
1992 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1993 qp->qplib_qp.access =
1994 __from_ib_access_flags(qp_attr->qp_access_flags);
1995 /* LOCAL_WRITE access must be set to allow RC receive */
1996 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
1997 /* Temp: Set all params on QP as of now */
1998 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
1999 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2000 }
2001 if (qp_attr_mask & IB_QP_PKEY_INDEX) {
2002 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2003 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2004 }
2005 if (qp_attr_mask & IB_QP_QKEY) {
2006 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2007 qp->qplib_qp.qkey = qp_attr->qkey;
2008 }
2009 if (qp_attr_mask & IB_QP_AV) {
2010 const struct ib_global_route *grh =
2011 rdma_ah_read_grh(&qp_attr->ah_attr);
2012 const struct ib_gid_attr *sgid_attr;
2013 struct bnxt_re_gid_ctx *ctx;
2014
2015 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2016 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
2017 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
2018 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
2019 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
2020 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
2021 CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
2022 memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
2023 sizeof(qp->qplib_qp.ah.dgid.data));
2024 qp->qplib_qp.ah.flow_label = grh->flow_label;
2025 sgid_attr = grh->sgid_attr;
		/* Get the HW context of the GID. The reference to the
		 * GID table entry is already held by the caller.
		 */
2029 ctx = rdma_read_gid_hw_context(sgid_attr);
2030 qp->qplib_qp.ah.sgid_index = ctx->idx;
2031 qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
2032 qp->qplib_qp.ah.hop_limit = grh->hop_limit;
2033 qp->qplib_qp.ah.traffic_class = grh->traffic_class;
2034 qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
2035 ether_addr_copy(qp->qplib_qp.ah.dmac,
2036 qp_attr->ah_attr.roce.dmac);
2037
2038 rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
2039 &qp->qplib_qp.smac[0]);
2040 if (rc)
2041 return rc;
2042
2043 nw_type = rdma_gid_attr_network_type(sgid_attr);
2044 switch (nw_type) {
2045 case RDMA_NETWORK_IPV4:
2046 qp->qplib_qp.nw_type =
2047 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
2048 break;
2049 case RDMA_NETWORK_IPV6:
2050 qp->qplib_qp.nw_type =
2051 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
2052 break;
2053 default:
2054 qp->qplib_qp.nw_type =
2055 CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
2056 break;
2057 }
2058 }
2059
2060 if (qp_attr_mask & IB_QP_PATH_MTU) {
2061 qp->qplib_qp.modify_flags |=
2062 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2063 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2064 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2065 } else if (qp_attr->qp_state == IB_QPS_RTR) {
2066 qp->qplib_qp.modify_flags |=
2067 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2068 qp->qplib_qp.path_mtu =
2069 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2070 qp->qplib_qp.mtu =
2071 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2072 }
2073
2074 if (qp_attr_mask & IB_QP_TIMEOUT) {
2075 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2076 qp->qplib_qp.timeout = qp_attr->timeout;
2077 }
2078 if (qp_attr_mask & IB_QP_RETRY_CNT) {
2079 qp->qplib_qp.modify_flags |=
2080 CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
2081 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2082 }
2083 if (qp_attr_mask & IB_QP_RNR_RETRY) {
2084 qp->qplib_qp.modify_flags |=
2085 CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
2086 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2087 }
2088 if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
2089 qp->qplib_qp.modify_flags |=
2090 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
2091 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2092 }
2093 if (qp_attr_mask & IB_QP_RQ_PSN) {
2094 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2095 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2096 }
2097 if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2098 qp->qplib_qp.modify_flags |=
2099 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
2100 /* Cap the max_rd_atomic to device max */
2101 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2102 dev_attr->max_qp_rd_atom);
2103 }
2104 if (qp_attr_mask & IB_QP_SQ_PSN) {
2105 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2106 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2107 }
2108 if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2109 if (qp_attr->max_dest_rd_atomic >
2110 dev_attr->max_qp_init_rd_atom) {
2111 ibdev_err(&rdev->ibdev,
2112 "max_dest_rd_atomic requested%d is > dev_max%d",
2113 qp_attr->max_dest_rd_atomic,
2114 dev_attr->max_qp_init_rd_atom);
2115 return -EINVAL;
2116 }
2117
2118 qp->qplib_qp.modify_flags |=
2119 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
2120 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2121 }
2122 if (qp_attr_mask & IB_QP_CAP) {
2123 struct bnxt_re_ucontext *uctx =
2124 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2125
2126 qp->qplib_qp.modify_flags |=
2127 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
2128 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
2129 CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
2130 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
2131 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
2132 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2133 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2134 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2135 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2136 (qp_attr->cap.max_inline_data >=
2137 dev_attr->max_inline_data)) {
2138 ibdev_err(&rdev->ibdev,
2139 "Create QP failed - max exceeded");
2140 return -EINVAL;
2141 }
2142 entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
2143 qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
2144 dev_attr->max_qp_wqes + 1);
2145 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2146 qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. An application may
		 * then post one extra entry; allowing this avoids an
		 * unexpected queue-full condition.
		 */
2152 qp->qplib_qp.sq.q_full_delta -= 1;
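		/*
		 * Illustrative arithmetic: if sq.max_wqe works out to 1024
		 * and max_send_wr is 1000, q_full_delta ends up as
		 * 1024 - 1000 - 1 = 23 once the phantom slot is reserved.
		 */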
2153 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2154 if (qp->qplib_qp.rq.max_wqe) {
2155 entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
2156 qp->qplib_qp.rq.max_wqe =
2157 min_t(u32, entries, dev_attr->max_qp_wqes + 1);
2158 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2159 qp_attr->cap.max_recv_wr;
2160 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2161 } else {
2162 /* SRQ was used prior, just ignore the RQ caps */
2163 }
2164 }
2165 if (qp_attr_mask & IB_QP_DEST_QPN) {
2166 qp->qplib_qp.modify_flags |=
2167 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
2168 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2169 }
2170 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2171 if (rc) {
2172 ibdev_err(&rdev->ibdev, "Failed to modify HW QP");
2173 return rc;
2174 }
2175 if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
2176 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2177 return rc;
2178}
2179
2180int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2181 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
2182{
2183 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2184 struct bnxt_re_dev *rdev = qp->rdev;
2185 struct bnxt_qplib_qp *qplib_qp;
2186 int rc;
2187
2188 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
2189 if (!qplib_qp)
2190 return -ENOMEM;
2191
2192 qplib_qp->id = qp->qplib_qp.id;
2193 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2194
2195 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2196 if (rc) {
2197 ibdev_err(&rdev->ibdev, "Failed to query HW QP");
2198 goto out;
2199 }
2200 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2201 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2202 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2203 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2204 qp_attr->pkey_index = qplib_qp->pkey_index;
2205 qp_attr->qkey = qplib_qp->qkey;
2206 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
2207 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
2208 qplib_qp->ah.host_sgid_index,
2209 qplib_qp->ah.hop_limit,
2210 qplib_qp->ah.traffic_class);
2211 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
2212 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
2213 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
2214 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2215 qp_attr->timeout = qplib_qp->timeout;
2216 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2217 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2218 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2219 qp_attr->rq_psn = qplib_qp->rq.psn;
2220 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2221 qp_attr->sq_psn = qplib_qp->sq.psn;
2222 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2223 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2224 IB_SIGNAL_REQ_WR;
2225 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2226
2227 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2228 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2229 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2230 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2231 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2232 qp_init_attr->cap = qp_attr->cap;
2233
2234out:
2235 kfree(qplib_qp);
2236 return rc;
2237}
2238
/* Routine for sending QP1 packets for RoCE V1 and V2
 */
2241static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
2242 const struct ib_send_wr *wr,
2243 struct bnxt_qplib_swqe *wqe,
2244 int payload_size)
2245{
2246 struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
2247 ib_ah);
2248 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2249 const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
2250 struct bnxt_qplib_sge sge;
2251 u8 nw_type;
2252 u16 ether_type;
2253 union ib_gid dgid;
2254 bool is_eth = false;
2255 bool is_vlan = false;
2256 bool is_grh = false;
2257 bool is_udp = false;
2258 u8 ip_version = 0;
2259 u16 vlan_id = 0xFFFF;
2260 void *buf;
2261 int i, rc;
2262
2263 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2264
2265 rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
2266 if (rc)
2267 return rc;
2268
2269 /* Get network header type for this GID */
2270 nw_type = rdma_gid_attr_network_type(sgid_attr);
2271 switch (nw_type) {
2272 case RDMA_NETWORK_IPV4:
2273 nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
2274 break;
2275 case RDMA_NETWORK_IPV6:
2276 nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
2277 break;
2278 default:
2279 nw_type = BNXT_RE_ROCE_V1_PACKET;
2280 break;
2281 }
2282 memcpy(&dgid.raw, &qplib_ah->dgid, 16);
2283 is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
2284 if (is_udp) {
2285 if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
2286 ip_version = 4;
2287 ether_type = ETH_P_IP;
2288 } else {
2289 ip_version = 6;
2290 ether_type = ETH_P_IPV6;
2291 }
2292 is_grh = false;
2293 } else {
2294 ether_type = ETH_P_IBOE;
2295 is_grh = true;
2296 }
2297
2298 is_eth = true;
2299 is_vlan = vlan_id && (vlan_id < 0x1000);
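	/* A vlan_id of 0 or >= 0x1000 (e.g. the 0xFFFF default) means untagged */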
2300
2301 ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
2302 ip_version, is_udp, 0, &qp->qp1_hdr);
2303
2304 /* ETH */
2305 ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
2306 ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);
2307
2308 /* For vlan, check the sgid for vlan existence */
2309
2310 if (!is_vlan) {
2311 qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
2312 } else {
2313 qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
2314 qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
2315 }
2316
2317 if (is_grh || (ip_version == 6)) {
2318 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
2319 sizeof(sgid_attr->gid));
2320 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2321 sizeof(sgid_attr->gid));
2322 qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
2323 }
2324
2325 if (ip_version == 4) {
2326 qp->qp1_hdr.ip4.tos = 0;
2327 qp->qp1_hdr.ip4.id = 0;
2328 qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
2329 qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;
2330
2331 memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
2332 memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
2333 qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
2334 }
2335
2336 if (is_udp) {
2337 qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
2338 qp->qp1_hdr.udp.sport = htons(0x8CD1);
2339 qp->qp1_hdr.udp.csum = 0;
2340 }
2341
2342 /* BTH */
2343 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2344 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2345 qp->qp1_hdr.immediate_present = 1;
2346 } else {
2347 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2348 }
2349 if (wr->send_flags & IB_SEND_SOLICITED)
2350 qp->qp1_hdr.bth.solicited_event = 1;
2351 /* pad_count */
2352 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
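	/*
	 * pad_count rounds the payload up to a 4-byte multiple; e.g. a
	 * 13-byte payload yields (4 - 13) & 3 = 3 pad bytes.
	 */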
2353
2354 /* P_key for QP1 is for all members */
2355 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2356 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2357 qp->qp1_hdr.bth.ack_req = 0;
2358 qp->send_psn++;
2359 qp->send_psn &= BTH_PSN_MASK;
2360 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2361 /* DETH */
	/* Use the privileged Q_Key for QP1 */
2363 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2364 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2365
2366 /* Pack the QP1 to the transmit buffer */
2367 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
2368 if (buf) {
2369 ib_ud_header_pack(&qp->qp1_hdr, buf);
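		/*
		 * Shift the caller's SGEs up by one slot; sg_list[0] is
		 * repurposed below to point at the packed header buffer.
		 */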
2370 for (i = wqe->num_sge; i; i--) {
2371 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
2372 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
2373 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
2374 }
2375
		/*
		 * The max header buf size for IPv6 RoCE V2 is 86, which is
		 * the same as the QP1 SQ header buffer. The header buf size
		 * for IPv4 RoCE V2 can be 66: ETH(14) + VLAN(4) + IP(20) +
		 * UDP(8) + BTH(20). So subtract 20 bytes from the QP1 SQ
		 * header buf size.
		 */
2383 if (is_udp && ip_version == 4)
2384 sge.size -= 20;
		/*
		 * The max header buf size for RoCE V1 is 78: ETH(14) +
		 * VLAN(4) + GRH(40) + BTH(20). So subtract 8 bytes from the
		 * QP1 SQ header buf size.
		 */
2390 if (!is_udp)
2391 sge.size -= 8;
2392
		/* Subtract 4 bytes for non-VLAN packets */
2394 if (!is_vlan)
2395 sge.size -= 4;
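		/*
		 * Net effect (illustrative): an untagged RoCEv2/IPv4 frame
		 * ends up with 86 - 20 - 4 = 62 header bytes, an untagged
		 * RoCE v1 frame with 86 - 8 - 4 = 74.
		 */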
2396
2397 wqe->sg_list[0].addr = sge.addr;
2398 wqe->sg_list[0].lkey = sge.lkey;
2399 wqe->sg_list[0].size = sge.size;
2400 wqe->num_sge++;
2401
2402 } else {
2403 ibdev_err(&qp->rdev->ibdev, "QP1 buffer is empty!");
2404 rc = -ENOMEM;
2405 }
2406 return rc;
2407}
2408
2409/* For the MAD layer, it only provides the recv SGE the size of
2410 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
2411 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
2412 * receive packet (334 bytes) with no VLAN and then copy the GRH
2413 * and the MAD datagram out to the provided SGE.
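 * The 334 bytes break down (by this reading) as ETH(14) + GRH(40) +
 * BTH(12) + DETH(8) + 256 bytes of MAD payload + iCRC(4).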
2414 */
2415static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
2416 const struct ib_recv_wr *wr,
2417 struct bnxt_qplib_swqe *wqe,
2418 int payload_size)
2419{
2420 struct bnxt_re_sqp_entries *sqp_entry;
2421 struct bnxt_qplib_sge ref, sge;
2422 struct bnxt_re_dev *rdev;
2423 u32 rq_prod_index;
2424
2425 rdev = qp->rdev;
2426
2427 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
2428
2429 if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
2430 return -ENOMEM;
2431
2432 /* Create 1 SGE to receive the entire
2433 * ethernet packet
2434 */
2435 /* Save the reference from ULP */
2436 ref.addr = wqe->sg_list[0].addr;
2437 ref.lkey = wqe->sg_list[0].lkey;
2438 ref.size = wqe->sg_list[0].size;
2439
2440 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
2441
2442 /* SGE 1 */
2443 wqe->sg_list[0].addr = sge.addr;
2444 wqe->sg_list[0].lkey = sge.lkey;
2445 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
2446 sge.size -= wqe->sg_list[0].size;
2447
2448 sqp_entry->sge.addr = ref.addr;
2449 sqp_entry->sge.lkey = ref.lkey;
2450 sqp_entry->sge.size = ref.size;
2451 /* Store the wrid for reporting completion */
2452 sqp_entry->wrid = wqe->wr_id;
	/* Change wqe->wr_id to the table index */
2454 wqe->wr_id = rq_prod_index;
2455 return 0;
2456}
2457
2458static int is_ud_qp(struct bnxt_re_qp *qp)
2459{
2460 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
2461 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
2462}
2463
2464static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
2465 const struct ib_send_wr *wr,
2466 struct bnxt_qplib_swqe *wqe)
2467{
2468 struct bnxt_re_ah *ah = NULL;
2469
2470 if (is_ud_qp(qp)) {
2471 ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
2472 wqe->send.q_key = ud_wr(wr)->remote_qkey;
2473 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
2474 wqe->send.avid = ah->qplib_ah.id;
2475 }
2476 switch (wr->opcode) {
2477 case IB_WR_SEND:
2478 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
2479 break;
2480 case IB_WR_SEND_WITH_IMM:
2481 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
2482 wqe->send.imm_data = wr->ex.imm_data;
2483 break;
2484 case IB_WR_SEND_WITH_INV:
2485 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
2486 wqe->send.inv_key = wr->ex.invalidate_rkey;
2487 break;
2488 default:
2489 return -EINVAL;
2490 }
2491 if (wr->send_flags & IB_SEND_SIGNALED)
2492 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2493 if (wr->send_flags & IB_SEND_FENCE)
2494 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2495 if (wr->send_flags & IB_SEND_SOLICITED)
2496 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2497 if (wr->send_flags & IB_SEND_INLINE)
2498 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2499
2500 return 0;
2501}
2502
2503static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
2504 struct bnxt_qplib_swqe *wqe)
2505{
2506 switch (wr->opcode) {
2507 case IB_WR_RDMA_WRITE:
2508 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
2509 break;
2510 case IB_WR_RDMA_WRITE_WITH_IMM:
2511 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
2512 wqe->rdma.imm_data = wr->ex.imm_data;
2513 break;
2514 case IB_WR_RDMA_READ:
2515 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
2516 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
2517 break;
2518 default:
2519 return -EINVAL;
2520 }
2521 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
2522 wqe->rdma.r_key = rdma_wr(wr)->rkey;
2523 if (wr->send_flags & IB_SEND_SIGNALED)
2524 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2525 if (wr->send_flags & IB_SEND_FENCE)
2526 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2527 if (wr->send_flags & IB_SEND_SOLICITED)
2528 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2529 if (wr->send_flags & IB_SEND_INLINE)
2530 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
2531
2532 return 0;
2533}
2534
2535static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
2536 struct bnxt_qplib_swqe *wqe)
2537{
2538 switch (wr->opcode) {
2539 case IB_WR_ATOMIC_CMP_AND_SWP:
2540 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
2541 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2542 wqe->atomic.swap_data = atomic_wr(wr)->swap;
2543 break;
2544 case IB_WR_ATOMIC_FETCH_AND_ADD:
2545 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
2546 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
2547 break;
2548 default:
2549 return -EINVAL;
2550 }
2551 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
2552 wqe->atomic.r_key = atomic_wr(wr)->rkey;
2553 if (wr->send_flags & IB_SEND_SIGNALED)
2554 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2555 if (wr->send_flags & IB_SEND_FENCE)
2556 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2557 if (wr->send_flags & IB_SEND_SOLICITED)
2558 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2559 return 0;
2560}
2561
2562static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
2563 struct bnxt_qplib_swqe *wqe)
2564{
2565 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
2566 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
2567
2568 if (wr->send_flags & IB_SEND_SIGNALED)
2569 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2570 if (wr->send_flags & IB_SEND_SOLICITED)
2571 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
2572
2573 return 0;
2574}
2575
2576static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
2577 struct bnxt_qplib_swqe *wqe)
2578{
2579 struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
2580 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
2581 int access = wr->access;
2582
2583 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
2584 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
2585 wqe->frmr.page_list = mr->pages;
2586 wqe->frmr.page_list_len = mr->npages;
2587 wqe->frmr.levels = qplib_frpl->hwq.level;
2588 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
2589
2590 if (wr->wr.send_flags & IB_SEND_SIGNALED)
2591 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
2592
2593 if (access & IB_ACCESS_LOCAL_WRITE)
2594 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2595 if (access & IB_ACCESS_REMOTE_READ)
2596 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
2597 if (access & IB_ACCESS_REMOTE_WRITE)
2598 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
2599 if (access & IB_ACCESS_REMOTE_ATOMIC)
2600 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
2601 if (access & IB_ACCESS_MW_BIND)
2602 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
2603
2604 wqe->frmr.l_key = wr->key;
2605 wqe->frmr.length = wr->mr->length;
2606 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
2607 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
2608 wqe->frmr.va = wr->mr->iova;
2609 return 0;
2610}
2611
2612static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
2613 const struct ib_send_wr *wr,
2614 struct bnxt_qplib_swqe *wqe)
2615{
2616 /* Copy the inline data to the data field */
2617 u8 *in_data;
2618 u32 i, sge_len;
2619 void *sge_addr;
2620
2621 in_data = wqe->inline_data;
2622 for (i = 0; i < wr->num_sge; i++) {
2623 sge_addr = (void *)(unsigned long)
2624 wr->sg_list[i].addr;
2625 sge_len = wr->sg_list[i].length;
2626
2627 if ((sge_len + wqe->inline_len) >
2628 BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
2629 ibdev_err(&rdev->ibdev,
2630 "Inline data size requested > supported value");
2631 return -EINVAL;
2632 }

		memcpy(in_data, sge_addr, sge_len);
		in_data += sge_len;
		wqe->inline_len += sge_len;
2638 }
2639 return wqe->inline_len;
2640}
2641
2642static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2643 const struct ib_send_wr *wr,
2644 struct bnxt_qplib_swqe *wqe)
2645{
2646 int payload_sz = 0;
2647
2648 if (wr->send_flags & IB_SEND_INLINE)
2649 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2650 else
2651 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2652 wqe->num_sge);
2653
2654 return payload_sz;
2655}
2656
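/*
 * Defensive workaround (as coded below): once a UD/GSI/raw-Ethertype QP
 * has posted BNXT_RE_UD_QP_HW_STALL WQEs, nudge it with a modify-to-RTS
 * and reset the counter so the hardware does not stall.
 */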
2657static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
2658{
2659 if ((qp->ib_qp.qp_type == IB_QPT_UD ||
2660 qp->ib_qp.qp_type == IB_QPT_GSI ||
2661 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
2662 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
2663 int qp_attr_mask;
2664 struct ib_qp_attr qp_attr;
2665
2666 qp_attr_mask = IB_QP_STATE;
2667 qp_attr.qp_state = IB_QPS_RTS;
2668 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
2669 qp->qplib_qp.wqe_cnt = 0;
2670 }
2671}
2672
2673static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
2674 struct bnxt_re_qp *qp,
2675 const struct ib_send_wr *wr)
2676{
2677 int rc = 0, payload_sz = 0;
2678 unsigned long flags;
2679
2680 spin_lock_irqsave(&qp->sq_lock, flags);
2681 while (wr) {
2682 struct bnxt_qplib_swqe wqe = {};
2683
2684 /* Common */
2685 wqe.num_sge = wr->num_sge;
2686 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2687 ibdev_err(&rdev->ibdev,
2688 "Limit exceeded for Send SGEs");
2689 rc = -EINVAL;
2690 goto bad;
2691 }
2692
2693 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2694 if (payload_sz < 0) {
2695 rc = -EINVAL;
2696 goto bad;
2697 }
2698 wqe.wr_id = wr->wr_id;
2699
2700 wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
2701
2702 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2703 if (!rc)
2704 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2705bad:
2706 if (rc) {
2707 ibdev_err(&rdev->ibdev,
2708 "Post send failed opcode = %#x rc = %d",
2709 wr->opcode, rc);
2710 break;
2711 }
2712 wr = wr->next;
2713 }
2714 bnxt_qplib_post_send_db(&qp->qplib_qp);
2715 bnxt_ud_qp_hw_stall_workaround(qp);
2716 spin_unlock_irqrestore(&qp->sq_lock, flags);
2717 return rc;
2718}
2719
2720static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
2721{
2722 /* Need unconditional fence for non-wire memory opcode
2723 * to work as expected.
2724 */
2725 if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
2726 wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
2727 wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
2728 wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
2729 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
2730}
2731
2732int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
2733 const struct ib_send_wr **bad_wr)
2734{
2735 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2736 struct bnxt_qplib_swqe wqe;
2737 int rc = 0, payload_sz = 0;
2738 unsigned long flags;
2739
2740 spin_lock_irqsave(&qp->sq_lock, flags);
2741 while (wr) {
		/* Housekeeping */
2743 memset(&wqe, 0, sizeof(wqe));
2744
2745 /* Common */
2746 wqe.num_sge = wr->num_sge;
2747 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
2748 ibdev_err(&qp->rdev->ibdev,
2749 "Limit exceeded for Send SGEs");
2750 rc = -EINVAL;
2751 goto bad;
2752 }
2753
2754 payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
2755 if (payload_sz < 0) {
2756 rc = -EINVAL;
2757 goto bad;
2758 }
2759 wqe.wr_id = wr->wr_id;
2760
2761 switch (wr->opcode) {
2762 case IB_WR_SEND:
2763 case IB_WR_SEND_WITH_IMM:
2764 if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
2765 rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
2766 payload_sz);
2767 if (rc)
2768 goto bad;
2769 wqe.rawqp1.lflags |=
2770 SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2771 }
2772 switch (wr->send_flags) {
2773 case IB_SEND_IP_CSUM:
2774 wqe.rawqp1.lflags |=
2775 SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2776 break;
2777 default:
2778 break;
2779 }
2780 fallthrough;
2781 case IB_WR_SEND_WITH_INV:
2782 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
2783 break;
2784 case IB_WR_RDMA_WRITE:
2785 case IB_WR_RDMA_WRITE_WITH_IMM:
2786 case IB_WR_RDMA_READ:
2787 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
2788 break;
2789 case IB_WR_ATOMIC_CMP_AND_SWP:
2790 case IB_WR_ATOMIC_FETCH_AND_ADD:
2791 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
2792 break;
2793 case IB_WR_RDMA_READ_WITH_INV:
2794 ibdev_err(&qp->rdev->ibdev,
2795 "RDMA Read with Invalidate is not supported");
2796 rc = -EINVAL;
2797 goto bad;
2798 case IB_WR_LOCAL_INV:
2799 rc = bnxt_re_build_inv_wqe(wr, &wqe);
2800 break;
2801 case IB_WR_REG_MR:
2802 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
2803 break;
2804 default:
2805 /* Unsupported WRs */
2806 ibdev_err(&qp->rdev->ibdev,
2807 "WR (%#x) is not supported", wr->opcode);
2808 rc = -EINVAL;
2809 goto bad;
2810 }
2811 if (!rc) {
2812 if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2813 bnxt_re_legacy_set_uc_fence(&wqe);
2814 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
2815 }
2816bad:
2817 if (rc) {
2818 ibdev_err(&qp->rdev->ibdev,
2819 "post_send failed op:%#x qps = %#x rc = %d\n",
2820 wr->opcode, qp->qplib_qp.state, rc);
2821 *bad_wr = wr;
2822 break;
2823 }
2824 wr = wr->next;
2825 }
2826 bnxt_qplib_post_send_db(&qp->qplib_qp);
2827 bnxt_ud_qp_hw_stall_workaround(qp);
2828 spin_unlock_irqrestore(&qp->sq_lock, flags);
2829
2830 return rc;
2831}
2832
2833static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
2834 struct bnxt_re_qp *qp,
2835 const struct ib_recv_wr *wr)
2836{
2837 struct bnxt_qplib_swqe wqe;
2838 int rc = 0;
2839
2840 while (wr) {
		/* Housekeeping */
2842 memset(&wqe, 0, sizeof(wqe));
2843
2844 /* Common */
2845 wqe.num_sge = wr->num_sge;
2846 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2847 ibdev_err(&rdev->ibdev,
2848 "Limit exceeded for Receive SGEs");
2849 rc = -EINVAL;
2850 break;
2851 }
2852 bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
2853 wqe.wr_id = wr->wr_id;
2854 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2855
2856 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2857 if (rc)
2858 break;
2859
2860 wr = wr->next;
2861 }
2862 if (!rc)
2863 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2864 return rc;
2865}
2866
2867int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
2868 const struct ib_recv_wr **bad_wr)
2869{
2870 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
2871 struct bnxt_qplib_swqe wqe;
2872 int rc = 0, payload_sz = 0;
2873 unsigned long flags;
2874 u32 count = 0;
2875
2876 spin_lock_irqsave(&qp->rq_lock, flags);
2877 while (wr) {
		/* Housekeeping */
2879 memset(&wqe, 0, sizeof(wqe));
2880
2881 /* Common */
2882 wqe.num_sge = wr->num_sge;
2883 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
2884 ibdev_err(&qp->rdev->ibdev,
2885 "Limit exceeded for Receive SGEs");
2886 rc = -EINVAL;
2887 *bad_wr = wr;
2888 break;
2889 }
2890
2891 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
2892 wr->num_sge);
2893 wqe.wr_id = wr->wr_id;
2894 wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
2895
2896 if (ib_qp->qp_type == IB_QPT_GSI &&
2897 qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
2898 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
2899 payload_sz);
2900 if (!rc)
2901 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
2902 if (rc) {
2903 *bad_wr = wr;
2904 break;
2905 }
2906
		/* Ring the DB once the number of posted RQEs reaches the threshold */
2908 if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
2909 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2910 count = 0;
2911 }
2912
2913 wr = wr->next;
2914 }
2915
2916 if (count)
2917 bnxt_qplib_post_recv_db(&qp->qplib_qp);
2918
2919 spin_unlock_irqrestore(&qp->rq_lock, flags);
2920
2921 return rc;
2922}
2923
2924/* Completion Queues */
2925int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
2926{
2927 struct bnxt_qplib_chip_ctx *cctx;
2928 struct bnxt_qplib_nq *nq;
2929 struct bnxt_re_dev *rdev;
2930 struct bnxt_re_cq *cq;
2931
2932 cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
2933 rdev = cq->rdev;
2934 nq = cq->qplib_cq.nq;
2935 cctx = rdev->chip_ctx;
2936
2937 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
2938 free_page((unsigned long)cq->uctx_cq_page);
2939 hash_del(&cq->hash_entry);
2940 }
2941 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
2942 ib_umem_release(cq->umem);
2943
2944 atomic_dec(&rdev->stats.res.cq_count);
2945 nq->budget--;
2946 kfree(cq->cql);
2947 return 0;
2948}
2949
2950int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
2951 struct ib_udata *udata)
2952{
2953 struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
2954 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
2955 struct bnxt_re_ucontext *uctx =
2956 rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
2957 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
2958 struct bnxt_qplib_chip_ctx *cctx;
2959 struct bnxt_qplib_nq *nq = NULL;
2960 unsigned int nq_alloc_cnt;
2961 int cqe = attr->cqe;
2962 int rc, entries;
2963 u32 active_cqs;
2964
2965 if (attr->flags)
2966 return -EOPNOTSUPP;
2967
2968 /* Validate CQ fields */
2969 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
2970 ibdev_err(&rdev->ibdev, "Failed to create CQ -max exceeded");
2971 return -EINVAL;
2972 }
2973
2974 cq->rdev = rdev;
2975 cctx = rdev->chip_ctx;
2976 cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
2977
2978 entries = bnxt_re_init_depth(cqe + 1, uctx);
2979 if (entries > dev_attr->max_cq_wqes + 1)
2980 entries = dev_attr->max_cq_wqes + 1;
2981
2982 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
2983 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
2984 if (udata) {
		struct bnxt_re_cq_req req;

		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
2987 rc = -EFAULT;
2988 goto fail;
2989 }
2990
2991 cq->umem = ib_umem_get(&rdev->ibdev, req.cq_va,
2992 entries * sizeof(struct cq_base),
2993 IB_ACCESS_LOCAL_WRITE);
2994 if (IS_ERR(cq->umem)) {
2995 rc = PTR_ERR(cq->umem);
2996 goto fail;
2997 }
2998 cq->qplib_cq.sg_info.umem = cq->umem;
2999 cq->qplib_cq.dpi = &uctx->dpi;
3000 } else {
3001 cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
3002 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3003 GFP_KERNEL);
3004 if (!cq->cql) {
3005 rc = -ENOMEM;
3006 goto fail;
3007 }
3008
3009 cq->qplib_cq.dpi = &rdev->dpi_privileged;
3010 }
3011 /*
	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
	 * used to derive the NQ index.
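	 * Illustrative example: with num_msix = 9, CQs cycle over
	 * nq[0..7] as nq_alloc_cnt increments.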
3014 */
3015 nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
3016 nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
3017 cq->qplib_cq.max_wqe = entries;
3018 cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
3019 cq->qplib_cq.nq = nq;
3020
3021 rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
3022 if (rc) {
3023 ibdev_err(&rdev->ibdev, "Failed to create HW CQ");
3024 goto fail;
3025 }
3026
3027 cq->ib_cq.cqe = entries;
3028 cq->cq_period = cq->qplib_cq.period;
3029 nq->budget++;
3030
3031 active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
3032 if (active_cqs > rdev->stats.res.cq_watermark)
3033 rdev->stats.res.cq_watermark = active_cqs;
3034 spin_lock_init(&cq->cq_lock);
3035
3036 if (udata) {
3037 struct bnxt_re_cq_resp resp = {};
3038
3039 if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
3040 hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
3041 /* Allocate a page */
3042 cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
3043 if (!cq->uctx_cq_page) {
3044 rc = -ENOMEM;
3045 goto c2fail;
3046 }
3047 resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
3048 }
3049 resp.cqid = cq->qplib_cq.id;
3050 resp.tail = cq->qplib_cq.hwq.cons;
3051 resp.phase = cq->qplib_cq.period;
3052 resp.rsvd = 0;
3053 rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
3054 if (rc) {
3055 ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
3056 bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3057 goto free_mem;
3058 }
3059 }
3060
3061 return 0;
3062
3063free_mem:
3064 free_page((unsigned long)cq->uctx_cq_page);
3065c2fail:
3066 ib_umem_release(cq->umem);
3067fail:
3068 kfree(cq->cql);
3069 return rc;
3070}
3071
3072static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
3073{
3074 struct bnxt_re_dev *rdev = cq->rdev;
3075
3076 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3077
3078 cq->qplib_cq.max_wqe = cq->resize_cqe;
3079 if (cq->resize_umem) {
3080 ib_umem_release(cq->umem);
3081 cq->umem = cq->resize_umem;
3082 cq->resize_umem = NULL;
3083 cq->resize_cqe = 0;
3084 }
3085}
3086
3087int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
3088{
3089 struct bnxt_qplib_sg_info sg_info = {};
3090 struct bnxt_qplib_dpi *orig_dpi = NULL;
3091 struct bnxt_qplib_dev_attr *dev_attr;
3092 struct bnxt_re_ucontext *uctx = NULL;
3093 struct bnxt_re_resize_cq_req req;
3094 struct bnxt_re_dev *rdev;
3095 struct bnxt_re_cq *cq;
3096 int rc, entries;
3097
3098 cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
3099 rdev = cq->rdev;
3100 dev_attr = &rdev->dev_attr;
3101 if (!ibcq->uobject) {
3102 ibdev_err(&rdev->ibdev, "Kernel CQ Resize not supported");
3103 return -EOPNOTSUPP;
3104 }
3105
3106 if (cq->resize_umem) {
3107 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - Busy",
3108 cq->qplib_cq.id);
3109 return -EBUSY;
3110 }
3111
	/* Check that the requested CQ depth is within the supported range */
3113 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3114 ibdev_err(&rdev->ibdev, "Resize CQ %#x failed - out of range cqe %d",
3115 cq->qplib_cq.id, cqe);
3116 return -EINVAL;
3117 }
3118
3119 uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
3120 entries = bnxt_re_init_depth(cqe + 1, uctx);
3121 if (entries > dev_attr->max_cq_wqes + 1)
3122 entries = dev_attr->max_cq_wqes + 1;
3123
3124 /* uverbs consumer */
3125 if (ib_copy_from_udata(&req, udata, sizeof(req))) {
3126 rc = -EFAULT;
3127 goto fail;
3128 }
3129
3130 cq->resize_umem = ib_umem_get(&rdev->ibdev, req.cq_va,
3131 entries * sizeof(struct cq_base),
3132 IB_ACCESS_LOCAL_WRITE);
3133 if (IS_ERR(cq->resize_umem)) {
3134 rc = PTR_ERR(cq->resize_umem);
3135 cq->resize_umem = NULL;
3136 ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
3137 __func__, rc);
3138 goto fail;
3139 }
3140 cq->resize_cqe = entries;
3141 memcpy(&sg_info, &cq->qplib_cq.sg_info, sizeof(sg_info));
3142 orig_dpi = cq->qplib_cq.dpi;
3143
3144 cq->qplib_cq.sg_info.umem = cq->resize_umem;
3145 cq->qplib_cq.sg_info.pgsize = PAGE_SIZE;
3146 cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
3147 cq->qplib_cq.dpi = &uctx->dpi;
3148
3149 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
3150 if (rc) {
3151 ibdev_err(&rdev->ibdev, "Resize HW CQ %#x failed!",
3152 cq->qplib_cq.id);
3153 goto fail;
3154 }
3155
3156 cq->ib_cq.cqe = cq->resize_cqe;
3157 atomic_inc(&rdev->stats.res.resize_count);
3158
3159 return 0;
3160
3161fail:
3162 if (cq->resize_umem) {
3163 ib_umem_release(cq->resize_umem);
3164 cq->resize_umem = NULL;
3165 cq->resize_cqe = 0;
3166 memcpy(&cq->qplib_cq.sg_info, &sg_info, sizeof(sg_info));
3167 cq->qplib_cq.dpi = orig_dpi;
3168 }
3169 return rc;
3170}
3171
3172static u8 __req_to_ib_wc_status(u8 qstatus)
3173{
3174 switch (qstatus) {
3175 case CQ_REQ_STATUS_OK:
3176 return IB_WC_SUCCESS;
3177 case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
3178 return IB_WC_BAD_RESP_ERR;
3179 case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
3180 return IB_WC_LOC_LEN_ERR;
3181 case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
3182 return IB_WC_LOC_QP_OP_ERR;
3183 case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
3184 return IB_WC_LOC_PROT_ERR;
3185 case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
3186 return IB_WC_GENERAL_ERR;
3187 case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
3188 return IB_WC_REM_INV_REQ_ERR;
3189 case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
3190 return IB_WC_REM_ACCESS_ERR;
3191 case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
3192 return IB_WC_REM_OP_ERR;
3193 case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
3194 return IB_WC_RNR_RETRY_EXC_ERR;
3195 case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
3196 return IB_WC_RETRY_EXC_ERR;
3197 case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
3198 return IB_WC_WR_FLUSH_ERR;
3199 default:
3200 return IB_WC_GENERAL_ERR;
3201 }
3203}
3204
3205static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
3206{
3207 switch (qstatus) {
3208 case CQ_RES_RAWETH_QP1_STATUS_OK:
3209 return IB_WC_SUCCESS;
3210 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
3211 return IB_WC_LOC_ACCESS_ERR;
3212 case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
3213 return IB_WC_LOC_LEN_ERR;
3214 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
3215 return IB_WC_LOC_PROT_ERR;
3216 case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
3217 return IB_WC_LOC_QP_OP_ERR;
3218 case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
3219 return IB_WC_GENERAL_ERR;
3220 case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
3221 return IB_WC_WR_FLUSH_ERR;
3222 case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
3223 return IB_WC_WR_FLUSH_ERR;
3224 default:
3225 return IB_WC_GENERAL_ERR;
3226 }
3227}
3228
3229static u8 __rc_to_ib_wc_status(u8 qstatus)
3230{
3231 switch (qstatus) {
3232 case CQ_RES_RC_STATUS_OK:
3233 return IB_WC_SUCCESS;
3234 case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
3235 return IB_WC_LOC_ACCESS_ERR;
3236 case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
3237 return IB_WC_LOC_LEN_ERR;
3238 case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
3239 return IB_WC_LOC_PROT_ERR;
3240 case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
3241 return IB_WC_LOC_QP_OP_ERR;
3242 case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
3243 return IB_WC_GENERAL_ERR;
3244 case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
3245 return IB_WC_REM_INV_REQ_ERR;
3246 case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
3247 return IB_WC_WR_FLUSH_ERR;
3248 case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
3249 return IB_WC_WR_FLUSH_ERR;
3250 default:
3251 return IB_WC_GENERAL_ERR;
3252 }
3253}
3254
3255static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
3256{
3257 switch (cqe->type) {
3258 case BNXT_QPLIB_SWQE_TYPE_SEND:
3259 wc->opcode = IB_WC_SEND;
3260 break;
3261 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
3262 wc->opcode = IB_WC_SEND;
3263 wc->wc_flags |= IB_WC_WITH_IMM;
3264 break;
3265 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
3266 wc->opcode = IB_WC_SEND;
3267 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3268 break;
3269 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
3270 wc->opcode = IB_WC_RDMA_WRITE;
3271 break;
3272 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
3273 wc->opcode = IB_WC_RDMA_WRITE;
3274 wc->wc_flags |= IB_WC_WITH_IMM;
3275 break;
3276 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
3277 wc->opcode = IB_WC_RDMA_READ;
3278 break;
3279 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
3280 wc->opcode = IB_WC_COMP_SWAP;
3281 break;
3282 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
3283 wc->opcode = IB_WC_FETCH_ADD;
3284 break;
3285 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
3286 wc->opcode = IB_WC_LOCAL_INV;
3287 break;
3288 case BNXT_QPLIB_SWQE_TYPE_REG_MR:
3289 wc->opcode = IB_WC_REG_MR;
3290 break;
3291 default:
3292 wc->opcode = IB_WC_SEND;
3293 break;
3294 }
3295
3296 wc->status = __req_to_ib_wc_status(cqe->status);
3297}
3298
3299static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
3300 u16 raweth_qp1_flags2)
3301{
3302 bool is_ipv6 = false, is_ipv4 = false;
3303
	/* raweth_qp1_flags bits 9:6 indicate the itype */
3305 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3306 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
3307 return -1;
3308
	if (raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
	    raweth_qp1_flags2 &
	    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
		if (raweth_qp1_flags2 &
		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
			is_ipv6 = true;
		else
			is_ipv4 = true;
		return is_ipv6 ? BNXT_RE_ROCEV2_IPV6_PACKET :
				 BNXT_RE_ROCEV2_IPV4_PACKET;
	}

	return BNXT_RE_ROCE_V1_PACKET;
3323}
3324
3325static int bnxt_re_to_ib_nw_type(int nw_type)
3326{
3327 u8 nw_hdr_type = 0xFF;
3328
3329 switch (nw_type) {
3330 case BNXT_RE_ROCE_V1_PACKET:
3331 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
3332 break;
3333 case BNXT_RE_ROCEV2_IPV4_PACKET:
3334 nw_hdr_type = RDMA_NETWORK_IPV4;
3335 break;
3336 case BNXT_RE_ROCEV2_IPV6_PACKET:
3337 nw_hdr_type = RDMA_NETWORK_IPV6;
3338 break;
3339 }
3340 return nw_hdr_type;
3341}
3342
3343static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
3344 void *rq_hdr_buf)
3345{
3346 u8 *tmp_buf = NULL;
3347 struct ethhdr *eth_hdr;
3348 u16 eth_type;
3349 bool rc = false;
3350
3351 tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If the destination MAC is not the same as the interface MAC, this
	 * could be a loopback or a multicast address; check whether it is a
	 * loopback packet.
	 */
3357 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
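		/*
		 * Loopback frames carry 4 bytes of internal header ahead of
		 * the Ethernet header (see bnxt_re_process_raw_qp_pkt_rx());
		 * skip those before parsing.
		 */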
3358 tmp_buf += 4;
3359 /* Check the ether type */
3360 eth_hdr = (struct ethhdr *)tmp_buf;
3361 eth_type = ntohs(eth_hdr->h_proto);
3362 switch (eth_type) {
3363 case ETH_P_IBOE:
3364 rc = true;
3365 break;
3366 case ETH_P_IP:
3367 case ETH_P_IPV6: {
3368 u32 len;
3369 struct udphdr *udp_hdr;
3370
3371 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
3372 sizeof(struct ipv6hdr));
3373 tmp_buf += sizeof(struct ethhdr) + len;
3374 udp_hdr = (struct udphdr *)tmp_buf;
3375 if (ntohs(udp_hdr->dest) ==
3376 ROCE_V2_UDP_DPORT)
3377 rc = true;
3378 break;
3379 }
3380 default:
3381 break;
3382 }
3383 }
3384
3385 return rc;
3386}
3387
3388static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
3389 struct bnxt_qplib_cqe *cqe)
3390{
3391 struct bnxt_re_dev *rdev = gsi_qp->rdev;
3392 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3393 struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
3394 dma_addr_t shrq_hdr_buf_map;
3395 struct ib_sge s_sge[2] = {};
3396 struct ib_sge r_sge[2] = {};
3397 struct bnxt_re_ah *gsi_sah;
3398 struct ib_recv_wr rwr = {};
3399 dma_addr_t rq_hdr_buf_map;
3400 struct ib_ud_wr udwr = {};
3401 struct ib_send_wr *swr;
3402 u32 skip_bytes = 0;
3403 int pkt_type = 0;
3404 void *rq_hdr_buf;
3405 u32 offset = 0;
3406 u32 tbl_idx;
3407 int rc;
3408
3409 swr = &udwr.wr;
3410 tbl_idx = cqe->wr_id;
3411
3412 rq_hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf +
3413 (tbl_idx * gsi_qp->qplib_qp.rq_hdr_buf_size);
3414 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3415 tbl_idx);
3416
3417 /* Shadow QP header buffer */
3418 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
3419 tbl_idx);
3420 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3421
3422 /* Store this cqe */
3423 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
3424 sqp_entry->qp1_qp = gsi_qp;
3425
3426 /* Find packet type from the cqe */
3427
3428 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
3429 cqe->raweth_qp1_flags2);
3430 if (pkt_type < 0) {
3431 ibdev_err(&rdev->ibdev, "Invalid packet\n");
3432 return -EINVAL;
3433 }
3434
3435 /* Adjust the offset for the user buffer and post in the rq */
3436
3437 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
3438 offset = 20;
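	/*
	 * By this reading: a 20-byte IPv4 header stands in for the 40-byte
	 * GRH, so the received header data sits 20 bytes into the GRH-sized
	 * area of the consumer buffer.
	 */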
3439
3440 /*
3441 * QP1 loopback packet has 4 bytes of internal header before
3442 * ether header. Skip these four bytes.
3443 */
3444 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
3445 skip_bytes = 4;
3446
	/* First send SGE. Skip the ether header. */
3448 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
3449 + skip_bytes;
3450 s_sge[0].lkey = 0xFFFFFFFF;
3451 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
3452 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
3453
3454 /* Second Send SGE */
3455 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
3456 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
3457 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
3458 s_sge[1].addr += 8;
3459 s_sge[1].lkey = 0xFFFFFFFF;
3460 s_sge[1].length = 256;
3461
3462 /* First recv SGE */
3463
3464 r_sge[0].addr = shrq_hdr_buf_map;
3465 r_sge[0].lkey = 0xFFFFFFFF;
3466 r_sge[0].length = 40;
3467
3468 r_sge[1].addr = sqp_entry->sge.addr + offset;
3469 r_sge[1].lkey = sqp_entry->sge.lkey;
3470 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
3471
3472 /* Create receive work request */
3473 rwr.num_sge = 2;
3474 rwr.sg_list = r_sge;
3475 rwr.wr_id = tbl_idx;
3476 rwr.next = NULL;
3477
3478 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
3479 if (rc) {
3480 ibdev_err(&rdev->ibdev,
3481 "Failed to post Rx buffers to shadow QP");
3482 return -ENOMEM;
3483 }
3484
3485 swr->num_sge = 2;
3486 swr->sg_list = s_sge;
3487 swr->wr_id = tbl_idx;
3488 swr->opcode = IB_WR_SEND;
3489 swr->next = NULL;
3490 gsi_sah = rdev->gsi_ctx.gsi_sah;
3491 udwr.ah = &gsi_sah->ib_ah;
3492 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
3493 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
3494
3495 /* post data received in the send queue */
3496 return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
3497}
3498
3499static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
3500 struct bnxt_qplib_cqe *cqe)
3501{
3502 wc->opcode = IB_WC_RECV;
3503 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
3504 wc->wc_flags |= IB_WC_GRH;
3505}
3506
3507static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
3508 u16 vlan_id)
3509{
	/*
	 * Check if the VLAN is configured in the host. If it is not
	 * configured, it can be a transparent VLAN, so don't report the
	 * VLAN id.
	 */
3514 if (!__vlan_find_dev_deep_rcu(rdev->netdev,
3515 htons(ETH_P_8021Q), vlan_id))
3516 return false;
3517 return true;
3518}
3519
3520static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
3521 u16 *vid, u8 *sl)
3522{
3523 bool ret = false;
3524 u32 metadata;
3525 u16 tpid;
3526
3527 metadata = orig_cqe->raweth_qp1_metadata;
3528 if (orig_cqe->raweth_qp1_flags2 &
3529 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
3530 tpid = ((metadata &
3531 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
3532 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
3533 if (tpid == ETH_P_8021Q) {
3534 *vid = metadata &
3535 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
3536 *sl = (metadata &
3537 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
3538 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
3539 ret = true;
3540 }
3541 }
3542
3543 return ret;
3544}
3545
3546static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3547 struct bnxt_qplib_cqe *cqe)
3548{
3549 wc->opcode = IB_WC_RECV;
3550 wc->status = __rc_to_ib_wc_status(cqe->status);
3551
3552 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3553 wc->wc_flags |= IB_WC_WITH_IMM;
3554 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3555 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3556 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3557 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3558 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3559}
3560
3561static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
3562 struct ib_wc *wc,
3563 struct bnxt_qplib_cqe *cqe)
3564{
3565 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
3566 struct bnxt_re_qp *gsi_qp = NULL;
3567 struct bnxt_qplib_cqe *orig_cqe = NULL;
3568 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3569 int nw_type;
3570 u32 tbl_idx;
3571 u16 vlan_id;
3572 u8 sl;
3573
3574 tbl_idx = cqe->wr_id;
3575
3576 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
3577 gsi_qp = sqp_entry->qp1_qp;
3578 orig_cqe = &sqp_entry->cqe;
3579
3580 wc->wr_id = sqp_entry->wrid;
3581 wc->byte_len = orig_cqe->length;
3582 wc->qp = &gsi_qp->ib_qp;
3583
3584 wc->ex.imm_data = orig_cqe->immdata;
3585 wc->src_qp = orig_cqe->src_qp;
3586 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3587 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3588 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3589 wc->vlan_id = vlan_id;
3590 wc->sl = sl;
3591 wc->wc_flags |= IB_WC_WITH_VLAN;
3592 }
3593 }
3594 wc->port_num = 1;
3595 wc->vendor_err = orig_cqe->status;
3596
3597 wc->opcode = IB_WC_RECV;
3598 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3599 wc->wc_flags |= IB_WC_GRH;
3600
3601 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3602 orig_cqe->raweth_qp1_flags2);
3603 if (nw_type >= 0) {
3604 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3605 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3606 }
3607}
3608
3609static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3610 struct ib_wc *wc,
3611 struct bnxt_qplib_cqe *cqe)
3612{
3613 struct bnxt_re_dev *rdev;
3614 u16 vlan_id = 0;
3615 u8 nw_type;
3616
3617 rdev = qp->rdev;
3618 wc->opcode = IB_WC_RECV;
3619 wc->status = __rc_to_ib_wc_status(cqe->status);
3620
3621 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3622 wc->wc_flags |= IB_WC_WITH_IMM;
3623 /* report only on GSI QP for Thor */
3624 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3625 wc->wc_flags |= IB_WC_GRH;
3626 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3627 wc->wc_flags |= IB_WC_WITH_SMAC;
		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN)
			vlan_id = (cqe->cfa_meta & 0xFFF);
3631 /* Mark only if vlan_id is non zero */
3632 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
3633 wc->vlan_id = vlan_id;
3634 wc->wc_flags |= IB_WC_WITH_VLAN;
3635 }
3636 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3637 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3638 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3639 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3640 }
}
3643
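/*
 * Consume the SQ slot reserved via q_full_delta by posting the "phantom"
 * WQE (a fence memory-window bind); see the reservation note in
 * bnxt_re_modify_qp().
 */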
3644static int send_phantom_wqe(struct bnxt_re_qp *qp)
3645{
3646 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3647 unsigned long flags;
3648 int rc;
3649
3650 spin_lock_irqsave(&qp->sq_lock, flags);
3651
3652 rc = bnxt_re_bind_fence_mw(lib_qp);
3653 if (!rc) {
3654 lib_qp->sq.phantom_wqe_cnt++;
3655 ibdev_dbg(&qp->rdev->ibdev,
3656 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3657 lib_qp->id, lib_qp->sq.hwq.prod,
3658 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3659 lib_qp->sq.phantom_wqe_cnt);
3660 }
3661
3662 spin_unlock_irqrestore(&qp->sq_lock, flags);
3663 return rc;
3664}
3665
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	struct bnxt_re_qp *qp, *sh_qp;
	struct bnxt_qplib_cqe *cqe;
	int i, ncqe, budget;
	struct bnxt_qplib_q *sq;
	struct bnxt_qplib_qp *lib_qp;
	u32 tbl_idx;
	struct bnxt_re_sqp_entries *sqp_entry = NULL;
	unsigned long flags;

	/* User CQ; the only processing we do is to
	 * complete any pending CQ resize operation.
	 */
	if (cq->umem) {
		if (cq->resize_umem)
			bnxt_re_resize_cq_complete(cq);
		return 0;
	}

	spin_lock_irqsave(&cq->cq_lock, flags);
	budget = min_t(u32, num_entries, cq->max_cql);
	num_entries = budget;
	if (!cq->cql) {
		ibdev_err(&cq->rdev->ibdev, "POLL CQ : no CQL to use");
		goto exit;
	}
	cqe = &cq->cql[0];
	while (budget) {
		lib_qp = NULL;
		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
		if (lib_qp) {
			sq = &lib_qp->sq;
			if (sq->send_phantom) {
				qp = container_of(lib_qp,
						  struct bnxt_re_qp, qplib_qp);
				if (send_phantom_wqe(qp) == -ENOMEM)
					ibdev_err(&cq->rdev->ibdev,
						  "Phantom failed! Scheduled to send again\n");
				else
					sq->send_phantom = false;
			}
		}
		if (ncqe < budget)
			ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
							      cqe + ncqe,
							      budget - ncqe);

		if (!ncqe)
			break;

		for (i = 0; i < ncqe; i++, cqe++) {
			/* Transcribe each qplib_wqe back to ib_wc */
			memset(wc, 0, sizeof(*wc));

			wc->wr_id = cqe->wr_id;
			wc->byte_len = cqe->length;
			qp = container_of
				((struct bnxt_qplib_qp *)
				 (unsigned long)(cqe->qp_handle),
				 struct bnxt_re_qp, qplib_qp);
			wc->qp = &qp->ib_qp;
			wc->ex.imm_data = cqe->immdata;
			wc->src_qp = cqe->src_qp;
			memcpy(wc->smac, cqe->smac, ETH_ALEN);
			wc->port_num = 1;
			wc->vendor_err = cqe->status;

			switch (cqe->opcode) {
			case CQ_BASE_CQE_TYPE_REQ:
				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
				if (sh_qp &&
				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					memset(wc, 0, sizeof(*wc));
					continue;
				}
				bnxt_re_process_req_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
				if (!cqe->status) {
					int rc = 0;

					rc = bnxt_re_process_raw_qp_pkt_rx
								(qp, cqe);
					if (!rc) {
						memset(wc, 0, sizeof(*wc));
						continue;
					}
					cqe->status = -1;
				}
				/* Errors need not be looped back.
				 * But change the wr_id to the one
				 * stored in the table
				 */
				tbl_idx = cqe->wr_id;
				sqp_entry = &cq->rdev->gsi_ctx.sqp_tbl[tbl_idx];
				wc->wr_id = sqp_entry->wrid;
				bnxt_re_process_res_rawqp1_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_RC:
				bnxt_re_process_res_rc_wc(wc, cqe);
				break;
			case CQ_BASE_CQE_TYPE_RES_UD:
				sh_qp = qp->rdev->gsi_ctx.gsi_sqp;
				if (sh_qp &&
				    qp->qplib_qp.id == sh_qp->qplib_qp.id) {
					/* Handle this completion with
					 * the stored completion
					 */
					if (cqe->status) {
						continue;
					} else {
						bnxt_re_process_res_shadow_qp_wc
								(qp, wc, cqe);
						break;
					}
				}
				bnxt_re_process_res_ud_wc(qp, wc, cqe);
				break;
			default:
				ibdev_err(&cq->rdev->ibdev,
					  "POLL CQ : type 0x%x not handled",
					  cqe->opcode);
				continue;
			}
			wc++;
			budget--;
		}
	}
exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return num_entries - budget;
}

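/*
 * bnxt_re_req_notify_cq - arm the CQ for completion notification.
 *
 * IB_CQ_NEXT_COMP arms for any completion (ARMALL), IB_CQ_SOLICITED only
 * for solicited completions (ARMSE). When IB_CQ_REPORT_MISSED_EVENTS is set
 * and the CQ is not empty, 1 is returned and the CQ is left unarmed so the
 * caller can poll the missed completions first.
 */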
int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
			  enum ib_cq_notify_flags ib_cqn_flags)
{
	struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	int type = 0, rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	/* Trigger on the very next completion */
	if (ib_cqn_flags & IB_CQ_NEXT_COMP)
		type = DBC_DBC_TYPE_CQ_ARMALL;
	/* Trigger on the next solicited completion */
	else if (ib_cqn_flags & IB_CQ_SOLICITED)
		type = DBC_DBC_TYPE_CQ_ARMSE;

	/* Poll to see if there are missed events */
	if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
		rc = 1;
		goto exit;
	}
	bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);

exit:
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	return rc;
}

/* Memory Regions */
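/*
 * A DMA MR spans the device's whole DMA address space: it is registered at
 * address 0 with an effectively unbounded length (total_size of -1, PBL
 * level forced to PBL_LVL_MAX), and the rkey is exposed only when remote
 * access flags were requested.
 */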
struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr;
	u32 active_mrs;
	int rc;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	/* Allocate and register 0 as the address */
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto fail;

	mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
			       PAGE_SIZE);
	if (rc)
		goto fail_mr;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
			       IB_ACCESS_REMOTE_ATOMIC))
		mr->ib_mr.rkey = mr->ib_mr.lkey;
	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
	if (active_mrs > rdev->stats.res.mr_watermark)
		rdev->stats.res.mr_watermark = active_mrs;

	return &mr->ib_mr;

fail_mr:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
fail:
	kfree(mr);
	return ERR_PTR(rc);
}

int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_re_dev *rdev = mr->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Dereg MR failed: %#x\n", rc);
		return rc;
	}

	if (mr->pages) {
		rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
							&mr->qplib_frpl);
		kfree(mr->pages);
		mr->npages = 0;
		mr->pages = NULL;
	}
	ib_umem_release(mr->ib_umem);

	kfree(mr);
	atomic_dec(&rdev->stats.res.mr_count);
	return rc;
}

static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

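/*
 * bnxt_re_map_mr_sg() feeds the scatterlist through ib_sg_to_pages(), which
 * calls bnxt_re_set_page() once per page-sized block to fill mr->pages for
 * the subsequent fast-register work request. A kernel consumer would reach
 * this through the generic helper; an illustrative sketch only:
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	if (n < sg_nents)
 *		... handle a partial mapping ...
 */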
int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);

	mr->npages = 0;
	return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
}

struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
			       u32 max_num_sg)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mr *mr = NULL;
	u32 active_mrs;
	int rc;

	if (type != IB_MR_TYPE_MEM_REG) {
		ibdev_dbg(&rdev->ibdev, "MR type 0x%x not supported", type);
		return ERR_PTR(-EINVAL);
	}
	if (max_num_sg > MAX_PBL_LVL_1_PGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc)
		goto bail;

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->ib_mr.lkey;

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		rc = -ENOMEM;
		goto fail;
	}
	rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
						 &mr->qplib_frpl, max_num_sg);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate HW FR page list");
		goto fail_mr;
	}

	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
	if (active_mrs > rdev->stats.res.mr_watermark)
		rdev->stats.res.mr_watermark = active_mrs;
	return &mr->ib_mr;

fail_mr:
	kfree(mr->pages);
fail:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
bail:
	kfree(mr);
	return ERR_PTR(rc);
}

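/*
 * Memory windows: IB_MW_TYPE_1 maps to the firmware's TYPE1 window,
 * anything else to TYPE2B. Only the rkey is handed back to the consumer;
 * the actual bind is performed later via a BIND_MW work request (see the
 * fence MW usage in bnxt_re_bind_fence_mw()).
 */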
struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_mw *mw;
	u32 active_mws;
	int rc;

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);
	mw->rdev = rdev;
	mw->qplib_mw.pd = &pd->qplib_pd;

	mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
			     CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Allocate MW failed!");
		goto fail;
	}
	mw->ib_mw.rkey = mw->qplib_mw.rkey;

	active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
	if (active_mws > rdev->stats.res.mw_watermark)
		rdev->stats.res.mw_watermark = active_mws;
	return &mw->ib_mw;

fail:
	kfree(mw);
	return ERR_PTR(rc);
}

int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
{
	struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
	struct bnxt_re_dev *rdev = mw->rdev;
	int rc;

	rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Free MW failed: %#x\n", rc);
		return rc;
	}

	kfree(mw);
	atomic_dec(&rdev->stats.res.mw_count);
	return rc;
}

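/*
 * Common registration path shared by bnxt_re_reg_user_mr() and
 * bnxt_re_reg_user_mr_dmabuf(). The caller keeps ownership of the umem on
 * failure; on success it transfers to the MR and is released in
 * bnxt_re_dereg_mr(). The HW page size is derived from the umem layout via
 * ib_umem_find_best_pgsz() against the sizes the device supports.
 */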
static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
					   int mr_access_flags, struct ib_umem *umem)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	unsigned long page_size;
	struct bnxt_re_mr *mr;
	int umem_pgs, rc;
	u32 active_mrs;

	if (length > BNXT_RE_MAX_MR_SIZE) {
		ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported: %lld\n",
			  length, BNXT_RE_MAX_MR_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
	if (!page_size) {
		ibdev_err(&rdev->ibdev, "umem page size unsupported!");
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->rdev = rdev;
	mr->qplib_mr.pd = &pd->qplib_pd;
	mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
	mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;

	rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
		rc = -EIO;
		goto free_mr;
	}
	/* The fixed portion of the rkey is the same as the lkey */
	mr->ib_mr.rkey = mr->qplib_mr.rkey;
	mr->ib_umem = umem;
	mr->qplib_mr.va = virt_addr;
	mr->qplib_mr.total_size = length;

	umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
	rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
			       umem_pgs, page_size);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
		rc = -EIO;
		goto free_mrw;
	}

	mr->ib_mr.lkey = mr->qplib_mr.lkey;
	mr->ib_mr.rkey = mr->qplib_mr.lkey;
	active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
	if (active_mrs > rdev->stats.res.mr_watermark)
		rdev->stats.res.mr_watermark = active_mrs;

	return &mr->ib_mr;

free_mrw:
	bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
	kfree(mr);
	return ERR_PTR(rc);
}

struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
				  u64 virt_addr, int mr_access_flags,
				  struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct ib_umem *umem;
	struct ib_mr *ib_mr;

	umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
	if (IS_ERR(ib_mr))
		ib_umem_release(umem);
	return ib_mr;
}

struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
					 u64 length, u64 virt_addr, int fd,
					 int mr_access_flags, struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	struct ib_mr *ib_mr;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
						fd, mr_access_flags);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	umem = &umem_dmabuf->umem;

	ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
	if (IS_ERR(ib_mr))
		ib_umem_release(umem);
	return ib_mr;
}

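/*
 * bnxt_re_alloc_ucontext - user/kernel ABI handshake.
 *
 * Rejects mismatched ABI versions, allocates the per-context shared page
 * (used, e.g., to publish AH IDs to user space), and advertises chip
 * identity, queue limits, WQE mode and optional features (WC doorbells,
 * doorbell pacing) through bnxt_re_uctx_resp. Optional requests from the
 * user library are accepted via bnxt_re_uctx_req when one is provided.
 */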
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = ctx->device;
	struct bnxt_re_ucontext *uctx =
		container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_user_mmap_entry *entry;
	struct bnxt_re_uctx_resp resp = {};
	struct bnxt_re_uctx_req ureq = {};
	u32 chip_met_rev_num = 0;
	int rc;

	ibdev_dbg(ibdev, "ABI version requested %u", ibdev->ops.uverbs_abi_ver);

	if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		ibdev_dbg(ibdev, "ABI version mismatch: device supports %d",
			  BNXT_RE_ABI_VERSION);
		return -EPERM;
	}

	uctx->rdev = rdev;

	uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
	if (!uctx->shpg) {
		rc = -ENOMEM;
		goto fail;
	}
	spin_lock_init(&uctx->sh_lock);

	resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
	chip_met_rev_num = rdev->chip_ctx->chip_num;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_rev & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
	chip_met_rev_num |= ((u32)rdev->chip_ctx->chip_metal & 0xFF) <<
			     BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
	resp.chip_id0 = chip_met_rev_num;
	/* Temporary; use xa_alloc instead */
	resp.dev_id = rdev->en_dev->pdev->devfn;
	resp.max_qp = rdev->qplib_ctx.qpc_count;
	resp.pg_size = PAGE_SIZE;
	resp.cqe_sz = sizeof(struct cq_base);
	resp.max_cqd = dev_attr->max_cq_wqes;

	resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
	resp.mode = rdev->chip_ctx->modes.wqe_mode;

	if (rdev->chip_ctx->modes.db_push)
		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;

	entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
	if (!entry) {
		rc = -ENOMEM;
		goto cfail;
	}
	uctx->shpage_mmap = &entry->rdma_entry;
	if (rdev->pacing.dbr_pacing)
		resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;

	if (udata->inlen >= sizeof(ureq)) {
		rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
		if (rc)
			goto cfail;
		if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
			resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
			uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
		}
	}

	rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
	if (rc) {
		ibdev_err(ibdev, "Failed to copy user context");
		rc = -EFAULT;
		goto cfail;
	}

	return 0;
cfail:
	free_page((unsigned long)uctx->shpg);
	uctx->shpg = NULL;
fail:
	return rc;
}

void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_dev *rdev = uctx->rdev;

	rdma_user_mmap_entry_remove(uctx->shpage_mmap);
	uctx->shpage_mmap = NULL;
	if (uctx->shpg)
		free_page((unsigned long)uctx->shpg);

	if (uctx->dpi.dbr) {
		/* Free the DPI only if one was allocated for this context
		 * (done on the first user PD allocation) and clear its dbr.
		 */
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
		uctx->dpi.dbr = NULL;
	}
}

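/*
 * Look up a kernel CQ by its qplib id in rdev->cq_hash. Used by the uverbs
 * toggle-memory method below to validate a CQ id supplied by user space
 * before exposing that CQ's toggle page.
 */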
static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{
	struct bnxt_re_cq *cq = NULL, *tmp_cq;

	hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
		if (tmp_cq->qplib_cq.id == cq_id) {
			cq = tmp_cq;
			break;
		}
	}
	return cq;
}

/* Map driver memory (doorbells, shared page, pacing pages) into user space */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
	struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
						     struct bnxt_re_ucontext,
						     ib_uctx);
	struct bnxt_re_user_mmap_entry *bnxt_entry;
	struct rdma_user_mmap_entry *rdma_entry;
	int ret = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
	if (!rdma_entry)
		return -EINVAL;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	switch (bnxt_entry->mmap_flag) {
	case BNXT_RE_MMAP_WC_DB:
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_UC_DB:
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_SH_PAGE:
		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
		break;
	case BNXT_RE_MMAP_DBR_BAR:
		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case BNXT_RE_MMAP_DBR_PAGE:
	case BNXT_RE_MMAP_TOGGLE_PAGE:
		/* These pages are read-only for user space; fail via the
		 * common exit path so the mmap entry reference is dropped.
		 */
		if (vma->vm_flags & VM_WRITE) {
			ret = -EFAULT;
			break;
		}
		ret = vm_insert_page(vma, vma->vm_start,
				     virt_to_page((void *)bnxt_entry->mem_offset));
		break;
	default:
		ret = -EINVAL;
		break;
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}

void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct bnxt_re_user_mmap_entry *bnxt_entry;

	bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
				  rdma_entry);

	kfree(bnxt_entry);
}

static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_ucontext *uctx;

	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
	bnxt_re_pacing_alert(uctx->rdev);
	return 0;
}

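/*
 * BNXT_RE_METHOD_ALLOC_PAGE: driver-specific uverbs method that hands a
 * special page to user space. Depending on the requested type this is a
 * write-combining push doorbell (a freshly allocated WC DPI), the
 * doorbell-pacing BAR page, or the pacing shared page. The handler replies
 * with an mmap offset, the mapping length and the DPI id; user space then
 * mmap()s that offset on the uverbs fd to gain access.
 */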
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
	enum bnxt_re_alloc_page_type alloc_type;
	struct bnxt_re_user_mmap_entry *entry;
	enum bnxt_re_mmap_flag mmap_flag;
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_re_ucontext *uctx;
	struct bnxt_re_dev *rdev;
	u64 mmap_offset;
	u32 length;
	u32 dpi = 0;	/* only meaningful for WC pages */
	u64 addr;
	int err;

	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
	if (IS_ERR(uctx))
		return PTR_ERR(uctx);

	err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
	if (err)
		return err;

	rdev = uctx->rdev;
	cctx = rdev->chip_ctx;

	switch (alloc_type) {
	case BNXT_RE_ALLOC_WC_PAGE:
		if (!cctx->modes.db_push)
			return -EINVAL;
		if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
					 uctx, BNXT_QPLIB_DPI_TYPE_WC))
			return -ENOMEM;
		length = PAGE_SIZE;
		dpi = uctx->wcdpi.dpi;
		addr = (u64)uctx->wcdpi.umdbr;
		mmap_flag = BNXT_RE_MMAP_WC_DB;
		break;
	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
		length = PAGE_SIZE;
		addr = (u64)rdev->pacing.dbr_bar_addr;
		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
		break;
	case BNXT_RE_ALLOC_DBR_PAGE:
		length = PAGE_SIZE;
		addr = (u64)rdev->pacing.dbr_page;
		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
		break;
	default:
		return -EOPNOTSUPP;
	}

	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
	if (!entry)
		return -ENOMEM;

	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
			     &mmap_offset, sizeof(mmap_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
			     &length, sizeof(length));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
			     &dpi, sizeof(dpi));
	if (err)
		return err;

	return 0;
}

static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
				  enum rdma_remove_reason why,
				  struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;
	struct bnxt_re_ucontext *uctx = entry->uctx;

	switch (entry->mmap_flag) {
	case BNXT_RE_MMAP_WC_DB:
		if (uctx && uctx->wcdpi.dbr) {
			struct bnxt_re_dev *rdev = uctx->rdev;

			bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
			uctx->wcdpi.dbr = NULL;
		}
		break;
	case BNXT_RE_MMAP_DBR_BAR:
	case BNXT_RE_MMAP_DBR_PAGE:
		break;
	default:
		goto exit;
	}
	rdma_user_mmap_entry_remove(&entry->rdma_entry);
exit:
	return 0;
}

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
			    UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
					    BNXT_RE_OBJECT_ALLOC_PAGE,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
						 enum bnxt_re_alloc_page_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
				    UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
						    BNXT_RE_OBJECT_ALLOC_PAGE,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
			    UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
			    &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);

DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
			      &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));

/* Toggle MEM */
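/*
 * Toggle MEM: exposes a resource's toggle page (currently only the CQ
 * toggle page; SRQ support is a placeholder) to user space. The handler
 * validates the resource id, creates an mmap entry for the page and
 * returns the mmap offset, the mapping length and the offset of the
 * toggle data within that page.
 */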
static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
	enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
	enum bnxt_re_get_toggle_mem_type res_type;
	struct bnxt_re_user_mmap_entry *entry;
	struct bnxt_re_ucontext *uctx;
	struct ib_ucontext *ib_uctx;
	struct bnxt_re_dev *rdev;
	struct bnxt_re_cq *cq;
	u64 mem_offset;
	u64 addr = 0;
	u32 length;
	u32 offset;
	u32 cq_id;
	int err;

	ib_uctx = ib_uverbs_get_ucontext(attrs);
	if (IS_ERR(ib_uctx))
		return PTR_ERR(ib_uctx);

	err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
	if (err)
		return err;

	uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
	rdev = uctx->rdev;

	switch (res_type) {
	case BNXT_RE_CQ_TOGGLE_MEM:
		err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
		if (err)
			return err;

		cq = bnxt_re_search_for_cq(rdev, cq_id);
		if (!cq)
			return -EINVAL;

		length = PAGE_SIZE;
		addr = (u64)cq->uctx_cq_page;
		mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
		offset = 0;
		break;
	case BNXT_RE_SRQ_TOGGLE_MEM:
		/* Placeholder: SRQ toggle memory is not populated yet, so
		 * reject instead of exposing uninitialized length/offset.
		 */
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}

	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
	if (!entry)
		return -ENOMEM;

	uobj->object = entry;
	uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
			     &mem_offset, sizeof(mem_offset));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
			     &length, sizeof(length));
	if (err)
		return err;

	err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
			     &offset, sizeof(offset));
	if (err)
		return err;

	return 0;
}

static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
				      enum rdma_remove_reason why,
				      struct uverbs_attr_bundle *attrs)
{
	struct bnxt_re_user_mmap_entry *entry = uobject->object;

	rdma_user_mmap_entry_remove(&entry->rdma_entry);
	return 0;
}

DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
			    UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
					    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
					    UVERBS_ACCESS_NEW,
					    UA_MANDATORY),
			    UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
						 enum bnxt_re_get_toggle_mem_type,
						 UA_MANDATORY),
			    UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
					       UVERBS_ATTR_TYPE(u32),
					       UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
						UVERBS_ATTR_TYPE(u64),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY),
			    UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
						UVERBS_ATTR_TYPE(u32),
						UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
				    UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
						    BNXT_RE_OBJECT_GET_TOGGLE_MEM,
						    UVERBS_ACCESS_DESTROY,
						    UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
			    UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
			    &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
			    &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));

const struct uapi_definition bnxt_re_uapi_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
	{}
};
1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: IB Verbs interpreter
37 */
38
39#include <linux/interrupt.h>
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/netdevice.h>
43#include <linux/if_ether.h>
44
45#include <rdma/ib_verbs.h>
46#include <rdma/ib_user_verbs.h>
47#include <rdma/ib_umem.h>
48#include <rdma/ib_addr.h>
49#include <rdma/ib_mad.h>
50#include <rdma/ib_cache.h>
51#include <rdma/uverbs_ioctl.h>
52
53#include "bnxt_ulp.h"
54
55#include "roce_hsi.h"
56#include "qplib_res.h"
57#include "qplib_sp.h"
58#include "qplib_fp.h"
59#include "qplib_rcfw.h"
60
61#include "bnxt_re.h"
62#include "ib_verbs.h"
63#include <rdma/bnxt_re-abi.h>
64
65static int __from_ib_access_flags(int iflags)
66{
67 int qflags = 0;
68
69 if (iflags & IB_ACCESS_LOCAL_WRITE)
70 qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
71 if (iflags & IB_ACCESS_REMOTE_READ)
72 qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
73 if (iflags & IB_ACCESS_REMOTE_WRITE)
74 qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
75 if (iflags & IB_ACCESS_REMOTE_ATOMIC)
76 qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
77 if (iflags & IB_ACCESS_MW_BIND)
78 qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
79 if (iflags & IB_ZERO_BASED)
80 qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
81 if (iflags & IB_ACCESS_ON_DEMAND)
82 qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
83 return qflags;
84};
85
86static enum ib_access_flags __to_ib_access_flags(int qflags)
87{
88 enum ib_access_flags iflags = 0;
89
90 if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
91 iflags |= IB_ACCESS_LOCAL_WRITE;
92 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
93 iflags |= IB_ACCESS_REMOTE_WRITE;
94 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
95 iflags |= IB_ACCESS_REMOTE_READ;
96 if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
97 iflags |= IB_ACCESS_REMOTE_ATOMIC;
98 if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
99 iflags |= IB_ACCESS_MW_BIND;
100 if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
101 iflags |= IB_ZERO_BASED;
102 if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
103 iflags |= IB_ACCESS_ON_DEMAND;
104 return iflags;
105};
106
107static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
108 struct bnxt_qplib_sge *sg_list, int num)
109{
110 int i, total = 0;
111
112 for (i = 0; i < num; i++) {
113 sg_list[i].addr = ib_sg_list[i].addr;
114 sg_list[i].lkey = ib_sg_list[i].lkey;
115 sg_list[i].size = ib_sg_list[i].length;
116 total += sg_list[i].size;
117 }
118 return total;
119}
120
121/* Device */
122int bnxt_re_query_device(struct ib_device *ibdev,
123 struct ib_device_attr *ib_attr,
124 struct ib_udata *udata)
125{
126 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
127 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
128
129 memset(ib_attr, 0, sizeof(*ib_attr));
130 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
131 min(sizeof(dev_attr->fw_ver),
132 sizeof(ib_attr->fw_ver)));
133 bnxt_qplib_get_guid(rdev->netdev->dev_addr,
134 (u8 *)&ib_attr->sys_image_guid);
135 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
136 ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
137
138 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
139 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
140 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
141 ib_attr->max_qp = dev_attr->max_qp;
142 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
143 ib_attr->device_cap_flags =
144 IB_DEVICE_CURR_QP_STATE_MOD
145 | IB_DEVICE_RC_RNR_NAK_GEN
146 | IB_DEVICE_SHUTDOWN_PORT
147 | IB_DEVICE_SYS_IMAGE_GUID
148 | IB_DEVICE_LOCAL_DMA_LKEY
149 | IB_DEVICE_RESIZE_MAX_WR
150 | IB_DEVICE_PORT_ACTIVE_EVENT
151 | IB_DEVICE_N_NOTIFY_CQ
152 | IB_DEVICE_MEM_WINDOW
153 | IB_DEVICE_MEM_WINDOW_TYPE_2B
154 | IB_DEVICE_MEM_MGT_EXTENSIONS;
155 ib_attr->max_send_sge = dev_attr->max_qp_sges;
156 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
157 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
158 ib_attr->max_cq = dev_attr->max_cq;
159 ib_attr->max_cqe = dev_attr->max_cq_wqes;
160 ib_attr->max_mr = dev_attr->max_mr;
161 ib_attr->max_pd = dev_attr->max_pd;
162 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
163 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
164 ib_attr->atomic_cap = IB_ATOMIC_NONE;
165 ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
166
167 ib_attr->max_ee_rd_atom = 0;
168 ib_attr->max_res_rd_atom = 0;
169 ib_attr->max_ee_init_rd_atom = 0;
170 ib_attr->max_ee = 0;
171 ib_attr->max_rdd = 0;
172 ib_attr->max_mw = dev_attr->max_mw;
173 ib_attr->max_raw_ipv6_qp = 0;
174 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
175 ib_attr->max_mcast_grp = 0;
176 ib_attr->max_mcast_qp_attach = 0;
177 ib_attr->max_total_mcast_qp_attach = 0;
178 ib_attr->max_ah = dev_attr->max_ah;
179
180 ib_attr->max_fmr = 0;
181 ib_attr->max_map_per_fmr = 0;
182
183 ib_attr->max_srq = dev_attr->max_srq;
184 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
185 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
186
187 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
188
189 ib_attr->max_pkeys = 1;
190 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
191 return 0;
192}
193
194int bnxt_re_modify_device(struct ib_device *ibdev,
195 int device_modify_mask,
196 struct ib_device_modify *device_modify)
197{
198 switch (device_modify_mask) {
199 case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
200 /* Modify the GUID requires the modification of the GID table */
201 /* GUID should be made as READ-ONLY */
202 break;
203 case IB_DEVICE_MODIFY_NODE_DESC:
204 /* Node Desc should be made as READ-ONLY */
205 break;
206 default:
207 break;
208 }
209 return 0;
210}
211
212/* Port */
213int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
214 struct ib_port_attr *port_attr)
215{
216 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
217 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
218
219 memset(port_attr, 0, sizeof(*port_attr));
220
221 if (netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev)) {
222 port_attr->state = IB_PORT_ACTIVE;
223 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
224 } else {
225 port_attr->state = IB_PORT_DOWN;
226 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
227 }
228 port_attr->max_mtu = IB_MTU_4096;
229 port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
230 port_attr->gid_tbl_len = dev_attr->max_sgid;
231 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
232 IB_PORT_DEVICE_MGMT_SUP |
233 IB_PORT_VENDOR_CLASS_SUP;
234 port_attr->ip_gids = true;
235
236 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
237 port_attr->bad_pkey_cntr = 0;
238 port_attr->qkey_viol_cntr = 0;
239 port_attr->pkey_tbl_len = dev_attr->max_pkey;
240 port_attr->lid = 0;
241 port_attr->sm_lid = 0;
242 port_attr->lmc = 0;
243 port_attr->max_vl_num = 4;
244 port_attr->sm_sl = 0;
245 port_attr->subnet_timeout = 0;
246 port_attr->init_type_reply = 0;
247 port_attr->active_speed = rdev->active_speed;
248 port_attr->active_width = rdev->active_width;
249
250 return 0;
251}
252
253int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
254 struct ib_port_immutable *immutable)
255{
256 struct ib_port_attr port_attr;
257
258 if (bnxt_re_query_port(ibdev, port_num, &port_attr))
259 return -EINVAL;
260
261 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
262 immutable->gid_tbl_len = port_attr.gid_tbl_len;
263 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
264 immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
265 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
266 return 0;
267}
268
269void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
270{
271 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
272
273 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
274 rdev->dev_attr.fw_ver[0], rdev->dev_attr.fw_ver[1],
275 rdev->dev_attr.fw_ver[2], rdev->dev_attr.fw_ver[3]);
276}
277
278int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
279 u16 index, u16 *pkey)
280{
281 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
282
283 /* Ignore port_num */
284
285 memset(pkey, 0, sizeof(*pkey));
286 return bnxt_qplib_get_pkey(&rdev->qplib_res,
287 &rdev->qplib_res.pkey_tbl, index, pkey);
288}
289
290int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
291 int index, union ib_gid *gid)
292{
293 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
294 int rc = 0;
295
296 /* Ignore port_num */
297 memset(gid, 0, sizeof(*gid));
298 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
299 &rdev->qplib_res.sgid_tbl, index,
300 (struct bnxt_qplib_gid *)gid);
301 return rc;
302}
303
304int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
305{
306 int rc = 0;
307 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
308 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
309 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
310 struct bnxt_qplib_gid *gid_to_del;
311 u16 vlan_id = 0xFFFF;
312
313 /* Delete the entry from the hardware */
314 ctx = *context;
315 if (!ctx)
316 return -EINVAL;
317
318 if (sgid_tbl && sgid_tbl->active) {
319 if (ctx->idx >= sgid_tbl->max)
320 return -EINVAL;
321 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
322 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
323 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
324 * or via the ib_unregister_device path. In the former case QP1
325 * may not be destroyed yet, in which case just return as FW
326 * needs that entry to be present and will fail it's deletion.
327 * We could get invoked again after QP1 is destroyed OR get an
328 * ADD_GID call with a different GID value for the same index
329 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
330 */
331 if (ctx->idx == 0 &&
332 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
333 ctx->refcnt == 1 && rdev->qp1_sqp) {
334 dev_dbg(rdev_to_dev(rdev),
335 "Trying to delete GID0 while QP1 is alive\n");
336 return -EFAULT;
337 }
338 ctx->refcnt--;
339 if (!ctx->refcnt) {
340 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
341 vlan_id, true);
342 if (rc) {
343 dev_err(rdev_to_dev(rdev),
344 "Failed to remove GID: %#x", rc);
345 } else {
346 ctx_tbl = sgid_tbl->ctx;
347 ctx_tbl[ctx->idx] = NULL;
348 kfree(ctx);
349 }
350 }
351 } else {
352 return -EINVAL;
353 }
354 return rc;
355}
356
357int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
358{
359 int rc;
360 u32 tbl_idx = 0;
361 u16 vlan_id = 0xFFFF;
362 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
363 struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
364 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
365
366 rc = rdma_read_gid_l2_fields(attr, &vlan_id, NULL);
367 if (rc)
368 return rc;
369
370 rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
371 rdev->qplib_res.netdev->dev_addr,
372 vlan_id, true, &tbl_idx);
373 if (rc == -EALREADY) {
374 ctx_tbl = sgid_tbl->ctx;
375 ctx_tbl[tbl_idx]->refcnt++;
376 *context = ctx_tbl[tbl_idx];
377 return 0;
378 }
379
380 if (rc < 0) {
381 dev_err(rdev_to_dev(rdev), "Failed to add GID: %#x", rc);
382 return rc;
383 }
384
385 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
386 if (!ctx)
387 return -ENOMEM;
388 ctx_tbl = sgid_tbl->ctx;
389 ctx->idx = tbl_idx;
390 ctx->refcnt = 1;
391 ctx_tbl[tbl_idx] = ctx;
392 *context = ctx;
393
394 return rc;
395}
396
397enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
398 u8 port_num)
399{
400 return IB_LINK_LAYER_ETHERNET;
401}
402
403#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
404
405static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
406{
407 struct bnxt_re_fence_data *fence = &pd->fence;
408 struct ib_mr *ib_mr = &fence->mr->ib_mr;
409 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
410
411 memset(wqe, 0, sizeof(*wqe));
412 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
413 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
414 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
415 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
416 wqe->bind.zero_based = false;
417 wqe->bind.parent_l_key = ib_mr->lkey;
418 wqe->bind.va = (u64)(unsigned long)fence->va;
419 wqe->bind.length = fence->size;
420 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
421 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
422
423 /* Save the initial rkey in fence structure for now;
424 * wqe->bind.r_key will be set at (re)bind time.
425 */
426 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
427}
428
429static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
430{
431 struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
432 qplib_qp);
433 struct ib_pd *ib_pd = qp->ib_qp.pd;
434 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
435 struct bnxt_re_fence_data *fence = &pd->fence;
436 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
437 struct bnxt_qplib_swqe wqe;
438 int rc;
439
440 memcpy(&wqe, fence_wqe, sizeof(wqe));
441 wqe.bind.r_key = fence->bind_rkey;
442 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
443
444 dev_dbg(rdev_to_dev(qp->rdev),
445 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
446 wqe.bind.r_key, qp->qplib_qp.id, pd);
447 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
448 if (rc) {
449 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
450 return rc;
451 }
452 bnxt_qplib_post_send_db(&qp->qplib_qp);
453
454 return rc;
455}
456
457static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
458{
459 struct bnxt_re_fence_data *fence = &pd->fence;
460 struct bnxt_re_dev *rdev = pd->rdev;
461 struct device *dev = &rdev->en_dev->pdev->dev;
462 struct bnxt_re_mr *mr = fence->mr;
463
464 if (fence->mw) {
465 bnxt_re_dealloc_mw(fence->mw);
466 fence->mw = NULL;
467 }
468 if (mr) {
469 if (mr->ib_mr.rkey)
470 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
471 true);
472 if (mr->ib_mr.lkey)
473 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
474 kfree(mr);
475 fence->mr = NULL;
476 }
477 if (fence->dma_addr) {
478 dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
479 DMA_BIDIRECTIONAL);
480 fence->dma_addr = 0;
481 }
482}
483
484static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
485{
486 int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
487 struct bnxt_re_fence_data *fence = &pd->fence;
488 struct bnxt_re_dev *rdev = pd->rdev;
489 struct device *dev = &rdev->en_dev->pdev->dev;
490 struct bnxt_re_mr *mr = NULL;
491 dma_addr_t dma_addr = 0;
492 struct ib_mw *mw;
493 u64 pbl_tbl;
494 int rc;
495
496 dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
497 DMA_BIDIRECTIONAL);
498 rc = dma_mapping_error(dev, dma_addr);
499 if (rc) {
500 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
501 rc = -EIO;
502 fence->dma_addr = 0;
503 goto fail;
504 }
505 fence->dma_addr = dma_addr;
506
507 /* Allocate a MR */
508 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
509 if (!mr) {
510 rc = -ENOMEM;
511 goto fail;
512 }
513 fence->mr = mr;
514 mr->rdev = rdev;
515 mr->qplib_mr.pd = &pd->qplib_pd;
516 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
517 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
518 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
519 if (rc) {
520 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
521 goto fail;
522 }
523
524 /* Register MR */
525 mr->ib_mr.lkey = mr->qplib_mr.lkey;
526 mr->qplib_mr.va = (u64)(unsigned long)fence->va;
527 mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
528 pbl_tbl = dma_addr;
529 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
530 BNXT_RE_FENCE_PBL_SIZE, false, PAGE_SIZE);
531 if (rc) {
532 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
533 goto fail;
534 }
535 mr->ib_mr.rkey = mr->qplib_mr.rkey;
536
537 /* Create a fence MW only for kernel consumers */
538 mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
539 if (IS_ERR(mw)) {
540 dev_err(rdev_to_dev(rdev),
541 "Failed to create fence-MW for PD: %p\n", pd);
542 rc = PTR_ERR(mw);
543 goto fail;
544 }
545 fence->mw = mw;
546
547 bnxt_re_create_fence_wqe(pd);
548 return 0;
549
550fail:
551 bnxt_re_destroy_fence_mr(pd);
552 return rc;
553}
554
555/* Protection Domains */
556void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
557{
558 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
559 struct bnxt_re_dev *rdev = pd->rdev;
560
561 bnxt_re_destroy_fence_mr(pd);
562
563 if (pd->qplib_pd.id)
564 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
565 &pd->qplib_pd);
566}
567
568int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
569{
570 struct ib_device *ibdev = ibpd->device;
571 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
572 struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
573 udata, struct bnxt_re_ucontext, ib_uctx);
574 struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
575 int rc;
576
577 pd->rdev = rdev;
578 if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
579 dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
580 rc = -ENOMEM;
581 goto fail;
582 }
583
584 if (udata) {
585 struct bnxt_re_pd_resp resp;
586
587 if (!ucntx->dpi.dbr) {
588 /* Allocate DPI in alloc_pd to avoid failing of
589 * ibv_devinfo and family of application when DPIs
590 * are depleted.
591 */
592 if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
593 &ucntx->dpi, ucntx)) {
594 rc = -ENOMEM;
595 goto dbfail;
596 }
597 }
598
599 resp.pdid = pd->qplib_pd.id;
600 /* Still allow mapping this DBR to the new user PD. */
601 resp.dpi = ucntx->dpi.dpi;
602 resp.dbr = (u64)ucntx->dpi.umdbr;
603
604 rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
605 if (rc) {
606 dev_err(rdev_to_dev(rdev),
607 "Failed to copy user response\n");
608 goto dbfail;
609 }
610 }
611
612 if (!udata)
613 if (bnxt_re_create_fence_mr(pd))
614 dev_warn(rdev_to_dev(rdev),
615 "Failed to create Fence-MR\n");
616 return 0;
617dbfail:
618 bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
619 &pd->qplib_pd);
620fail:
621 return rc;
622}
623
624/* Address Handles */
625void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
626{
627 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
628 struct bnxt_re_dev *rdev = ah->rdev;
629
630 bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
631 !(flags & RDMA_DESTROY_AH_SLEEPABLE));
632}
633
634static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
635{
636 u8 nw_type;
637
638 switch (ntype) {
639 case RDMA_NETWORK_IPV4:
640 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
641 break;
642 case RDMA_NETWORK_IPV6:
643 nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
644 break;
645 default:
646 nw_type = CMDQ_CREATE_AH_TYPE_V1;
647 break;
648 }
649 return nw_type;
650}
651
652int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr,
653 u32 flags, struct ib_udata *udata)
654{
655 struct ib_pd *ib_pd = ib_ah->pd;
656 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
657 const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
658 struct bnxt_re_dev *rdev = pd->rdev;
659 const struct ib_gid_attr *sgid_attr;
660 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
661 u8 nw_type;
662 int rc;
663
664 if (!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) {
665 dev_err(rdev_to_dev(rdev), "Failed to alloc AH: GRH not set");
666 return -EINVAL;
667 }
668
669 ah->rdev = rdev;
670 ah->qplib_ah.pd = &pd->qplib_pd;
671
672 /* Supply the configuration for the HW */
673 memcpy(ah->qplib_ah.dgid.data, grh->dgid.raw,
674 sizeof(union ib_gid));
675 /*
676 * If RoCE V2 is enabled, stack will have two entries for
677 * each GID entry. Avoiding this duplicte entry in HW. Dividing
678 * the GID index by 2 for RoCE V2
679 */
680 ah->qplib_ah.sgid_index = grh->sgid_index / 2;
681 ah->qplib_ah.host_sgid_index = grh->sgid_index;
682 ah->qplib_ah.traffic_class = grh->traffic_class;
683 ah->qplib_ah.flow_label = grh->flow_label;
684 ah->qplib_ah.hop_limit = grh->hop_limit;
685 ah->qplib_ah.sl = rdma_ah_get_sl(ah_attr);
686
687 sgid_attr = grh->sgid_attr;
688 /* Get network header type for this GID */
689 nw_type = rdma_gid_attr_network_type(sgid_attr);
690 ah->qplib_ah.nw_type = bnxt_re_stack_to_dev_nw_type(nw_type);
691
692 memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
693 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah,
694 !(flags & RDMA_CREATE_AH_SLEEPABLE));
695 if (rc) {
696 dev_err(rdev_to_dev(rdev), "Failed to allocate HW AH");
697 return rc;
698 }
699
700 /* Write AVID to shared page. */
701 if (udata) {
702 struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
703 udata, struct bnxt_re_ucontext, ib_uctx);
704 unsigned long flag;
705 u32 *wrptr;
706
707 spin_lock_irqsave(&uctx->sh_lock, flag);
708 wrptr = (u32 *)(uctx->shpg + BNXT_RE_AVID_OFFT);
709 *wrptr = ah->qplib_ah.id;
710 wmb(); /* make sure cache is updated. */
711 spin_unlock_irqrestore(&uctx->sh_lock, flag);
712 }
713
714 return 0;
715}
716
717int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
718{
719 return 0;
720}
721
722int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
723{
724 struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
725
726 ah_attr->type = ib_ah->type;
727 rdma_ah_set_sl(ah_attr, ah->qplib_ah.sl);
728 memcpy(ah_attr->roce.dmac, ah->qplib_ah.dmac, ETH_ALEN);
729 rdma_ah_set_grh(ah_attr, NULL, 0,
730 ah->qplib_ah.host_sgid_index,
731 0, ah->qplib_ah.traffic_class);
732 rdma_ah_set_dgid_raw(ah_attr, ah->qplib_ah.dgid.data);
733 rdma_ah_set_port_num(ah_attr, 1);
734 rdma_ah_set_static_rate(ah_attr, 0);
735 return 0;
736}
737
738unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
739 __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
740{
741 unsigned long flags;
742
743 spin_lock_irqsave(&qp->scq->cq_lock, flags);
744 if (qp->rcq != qp->scq)
745 spin_lock(&qp->rcq->cq_lock);
746 else
747 __acquire(&qp->rcq->cq_lock);
748
749 return flags;
750}
751
752void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
753 unsigned long flags)
754 __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
755{
756 if (qp->rcq != qp->scq)
757 spin_unlock(&qp->rcq->cq_lock);
758 else
759 __release(&qp->rcq->cq_lock);
760 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
761}
762
763/* Queue Pairs */
764int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
765{
766 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
767 struct bnxt_re_dev *rdev = qp->rdev;
768 unsigned int flags;
769 int rc;
770
771 bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
772 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
773 if (rc) {
774 dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
775 return rc;
776 }
777
778 if (rdma_is_kernel_res(&qp->ib_qp.res)) {
779 flags = bnxt_re_lock_cqs(qp);
780 bnxt_qplib_clean_qp(&qp->qplib_qp);
781 bnxt_re_unlock_cqs(qp, flags);
782 }
783
784 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
785
786 if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
787 bnxt_qplib_destroy_ah(&rdev->qplib_res, &rdev->sqp_ah->qplib_ah,
788 false);
789
790 bnxt_qplib_clean_qp(&qp->qplib_qp);
791 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
792 &rdev->qp1_sqp->qplib_qp);
793 if (rc) {
794 dev_err(rdev_to_dev(rdev),
795 "Failed to destroy Shadow QP");
796 return rc;
797 }
798 bnxt_qplib_free_qp_res(&rdev->qplib_res,
799 &rdev->qp1_sqp->qplib_qp);
800 mutex_lock(&rdev->qp_lock);
801 list_del(&rdev->qp1_sqp->list);
802 atomic_dec(&rdev->qp_count);
803 mutex_unlock(&rdev->qp_lock);
804
805 kfree(rdev->sqp_ah);
806 kfree(rdev->qp1_sqp);
807 rdev->qp1_sqp = NULL;
808 rdev->sqp_ah = NULL;
809 }
810
811 ib_umem_release(qp->rumem);
812 ib_umem_release(qp->sumem);
813
814 mutex_lock(&rdev->qp_lock);
815 list_del(&qp->list);
816 atomic_dec(&rdev->qp_count);
817 mutex_unlock(&rdev->qp_lock);
818 kfree(qp);
819 return 0;
820}
821
822static u8 __from_ib_qp_type(enum ib_qp_type type)
823{
824 switch (type) {
825 case IB_QPT_GSI:
826 return CMDQ_CREATE_QP1_TYPE_GSI;
827 case IB_QPT_RC:
828 return CMDQ_CREATE_QP_TYPE_RC;
829 case IB_QPT_UD:
830 return CMDQ_CREATE_QP_TYPE_UD;
831 default:
832 return IB_QPT_MAX;
833 }
834}
835
836static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
837 struct bnxt_re_qp *qp, struct ib_udata *udata)
838{
839 struct bnxt_re_qp_req ureq;
840 struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp;
841 struct ib_umem *umem;
842 int bytes = 0, psn_sz;
843 struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
844 udata, struct bnxt_re_ucontext, ib_uctx);
845
846 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
847 return -EFAULT;
848
849 bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
850 /* Consider mapping PSN search memory only for RC QPs. */
851 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
852 psn_sz = bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx) ?
853 sizeof(struct sq_psn_search_ext) :
854 sizeof(struct sq_psn_search);
855 bytes += (qplib_qp->sq.max_wqe * psn_sz);
856 }
857 bytes = PAGE_ALIGN(bytes);
858 umem = ib_umem_get(udata, ureq.qpsva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
859 if (IS_ERR(umem))
860 return PTR_ERR(umem);
861
862 qp->sumem = umem;
863 qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
864 qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
865 qplib_qp->sq.sg_info.nmap = umem->nmap;
866 qplib_qp->qp_handle = ureq.qp_handle;
867
868 if (!qp->qplib_qp.srq) {
869 bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
870 bytes = PAGE_ALIGN(bytes);
871 umem = ib_umem_get(udata, ureq.qprva, bytes,
872 IB_ACCESS_LOCAL_WRITE, 1);
873 if (IS_ERR(umem))
874 goto rqfail;
875 qp->rumem = umem;
876 qplib_qp->rq.sg_info.sglist = umem->sg_head.sgl;
877 qplib_qp->rq.sg_info.npages = ib_umem_num_pages(umem);
878 qplib_qp->rq.sg_info.nmap = umem->nmap;
879 }
880
881 qplib_qp->dpi = &cntx->dpi;
882 return 0;
883rqfail:
884 ib_umem_release(qp->sumem);
885 qp->sumem = NULL;
886 memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
887
888 return PTR_ERR(umem);
889}
890
891static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
892 (struct bnxt_re_pd *pd,
893 struct bnxt_qplib_res *qp1_res,
894 struct bnxt_qplib_qp *qp1_qp)
895{
896 struct bnxt_re_dev *rdev = pd->rdev;
897 struct bnxt_re_ah *ah;
898 union ib_gid sgid;
899 int rc;
900
901 ah = kzalloc(sizeof(*ah), GFP_KERNEL);
902 if (!ah)
903 return NULL;
904
905 ah->rdev = rdev;
906 ah->qplib_ah.pd = &pd->qplib_pd;
907
908 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
909 if (rc)
910 goto fail;
911
912 /* supply the dgid data same as sgid */
913 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
914 sizeof(union ib_gid));
915 ah->qplib_ah.sgid_index = 0;
916
917 ah->qplib_ah.traffic_class = 0;
918 ah->qplib_ah.flow_label = 0;
919 ah->qplib_ah.hop_limit = 1;
920 ah->qplib_ah.sl = 0;
921 /* Have DMAC same as SMAC */
922 ether_addr_copy(ah->qplib_ah.dmac, rdev->netdev->dev_addr);
923
924 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, false);
925 if (rc) {
926 dev_err(rdev_to_dev(rdev),
927 "Failed to allocate HW AH for Shadow QP");
928 goto fail;
929 }
930
931 return ah;
932
933fail:
934 kfree(ah);
935 return NULL;
936}
937
static struct bnxt_re_qp *bnxt_re_create_shadow_qp
				(struct bnxt_re_pd *pd,
				 struct bnxt_qplib_res *qp1_res,
				 struct bnxt_qplib_qp *qp1_qp)
{
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_re_qp *qp;
	int rc;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return NULL;

	qp->rdev = rdev;

	/* Initialize the shadow QP structure from the QP1 values */
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);

	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = IB_QPT_UD;

	qp->qplib_qp.max_inline_data = 0;
	qp->qplib_qp.sig_type = true;

	/* Shadow QP SQ depth should be same as QP1 RQ depth */
	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.sq.max_sge = 2;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.sq.q_full_delta = 1;

	qp->qplib_qp.scq = qp1_qp->scq;
	qp->qplib_qp.rcq = qp1_qp->rcq;

	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
	/* Q full delta can be 1 since it is internal QP */
	qp->qplib_qp.rq.q_full_delta = 1;

	qp->qplib_qp.mtu = qp1_qp->mtu;

	qp->qplib_qp.sq_hdr_buf_size = 0;
	qp->qplib_qp.rq_hdr_buf_size = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
	qp->qplib_qp.dpi = &rdev->dpi_privileged;

	rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
	if (rc)
		goto fail;

	rdev->sqp_id = qp->qplib_qp.id;

	spin_lock_init(&qp->sq_lock);
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);
	return qp;
fail:
	kfree(qp);
	return NULL;
}

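/*
 * bnxt_re_create_qp - Create a QP.
 *
 * Validates the requested capabilities against the device limits, sizes
 * the SQ/RQ (depths are rounded up to a power of two with extra slack so
 * that a full ring is distinguishable from an empty one), and creates the
 * HW QP. On older (non-P5) chips a GSI QP additionally gets header
 * buffers plus a shadow QP and AH for the loopback path.
 */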
struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
				struct ib_udata *udata)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	struct bnxt_re_srq *srq;
	int rc, entries;

	if ((qp_init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
	    (qp_init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
	    (qp_init_attr->cap.max_inline_data > dev_attr->max_inline_data))
		return ERR_PTR(-EINVAL);

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->rdev = rdev;
	ether_addr_copy(qp->qplib_qp.smac, rdev->netdev->dev_addr);
	qp->qplib_qp.pd = &pd->qplib_pd;
	qp->qplib_qp.qp_handle = (u64)(unsigned long)(&qp->qplib_qp);
	qp->qplib_qp.type = __from_ib_qp_type(qp_init_attr->qp_type);

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))
		qp->qplib_qp.type = CMDQ_CREATE_QP_TYPE_GSI;
	if (qp->qplib_qp.type == IB_QPT_MAX) {
		dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported",
			qp->qplib_qp.type);
		rc = -EINVAL;
		goto fail;
	}

	qp->qplib_qp.max_inline_data = qp_init_attr->cap.max_inline_data;
	qp->qplib_qp.sig_type = (qp_init_attr->sq_sig_type ==
				 IB_SIGNAL_ALL_WR);

	qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
	if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
		qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

	if (qp_init_attr->send_cq) {
		cq = container_of(qp_init_attr->send_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Send CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.scq = &cq->qplib_cq;
		qp->scq = cq;
	}

	if (qp_init_attr->recv_cq) {
		cq = container_of(qp_init_attr->recv_cq, struct bnxt_re_cq,
				  ib_cq);
		if (!cq) {
			dev_err(rdev_to_dev(rdev), "Receive CQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.rcq = &cq->qplib_cq;
		qp->rcq = cq;
	}

	if (qp_init_attr->srq) {
		srq = container_of(qp_init_attr->srq, struct bnxt_re_srq,
				   ib_srq);
		if (!srq) {
			dev_err(rdev_to_dev(rdev), "SRQ not found");
			rc = -EINVAL;
			goto fail;
		}
		qp->qplib_qp.srq = &srq->qplib_srq;
		qp->qplib_qp.rq.max_wqe = 0;
	} else {
		/* Allocate 1 more than what's provided so posting max doesn't
		 * mean empty
		 */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_recv_wr + 1);
		qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);

		qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
					       qp_init_attr->cap.max_recv_wr;

		qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
	}

	qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));

	if (qp_init_attr->qp_type == IB_QPT_GSI &&
	    !(bnxt_qplib_is_chip_gen_p5(&rdev->chip_ctx))) {
		/* Allocate 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
					       qp_init_attr->cap.max_send_wr;
		qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
		qp->qplib_qp.sq.max_sge++;
		if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
			qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;

		qp->qplib_qp.rq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;

		qp->qplib_qp.sq_hdr_buf_size =
					BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2;
		qp->qplib_qp.dpi = &rdev->dpi_privileged;
		rc = bnxt_qplib_create_qp1(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP1");
			goto fail;
		}
		/* Create a shadow QP to handle the QP1 traffic */
		rdev->qp1_sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res,
							 &qp->qplib_qp);
		if (!rdev->qp1_sqp) {
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create Shadow QP for QP1");
			goto qp_destroy;
		}
		rdev->sqp_ah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
							   &qp->qplib_qp);
		if (!rdev->sqp_ah) {
			bnxt_qplib_destroy_qp(&rdev->qplib_res,
					      &rdev->qp1_sqp->qplib_qp);
			rc = -EINVAL;
			dev_err(rdev_to_dev(rdev),
				"Failed to create AH entry for ShadowQP");
			goto qp_destroy;
		}

	} else {
		/* Allocate 128 + 1 more than what's provided */
		entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
					     BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes +
						BNXT_QPLIB_RESERVED_QP_WRS + 1);
		qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;

		/*
		 * Reserve one slot for the phantom WQE. The application can
		 * then post one extra entry; allow it so that posting the
		 * maximum does not hit an unexpected queue-full condition.
		 */

		qp->qplib_qp.sq.q_full_delta -= 1;

		qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
		qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
		if (udata) {
			rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
			if (rc)
				goto fail;
		} else {
			qp->qplib_qp.dpi = &rdev->dpi_privileged;
		}

		rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
			goto free_umem;
		}
	}

	qp->ib_qp.qp_num = qp->qplib_qp.id;
	spin_lock_init(&qp->sq_lock);
	spin_lock_init(&qp->rq_lock);

	if (udata) {
		struct bnxt_re_qp_resp resp;

		resp.qpid = qp->ib_qp.qp_num;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy QP udata");
			goto qp_destroy;
		}
	}
	INIT_LIST_HEAD(&qp->list);
	mutex_lock(&rdev->qp_lock);
	list_add_tail(&qp->list, &rdev->qp_list);
	atomic_inc(&rdev->qp_count);
	mutex_unlock(&rdev->qp_lock);

	return &qp->ib_qp;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
fail:
	kfree(qp);
	return ERR_PTR(rc);
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return CMDQ_MODIFY_QP_NEW_STATE_RESET;
	case IB_QPS_INIT:
		return CMDQ_MODIFY_QP_NEW_STATE_INIT;
	case IB_QPS_RTR:
		return CMDQ_MODIFY_QP_NEW_STATE_RTR;
	case IB_QPS_RTS:
		return CMDQ_MODIFY_QP_NEW_STATE_RTS;
	case IB_QPS_SQD:
		return CMDQ_MODIFY_QP_NEW_STATE_SQD;
	case IB_QPS_SQE:
		return CMDQ_MODIFY_QP_NEW_STATE_SQE;
	case IB_QPS_ERR:
	default:
		return CMDQ_MODIFY_QP_NEW_STATE_ERR;
	}
}

static enum ib_qp_state __to_ib_qp_state(u8 state)
{
	switch (state) {
	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
		return IB_QPS_RESET;
	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
		return IB_QPS_INIT;
	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
		return IB_QPS_RTR;
	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
		return IB_QPS_RTS;
	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
		return IB_QPS_SQD;
	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
		return IB_QPS_SQE;
	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
	default:
		return IB_QPS_ERR;
	}
}

static u32 __from_ib_mtu(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
	case IB_MTU_512:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
	case IB_MTU_1024:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
	case IB_MTU_2048:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	case IB_MTU_4096:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
	default:
		return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
	}
}

static enum ib_mtu __to_ib_mtu(u32 mtu)
{
	switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
		return IB_MTU_256;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
		return IB_MTU_512;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
		return IB_MTU_1024;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
		return IB_MTU_2048;
	case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_2048;
	}
}

/* Shared Receive Queues */
void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	ib_umem_release(srq->umem);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
				 struct bnxt_re_pd *pd,
				 struct bnxt_re_srq *srq,
				 struct ib_udata *udata)
{
	struct bnxt_re_srq_req ureq;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct ib_umem *umem;
	int bytes = 0;
	struct bnxt_re_ucontext *cntx = rdma_udata_to_drv_context(
		udata, struct bnxt_re_ucontext, ib_uctx);

	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
		return -EFAULT;

	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
	bytes = PAGE_ALIGN(bytes);
	umem = ib_umem_get(udata, ureq.srqva, bytes, IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	srq->umem = umem;
	qplib_srq->sg_info.sglist = umem->sg_head.sgl;
	qplib_srq->sg_info.npages = ib_umem_num_pages(umem);
	qplib_srq->sg_info.nmap = umem->nmap;
	qplib_srq->srq_handle = ureq.srq_handle;
	qplib_srq->dpi = &cntx->dpi;

	return 0;
}

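/*
 * bnxt_re_create_srq - Create a shared receive queue.
 *
 * Only IB_SRQT_BASIC is supported. The WQE ring is sized one deeper than
 * requested (capped at the device limit), user rings are mapped through
 * bnxt_re_init_user_srq(), and SRQ events are steered to NQ 0.
 */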
int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_srq->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_srq *srq =
		container_of(ib_srq, struct bnxt_re_srq, ib_srq);
	struct bnxt_qplib_nq *nq = NULL;
	int rc, entries;

	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
		dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded");
		rc = -EINVAL;
		goto exit;
	}

	if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
		rc = -EOPNOTSUPP;
		goto exit;
	}

	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
	/* Allocate 1 more than what's provided so posting max doesn't
	 * mean empty
	 */
	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
	if (entries > dev_attr->max_srq_wqes + 1)
		entries = dev_attr->max_srq_wqes + 1;

	srq->qplib_srq.max_wqe = entries;
	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
	srq->srq_limit = srq_init_attr->attr.srq_limit;
	srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id;
	nq = &rdev->nq[0];

	if (udata) {
		rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
		if (rc)
			goto fail;
	}

	rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!");
		goto fail;
	}

	if (udata) {
		struct bnxt_re_srq_resp resp;

		resp.srqid = srq->qplib_srq.id;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "SRQ copy to udata failed!");
			bnxt_qplib_destroy_srq(&rdev->qplib_res,
					       &srq->qplib_srq);
			goto fail;
		}
	}
	if (nq)
		nq->budget++;
	atomic_inc(&rdev->srq_count);

	return 0;

fail:
	ib_umem_release(srq->umem);
exit:
	return rc;
}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	switch (srq_attr_mask) {
	case IB_SRQ_MAX_WR:
		/* SRQ resize is not supported */
		break;
	case IB_SRQ_LIMIT:
		/* Change the SRQ threshold */
		if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
			return -EINVAL;

		srq->qplib_srq.threshold = srq_attr->srq_limit;
		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!");
			return rc;
		}
		/* On success, update the shadow */
		srq->srq_limit = srq_attr->srq_limit;
		/* No need to build and send response back to udata */
		break;
	default:
		dev_err(rdev_to_dev(rdev),
			"Unsupported srq_attr_mask 0x%x", srq_attr_mask);
		return -EINVAL;
	}
	return 0;
}

int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_srq tsrq;
	struct bnxt_re_dev *rdev = srq->rdev;
	int rc;

	/* Get live SRQ attr */
	tsrq.qplib_srq.id = srq->qplib_srq.id;
	rc = bnxt_qplib_query_srq(&rdev->qplib_res, &tsrq.qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Query HW SRQ failed!");
		return rc;
	}
	srq_attr->max_wr = srq->qplib_srq.max_wqe;
	srq_attr->max_sge = srq->qplib_srq.max_sge;
	srq_attr->srq_limit = tsrq.qplib_srq.threshold;

	return 0;
}

int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_qplib_swqe wqe;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&srq->lock, flags);
	while (wr) {
		/* Transcribe each ib_recv_wr to qplib_swqe */
		wqe.num_sge = wr->num_sge;
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->lock, flags);

	return rc;
}

static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
				    struct bnxt_re_qp *qp1_qp,
				    int qp_attr_mask)
{
	struct bnxt_re_qp *qp = rdev->qp1_sqp;
	int rc = 0;

	if (qp_attr_mask & IB_QP_STATE) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = qp1_qp->qplib_qp.state;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		/* Using a Random QKEY */
		qp->qplib_qp.qkey = 0x81818181;
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
	}

	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to modify Shadow QP for QP1");
	return rc;
}

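/*
 * bnxt_re_modify_qp - Modify QP attributes.
 *
 * Translates each bit set in qp_attr_mask into the corresponding qplib
 * modify flag and issues a single firmware MODIFY_QP. Kernel QPs moving
 * to ERR or RESET are also added to or removed from the flush list, so
 * that outstanding work requests complete with a flush status. GSI
 * changes are mirrored onto the shadow QP.
 */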
int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		      int qp_attr_mask, struct ib_udata *udata)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	enum ib_qp_state curr_qp_state, new_qp_state;
	int rc, entries;
	unsigned int flags;
	u8 nw_type;

	qp->qplib_qp.modify_flags = 0;
	if (qp_attr_mask & IB_QP_STATE) {
		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
		new_qp_state = qp_attr->qp_state;
		if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
					ib_qp->qp_type, qp_attr_mask)) {
			dev_err(rdev_to_dev(rdev),
				"Invalid attribute mask: %#x specified ",
				qp_attr_mask);
			dev_err(rdev_to_dev(rdev),
				"for qpn: %#x type: %#x",
				ib_qp->qp_num, ib_qp->qp_type);
			dev_err(rdev_to_dev(rdev),
				"curr_qp_state=0x%x, new_qp_state=0x%x\n",
				curr_qp_state, new_qp_state);
			return -EINVAL;
		}
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
		qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);

		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p to flush list\n",
				qp);
			flags = bnxt_re_lock_cqs(qp);
			bnxt_qplib_add_flush_qp(&qp->qplib_qp);
			bnxt_re_unlock_cqs(qp, flags);
		}
		if (!qp->sumem &&
		    qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
			dev_dbg(rdev_to_dev(rdev),
				"Move QP = %p out of flush list\n",
				qp);
			flags = bnxt_re_lock_cqs(qp);
			bnxt_qplib_clean_qp(&qp->qplib_qp);
			bnxt_re_unlock_cqs(qp, flags);
		}
	}
	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
		qp->qplib_qp.en_sqd_async_notify = true;
	}
	if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
		qp->qplib_qp.access =
			__from_ib_access_flags(qp_attr->qp_access_flags);
		/* LOCAL_WRITE access must be set to allow RC receive */
		qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
		/* Temp: Set all params on QP as of now */
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
		qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
	}
	if (qp_attr_mask & IB_QP_PKEY_INDEX) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
		qp->qplib_qp.pkey_index = qp_attr->pkey_index;
	}
	if (qp_attr_mask & IB_QP_QKEY) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
		qp->qplib_qp.qkey = qp_attr->qkey;
	}
	if (qp_attr_mask & IB_QP_AV) {
		const struct ib_global_route *grh =
			rdma_ah_read_grh(&qp_attr->ah_attr);
		const struct ib_gid_attr *sgid_attr;

		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
				     CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
				     CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
				     CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
				     CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
				     CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
				     CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		memcpy(qp->qplib_qp.ah.dgid.data, grh->dgid.raw,
		       sizeof(qp->qplib_qp.ah.dgid.data));
		qp->qplib_qp.ah.flow_label = grh->flow_label;
		/* If RoCE V2 is enabled, the stack will have two entries for
		 * each GID entry. Avoid the duplicate entry in HW by
		 * dividing the GID index by 2 for RoCE V2.
		 */
		qp->qplib_qp.ah.sgid_index = grh->sgid_index / 2;
		qp->qplib_qp.ah.host_sgid_index = grh->sgid_index;
		qp->qplib_qp.ah.hop_limit = grh->hop_limit;
		qp->qplib_qp.ah.traffic_class = grh->traffic_class;
		qp->qplib_qp.ah.sl = rdma_ah_get_sl(&qp_attr->ah_attr);
		ether_addr_copy(qp->qplib_qp.ah.dmac,
				qp_attr->ah_attr.roce.dmac);

		sgid_attr = qp_attr->ah_attr.grh.sgid_attr;
		rc = rdma_read_gid_l2_fields(sgid_attr, NULL,
					     &qp->qplib_qp.smac[0]);
		if (rc)
			return rc;

		nw_type = rdma_gid_attr_network_type(sgid_attr);
		switch (nw_type) {
		case RDMA_NETWORK_IPV4:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
			break;
		case RDMA_NETWORK_IPV6:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
			break;
		default:
			qp->qplib_qp.nw_type =
				CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
			break;
		}
	}

	if (qp_attr_mask & IB_QP_PATH_MTU) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
		qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
	} else if (qp_attr->qp_state == IB_QPS_RTR) {
		qp->qplib_qp.modify_flags |=
			CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
		qp->qplib_qp.path_mtu =
			__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
		qp->qplib_qp.mtu =
			ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
	}

	if (qp_attr_mask & IB_QP_TIMEOUT) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
		qp->qplib_qp.timeout = qp_attr->timeout;
	}
	if (qp_attr_mask & IB_QP_RETRY_CNT) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
		qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
	}
	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
		qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
	}
	if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
		qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
	}
	if (qp_attr_mask & IB_QP_RQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
		qp->qplib_qp.rq.psn = qp_attr->rq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
		/* Cap the max_rd_atomic to device max */
		qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
						   dev_attr->max_qp_rd_atom);
	}
	if (qp_attr_mask & IB_QP_SQ_PSN) {
		qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
		qp->qplib_qp.sq.psn = qp_attr->sq_psn;
	}
	if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (qp_attr->max_dest_rd_atomic >
		    dev_attr->max_qp_init_rd_atom) {
			dev_err(rdev_to_dev(rdev),
				"max_dest_rd_atomic requested%d is > dev_max%d",
				qp_attr->max_dest_rd_atomic,
				dev_attr->max_qp_init_rd_atom);
			return -EINVAL;
		}

		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
		qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
	}
	if (qp_attr_mask & IB_QP_CAP) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
				CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
				CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
		if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
		    (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
		    (qp_attr->cap.max_inline_data >=
						dev_attr->max_inline_data)) {
			dev_err(rdev_to_dev(rdev),
				"Modify QP failed - max exceeded");
			return -EINVAL;
		}
		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
		qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
						dev_attr->max_qp_wqes + 1);
		qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
						qp_attr->cap.max_send_wr;
		/*
		 * Reserve one slot for the phantom WQE. The application can
		 * then post one extra entry; allow it so that posting the
		 * maximum does not hit an unexpected queue-full condition.
		 */
		qp->qplib_qp.sq.q_full_delta -= 1;
		qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
		if (qp->qplib_qp.rq.max_wqe) {
			entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
			qp->qplib_qp.rq.max_wqe =
				min_t(u32, entries, dev_attr->max_qp_wqes + 1);
			qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
						       qp_attr->cap.max_recv_wr;
			qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
		} else {
			/* SRQ was used prior, just ignore the RQ caps */
		}
	}
	if (qp_attr_mask & IB_QP_DEST_QPN) {
		qp->qplib_qp.modify_flags |=
				CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
		qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
	}
	rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to modify HW QP");
		return rc;
	}
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp)
		rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
	return rc;
}

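/*
 * Query the live QP state from firmware into a scratch qplib_qp and
 * translate it back into ib_qp_attr. Queue capacities are reported from
 * the driver's cached copy rather than from the firmware response.
 */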
int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_re_dev *rdev = qp->rdev;
	struct bnxt_qplib_qp *qplib_qp;
	int rc;

	qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
	if (!qplib_qp)
		return -ENOMEM;

	qplib_qp->id = qp->qplib_qp.id;
	qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;

	rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
		goto out;
	}
	qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
	qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
	qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
	qp_attr->pkey_index = qplib_qp->pkey_index;
	qp_attr->qkey = qplib_qp->qkey;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
			qplib_qp->ah.host_sgid_index,
			qplib_qp->ah.hop_limit,
			qplib_qp->ah.traffic_class);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
	rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
	ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
	qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
	qp_attr->timeout = qplib_qp->timeout;
	qp_attr->retry_cnt = qplib_qp->retry_cnt;
	qp_attr->rnr_retry = qplib_qp->rnr_retry;
	qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
	qp_attr->rq_psn = qplib_qp->rq.psn;
	qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
	qp_attr->sq_psn = qplib_qp->sq.psn;
	qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
	qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
							 IB_SIGNAL_REQ_WR;
	qp_attr->dest_qp_num = qplib_qp->dest_qpn;

	qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
	qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
	qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
	qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
	qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
	qp_init_attr->cap = qp_attr->cap;

out:
	kfree(qplib_qp);
	return rc;
}

/* Routine for sending QP1 packets for RoCE V1 and V2
 */
static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
				     const struct ib_send_wr *wr,
				     struct bnxt_qplib_swqe *wqe,
				     int payload_size)
{
	struct bnxt_re_ah *ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah,
					     ib_ah);
	struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
	const struct ib_gid_attr *sgid_attr = ah->ib_ah.sgid_attr;
	struct bnxt_qplib_sge sge;
	u8 nw_type;
	u16 ether_type;
	union ib_gid dgid;
	bool is_eth = false;
	bool is_vlan = false;
	bool is_grh = false;
	bool is_udp = false;
	u8 ip_version = 0;
	u16 vlan_id = 0xFFFF;
	void *buf;
	int i, rc = 0;

	memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));

	rc = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, NULL);
	if (rc)
		return rc;

	/* Get network header type for this GID */
	nw_type = rdma_gid_attr_network_type(sgid_attr);
	switch (nw_type) {
	case RDMA_NETWORK_IPV4:
		nw_type = BNXT_RE_ROCEV2_IPV4_PACKET;
		break;
	case RDMA_NETWORK_IPV6:
		nw_type = BNXT_RE_ROCEV2_IPV6_PACKET;
		break;
	default:
		nw_type = BNXT_RE_ROCE_V1_PACKET;
		break;
	}
	memcpy(&dgid.raw, &qplib_ah->dgid, 16);
	is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP;
	if (is_udp) {
		if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) {
			ip_version = 4;
			ether_type = ETH_P_IP;
		} else {
			ip_version = 6;
			ether_type = ETH_P_IPV6;
		}
		is_grh = false;
	} else {
		ether_type = ETH_P_IBOE;
		is_grh = true;
	}

	is_eth = true;
	is_vlan = vlan_id && (vlan_id < 0x1000);

	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
			  ip_version, is_udp, 0, &qp->qp1_hdr);

	/* ETH */
	ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->qplib_ah.dmac);
	ether_addr_copy(qp->qp1_hdr.eth.smac_h, qp->qplib_qp.smac);

	/* For vlan, check the sgid for vlan existence */

	if (!is_vlan) {
		qp->qp1_hdr.eth.type = cpu_to_be16(ether_type);
	} else {
		qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type);
		qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id);
	}

	if (is_grh || (ip_version == 6)) {
		memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw,
		       sizeof(sgid_attr->gid));
		memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
		       sizeof(sgid_attr->gid));
		qp->qp1_hdr.grh.hop_limit = qplib_ah->hop_limit;
	}

	if (ip_version == 4) {
		qp->qp1_hdr.ip4.tos = 0;
		qp->qp1_hdr.ip4.id = 0;
		qp->qp1_hdr.ip4.frag_off = htons(IP_DF);
		qp->qp1_hdr.ip4.ttl = qplib_ah->hop_limit;

		memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4);
		memcpy(&qp->qp1_hdr.ip4.daddr, qplib_ah->dgid.data + 12, 4);
		qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr);
	}

	if (is_udp) {
		qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT);
		qp->qp1_hdr.udp.sport = htons(0x8CD1);
		qp->qp1_hdr.udp.csum = 0;
	}

	/* BTH */
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		qp->qp1_hdr.immediate_present = 1;
	} else {
		qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
	}
	if (wr->send_flags & IB_SEND_SOLICITED)
		qp->qp1_hdr.bth.solicited_event = 1;
	/* pad_count */
	qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;

	/* P_key for QP1 is for all members */
	qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
	qp->qp1_hdr.bth.destination_qpn = IB_QP1;
	qp->qp1_hdr.bth.ack_req = 0;
	qp->send_psn++;
	qp->send_psn &= BTH_PSN_MASK;
	qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
	/* DETH */
	/* Use the privileged Q_Key for QP1 */
	qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
	qp->qp1_hdr.deth.source_qpn = IB_QP1;

	/* Pack the QP1 to the transmit buffer */
	buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
	if (buf) {
		ib_ud_header_pack(&qp->qp1_hdr, buf);
		for (i = wqe->num_sge; i; i--) {
			wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
			wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
			wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
		}

		/*
		 * Max Header buf size for IPV6 RoCE V2 is 86,
		 * which is same as the QP1 SQ header buffer.
		 * Header buf size for IPV4 RoCE V2 can be 66.
		 * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20).
		 * Subtract 20 bytes from QP1 SQ header buf size
		 */
		if (is_udp && ip_version == 4)
			sge.size -= 20;
		/*
		 * Max Header buf size for RoCE V1 is 78.
		 * ETH(14) + VLAN(4) + GRH(40) + BTH(20).
		 * Subtract 8 bytes from QP1 SQ header buf size
		 */
		if (!is_udp)
			sge.size -= 8;

		/* Subtract 4 bytes for non vlan packets */
		if (!is_vlan)
			sge.size -= 4;

		wqe->sg_list[0].addr = sge.addr;
		wqe->sg_list[0].lkey = sge.lkey;
		wqe->sg_list[0].size = sge.size;
		wqe->num_sge++;

	} else {
		dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!");
		rc = -ENOMEM;
	}
	return rc;
}

/* For the MAD layer, it only provides the recv SGE the size of
 * ib_grh + MAD datagram. No Ethernet headers, Ethertype, BTH, DETH,
 * nor RoCE iCRC. The Cu+ solution must provide buffer for the entire
 * receive packet (334 bytes) with no VLAN and then copy the GRH
 * and the MAD datagram out to the provided SGE.
 */
static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
					    const struct ib_recv_wr *wr,
					    struct bnxt_qplib_swqe *wqe,
					    int payload_size)
{
	struct bnxt_qplib_sge ref, sge;
	u32 rq_prod_index;
	struct bnxt_re_sqp_entries *sqp_entry;

	rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);

	if (!bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge))
		return -ENOMEM;

	/* Create 1 SGE to receive the entire
	 * ethernet packet
	 */
	/* Save the reference from ULP */
	ref.addr = wqe->sg_list[0].addr;
	ref.lkey = wqe->sg_list[0].lkey;
	ref.size = wqe->sg_list[0].size;

	sqp_entry = &qp->rdev->sqp_tbl[rq_prod_index];

	/* SGE 1 */
	wqe->sg_list[0].addr = sge.addr;
	wqe->sg_list[0].lkey = sge.lkey;
	wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
	sge.size -= wqe->sg_list[0].size;

	sqp_entry->sge.addr = ref.addr;
	sqp_entry->sge.lkey = ref.lkey;
	sqp_entry->sge.size = ref.size;
	/* Store the wrid for reporting completion */
	sqp_entry->wrid = wqe->wr_id;
	/* change the wqe->wrid to table index */
	wqe->wr_id = rq_prod_index;
	return 0;
}

static int is_ud_qp(struct bnxt_re_qp *qp)
{
	return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
		qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
}

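/*
 * Translate the opcode, UD addressing and send flags of an ib_send_wr
 * into a qplib send SWQE.
 */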
static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
				  const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_ah *ah = NULL;

	if (is_ud_qp(qp)) {
		ah = container_of(ud_wr(wr)->ah, struct bnxt_re_ah, ib_ah);
		wqe->send.q_key = ud_wr(wr)->remote_qkey;
		wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
		wqe->send.avid = ah->qplib_ah.id;
	}
	switch (wr->opcode) {
	case IB_WR_SEND:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
		break;
	case IB_WR_SEND_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
		wqe->send.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_SEND_WITH_INV:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
		wqe->send.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
				  struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
		wqe->rdma.imm_data = wr->ex.imm_data;
		break;
	case IB_WR_RDMA_READ:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
		wqe->rdma.inv_key = wr->ex.invalidate_rkey;
		break;
	default:
		return -EINVAL;
	}
	wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
	wqe->rdma.r_key = rdma_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	if (wr->send_flags & IB_SEND_INLINE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;

	return 0;
}

static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	switch (wr->opcode) {
	case IB_WR_ATOMIC_CMP_AND_SWP:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		wqe->atomic.swap_data = atomic_wr(wr)->swap;
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
		wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
		break;
	default:
		return -EINVAL;
	}
	wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
	wqe->atomic.r_key = atomic_wr(wr)->rkey;
	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_FENCE)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
	return 0;
}

static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
	wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;

	/* Need unconditional fence for local invalidate
	 * opcode to work as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
	if (wr->send_flags & IB_SEND_SOLICITED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;

	return 0;
}

static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
				 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_re_mr *mr = container_of(wr->mr, struct bnxt_re_mr, ib_mr);
	struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
	int access = wr->access;

	wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
	wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
	wqe->frmr.page_list = mr->pages;
	wqe->frmr.page_list_len = mr->npages;
	wqe->frmr.levels = qplib_frpl->hwq.level + 1;
	wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;

	/* Need unconditional fence for reg_mr
	 * opcode to function as expected.
	 */
	wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;

	if (wr->wr.send_flags & IB_SEND_SIGNALED)
		wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;

	if (access & IB_ACCESS_LOCAL_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
	if (access & IB_ACCESS_REMOTE_READ)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
	if (access & IB_ACCESS_REMOTE_WRITE)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
	if (access & IB_ACCESS_REMOTE_ATOMIC)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
	if (access & IB_ACCESS_MW_BIND)
		wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;

	wqe->frmr.l_key = wr->key;
	wqe->frmr.length = wr->mr->length;
	wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
	wqe->frmr.va = wr->mr->iova;
	return 0;
}

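/*
 * Copy an inline send's SGE payloads into the WQE itself, bounding the
 * total at BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH. Returns the total number
 * of bytes copied, or -EINVAL on overflow.
 */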
static int bnxt_re_copy_inline_data(struct bnxt_re_dev *rdev,
				    const struct ib_send_wr *wr,
				    struct bnxt_qplib_swqe *wqe)
{
	/* Copy the inline data to the data field */
	u8 *in_data;
	u32 i, sge_len;
	void *sge_addr;

	in_data = wqe->inline_data;
	for (i = 0; i < wr->num_sge; i++) {
		sge_addr = (void *)(unsigned long)
				wr->sg_list[i].addr;
		sge_len = wr->sg_list[i].length;

		if ((sge_len + wqe->inline_len) >
		    BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
			dev_err(rdev_to_dev(rdev),
				"Inline data size requested > supported value");
			return -EINVAL;
		}

		memcpy(in_data, sge_addr, sge_len);
		in_data += wr->sg_list[i].length;
		wqe->inline_len += wr->sg_list[i].length;
	}
	return wqe->inline_len;
}

2242
2243static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
2244 const struct ib_send_wr *wr,
2245 struct bnxt_qplib_swqe *wqe)
2246{
2247 int payload_sz = 0;
2248
2249 if (wr->send_flags & IB_SEND_INLINE)
2250 payload_sz = bnxt_re_copy_inline_data(rdev, wr, wqe);
2251 else
2252 payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe->sg_list,
2253 wqe->num_sge);
2254
2255 return payload_sz;
2256}
2257
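/*
 * Workaround for a send-pipeline stall seen on UD/GSI/raw-Ethernet QPs:
 * once BNXT_RE_UD_QP_HW_STALL WQEs have been posted, nudge the hardware
 * with an (effectively no-op) modify back to RTS and restart the count.
 */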
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
	     qp->ib_qp.qp_type == IB_QPT_GSI ||
	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
		int qp_attr_mask;
		struct ib_qp_attr qp_attr;

		qp_attr_mask = IB_QP_STATE;
		qp_attr.qp_state = IB_QPS_RTS;
		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
		qp->qplib_qp.wqe_cnt = 0;
	}
}

static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_send_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;

		rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(rdev),
				"Post send failed opcode = %#x rc = %d",
				wr->opcode, rc);
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);
	return rc;
}

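/*
 * bnxt_re_post_send - Post a chain of send work requests.
 *
 * Builds one qplib SWQE per ib_send_wr under sq_lock; sends on the GSI
 * (QP1) QP get their UD headers built in software first. The SQ doorbell
 * is rung once after the whole chain has been posted.
 */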
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;

	spin_lock_irqsave(&qp->sq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Send SGEs");
			rc = -EINVAL;
			goto bad;
		}

		payload_sz = bnxt_re_copy_wr_payload(qp->rdev, wr, &wqe);
		if (payload_sz < 0) {
			rc = -EINVAL;
			goto bad;
		}
		wqe.wr_id = wr->wr_id;

		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (qp->qplib_qp.type == CMDQ_CREATE_QP1_TYPE_GSI) {
				rc = bnxt_re_build_qp1_send_v2(qp, wr, &wqe,
							       payload_sz);
				if (rc)
					goto bad;
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
			}
			switch (wr->send_flags) {
			case IB_SEND_IP_CSUM:
				wqe.rawqp1.lflags |=
					SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
				break;
			default:
				break;
			}
			/* fall through */
		case IB_WR_SEND_WITH_INV:
			rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
		case IB_WR_RDMA_READ:
			rc = bnxt_re_build_rdma_wqe(wr, &wqe);
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			rc = bnxt_re_build_atomic_wqe(wr, &wqe);
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			dev_err(rdev_to_dev(qp->rdev),
				"RDMA Read with Invalidate is not supported");
			rc = -EINVAL;
			goto bad;
		case IB_WR_LOCAL_INV:
			rc = bnxt_re_build_inv_wqe(wr, &wqe);
			break;
		case IB_WR_REG_MR:
			rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
			break;
		default:
			/* Unsupported WRs */
			dev_err(rdev_to_dev(qp->rdev),
				"WR (%#x) is not supported", wr->opcode);
			rc = -EINVAL;
			goto bad;
		}
		if (!rc)
			rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
bad:
		if (rc) {
			dev_err(rdev_to_dev(qp->rdev),
				"post_send failed op:%#x qps = %#x rc = %d\n",
				wr->opcode, qp->qplib_qp.state, rc);
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	bnxt_qplib_post_send_db(&qp->qplib_qp);
	bnxt_ud_qp_hw_stall_workaround(qp);
	spin_unlock_irqrestore(&qp->sq_lock, flags);

	return rc;
}

static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
				       struct bnxt_re_qp *qp,
				       const struct ib_recv_wr *wr)
{
	struct bnxt_qplib_swqe wqe;
	int rc = 0;

	memset(&wqe, 0, sizeof(wqe));
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			break;
		}
		bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc)
			break;

		wr = wr->next;
	}
	if (!rc)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);
	return rc;
}

int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	struct bnxt_qplib_swqe wqe;
	int rc = 0, payload_sz = 0;
	unsigned long flags;
	u32 count = 0;

	spin_lock_irqsave(&qp->rq_lock, flags);
	while (wr) {
		/* House keeping */
		memset(&wqe, 0, sizeof(wqe));

		/* Common */
		wqe.num_sge = wr->num_sge;
		if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
			dev_err(rdev_to_dev(qp->rdev),
				"Limit exceeded for Receive SGEs");
			rc = -EINVAL;
			*bad_wr = wr;
			break;
		}

		payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
					       wr->num_sge);
		wqe.wr_id = wr->wr_id;
		wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;

		if (ib_qp->qp_type == IB_QPT_GSI &&
		    qp->qplib_qp.type != CMDQ_CREATE_QP_TYPE_GSI)
			rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, &wqe,
							      payload_sz);
		if (!rc)
			rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
		if (rc) {
			*bad_wr = wr;
			break;
		}

		/* Ring DB if the RQEs posted reaches a threshold value */
		if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
			bnxt_qplib_post_recv_db(&qp->qplib_qp);
			count = 0;
		}

		wr = wr->next;
	}

	if (count)
		bnxt_qplib_post_recv_db(&qp->qplib_qp);

	spin_unlock_irqrestore(&qp->rq_lock, flags);

	return rc;
}

/* Completion Queues */
void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct bnxt_re_cq *cq;
	struct bnxt_qplib_nq *nq;
	struct bnxt_re_dev *rdev;

	cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
	rdev = cq->rdev;
	nq = cq->qplib_cq.nq;

	bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
	ib_umem_release(cq->umem);

	atomic_dec(&rdev->cq_count);
	nq->budget--;
	kfree(cq->cql);
}

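/*
 * bnxt_re_create_cq - Create a completion queue.
 *
 * Sizes the CQE ring one deeper than requested, maps the user ring (or
 * allocates a kernel CQE scratch array for polling), and spreads CQs
 * across the available NQs in round-robin order before creating the HW
 * CQ.
 */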
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
	int rc, entries;
	int cqe = attr->cqe;
	struct bnxt_qplib_nq *nq = NULL;
	unsigned int nq_alloc_cnt;

	/* Validate CQ fields */
	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
		dev_err(rdev_to_dev(rdev), "Failed to create CQ - max exceeded");
		return -EINVAL;
	}

	cq->rdev = rdev;
	cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);

	entries = roundup_pow_of_two(cqe + 1);
	if (entries > dev_attr->max_cq_wqes + 1)
		entries = dev_attr->max_cq_wqes + 1;

	if (udata) {
		struct bnxt_re_cq_req req;
		struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
			udata, struct bnxt_re_ucontext, ib_uctx);
		if (ib_copy_from_udata(&req, udata, sizeof(req))) {
			rc = -EFAULT;
			goto fail;
		}

		cq->umem = ib_umem_get(udata, req.cq_va,
				       entries * sizeof(struct cq_base),
				       IB_ACCESS_LOCAL_WRITE, 1);
		if (IS_ERR(cq->umem)) {
			rc = PTR_ERR(cq->umem);
			goto fail;
		}
		cq->qplib_cq.sg_info.sglist = cq->umem->sg_head.sgl;
		cq->qplib_cq.sg_info.npages = ib_umem_num_pages(cq->umem);
		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
		cq->qplib_cq.dpi = &uctx->dpi;
	} else {
		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
		cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
				  GFP_KERNEL);
		if (!cq->cql) {
			rc = -ENOMEM;
			goto fail;
		}

		cq->qplib_cq.dpi = &rdev->dpi_privileged;
	}
	/*
	 * Allocate the NQ in a round-robin fashion. nq_alloc_cnt is
	 * used for getting the NQ index.
	 */
	nq_alloc_cnt = atomic_inc_return(&rdev->nq_alloc_cnt);
	nq = &rdev->nq[nq_alloc_cnt % (rdev->num_msix - 1)];
	cq->qplib_cq.max_wqe = entries;
	cq->qplib_cq.cnq_hw_ring_id = nq->ring_id;
	cq->qplib_cq.nq = nq;

	rc = bnxt_qplib_create_cq(&rdev->qplib_res, &cq->qplib_cq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Failed to create HW CQ");
		goto fail;
	}

	cq->ib_cq.cqe = entries;
	cq->cq_period = cq->qplib_cq.period;
	nq->budget++;

	atomic_inc(&rdev->cq_count);
	spin_lock_init(&cq->cq_lock);

	if (udata) {
		struct bnxt_re_cq_resp resp;

		resp.cqid = cq->qplib_cq.id;
		resp.tail = cq->qplib_cq.hwq.cons;
		resp.phase = cq->qplib_cq.period;
		resp.rsvd = 0;
		rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
		if (rc) {
			dev_err(rdev_to_dev(rdev), "Failed to copy CQ udata");
			bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
			goto c2fail;
		}
	}

	return 0;

c2fail:
	ib_umem_release(cq->umem);
fail:
	kfree(cq->cql);
	return rc;
}

static u8 __req_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_REQ_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
		return IB_WC_BAD_RESP_ERR;
	case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
		return IB_WC_REM_OP_ERR;
	case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
		return IB_WC_RNR_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
		return IB_WC_RETRY_EXC_ERR;
	case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rawqp1_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RAWETH_QP1_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static u8 __rc_to_ib_wc_status(u8 qstatus)
{
	switch (qstatus) {
	case CQ_RES_RC_STATUS_OK:
		return IB_WC_SUCCESS;
	case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
		return IB_WC_LOC_ACCESS_ERR;
	case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
		return IB_WC_LOC_LEN_ERR;
	case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
		return IB_WC_LOC_PROT_ERR;
	case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
		return IB_WC_GENERAL_ERR;
	case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
		return IB_WC_REM_INV_REQ_ERR;
	case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
		return IB_WC_WR_FLUSH_ERR;
	case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
		return IB_WC_WR_FLUSH_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}

static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	switch (cqe->type) {
	case BNXT_QPLIB_SWQE_TYPE_SEND:
		wc->opcode = IB_WC_SEND;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
		wc->opcode = IB_WC_SEND;
		wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
		wc->opcode = IB_WC_RDMA_WRITE;
		wc->wc_flags |= IB_WC_WITH_IMM;
		break;
	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
		wc->opcode = IB_WC_LOCAL_INV;
		break;
	case BNXT_QPLIB_SWQE_TYPE_REG_MR:
		wc->opcode = IB_WC_REG_MR;
		break;
	default:
		wc->opcode = IB_WC_SEND;
		break;
	}

	wc->status = __req_to_ib_wc_status(cqe->status);
}

2762static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
2763 u16 raweth_qp1_flags2)
2764{
2765 bool is_ipv6 = false, is_ipv4 = false;
2766
2767 /* raweth_qp1_flags Bit 9-6 indicates itype */
2768 if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2769 != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
2770 return -1;
2771
2772 if (raweth_qp1_flags2 &
2773 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
2774 raweth_qp1_flags2 &
2775 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
		/* raweth_qp1_flags2 bit 8 indicates ip_type: 0 - v4, 1 - v6 */
		if (raweth_qp1_flags2 &
		    CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE)
			is_ipv6 = true;
		else
			is_ipv4 = true;
2780 return ((is_ipv6) ?
2781 BNXT_RE_ROCEV2_IPV6_PACKET :
2782 BNXT_RE_ROCEV2_IPV4_PACKET);
2783 } else {
2784 return BNXT_RE_ROCE_V1_PACKET;
2785 }
2786}
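
/*
 * Illustrative caller pattern (a sketch, not driver code): a negative
 * return means "not a RoCE frame"; otherwise the type selects the header
 * size assumed by the shadow-QP code below:
 *
 *	int pkt = bnxt_re_check_packet_type(flags, flags2);
 *
 *	if (pkt < 0)
 *		return -EINVAL;
 *	hdr_len = (pkt == BNXT_RE_ROCEV2_IPV4_PACKET) ? 20 : 40;
 *
 * hdr_len is a hypothetical local mirroring the 20-byte IPv4 vs. 40-byte
 * GRH/IPv6 header sizes used in bnxt_re_process_raw_qp_pkt_rx().
 */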
2787
2788static int bnxt_re_to_ib_nw_type(int nw_type)
2789{
2790 u8 nw_hdr_type = 0xFF;
2791
2792 switch (nw_type) {
2793 case BNXT_RE_ROCE_V1_PACKET:
2794 nw_hdr_type = RDMA_NETWORK_ROCE_V1;
2795 break;
2796 case BNXT_RE_ROCEV2_IPV4_PACKET:
2797 nw_hdr_type = RDMA_NETWORK_IPV4;
2798 break;
2799 case BNXT_RE_ROCEV2_IPV6_PACKET:
2800 nw_hdr_type = RDMA_NETWORK_IPV6;
2801 break;
2802 }
2803 return nw_hdr_type;
2804}
2805
2806static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
2807 void *rq_hdr_buf)
2808{
2809 u8 *tmp_buf = NULL;
2810 struct ethhdr *eth_hdr;
2811 u16 eth_type;
2812 bool rc = false;
2813
2814 tmp_buf = (u8 *)rq_hdr_buf;
	/*
	 * If the destination MAC is not the same as the interface MAC, this
	 * could be a loopback or a multicast address; check whether it is a
	 * loopback packet.
	 */
2820 if (!ether_addr_equal(tmp_buf, rdev->netdev->dev_addr)) {
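		/*
		 * A loopback frame carries a 4-byte internal header before
		 * the ether header (see skip_bytes in the caller); step
		 * past it before parsing.
		 */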
2821 tmp_buf += 4;
2822 /* Check the ether type */
2823 eth_hdr = (struct ethhdr *)tmp_buf;
2824 eth_type = ntohs(eth_hdr->h_proto);
2825 switch (eth_type) {
2826 case ETH_P_IBOE:
2827 rc = true;
2828 break;
2829 case ETH_P_IP:
2830 case ETH_P_IPV6: {
2831 u32 len;
2832 struct udphdr *udp_hdr;
2833
2834 len = (eth_type == ETH_P_IP ? sizeof(struct iphdr) :
2835 sizeof(struct ipv6hdr));
2836 tmp_buf += sizeof(struct ethhdr) + len;
2837 udp_hdr = (struct udphdr *)tmp_buf;
2838 if (ntohs(udp_hdr->dest) ==
2839 ROCE_V2_UDP_DPORT)
2840 rc = true;
2841 break;
2842 }
2843 default:
2844 break;
2845 }
2846 }
2847
2848 return rc;
2849}
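
/*
 * Buffer layout assumed by the parser above (a sketch inferred from the
 * offsets used): a loopback frame is prefixed with a 4-byte internal
 * header, so the real ether header starts at offset 4:
 *
 *	| 4B internal hdr | ethhdr | iphdr/ipv6hdr | udphdr | payload |
 *
 * RoCE v1 is matched on ETH_P_IBOE; RoCE v2 on a UDP destination port of
 * ROCE_V2_UDP_DPORT.
 */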
2850
2851static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *qp1_qp,
2852 struct bnxt_qplib_cqe *cqe)
2853{
2854 struct bnxt_re_dev *rdev = qp1_qp->rdev;
2855 struct bnxt_re_sqp_entries *sqp_entry = NULL;
2856 struct bnxt_re_qp *qp = rdev->qp1_sqp;
2857 struct ib_send_wr *swr;
2858 struct ib_ud_wr udwr;
2859 struct ib_recv_wr rwr;
2860 int pkt_type = 0;
2861 u32 tbl_idx;
2862 void *rq_hdr_buf;
2863 dma_addr_t rq_hdr_buf_map;
2864 dma_addr_t shrq_hdr_buf_map;
2865 u32 offset = 0;
2866 u32 skip_bytes = 0;
2867 struct ib_sge s_sge[2];
2868 struct ib_sge r_sge[2];
2869 int rc;
2870
2871 memset(&udwr, 0, sizeof(udwr));
2872 memset(&rwr, 0, sizeof(rwr));
2873 memset(&s_sge, 0, sizeof(s_sge));
2874 memset(&r_sge, 0, sizeof(r_sge));
2875
2876 swr = &udwr.wr;
2877 tbl_idx = cqe->wr_id;
2878
2879 rq_hdr_buf = qp1_qp->qplib_qp.rq_hdr_buf +
2880 (tbl_idx * qp1_qp->qplib_qp.rq_hdr_buf_size);
2881 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp1_qp->qplib_qp,
2882 tbl_idx);
2883
2884 /* Shadow QP header buffer */
2885 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&qp->qplib_qp,
2886 tbl_idx);
2887 sqp_entry = &rdev->sqp_tbl[tbl_idx];
2888
2889 /* Store this cqe */
2890 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
2891 sqp_entry->qp1_qp = qp1_qp;
2892
2893 /* Find packet type from the cqe */
2894
2895 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
2896 cqe->raweth_qp1_flags2);
2897 if (pkt_type < 0) {
2898 dev_err(rdev_to_dev(rdev), "Invalid packet\n");
2899 return -EINVAL;
2900 }
2901
	/*
	 * Adjust the offset for the user buffer and post in the rq. A RoCE
	 * v2 IPv4 frame carries a 20-byte IPv4 header where RoCE v1/v2-IPv6
	 * frames carry a 40-byte GRH/IPv6 header, hence the 20-byte shift.
	 */
2903
2904 if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
2905 offset = 20;
2906
2907 /*
2908 * QP1 loopback packet has 4 bytes of internal header before
2909 * ether header. Skip these four bytes.
2910 */
2911 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
2912 skip_bytes = 4;
2913
	/* First send SGE. Skip the ether header */
2915 s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
2916 + skip_bytes;
2917 s_sge[0].lkey = 0xFFFFFFFF;
2918 s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
2919 BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
2920
2921 /* Second Send SGE */
2922 s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
2923 BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
2924 if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
2925 s_sge[1].addr += 8;
2926 s_sge[1].lkey = 0xFFFFFFFF;
2927 s_sge[1].length = 256;
2928
2929 /* First recv SGE */
2930
2931 r_sge[0].addr = shrq_hdr_buf_map;
2932 r_sge[0].lkey = 0xFFFFFFFF;
2933 r_sge[0].length = 40;
2934
2935 r_sge[1].addr = sqp_entry->sge.addr + offset;
2936 r_sge[1].lkey = sqp_entry->sge.lkey;
2937 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
2938
2939 /* Create receive work request */
2940 rwr.num_sge = 2;
2941 rwr.sg_list = r_sge;
2942 rwr.wr_id = tbl_idx;
2943 rwr.next = NULL;
2944
2945 rc = bnxt_re_post_recv_shadow_qp(rdev, qp, &rwr);
2946 if (rc) {
2947 dev_err(rdev_to_dev(rdev),
2948 "Failed to post Rx buffers to shadow QP");
2949 return -ENOMEM;
2950 }
2951
2952 swr->num_sge = 2;
2953 swr->sg_list = s_sge;
2954 swr->wr_id = tbl_idx;
2955 swr->opcode = IB_WR_SEND;
2956 swr->next = NULL;
2957
2958 udwr.ah = &rdev->sqp_ah->ib_ah;
2959 udwr.remote_qpn = rdev->qp1_sqp->qplib_qp.id;
2960 udwr.remote_qkey = rdev->qp1_sqp->qplib_qp.qkey;
2961
	/* Post the data received on QP1 to the shadow QP's send queue */
	rc = bnxt_re_post_send_shadow_qp(rdev, qp, swr);
	if (rc)
		dev_err(rdev_to_dev(rdev),
			"Failed to post packet to shadow QP send queue");

	return 0;
2966}
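
/*
 * Dataflow sketch for the GSI passthrough above: a frame received on the
 * real QP1 is relayed through the shadow QP so the raw headers are
 * stripped before user space sees it:
 *
 *	QP1 RQ hdr buf --(s_sge[0..1]: GRH/IP hdr + payload)--> shadow SQ
 *	shadow RQ <--(r_sge[0..1]: 40B shadow hdr + user buffer)--
 *
 * The original CQE is stashed in sqp_tbl[tbl_idx] and replayed with the
 * user's wr_id when the shadow QP completion is polled.
 */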
2967
2968static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
2969 struct bnxt_qplib_cqe *cqe)
2970{
2971 wc->opcode = IB_WC_RECV;
2972 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
2973 wc->wc_flags |= IB_WC_GRH;
2974}
2975
2976static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
2977 u16 *vid, u8 *sl)
2978{
2979 bool ret = false;
2980 u32 metadata;
2981 u16 tpid;
2982
2983 metadata = orig_cqe->raweth_qp1_metadata;
2984 if (orig_cqe->raweth_qp1_flags2 &
2985 CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
2986 tpid = ((metadata &
2987 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
2988 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
2989 if (tpid == ETH_P_8021Q) {
2990 *vid = metadata &
2991 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
2992 *sl = (metadata &
2993 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
2994 CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
2995 ret = true;
2996 }
2997 }
2998
2999 return ret;
3000}
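
/*
 * raweth_qp1_metadata layout assumed above, per the mask/shift macros:
 * the VID occupies the low bits, with PRI and TPID packed above it. Only
 * 802.1Q tags (TPID == ETH_P_8021Q, 0x8100) are reported; any other TPID
 * leaves *vid and *sl untouched and returns false.
 */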
3001
3002static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
3003 struct bnxt_qplib_cqe *cqe)
3004{
3005 wc->opcode = IB_WC_RECV;
3006 wc->status = __rc_to_ib_wc_status(cqe->status);
3007
3008 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
3009 wc->wc_flags |= IB_WC_WITH_IMM;
3010 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
3011 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3012 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
3013 (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
3014 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3015}
3016
3017static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
3018 struct ib_wc *wc,
3019 struct bnxt_qplib_cqe *cqe)
3020{
3021 struct bnxt_re_dev *rdev = qp->rdev;
3022 struct bnxt_re_qp *qp1_qp = NULL;
3023 struct bnxt_qplib_cqe *orig_cqe = NULL;
3024 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3025 int nw_type;
3026 u32 tbl_idx;
3027 u16 vlan_id;
3028 u8 sl;
3029
3030 tbl_idx = cqe->wr_id;
3031
3032 sqp_entry = &rdev->sqp_tbl[tbl_idx];
3033 qp1_qp = sqp_entry->qp1_qp;
3034 orig_cqe = &sqp_entry->cqe;
3035
3036 wc->wr_id = sqp_entry->wrid;
3037 wc->byte_len = orig_cqe->length;
3038 wc->qp = &qp1_qp->ib_qp;
3039
3040 wc->ex.imm_data = orig_cqe->immdata;
3041 wc->src_qp = orig_cqe->src_qp;
3042 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
3043 if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
3044 wc->vlan_id = vlan_id;
3045 wc->sl = sl;
3046 wc->wc_flags |= IB_WC_WITH_VLAN;
3047 }
3048 wc->port_num = 1;
3049 wc->vendor_err = orig_cqe->status;
3050
3051 wc->opcode = IB_WC_RECV;
3052 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
3053 wc->wc_flags |= IB_WC_GRH;
3054
3055 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
3056 orig_cqe->raweth_qp1_flags2);
3057 if (nw_type >= 0) {
3058 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3059 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3060 }
3061}
3062
3063static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
3064 struct ib_wc *wc,
3065 struct bnxt_qplib_cqe *cqe)
3066{
3067 u8 nw_type;
3068
3069 wc->opcode = IB_WC_RECV;
3070 wc->status = __rc_to_ib_wc_status(cqe->status);
3071
3072 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
3073 wc->wc_flags |= IB_WC_WITH_IMM;
3074 /* report only on GSI QP for Thor */
3075 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI) {
3076 wc->wc_flags |= IB_WC_GRH;
3077 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3078 wc->wc_flags |= IB_WC_WITH_SMAC;
3079 if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
			/* The 12-bit mask guarantees a valid VLAN ID */
			wc->vlan_id = cqe->cfa_meta & 0xFFF;
			wc->wc_flags |= IB_WC_WITH_VLAN;
3083 }
3084 nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
3085 CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
3086 wc->network_hdr_type = bnxt_re_to_ib_nw_type(nw_type);
3087 wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
3088 }
3090}
3091
3092static int send_phantom_wqe(struct bnxt_re_qp *qp)
3093{
3094 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
3095 unsigned long flags;
3096 int rc = 0;
3097
3098 spin_lock_irqsave(&qp->sq_lock, flags);
3099
3100 rc = bnxt_re_bind_fence_mw(lib_qp);
3101 if (!rc) {
3102 lib_qp->sq.phantom_wqe_cnt++;
3103 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
3104 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
3105 lib_qp->id, lib_qp->sq.hwq.prod,
3106 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
3107 lib_qp->sq.phantom_wqe_cnt);
3108 }
3109
3110 spin_unlock_irqrestore(&qp->sq_lock, flags);
3111 return rc;
3112}
3113
3114int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
3115{
3116 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3117 struct bnxt_re_qp *qp;
3118 struct bnxt_qplib_cqe *cqe;
3119 int i, ncqe, budget;
3120 struct bnxt_qplib_q *sq;
3121 struct bnxt_qplib_qp *lib_qp;
3122 u32 tbl_idx;
3123 struct bnxt_re_sqp_entries *sqp_entry = NULL;
3124 unsigned long flags;
3125
3126 spin_lock_irqsave(&cq->cq_lock, flags);
3127 budget = min_t(u32, num_entries, cq->max_cql);
3128 num_entries = budget;
3129 if (!cq->cql) {
		dev_err(rdev_to_dev(cq->rdev), "POLL CQ: no CQL to use");
3131 goto exit;
3132 }
3133 cqe = &cq->cql[0];
3134 while (budget) {
3135 lib_qp = NULL;
3136 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
3137 if (lib_qp) {
3138 sq = &lib_qp->sq;
3139 if (sq->send_phantom) {
3140 qp = container_of(lib_qp,
3141 struct bnxt_re_qp, qplib_qp);
3142 if (send_phantom_wqe(qp) == -ENOMEM)
3143 dev_err(rdev_to_dev(cq->rdev),
3144 "Phantom failed! Scheduled to send again\n");
3145 else
3146 sq->send_phantom = false;
3147 }
3148 }
3149 if (ncqe < budget)
3150 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
3151 cqe + ncqe,
3152 budget - ncqe);
3153
3154 if (!ncqe)
3155 break;
3156
3157 for (i = 0; i < ncqe; i++, cqe++) {
3158 /* Transcribe each qplib_wqe back to ib_wc */
3159 memset(wc, 0, sizeof(*wc));
3160
3161 wc->wr_id = cqe->wr_id;
3162 wc->byte_len = cqe->length;
			qp = container_of((struct bnxt_qplib_qp *)
					  (unsigned long)cqe->qp_handle,
					  struct bnxt_re_qp, qplib_qp);
3167 if (!qp) {
3168 dev_err(rdev_to_dev(cq->rdev),
3169 "POLL CQ : bad QP handle");
3170 continue;
3171 }
3172 wc->qp = &qp->ib_qp;
3173 wc->ex.imm_data = cqe->immdata;
3174 wc->src_qp = cqe->src_qp;
3175 memcpy(wc->smac, cqe->smac, ETH_ALEN);
3176 wc->port_num = 1;
3177 wc->vendor_err = cqe->status;
3178
3179 switch (cqe->opcode) {
3180 case CQ_BASE_CQE_TYPE_REQ:
3181 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3182 qp->rdev->qp1_sqp->qplib_qp.id) {
3183 /* Handle this completion with
3184 * the stored completion
3185 */
3186 memset(wc, 0, sizeof(*wc));
3187 continue;
3188 }
3189 bnxt_re_process_req_wc(wc, cqe);
3190 break;
3191 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3192 if (!cqe->status) {
3193 int rc = 0;
3194
				rc = bnxt_re_process_raw_qp_pkt_rx(qp, cqe);
3197 if (!rc) {
3198 memset(wc, 0, sizeof(*wc));
3199 continue;
3200 }
3201 cqe->status = -1;
3202 }
3203 /* Errors need not be looped back.
3204 * But change the wr_id to the one
3205 * stored in the table
3206 */
3207 tbl_idx = cqe->wr_id;
3208 sqp_entry = &cq->rdev->sqp_tbl[tbl_idx];
3209 wc->wr_id = sqp_entry->wrid;
3210 bnxt_re_process_res_rawqp1_wc(wc, cqe);
3211 break;
3212 case CQ_BASE_CQE_TYPE_RES_RC:
3213 bnxt_re_process_res_rc_wc(wc, cqe);
3214 break;
3215 case CQ_BASE_CQE_TYPE_RES_UD:
3216 if (qp->rdev->qp1_sqp && qp->qplib_qp.id ==
3217 qp->rdev->qp1_sqp->qplib_qp.id) {
3218 /* Handle this completion with
3219 * the stored completion
3220 */
				if (cqe->status)
					continue;
				bnxt_re_process_res_shadow_qp_wc(qp, wc,
								 cqe);
				break;
3228 }
3229 bnxt_re_process_res_ud_wc(qp, wc, cqe);
3230 break;
3231 default:
3232 dev_err(rdev_to_dev(cq->rdev),
3233 "POLL CQ : type 0x%x not handled",
3234 cqe->opcode);
3235 continue;
3236 }
3237 wc++;
3238 budget--;
3239 }
3240 }
3241exit:
3242 spin_unlock_irqrestore(&cq->cq_lock, flags);
3243 return num_entries - budget;
3244}
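
/*
 * Consumer-side sketch (illustrative only): ULPs reach the handler above
 * through the core verbs API, typically draining in a loop:
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				handle_error(&wc[i]);
 *
 * handle_error() is a hypothetical helper. Note that the budget is
 * clamped to cq->max_cql, so one call never returns more than max_cql
 * entries regardless of num_entries.
 */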
3245
3246int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
3247 enum ib_cq_notify_flags ib_cqn_flags)
3248{
3249 struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
3250 int type = 0, rc = 0;
3251 unsigned long flags;
3252
3253 spin_lock_irqsave(&cq->cq_lock, flags);
3254 /* Trigger on the very next completion */
3255 if (ib_cqn_flags & IB_CQ_NEXT_COMP)
3256 type = DBC_DBC_TYPE_CQ_ARMALL;
3257 /* Trigger on the next solicited completion */
3258 else if (ib_cqn_flags & IB_CQ_SOLICITED)
3259 type = DBC_DBC_TYPE_CQ_ARMSE;
3260
3261 /* Poll to see if there are missed events */
3262 if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
3263 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
3264 rc = 1;
3265 goto exit;
3266 }
3267 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
3268
3269exit:
3270 spin_unlock_irqrestore(&cq->cq_lock, flags);
3271 return rc;
3272}
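
/*
 * Returning 1 when IB_CQ_REPORT_MISSED_EVENTS is set and the CQ is
 * non-empty supports the standard race-free re-arm loop (a sketch;
 * process() is a hypothetical consumer helper):
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */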
3273
3274/* Memory Regions */
3275struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
3276{
3277 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3278 struct bnxt_re_dev *rdev = pd->rdev;
3279 struct bnxt_re_mr *mr;
3280 u64 pbl = 0;
3281 int rc;
3282
3283 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3284 if (!mr)
3285 return ERR_PTR(-ENOMEM);
3286
3287 mr->rdev = rdev;
3288 mr->qplib_mr.pd = &pd->qplib_pd;
3289 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3290 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3291
3292 /* Allocate and register 0 as the address */
3293 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3294 if (rc)
3295 goto fail;
3296
3297 mr->qplib_mr.hwq.level = PBL_LVL_MAX;
	mr->qplib_mr.total_size = -1; /* Infinite length */
3299 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl, 0, false,
3300 PAGE_SIZE);
3301 if (rc)
3302 goto fail_mr;
3303
3304 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3305 if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
3306 IB_ACCESS_REMOTE_ATOMIC))
3307 mr->ib_mr.rkey = mr->ib_mr.lkey;
3308 atomic_inc(&rdev->mr_count);
3309
3310 return &mr->ib_mr;
3311
3312fail_mr:
3313 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3314fail:
3315 kfree(mr);
3316 return ERR_PTR(rc);
3317}
3318
3319int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
3320{
3321 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3322 struct bnxt_re_dev *rdev = mr->rdev;
3323 int rc;
3324
3325 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3326 if (rc)
3327 dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
3328
3329 if (mr->pages) {
3330 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3331 &mr->qplib_frpl);
3332 kfree(mr->pages);
3333 mr->npages = 0;
3334 mr->pages = NULL;
3335 }
3336 ib_umem_release(mr->ib_umem);
3337
3338 kfree(mr);
3339 atomic_dec(&rdev->mr_count);
3340 return rc;
3341}
3342
3343static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
3344{
3345 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3346
3347 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
3348 return -ENOMEM;
3349
3350 mr->pages[mr->npages++] = addr;
3351 return 0;
3352}
3353
3354int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
3355 unsigned int *sg_offset)
3356{
3357 struct bnxt_re_mr *mr = container_of(ib_mr, struct bnxt_re_mr, ib_mr);
3358
3359 mr->npages = 0;
3360 return ib_sg_to_pages(ib_mr, sg, sg_nents, sg_offset, bnxt_re_set_page);
3361}
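
/*
 * Typical fast-registration flow against these hooks (a sketch; error
 * handling omitted): map the SG list into the MR's page array, then post
 * an IB_WR_REG_MR work request:
 *
 *	struct ib_reg_wr reg_wr = {};
 *	int n;
 *
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	reg_wr.wr.opcode = IB_WR_REG_MR;
 *	reg_wr.mr = mr;
 *	reg_wr.key = mr->rkey;
 *	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
 *	ib_post_send(qp, &reg_wr.wr, NULL);
 *
 * bnxt_re_set_page() bounds the walk at max_pg_ptrs, so n can never
 * exceed the max_num_sg passed to bnxt_re_alloc_mr() below.
 */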
3362
3363struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
3364 u32 max_num_sg, struct ib_udata *udata)
3365{
3366 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3367 struct bnxt_re_dev *rdev = pd->rdev;
3368 struct bnxt_re_mr *mr = NULL;
3369 int rc;
3370
3371 if (type != IB_MR_TYPE_MEM_REG) {
3372 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported", type);
3373 return ERR_PTR(-EINVAL);
3374 }
3375 if (max_num_sg > MAX_PBL_LVL_1_PGS)
3376 return ERR_PTR(-EINVAL);
3377
3378 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3379 if (!mr)
3380 return ERR_PTR(-ENOMEM);
3381
3382 mr->rdev = rdev;
3383 mr->qplib_mr.pd = &pd->qplib_pd;
3384 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
3385 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
3386
3387 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3388 if (rc)
3389 goto bail;
3390
3391 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3392 mr->ib_mr.rkey = mr->ib_mr.lkey;
3393
3394 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
3395 if (!mr->pages) {
3396 rc = -ENOMEM;
3397 goto fail;
3398 }
3399 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
3400 &mr->qplib_frpl, max_num_sg);
3401 if (rc) {
3402 dev_err(rdev_to_dev(rdev),
3403 "Failed to allocate HW FR page list");
3404 goto fail_mr;
3405 }
3406
3407 atomic_inc(&rdev->mr_count);
3408 return &mr->ib_mr;
3409
3410fail_mr:
3411 kfree(mr->pages);
3412fail:
3413 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3414bail:
3415 kfree(mr);
3416 return ERR_PTR(rc);
3417}
3418
3419struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
3420 struct ib_udata *udata)
3421{
3422 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3423 struct bnxt_re_dev *rdev = pd->rdev;
3424 struct bnxt_re_mw *mw;
3425 int rc;
3426
3427 mw = kzalloc(sizeof(*mw), GFP_KERNEL);
3428 if (!mw)
3429 return ERR_PTR(-ENOMEM);
3430 mw->rdev = rdev;
3431 mw->qplib_mw.pd = &pd->qplib_pd;
3432
3433 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
3434 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
3435 CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
3436 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
3437 if (rc) {
3438 dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
3439 goto fail;
3440 }
3441 mw->ib_mw.rkey = mw->qplib_mw.rkey;
3442
3443 atomic_inc(&rdev->mw_count);
3444 return &mw->ib_mw;
3445
3446fail:
3447 kfree(mw);
3448 return ERR_PTR(rc);
3449}
3450
3451int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
3452{
3453 struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
3454 struct bnxt_re_dev *rdev = mw->rdev;
3455 int rc;
3456
3457 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
3458 if (rc) {
3459 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
3460 return rc;
3461 }
3462
3463 kfree(mw);
3464 atomic_dec(&rdev->mw_count);
3465 return rc;
3466}
3467
3468static int bnxt_re_page_size_ok(int page_shift)
3469{
3470 switch (page_shift) {
3471 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
3472 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
3473 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
3474 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
3475 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
3476 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
3477 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
3478 case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
3479 return 1;
3480 default:
3481 return 0;
3482 }
3483}
3484
3485static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
3486 int page_shift)
3487{
3488 u64 *pbl_tbl = pbl_tbl_orig;
3489 u64 page_size = BIT_ULL(page_shift);
3490 struct ib_block_iter biter;
3491
3492 rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, page_size)
3493 *pbl_tbl++ = rdma_block_iter_dma_address(&biter);
3494
3495 return pbl_tbl - pbl_tbl_orig;
3496}
3497
3498/* uverbs */
3499struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3500 u64 virt_addr, int mr_access_flags,
3501 struct ib_udata *udata)
3502{
3503 struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
3504 struct bnxt_re_dev *rdev = pd->rdev;
3505 struct bnxt_re_mr *mr;
3506 struct ib_umem *umem;
3507 u64 *pbl_tbl = NULL;
3508 int umem_pgs, page_shift, rc;
3509
3510 if (length > BNXT_RE_MAX_MR_SIZE) {
		dev_err(rdev_to_dev(rdev), "MR Size: %llu > Max supported: %llu\n",
3512 length, BNXT_RE_MAX_MR_SIZE);
3513 return ERR_PTR(-ENOMEM);
3514 }
3515
3516 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
3517 if (!mr)
3518 return ERR_PTR(-ENOMEM);
3519
3520 mr->rdev = rdev;
3521 mr->qplib_mr.pd = &pd->qplib_pd;
3522 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
3523 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
3524
3525 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
3526 if (rc) {
3527 dev_err(rdev_to_dev(rdev), "Failed to allocate MR");
3528 goto free_mr;
3529 }
3530 /* The fixed portion of the rkey is the same as the lkey */
3531 mr->ib_mr.rkey = mr->qplib_mr.rkey;
3532
3533 umem = ib_umem_get(udata, start, length, mr_access_flags, 0);
3534 if (IS_ERR(umem)) {
3535 dev_err(rdev_to_dev(rdev), "Failed to get umem");
3536 rc = -EFAULT;
3537 goto free_mrw;
3538 }
3539 mr->ib_umem = umem;
3540
3541 mr->qplib_mr.va = virt_addr;
3542 umem_pgs = ib_umem_page_count(umem);
3543 if (!umem_pgs) {
3544 dev_err(rdev_to_dev(rdev), "umem is invalid!");
3545 rc = -EINVAL;
3546 goto free_umem;
3547 }
3548 mr->qplib_mr.total_size = length;
3549
	pbl_tbl = kcalloc(umem_pgs, sizeof(*pbl_tbl), GFP_KERNEL);
3551 if (!pbl_tbl) {
3552 rc = -ENOMEM;
3553 goto free_umem;
3554 }
3555
3556 page_shift = __ffs(ib_umem_find_best_pgsz(umem,
3557 BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M,
3558 virt_addr));
3559
3560 if (!bnxt_re_page_size_ok(page_shift)) {
3561 dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
3562 rc = -EFAULT;
3563 goto fail;
3564 }
3565
3566 if (page_shift == BNXT_RE_PAGE_SHIFT_4K &&
3567 length > BNXT_RE_MAX_MR_SIZE_LOW) {
3568 dev_err(rdev_to_dev(rdev), "Requested MR Sz:%llu Max sup:%llu",
3569 length, (u64)BNXT_RE_MAX_MR_SIZE_LOW);
3570 rc = -EINVAL;
3571 goto fail;
3572 }
3573
3574 /* Map umem buf ptrs to the PBL */
3575 umem_pgs = fill_umem_pbl_tbl(umem, pbl_tbl, page_shift);
3576 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, pbl_tbl,
3577 umem_pgs, false, 1 << page_shift);
3578 if (rc) {
3579 dev_err(rdev_to_dev(rdev), "Failed to register user MR");
3580 goto fail;
3581 }
3582
3583 kfree(pbl_tbl);
3584
3585 mr->ib_mr.lkey = mr->qplib_mr.lkey;
3586 mr->ib_mr.rkey = mr->qplib_mr.lkey;
3587 atomic_inc(&rdev->mr_count);
3588
3589 return &mr->ib_mr;
3590fail:
3591 kfree(pbl_tbl);
3592free_umem:
3593 ib_umem_release(umem);
3594free_mrw:
3595 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
3596free_mr:
3597 kfree(mr);
3598 return ERR_PTR(rc);
3599}
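
/*
 * Userspace counterpart (illustrative): this entry point backs
 * ibv_reg_mr() from libibverbs, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_READ);
 *
 * The driver selects a 4K or 2M backing page size through
 * ib_umem_find_best_pgsz() and, as enforced above, caps 4K-backed MRs at
 * BNXT_RE_MAX_MR_SIZE_LOW.
 */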
3600
3601int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
3602{
3603 struct ib_device *ibdev = ctx->device;
3604 struct bnxt_re_ucontext *uctx =
3605 container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
3606 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
3607 struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
3608 struct bnxt_re_uctx_resp resp;
3609 u32 chip_met_rev_num = 0;
3610 int rc;
3611
3612 dev_dbg(rdev_to_dev(rdev), "ABI version requested %u",
3613 ibdev->ops.uverbs_abi_ver);
3614
3615 if (ibdev->ops.uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
		dev_dbg(rdev_to_dev(rdev),
			"Requested ABI version differs from the device's version %d",
			BNXT_RE_ABI_VERSION);
3618 return -EPERM;
3619 }
3620
3621 uctx->rdev = rdev;
3622
3623 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
3624 if (!uctx->shpg) {
3625 rc = -ENOMEM;
3626 goto fail;
3627 }
3628 spin_lock_init(&uctx->sh_lock);
3629
3630 resp.comp_mask = BNXT_RE_UCNTX_CMASK_HAVE_CCTX;
3631 chip_met_rev_num = rdev->chip_ctx.chip_num;
3632 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_rev & 0xFF) <<
3633 BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
3634 chip_met_rev_num |= ((u32)rdev->chip_ctx.chip_metal & 0xFF) <<
3635 BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
3636 resp.chip_id0 = chip_met_rev_num;
3637 /* Future extension of chip info */
3638 resp.chip_id1 = 0;
	/* Temporary; use xa_alloc instead */
3640 resp.dev_id = rdev->en_dev->pdev->devfn;
3641 resp.max_qp = rdev->qplib_ctx.qpc_count;
3642 resp.pg_size = PAGE_SIZE;
3643 resp.cqe_sz = sizeof(struct cq_base);
3644 resp.max_cqd = dev_attr->max_cq_wqes;
3645 resp.rsvd = 0;
3646
3647 rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
3648 if (rc) {
3649 dev_err(rdev_to_dev(rdev), "Failed to copy user context");
3650 rc = -EFAULT;
3651 goto cfail;
3652 }
3653
3654 return 0;
3655cfail:
3656 free_page((unsigned long)uctx->shpg);
3657 uctx->shpg = NULL;
3658fail:
3659 return rc;
3660}
3661
3662void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
3663{
3664 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3665 struct bnxt_re_ucontext,
3666 ib_uctx);
3667
3668 struct bnxt_re_dev *rdev = uctx->rdev;
3669
3670 if (uctx->shpg)
3671 free_page((unsigned long)uctx->shpg);
3672
3673 if (uctx->dpi.dbr) {
3674 /* Free DPI only if this is the first PD allocated by the
3675 * application and mark the context dpi as NULL
3676 */
3677 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
3678 &rdev->qplib_res.dpi_tbl, &uctx->dpi);
3679 uctx->dpi.dbr = NULL;
3680 }
3681}
3682
3683/* Helper function to mmap the virtual memory from user app */
3684int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
3685{
3686 struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
3687 struct bnxt_re_ucontext,
3688 ib_uctx);
3689 struct bnxt_re_dev *rdev = uctx->rdev;
3690 u64 pfn;
3691
3692 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
3693 return -EINVAL;
3694
3695 if (vma->vm_pgoff) {
3696 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
3697 if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
3698 PAGE_SIZE, vma->vm_page_prot)) {
3699 dev_err(rdev_to_dev(rdev), "Failed to map DPI");
3700 return -EAGAIN;
3701 }
3702 } else {
3703 pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
3704 if (remap_pfn_range(vma, vma->vm_start,
3705 pfn, PAGE_SIZE, vma->vm_page_prot)) {
3706 dev_err(rdev_to_dev(rdev),
3707 "Failed to map shared page");
3708 return -EAGAIN;
3709 }
3710 }
3711
3712 return 0;
3713}
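
/*
 * Userspace mapping sketch (illustrative): offset 0 maps the shared page;
 * a non-zero offset is interpreted as the pfn of a doorbell (DPI) page:
 *
 *	shpg = mmap(NULL, pg_sz, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, ctx_fd, 0);
 *	dbr  = mmap(NULL, pg_sz, PROT_WRITE, MAP_SHARED, ctx_fd,
 *		    dbr_pfn * pg_sz);
 *
 * ctx_fd, pg_sz and dbr_pfn stand in for the uverbs context fd, the page
 * size reported in bnxt_re_uctx_resp and the doorbell pfn advertised to
 * the provider library.
 */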