/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Main component of the bnxt_re driver
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <net/dcbnl.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>
#include <linux/auxiliary_bus.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <linux/hashtable.h>

#include "bnxt_ulp.h"
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_rcfw.h"
#include "bnxt_re.h"
#include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h>
#include "bnxt.h"
#include "hw_counters.h"

static char version[] =
		BNXT_RE_DESC "\n";

MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC);
MODULE_LICENSE("Dual BSD/GPL");

/* globals */
static DEFINE_MUTEX(bnxt_re_mutex);

static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr);
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);

static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
			     u32 *offset);
static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt_qplib_res *res;
	u32 l2db_len = 0;
	u32 offset = 0;
	u32 barlen;
	int rc;

	res = &rdev->qplib_res;
	en_dev = rdev->en_dev;
	cctx = rdev->chip_ctx;

	/* Issue qcfg */
	rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
	if (rc)
		dev_info(rdev_to_dev(rdev),
			 "Couldn't get DB BAR size, low latency framework is disabled\n");
	/* set register offsets for both UC and WC */
	if (bnxt_qplib_is_chip_gen_p7(cctx)) {
		res->dpi_tbl.ucreg.offset = offset;
		res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
	} else {
		res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
							 BNXT_QPLIB_DBR_PF_DB_OFFSET;
		res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
	}
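
	/*
	 * Illustration (editorial note, values not from any specific part):
	 * on a pre-P7 PF this leaves both the UC and WC doorbell regions
	 * at BNXT_QPLIB_DBR_PF_DB_OFFSET; on P7 the UC region starts at
	 * the legacy L2 doorbell size returned by qcfg and the WC region
	 * starts right after the L2 driver's own doorbell mapping.
	 */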

	/* If WC mapping is disabled by the L2 driver then en_dev->l2_db_size
	 * is equal to the DB-BAR's actual size. This indicates that L2
	 * is mapping the entire BAR as UC. The RoCE driver can't enable WC
	 * mapping in such cases and DB-push will be disabled.
	 */
	barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
	if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
		res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
		dev_info(rdev_to_dev(rdev), "Low latency framework is enabled\n");
	}
}

static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
	if (bnxt_re_hwrm_qcaps(rdev))
		dev_err(rdev_to_dev(rdev),
			"Failed to query hwrm qcaps\n");
	if (bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx))
		cctx->modes.toggle_bits |= BNXT_QPLIB_CQ_TOGGLE_BIT;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	int rc;

	en_dev = rdev->en_dev;

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = en_dev->chip_num;
	chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	/* remaining members to follow eventually */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;
	rdev->qplib_res.dattr = &rdev->dev_attr;
	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);

	bnxt_re_set_drv_mode(rdev, wqe_mode);

	bnxt_re_set_db_offset(rdev);
	rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
	if (rc)
		return rc;

	if (bnxt_qplib_determine_atomics(en_dev->pdev))
		ibdev_info(&rdev->ibdev,
			   "platform doesn't support global atomics.");
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	if (BNXT_EN_VF(rdev->en_dev))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
			rdev->dev_attr.tqm_alloc_reqs[i];
}

static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
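	/*
	 * Worked example (editorial note, illustrative numbers only):
	 * with a PF reservation of 35%, qpc_count = 0x10000 and 4 VFs,
	 * vf_pct = 65 and num_vf becomes 400, so each VF gets
	 * 65536 * 65 / 400 = 10649 QPs. Scaling num_vf by 100 keeps the
	 * "vf_pct percent split across nvfs VFs" computation in integer
	 * arithmetic.
	 */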
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
	rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return;
	ib_unregister_device(&rdev->ibdev);
	bnxt_re_dev_uninit(rdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will time out and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				       false);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
		return;
	}
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc) {
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
			return;
		}
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	en_dev = rdev->en_dev;

	rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
	if (!rc)
		rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
{
	hdr->req_type = cpu_to_le16(opcd);
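	/*
	 * Editorial note (not from the original source): -1 (0xffff) is
	 * the conventional HWRM sentinel; here it requests no dedicated
	 * completion ring and addresses the command to the issuing
	 * function itself.
	 */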
	hdr->cmpl_ring = cpu_to_le16(-1);
	hdr->target_id = cpu_to_le16(-1);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

/* Query device config using common hwrm */
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
			     u32 *offset)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_func_qcfg_output resp = {};
	struct hwrm_func_qcfg_input req = {};
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
	req.fid = cpu_to_le16(0xffff);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (!rc) {
		*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
		*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
	}
	return rc;
}

/* Query function capabilities using common hwrm */
int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_func_qcaps_output resp = {};
	struct hwrm_func_qcaps_input req = {};
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	cctx = rdev->chip_ctx;
	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
	req.fid = cpu_to_le16(0xffff);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);

	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		return rc;
	cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;

	cctx->modes.dbr_pacing =
		le32_to_cpu(resp.flags_ext2) &
		FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
	return 0;
}

static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
{
	struct hwrm_func_dbr_pacing_qcfg_output resp = {};
	struct hwrm_func_dbr_pacing_qcfg_input req = {};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	cctx = rdev->chip_ctx;
	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		return rc;

	if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
	    FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
	    FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
		cctx->dbr_stat_db_fifo =
			le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
			~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
	return 0;
}

/* Update the pacing tunable parameters to the default values */
static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;

	pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
	pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
	pacing_data->alarm_th =
		pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
}

static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
{
	u32 read_val, fifo_occup;

	/* loop shouldn't run infinitely as the occupancy usually goes
	 * below pacing algo threshold as soon as pacing kicks in.
	 */
	while (1) {
		read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
		fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
			((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
			 BNXT_RE_DB_FIFO_ROOM_SHIFT);
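		/*
		 * Editorial illustration (example values are assumptions):
		 * the ROOM field of the FIFO register counts free entries,
		 * so occupancy is derived as max_depth - room. E.g. with a
		 * depth of 0x2c00 entries and a room reading of 0x2a00,
		 * occupancy is 0x200.
		 */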
		/* FIFO occupancy cannot be greater than the max FIFO depth */
		if (fifo_occup > BNXT_RE_MAX_FIFO_DEPTH)
			break;

		if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
			break;
	}
}

static void bnxt_re_db_fifo_check(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						dbq_fifo_check_work);
	struct bnxt_qplib_db_pacing_data *pacing_data;
	u32 pacing_save;

	if (!mutex_trylock(&rdev->pacing.dbq_lock))
		return;
	pacing_data = rdev->qplib_res.pacing_data;
	pacing_save = rdev->pacing.do_pacing_save;
	__wait_for_fifo_occupancy_below_th(rdev);
	cancel_delayed_work_sync(&rdev->dbq_pacing_work);
	if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
		/* Double the do_pacing value during congestion */
		pacing_save = pacing_save << 1;
	} else {
		/*
		 * When new congestion is detected, increase the do_pacing
		 * value 8-fold and the pacing_th value 4-fold. The reason
		 * to increase pacing_th is to give more space for the
		 * queue to oscillate down without getting empty, but also
		 * more room for the queue to increase without causing
		 * another alarm.
		 */
		pacing_save = pacing_save << 3;
		pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
	}
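
	/*
	 * Illustrative escalation sequence (editorial note, hypothetical
	 * numbers): with a default do_pacing of 0x10, the first alert
	 * raises it to 0x80 (8x) and quadruples pacing_th; while the
	 * congestion persists, each subsequent alert doubles it (0x100,
	 * 0x200, ...) until the BNXT_RE_MAX_DBR_DO_PACING clamp below
	 * takes effect.
	 */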

	if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
		pacing_save = BNXT_RE_MAX_DBR_DO_PACING;

	pacing_data->do_pacing = pacing_save;
	rdev->pacing.do_pacing_save = pacing_data->do_pacing;
	pacing_data->alarm_th =
		pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
	schedule_delayed_work(&rdev->dbq_pacing_work,
			      msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
	rdev->stats.pacing.alerts++;
	mutex_unlock(&rdev->pacing.dbq_lock);
}

static void bnxt_re_pacing_timer_exp(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						dbq_pacing_work.work);
	struct bnxt_qplib_db_pacing_data *pacing_data;
	u32 read_val, fifo_occup;

	if (!mutex_trylock(&rdev->pacing.dbq_lock))
		return;

	pacing_data = rdev->qplib_res.pacing_data;
	read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
	fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
		((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
		 BNXT_RE_DB_FIFO_ROOM_SHIFT);

	if (fifo_occup > pacing_data->pacing_th)
		goto restart_timer;

	/*
	 * Instead of immediately going back to the default do_pacing,
	 * reduce it by 1/8 and restart the timer.
	 */
	pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
	pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
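	/*
	 * Editorial note: this is a geometric back-off, do_pacing(n+1) =
	 * max(default, do_pacing(n) * 7/8) on each timer expiry, e.g.
	 * 0x200 -> 0x1c0 -> 0x188 -> ... (hypothetical values), so the
	 * rate limit relaxes gradually after congestion subsides.
	 */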
	if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
		bnxt_re_set_default_pacing_data(rdev);
		rdev->stats.pacing.complete++;
		goto dbq_unlock;
	}

restart_timer:
	schedule_delayed_work(&rdev->dbq_pacing_work,
			      msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
	rdev->stats.pacing.resched++;
dbq_unlock:
	rdev->pacing.do_pacing_save = pacing_data->do_pacing;
	mutex_unlock(&rdev->pacing.dbq_lock);
}

void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_db_pacing_data *pacing_data;

	if (!rdev->pacing.dbr_pacing)
		return;
	mutex_lock(&rdev->pacing.dbq_lock);
	pacing_data = rdev->qplib_res.pacing_data;

	/*
	 * Increase the alarm_th to max so that other user lib instances do not
	 * keep alerting the driver.
	 */
	pacing_data->alarm_th = BNXT_RE_MAX_FIFO_DEPTH;
	pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
	cancel_work_sync(&rdev->dbq_fifo_check_work);
	schedule_work(&rdev->dbq_fifo_check_work);
	mutex_unlock(&rdev->pacing.dbq_lock);
}

static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
	if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
		return -EIO;

	/* Allocate a page for app use */
	rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
	if (!rdev->pacing.dbr_page)
		return -ENOMEM;

	memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
	rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;

	/* MAP HW window 2 for reading db fifo depth */
	writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
	       rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
	rdev->pacing.dbr_db_fifo_reg_off =
		(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
		 BNXT_RE_GRC_FIFO_REG_BASE;
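	/*
	 * Editorial illustration (the window semantics are an assumption,
	 * not stated in this file): the high bits of the GRC register
	 * address select the window base programmed above, while the low
	 * bits give the offset within that window; reads of the FIFO
	 * register then go through BAR0 at window 2's fixed base plus
	 * that offset.
	 */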
	rdev->pacing.dbr_bar_addr =
		pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;

	rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
	rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
	rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
	rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
	rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
	rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
	rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
	rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
	bnxt_re_set_default_pacing_data(rdev);
	/* Initialize worker for DBR Pacing */
	INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
	INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
	return 0;
}

static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
{
	cancel_work_sync(&rdev->dbq_fifo_check_work);
	cancel_delayed_work_sync(&rdev->dbq_pacing_work);
	if (rdev->pacing.dbr_page)
		free_page((u64)rdev->pacing.dbr_page);

	rdev->pacing.dbr_page = NULL;
	rdev->pacing.dbr_pacing = false;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev;
	struct hwrm_ring_free_input req = {};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!rdev)
		return rc;

	en_dev = rdev->en_dev;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {};
	struct hwrm_stat_ctx_free_output resp = {};
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
	struct hwrm_stat_ctx_alloc_output resp = {};
	struct hwrm_stat_ctx_alloc_input req = {};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg = {};
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

/* Device */

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.device_group = &bnxt_re_dev_attr_group,
	.disassociate_ucontext = bnxt_re_disassociate_ucontext,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.mmap_free = bnxt_re_mmap_free,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
	.req_notify_cq = bnxt_re_req_notify_cq,
	.resize_cq = bnxt_re_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
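	/*
	 * Editorial note on the helper's behaviour (not stated in this
	 * file): addrconf_addr_eui48() derives a 64-bit EUI from the
	 * 6-byte MAC by inserting 0xFF,0xFE between the OUI and NIC
	 * halves and flipping the universal/local bit, e.g.
	 * 00:11:22:33:44:55 -> 02:11:22:ff:fe:33:44:55 as the node GUID.
	 */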

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
		ibdev->driver_def = bnxt_re_uapi_defs;

	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	ibdev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->nb.notifier_call = NULL;
	rdev->netdev = en_dev->net;
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	mutex_init(&rdev->pacing.dbq_lock);
	atomic_set(&rdev->stats.res.qp_count, 0);
	atomic_set(&rdev->stats.res.cq_count, 0);
	atomic_set(&rdev->stats.res.srq_count, 0);
	atomic_set(&rdev->stats.res.mr_count, 0);
	atomic_set(&rdev->stats.res.mw_count, 0);
	atomic_set(&rdev->stats.res.ah_count, 0);
	atomic_set(&rdev->stats.res.pd_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq,
					       qplib_srq);
	struct creq_qp_error_notification *err_event;
	struct ib_event event = {};
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	event.device = &qp->rdev->ibdev;
	event.element.qp = &qp->ib_qp;
	event.event = IB_EVENT_QP_FATAL;

	err_event = (struct creq_qp_error_notification *)qp_event;

	switch (err_event->req_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;

	default:
		break;
	}

	switch (err_event->res_err_state_reason) {
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR:
		event.event = IB_EVENT_QP_ACCESS_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
		event.event = IB_EVENT_QP_REQ_ERR;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
		event.event = IB_EVENT_QP_FATAL;
		break;
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
	case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
		if (srq)
			event.event = IB_EVENT_SRQ_ERR;
		break;
	default:
		break;
	}

	if (err_event->res_err_state_reason || err_event->req_err_state_reason) {
		ibdev_dbg(&qp->rdev->ibdev,
			  "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n",
			  __func__, rdma_is_kernel_res(&qp->ib_qp.res) ? "kernel" : "user",
			  qp->qplib_qp.id,
			  err_event->sq_cons_idx,
			  err_event->rq_cons_idx,
			  err_event->req_slow_path_state,
			  err_event->req_err_state_reason,
			  err_event->res_slow_path_state,
			  err_event->res_err_state_reason);
	} else {
		if (srq)
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) {
		(*srq->ib_srq.event_handler)(&event,
					     srq->ib_srq.srq_context);
	} else if (event.device && qp->ib_qp.event_handler) {
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
	}

	return 0;
}

static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq)
{
	struct creq_cq_error_notification *cqerr;
	struct ib_event ibevent = {};

	cqerr = event;
	switch (cqerr->cq_err_reason) {
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
	case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
		ibevent.event = IB_EVENT_CQ_ERR;
		break;
	default:
		break;
	}

	if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) {
		ibevent.element.cq = &cq->ib_cq;
		ibevent.device = &cq->rdev->ibdev;

		ibdev_dbg(&cq->rdev->ibdev,
			  "%s err reason %d\n", __func__, cqerr->cq_err_reason);
		cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context);
	}

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	struct bnxt_qplib_qp *lib_qp;
	struct bnxt_qplib_cq *lib_cq;
	struct bnxt_re_qp *qp;
	struct bnxt_re_cq *cq;
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	switch (event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		lib_qp = obj;
		qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
		break;
	case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
		lib_cq = obj;
		cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq);
		rc = bnxt_re_handle_cq_async_error(affi_async, cq);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;

	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;

	if (srq->ib_srq.event_handler) {
		if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
			ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
	return 0;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);
	u32 *cq_ptr;

	if (cq->ib_cq.comp_handler) {
		if (cq->uctx_cq_page) {
			cq_ptr = (u32 *)cq->uctx_cq_page;
			*cq_ptr = cq->qplib_cq.toggle;
		}
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = rdev->en_dev->msix_entries[i].db_offset;
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->en_dev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	for (i = num_vec_enabled; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
				  &rdev->dpi_privileged,
				  rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* need to modify the VLAN enable setting of non VLAN GID only
		 * as setting is done for VLAN GID while adding GID
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}

	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app = {};

	netdev = rdev->netdev;

	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;
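	/*
	 * Editorial note: dcb_ieee_getapp_mask() returns a bitmap with
	 * bit i set when 802.1p priority i maps to the given app entry,
	 * so the two lookups are OR-ed. E.g. (illustrative values) RoCEv1
	 * by ethertype on priority 3 and RoCEv2 by UDP port 4791 on
	 * priority 5 would yield prio_map = 0b00101000.
	 */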

	return prio_map;
}

static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;

	/* Get priority for roce */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Actual priorities are not programmed as they are already
	 * done by L2 driver; just enable or disable priority vlan tagging
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map;
		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {};
	struct hwrm_ver_get_input req = {};
	struct bnxt_qplib_chip_ctx *cctx;
	struct bnxt_fw_msg fw_msg = {};
	int rc;

	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = bnxt_send_msg(en_dev, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}

	cctx = rdev->chip_ctx;
	cctx->hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
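	/*
	 * Editorial note: the four 16-bit fields are packed
	 * major.minor.build.patch into one u64 so versions compare with
	 * plain integer comparison, e.g. interface 1.10.2.3 becomes
	 * 0x0001000a00020003.
	 */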

	cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout);

	if (!cctx->hwrm_cmd_max_timeout)
		cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
}

static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}

	rdev->num_msix = 0;

	if (rdev->pacing.dbr_pacing)
		bnxt_re_deinitialize_dbr_pacing(rdev);

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
		bnxt_unregister_dev(rdev->en_dev);
}

/* worker thread for polling periodic events. Now used for QoS programming */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_re_ring_attr rattr = {};
	struct bnxt_qplib_creq_ctx *creq;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with netdev */
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		bnxt_unregister_dev(rdev->en_dev);
		clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	if (!rdev->en_dev->ulp_tbl->msix_requested) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors\n");
		rc = -EINVAL;
		goto fail;
	}
	ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
		  rdev->en_dev->ulp_tbl->msix_requested);
	rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
	vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
		rc = bnxt_re_initialize_dbr_pacing(rdev);
		if (!rc) {
			rdev->pacing.dbr_pacing = true;
		} else {
			ibdev_err(&rdev->ibdev,
				  "DBR pacing disabled with error : %d\n", rc);
			rdev->pacing.dbr_pacing = false;
		}
	}
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
		/*
		 * Use the total VF count since the actual VF count may not be
		 * available at this point.
		 */
		bnxt_re_vf_res_config(rdev);
	}
	hash_init(rdev->cq_hash);

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}
1758
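/*
 * Glue between the auxiliary bus and the RoCE device lifecycle: resolve
 * the bnxt_en device behind the auxiliary device, allocate an rdev,
 * bring up the low-level resources (bnxt_re_dev_init()) and only then
 * register with the IB core (bnxt_re_ib_init()).
 */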
static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(adev, struct bnxt_aux_priv, aux_dev);
	struct bnxt_en_dev *en_dev;
	struct bnxt_re_dev *rdev;
	int rc;

	/* en_dev should never be NULL as long as adev and aux_dev are valid. */
	en_dev = aux_priv->edev;

	rdev = bnxt_re_dev_add(aux_priv, en_dev);
	if (!rdev || !rdev_to_dev(rdev)) {
		rc = -ENOMEM;
		goto exit;
	}

	rc = bnxt_re_dev_init(rdev, wqe_mode);
	if (rc)
		goto re_dev_dealloc;

	rc = bnxt_re_ib_init(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %s\n",
		       aux_priv->aux_dev.name);
		goto re_dev_uninit;
	}
	auxiliary_set_drvdata(adev, rdev);

	return 0;

re_dev_uninit:
	bnxt_re_dev_uninit(rdev);
re_dev_dealloc:
	ib_dealloc_device(&rdev->ibdev);
exit:
	return rc;
}

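/*
 * Congestion control is programmed through a single firmware modify
 * command: cc_param.mask selects which fields (CC mode, the enable bit
 * and the ECN bits in the TOS field) the firmware should apply from
 * this request.
 */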
static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
{
	struct bnxt_qplib_cc_param cc_param = {};

	/* Do not enable congestion control on VFs */
	if (rdev->is_virtfn)
		return;

	/* Currently enabled only for Gen P5 and P7 adapters */
	if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
		return;

	if (enable) {
		cc_param.enable = 1;
		cc_param.cc_mode = CMDQ_MODIFY_ROCE_CC_CC_MODE_PROBABILISTIC_CC_MODE;
	}

	cc_param.mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
			 CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);

	if (bnxt_qplib_modify_cc(&rdev->qplib_res, &cc_param))
		ibdev_err(&rdev->ibdev, "Failed to setup CC enable = %d\n", enable);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * For cases when the netdev is already present, our call to the
 * register_netdevice_notifier() will actually get the rtnl_lock()
 * before sending NETDEV_REGISTER and (if up) NETDEV_UP
 * events.
 *
 * But for cases when the netdev is not already present, the notifier
 * chain may be invoked from different CPUs simultaneously.
 *
 * This is protected by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_dev *rdev;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	if (real_dev != netdev)
		goto exit;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       netif_carrier_ok(real_dev) ?
				       IB_EVENT_PORT_ACTIVE :
				       IB_EVENT_PORT_ERR);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	return NOTIFY_DONE;
}

#define BNXT_ADEV_NAME "bnxt_en"

static void bnxt_re_remove(struct auxiliary_device *adev)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return;

	mutex_lock(&bnxt_re_mutex);
	if (rdev->nb.notifier_call) {
		unregister_netdevice_notifier(&rdev->nb);
		rdev->nb.notifier_call = NULL;
	} else {
		/* If the notifier is NULL, the cleanup was already done
		 * before we got here.
		 */
		goto skip_remove;
	}
	bnxt_re_setup_cc(rdev, false);
	ib_unregister_device(&rdev->ibdev);
	bnxt_re_dev_uninit(rdev);
	ib_dealloc_device(&rdev->ibdev);
skip_remove:
	mutex_unlock(&bnxt_re_mutex);
}

static int bnxt_re_probe(struct auxiliary_device *adev,
			 const struct auxiliary_device_id *id)
{
	struct bnxt_re_dev *rdev;
	int rc;

	mutex_lock(&bnxt_re_mutex);
	rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
	if (rc) {
		mutex_unlock(&bnxt_re_mutex);
		return rc;
	}

	rdev = auxiliary_get_drvdata(adev);

	rdev->nb.notifier_call = bnxt_re_netdev_event;
	rc = register_netdevice_notifier(&rdev->nb);
	if (rc) {
		rdev->nb.notifier_call = NULL;
		pr_err("%s: Failed to register netdevice notifier\n",
		       ROCE_DRV_MODULE_NAME);
		goto err;
	}

	bnxt_re_setup_cc(rdev, true);
	mutex_unlock(&bnxt_re_mutex);
	return 0;

err:
	mutex_unlock(&bnxt_re_mutex);
	bnxt_re_remove(adev);

	return rc;
}

static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return 0;

	mutex_lock(&bnxt_re_mutex);
	/* The L2 driver may invoke this callback during device error/crash or
	 * device reset. The RoCE driver currently doesn't recover the device
	 * in case of error. Handle the error by dispatching fatal events to
	 * all QPs, i.e. by calling bnxt_re_dev_stop, and release the MSI-X
	 * vectors since the L2 driver wants to modify the MSI-X table.
	 */

	ibdev_info(&rdev->ibdev, "Handle device suspend call");
	/* Check the current device state from bnxt_en_dev and move the
	 * device to detached state if FW_FATAL_COND is set.
	 * This prevents more commands to HW during clean-up,
	 * in case the device is already in error.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);

	bnxt_re_dev_stop(rdev);
	bnxt_re_stop_irq(rdev);
	/* Move the device states to detached and avoid sending any more
	 * commands to HW
	 */
	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
	wake_up_all(&rdev->rcfw.cmdq.waitq);
	mutex_unlock(&bnxt_re_mutex);

	return 0;
}

static int bnxt_re_resume(struct auxiliary_device *adev)
{
	struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);

	if (!rdev)
		return 0;

	mutex_lock(&bnxt_re_mutex);
	/* The L2 driver may invoke this callback during device recovery or
	 * resume. The RoCE driver does not re-initialize the device here;
	 * it only acknowledges the event.
	 */

	ibdev_info(&rdev->ibdev, "Handle device resume call");
	mutex_unlock(&bnxt_re_mutex);

	return 0;
}

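/*
 * Match the "rdma" auxiliary device that bnxt_en creates; the resulting
 * id table name is the parent driver name plus the auxdev name,
 * i.e. "bnxt_en.rdma".
 */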
static const struct auxiliary_device_id bnxt_re_id_table[] = {
	{ .name = BNXT_ADEV_NAME ".rdma", },
	{},
};

MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);

static struct auxiliary_driver bnxt_re_driver = {
	.name = "rdma",
	.probe = bnxt_re_probe,
	.remove = bnxt_re_remove,
	.shutdown = bnxt_re_shutdown,
	.suspend = bnxt_re_suspend,
	.resume = bnxt_re_resume,
	.id_table = bnxt_re_id_table,
};

static int __init bnxt_re_mod_init(void)
{
	int rc;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
	rc = auxiliary_driver_register(&bnxt_re_driver);
	if (rc) {
		pr_err("%s: Failed to register auxiliary driver\n",
		       ROCE_DRV_MODULE_NAME);
		return rc;
	}
	return 0;
}

static void __exit bnxt_re_mod_exit(void)
{
	auxiliary_driver_unregister(&bnxt_re_driver);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);
/* globals */
static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);

static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
	struct bnxt_qplib_chip_ctx *cctx;

	cctx = rdev->chip_ctx;
	cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
			       mode : BNXT_QPLIB_WQE_MODE_STATIC;
}

static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;

	if (!rdev->chip_ctx)
		return;
	chip_ctx = rdev->chip_ctx;
	rdev->chip_ctx = NULL;
	rdev->rcfw.res = NULL;
	rdev->qplib_res.cctx = NULL;
	rdev->qplib_res.pdev = NULL;
	rdev->qplib_res.netdev = NULL;
	kfree(chip_ctx);
}

static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_chip_ctx *chip_ctx;
	struct bnxt_en_dev *en_dev;
	struct bnxt *bp;

	en_dev = rdev->en_dev;
	bp = netdev_priv(en_dev->net);

	chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
	if (!chip_ctx)
		return -ENOMEM;
	chip_ctx->chip_num = bp->chip_num;
	chip_ctx->hw_stats_size = bp->hw_ring_stats_size;

	rdev->chip_ctx = chip_ctx;
	/* the remaining members are initialized as needed */

	rdev->qplib_res.cctx = rdev->chip_ctx;
	rdev->rcfw.res = &rdev->qplib_res;

	bnxt_re_set_drv_mode(rdev, wqe_mode);
	if (bnxt_qplib_determine_atomics(en_dev->pdev))
		ibdev_info(&rdev->ibdev,
			   "platform doesn't support global atomics.");
	return 0;
}

/* SR-IOV helper functions */

static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
	struct bnxt *bp;

	bp = netdev_priv(rdev->en_dev->net);
	if (BNXT_VF(bp))
		rdev->is_virtfn = 1;
}

/* Set the maximum number of each resource that the driver actually wants
 * to allocate. This may be up to the maximum number the firmware has
 * reserved for the function. The driver may choose to allocate fewer
 * resources than the firmware maximum.
 */
static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_dev_attr *attr;
	struct bnxt_qplib_ctx *ctx;
	int i;

	attr = &rdev->dev_attr;
	ctx = &rdev->qplib_ctx;

	ctx->qpc_count = min_t(u32, BNXT_RE_MAX_QPC_COUNT,
			       attr->max_qp);
	ctx->mrw_count = BNXT_RE_MAX_MRW_COUNT_256K;
	/* Use max_mr from fw since max_mrw does not get set */
	ctx->mrw_count = min_t(u32, ctx->mrw_count, attr->max_mr);
	ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
				attr->max_srq);
	ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
		for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
			rdev->qplib_ctx.tqm_ctx.qcount[i] =
				rdev->dev_attr.tqm_alloc_reqs[i];
}

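/*
 * The per-VF limits below are computed in integer math: num_vf is
 * pre-scaled by 100 so that (count * vf_pct) / num_vf evaluates to
 * count * (vf_pct / 100) / nvfs without floating point. As a worked
 * example (assuming, purely for illustration, vf_pct = 65), 65536 QP
 * contexts shared by 8 VFs gives (65536 * 65) / 800 = 5324 QPs per VF.
 */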
static void bnxt_re_limit_vf_res(struct bnxt_qplib_ctx *qplib_ctx, u32 num_vf)
{
	struct bnxt_qplib_vf_res *vf_res;
	u32 mrws = 0;
	u32 vf_pct;
	u32 nvfs;

	vf_res = &qplib_ctx->vf_res;
	/*
	 * Reserve a set of resources for the PF. Divide the remaining
	 * resources among the VFs
	 */
	vf_pct = 100 - BNXT_RE_PCT_RSVD_FOR_PF;
	nvfs = num_vf;
	num_vf = 100 * num_vf;
	vf_res->max_qp_per_vf = (qplib_ctx->qpc_count * vf_pct) / num_vf;
	vf_res->max_srq_per_vf = (qplib_ctx->srqc_count * vf_pct) / num_vf;
	vf_res->max_cq_per_vf = (qplib_ctx->cq_count * vf_pct) / num_vf;
	/*
	 * The driver allows many more MRs than other resources. If the
	 * firmware does also, then reserve a fixed amount for the PF and
	 * divide the rest among VFs. VFs may use many MRs for NFS
	 * mounts, ISER, NVME applications, etc. If the firmware severely
	 * restricts the number of MRs, then let PF have half and divide
	 * the rest among VFs, as for the other resource types.
	 */
	if (qplib_ctx->mrw_count < BNXT_RE_MAX_MRW_COUNT_64K) {
		mrws = qplib_ctx->mrw_count * vf_pct;
		nvfs = num_vf;
	} else {
		mrws = qplib_ctx->mrw_count - BNXT_RE_RESVD_MR_FOR_PF;
	}
	vf_res->max_mrw_per_vf = (mrws / nvfs);
	vf_res->max_gid_per_vf = BNXT_RE_MAX_GID_PER_VF;
}

static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
{
	u32 num_vfs;

	memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
	bnxt_re_limit_pf_res(rdev);

	num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		  BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
	if (num_vfs)
		bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}

/* for handling bnxt_en callbacks later */
static void bnxt_re_stop(void *p)
{
	struct bnxt_re_dev *rdev = p;
	struct bnxt *bp;

	if (!rdev)
		return;
	ASSERT_RTNL();

	/* The L2 driver invokes this callback during device error/crash or
	 * device reset. The RoCE driver currently doesn't recover the device
	 * in case of error. Handle the error by dispatching fatal events to
	 * all QPs, i.e. by calling bnxt_re_dev_stop, and release the MSI-X
	 * vectors since the L2 driver wants to modify the MSI-X table.
	 */
	bp = netdev_priv(rdev->netdev);

	ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
	/* Check the current device state from L2 structure and move the
	 * device to detached state if FW_FATAL_COND is set.
	 * This prevents more commands to HW during clean-up,
	 * in case the device is already in error.
	 */
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
		set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);

	bnxt_re_dev_stop(rdev);
	bnxt_re_stop_irq(rdev);
	/* Move the device states to detached and avoid sending any more
	 * commands to HW
	 */
	set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
	set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
}

static void bnxt_re_start(void *p)
{
}

static void bnxt_re_sriov_config(void *p, int num_vfs)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return;
	rdev->num_vfs = num_vfs;
	if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
		bnxt_re_set_resource_limits(rdev);
		bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
					      &rdev->qplib_ctx);
	}
}

static void bnxt_re_shutdown(void *p)
{
	struct bnxt_re_dev *rdev = p;

	if (!rdev)
		return;
	ASSERT_RTNL();
	/* Release the MSIx vectors before queuing unregister */
	bnxt_re_stop_irq(rdev);
	ib_unregister_device_queued(&rdev->ibdev);
}

static void bnxt_re_stop_irq(void *handle)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx;

	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		bnxt_qplib_nq_stop_irq(nq, false);
	}

	bnxt_qplib_rcfw_stop_irq(rcfw, false);
}

static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
	struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
	struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
	struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
	struct bnxt_qplib_nq *nq;
	int indx, rc;

	if (!ent) {
		/* Not setting the f/w timeout bit in rcfw.
		 * During the driver unload the first command
		 * to f/w will time out and that will set the
		 * timeout bit.
		 */
		ibdev_err(&rdev->ibdev, "Failed to re-start IRQs\n");
		return;
	}

	/* Vectors may change after restart, so update with new vectors
	 * in the device structure.
	 */
	for (indx = 0; indx < rdev->num_msix; indx++)
		rdev->msix_entries[indx].vector = ent[indx].vector;

	bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
				  false);
	for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
		nq = &rdev->nq[indx - 1];
		rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
					     msix_ent[indx].vector, false);
		if (rc)
			ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
				   indx - 1);
	}
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_async_notifier = NULL,
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
	.ulp_sriov_config = bnxt_re_sriov_config,
	.ulp_shutdown = bnxt_re_shutdown,
	.ulp_irq_stop = bnxt_re_stop_irq,
	.ulp_irq_restart = bnxt_re_start_irq
};

/* RoCE -> Net driver */

/* Driver registration routines used to let the networking driver (bnxt_en)
 * know that the RoCE driver is now installed
 */
static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
						    BNXT_ROCE_ULP);
	return rc;
}

static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc = 0;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
						  &bnxt_re_ulp_ops, rdev);
	rdev->qplib_res.pdev = rdev->en_dev->pdev;
	return rc;
}

static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev;
	int rc;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

	return rc;
}

static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
{
	int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
	struct bnxt_en_dev *en_dev;

	if (!rdev)
		return -EINVAL;

	en_dev = rdev->en_dev;

	num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

	num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
							 rdev->msix_entries,
							 num_msix_want);
	if (num_msix_got < BNXT_RE_MIN_MSIX) {
		rc = -EINVAL;
		goto done;
	}
	if (num_msix_got != num_msix_want) {
		ibdev_warn(&rdev->ibdev,
			   "Requested %d MSI-X vectors, got %d\n",
			   num_msix_want, num_msix_got);
	}
	rdev->num_msix = num_msix_got;
done:
	return rc;
}

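/*
 * HWRM plumbing: every firmware request below is built the same way -
 * fill the common input header, wrap the request/response buffers in a
 * bnxt_fw_msg and hand it to the L2 driver via en_ops->bnxt_send_fw_msg().
 */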
static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
				  u16 opcd, u16 crid, u16 trid)
{
	hdr->req_type = cpu_to_le16(opcd);
	hdr->cmpl_ring = cpu_to_le16(crid);
	hdr->target_id = cpu_to_le16(trid);
}

static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
				int msg_len, void *resp, int resp_max_len,
				int timeout)
{
	fw_msg->msg = msg;
	fw_msg->msg_len = msg_len;
	fw_msg->resp = resp;
	fw_msg->resp_max_len = resp_max_len;
	fw_msg->timeout = timeout;
}

static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
				 u16 fw_ring_id, int type)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
	req.ring_type = type;
	req.ring_id = cpu_to_le16(fw_ring_id);
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
			  req.ring_id, rc);
	return rc;
}

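/*
 * Ring allocation describes the queue to firmware: the base of the page
 * table (PBL level 0), the ring depth, and the logical ring id that ties
 * the ring to its doorbell and MSI-X vector.
 */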
static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
				  struct bnxt_re_ring_attr *ring_attr,
				  u16 *fw_ring_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output resp;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
	req.enables = 0;
	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
	if (ring_attr->pages > 1) {
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(ring_attr->lrid);
	req.length = cpu_to_le32(ring_attr->depth + 1);
	req.ring_type = ring_attr->type;
	req.int_mode = ring_attr->mode;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_ring_id = le16_to_cpu(resp.ring_id);

	return rc;
}

static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
				      u32 fw_stats_ctx_id)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_stat_ctx_free_input req = {0};
	struct hwrm_stat_ctx_free_output resp = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	if (!en_dev)
		return rc;

	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
		return 0;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
	/* Use a dedicated response buffer rather than reusing the request */
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
			  rc);

	return rc;
}

static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
				       dma_addr_t dma_map,
				       u32 *fw_stats_ctx_id)
{
	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
	struct hwrm_stat_ctx_alloc_output resp = {0};
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	int rc = -EINVAL;

	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;

	if (!en_dev)
		return rc;

	memset(&fw_msg, 0, sizeof(fw_msg));

	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
	req.update_period_ms = cpu_to_le32(1000);
	req.stats_dma_addr = cpu_to_le64(dma_map);
	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (!rc)
		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

	return rc;
}

/* Device */

static bool is_bnxt_re_dev(struct net_device *netdev)
{
	struct ethtool_drvinfo drvinfo;

	if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
		memset(&drvinfo, 0, sizeof(drvinfo));
		netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);

		if (strcmp(drvinfo.driver, "bnxt_en"))
			return false;
		return true;
	}
	return false;
}

static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
	struct ib_device *ibdev =
		ib_device_get_by_netdev(netdev, RDMA_DRIVER_BNXT_RE);
	if (!ibdev)
		return NULL;

	return container_of(ibdev, struct bnxt_re_dev, ibdev);
}

static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
{
	struct bnxt_en_dev *en_dev;
	struct pci_dev *pdev;

	en_dev = bnxt_ulp_probe(netdev);
	if (IS_ERR(en_dev))
		return en_dev;

	pdev = en_dev->pdev;
	if (!pdev)
		return ERR_PTR(-EINVAL);

	if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
		dev_info(&pdev->dev,
			 "%s: probe error: RoCE is not supported on this device",
			 ROCE_DRV_MODULE_NAME);
		return ERR_PTR(-ENODEV);
	}

	dev_hold(netdev);

	return en_dev;
}

static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);

	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
}
static DEVICE_ATTR_RO(hca_type);

static struct attribute *bnxt_re_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	NULL
};

static const struct attribute_group bnxt_re_dev_attr_group = {
	.attrs = bnxt_re_attributes,
};

static const struct ib_device_ops bnxt_re_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_BNXT_RE,
	.uverbs_abi_ver = BNXT_RE_ABI_VERSION,

	.add_gid = bnxt_re_add_gid,
	.alloc_hw_port_stats = bnxt_re_ib_alloc_hw_port_stats,
	.alloc_mr = bnxt_re_alloc_mr,
	.alloc_pd = bnxt_re_alloc_pd,
	.alloc_ucontext = bnxt_re_alloc_ucontext,
	.create_ah = bnxt_re_create_ah,
	.create_cq = bnxt_re_create_cq,
	.create_qp = bnxt_re_create_qp,
	.create_srq = bnxt_re_create_srq,
	.create_user_ah = bnxt_re_create_ah,
	.dealloc_driver = bnxt_re_dealloc_driver,
	.dealloc_pd = bnxt_re_dealloc_pd,
	.dealloc_ucontext = bnxt_re_dealloc_ucontext,
	.del_gid = bnxt_re_del_gid,
	.dereg_mr = bnxt_re_dereg_mr,
	.destroy_ah = bnxt_re_destroy_ah,
	.destroy_cq = bnxt_re_destroy_cq,
	.destroy_qp = bnxt_re_destroy_qp,
	.destroy_srq = bnxt_re_destroy_srq,
	.device_group = &bnxt_re_dev_attr_group,
	.get_dev_fw_str = bnxt_re_query_fw_str,
	.get_dma_mr = bnxt_re_get_dma_mr,
	.get_hw_stats = bnxt_re_ib_get_hw_stats,
	.get_link_layer = bnxt_re_get_link_layer,
	.get_port_immutable = bnxt_re_get_port_immutable,
	.map_mr_sg = bnxt_re_map_mr_sg,
	.mmap = bnxt_re_mmap,
	.modify_ah = bnxt_re_modify_ah,
	.modify_qp = bnxt_re_modify_qp,
	.modify_srq = bnxt_re_modify_srq,
	.poll_cq = bnxt_re_poll_cq,
	.post_recv = bnxt_re_post_recv,
	.post_send = bnxt_re_post_send,
	.post_srq_recv = bnxt_re_post_srq_recv,
	.query_ah = bnxt_re_query_ah,
	.query_device = bnxt_re_query_device,
	.query_pkey = bnxt_re_query_pkey,
	.query_port = bnxt_re_query_port,
	.query_qp = bnxt_re_query_qp,
	.query_srq = bnxt_re_query_srq,
	.reg_user_mr = bnxt_re_reg_user_mr,
	.req_notify_cq = bnxt_re_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
	INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
};

static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
{
	struct ib_device *ibdev = &rdev->ibdev;
	int ret;

	/* ib device init */
	ibdev->node_type = RDMA_NODE_IB_CA;
	strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
		strlen(BNXT_RE_DESC) + 5);
	ibdev->phys_port_cnt = 1;

	bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);

	ibdev->num_comp_vectors = rdev->num_msix - 1;
	ibdev->dev.parent = &rdev->en_dev->pdev->dev;
	ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;

	ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
	ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
	if (ret)
		return ret;

	dma_set_max_seg_size(&rdev->en_dev->pdev->dev, UINT_MAX);
	return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}

static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
	dev_put(rdev->netdev);
	rdev->netdev = NULL;
	mutex_lock(&bnxt_re_dev_lock);
	list_del_rcu(&rdev->list);
	mutex_unlock(&bnxt_re_dev_lock);

	synchronize_rcu();
}

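/*
 * rdev instances are kept on an RCU-protected list (bnxt_re_dev_lock
 * serializes writers); bnxt_re_dev_remove() above pairs list_del_rcu()
 * with synchronize_rcu() so readers finish before the device goes away.
 */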
static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
					   struct bnxt_en_dev *en_dev)
{
	struct bnxt_re_dev *rdev;

	/* Allocate bnxt_re_dev instance here */
	rdev = ib_alloc_device(bnxt_re_dev, ibdev);
	if (!rdev) {
		ibdev_err(NULL, "%s: bnxt_re_dev allocation failure!",
			  ROCE_DRV_MODULE_NAME);
		return NULL;
	}
	/* Default values */
	rdev->netdev = netdev;
	dev_hold(rdev->netdev);
	rdev->en_dev = en_dev;
	rdev->id = rdev->en_dev->pdev->devfn;
	INIT_LIST_HEAD(&rdev->qp_list);
	mutex_init(&rdev->qp_lock);
	atomic_set(&rdev->qp_count, 0);
	atomic_set(&rdev->cq_count, 0);
	atomic_set(&rdev->srq_count, 0);
	atomic_set(&rdev->mr_count, 0);
	atomic_set(&rdev->mw_count, 0);
	rdev->cosq[0] = 0xFFFF;
	rdev->cosq[1] = 0xFFFF;

	mutex_lock(&bnxt_re_dev_lock);
	list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
	mutex_unlock(&bnxt_re_dev_lock);
	return rdev;
}

static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
					     *unaffi_async)
{
	switch (unaffi_async->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
					 struct bnxt_re_qp *qp)
{
	struct ib_event event;
	unsigned int flags;

	if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
	    rdma_is_kernel_res(&qp->ib_qp.res)) {
		flags = bnxt_re_lock_cqs(qp);
		bnxt_qplib_add_flush_qp(&qp->qplib_qp);
		bnxt_re_unlock_cqs(qp, flags);
	}

	memset(&event, 0, sizeof(event));
	if (qp->qplib_qp.srq) {
		event.device = &qp->rdev->ibdev;
		event.element.qp = &qp->ib_qp;
		event.event = IB_EVENT_QP_LAST_WQE_REACHED;
	}

	if (event.device && qp->ib_qp.event_handler)
		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);

	return 0;
}

static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
					   void *obj)
{
	int rc = 0;
	u8 event;

	if (!obj)
		return rc; /* QP was already dead, still return success */

	event = affi_async->event;
	if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) {
		struct bnxt_qplib_qp *lib_qp = obj;
		struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp,
						     qplib_qp);
		rc = bnxt_re_handle_qp_async_event(affi_async, qp);
	}
	return rc;
}

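/*
 * CREQ events are demultiplexed on creq_base.type: function-wide
 * (unaffiliated) firmware errors versus QP-affiliated events that carry
 * a qplib QP handle in @obj.
 */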
static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
			       void *aeqe, void *obj)
{
	struct creq_qp_event *affi_async;
	struct creq_func_event *unaffi_async;
	u8 type;
	int rc;

	type = ((struct creq_base *)aeqe)->type;
	if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
		unaffi_async = aeqe;
		rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
	} else {
		affi_async = aeqe;
		rc = bnxt_re_handle_affi_async_event(affi_async, obj);
	}

	return rc;
}

static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
				struct bnxt_qplib_srq *handle, u8 event)
{
	struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
					       qplib_srq);
	struct ib_event ib_event;
	int rc = 0;

	ib_event.device = &srq->rdev->ibdev;
	ib_event.element.srq = &srq->ib_srq;
	if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
		ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
	else
		ib_event.event = IB_EVENT_SRQ_ERR;

	if (srq->ib_srq.event_handler) {
		/* Lock event_handler? */
		(*srq->ib_srq.event_handler)(&ib_event,
					     srq->ib_srq.srq_context);
	}
	return rc;
}

static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
			       struct bnxt_qplib_cq *handle)
{
	struct bnxt_re_cq *cq = container_of(handle, struct bnxt_re_cq,
					     qplib_cq);

	if (cq->ib_cq.comp_handler) {
		/* Lock comp_handler? */
		(*cq->ib_cq.comp_handler)(&cq->ib_cq, cq->ib_cq.cq_context);
	}

	return 0;
}

#define BNXT_RE_GEN_P5_PF_NQ_DB		0x10000
#define BNXT_RE_GEN_P5_VF_NQ_DB		0x4000
static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
{
	return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
		(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
				   BNXT_RE_GEN_P5_PF_NQ_DB) :
		rdev->msix_entries[indx].db_offset;
}

static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
	int i;

	for (i = 1; i < rdev->num_msix; i++)
		bnxt_qplib_disable_nq(&rdev->nq[i - 1]);

	if (rdev->qplib_res.rcfw)
		bnxt_qplib_cleanup_res(&rdev->qplib_res);
}

static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
	int num_vec_enabled = 0;
	int rc = 0, i;
	u32 db_offt;

	bnxt_qplib_init_res(&rdev->qplib_res);

	for (i = 1; i < rdev->num_msix; i++) {
		db_offt = bnxt_re_get_nqdb_offset(rdev, i);
		rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
					  i - 1, rdev->msix_entries[i].vector,
					  db_offt, &bnxt_re_cqn_handler,
					  &bnxt_re_srqn_handler);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to enable NQ with rc = 0x%x", rc);
			goto fail;
		}
		num_vec_enabled++;
	}
	return 0;
fail:
	/* Disable only the NQs that were successfully enabled */
	for (i = num_vec_enabled - 1; i >= 0; i--)
		bnxt_qplib_disable_nq(&rdev->nq[i]);
	return rc;
}

static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
	u8 type;
	int i;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
		rdev->nq[i].res = NULL;
	}
}

static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
	bnxt_re_free_nq_res(rdev);

	if (rdev->qplib_res.dpi_tbl.max) {
		bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
				       &rdev->qplib_res.dpi_tbl,
				       &rdev->dpi_privileged);
	}
	if (rdev->qplib_res.rcfw) {
		bnxt_qplib_free_res(&rdev->qplib_res);
		rdev->qplib_res.rcfw = NULL;
	}
}

static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
	struct bnxt_re_ring_attr rattr = {};
	int num_vec_created = 0;
	int rc = 0, i;
	u8 type;

	/* Configure and allocate resources for qplib */
	rdev->qplib_res.rcfw = &rdev->rcfw;
	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_res(&rdev->qplib_res, rdev->en_dev->pdev,
				  rdev->netdev, &rdev->dev_attr);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
				  &rdev->dpi_privileged,
				  rdev);
	if (rc)
		goto dealloc_res;

	for (i = 0; i < rdev->num_msix - 1; i++) {
		struct bnxt_qplib_nq *nq;

		nq = &rdev->nq[i];
		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
		if (rc) {
			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
				  i, rc);
			goto free_nq;
		}
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
		rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;
		rattr.type = type;
		rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
		rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
		rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to allocate NQ fw id with rc = 0x%x",
				  rc);
			bnxt_qplib_free_nq(&rdev->nq[i]);
			goto free_nq;
		}
		num_vec_created++;
	}
	return 0;
free_nq:
	for (i = num_vec_created - 1; i >= 0; i--) {
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, type);
		bnxt_qplib_free_nq(&rdev->nq[i]);
	}
	bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
			       &rdev->qplib_res.dpi_tbl,
			       &rdev->dpi_privileged);
dealloc_res:
	bnxt_qplib_free_res(&rdev->qplib_res);

fail:
	rdev->qplib_res.rcfw = NULL;
	return rc;
}

static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
				   u8 port_num, enum ib_event_type event)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	if (qp) {
		ib_event.element.qp = qp;
		ib_event.event = event;
		if (qp->event_handler)
			qp->event_handler(&ib_event, qp->qp_context);

	} else {
		ib_event.element.port_num = port_num;
		ib_event.event = event;
		ib_dispatch_event(&ib_event);
	}
}

#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN	0x02
static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
				      u64 *cid_map)
{
	struct hwrm_queue_pri2cos_qcfg_input req = {0};
	struct bnxt *bp = netdev_priv(rdev->netdev);
	struct hwrm_queue_pri2cos_qcfg_output resp;
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct bnxt_fw_msg fw_msg;
	u32 flags = 0;
	u8 *qcfgmap, *tmp_map;
	int rc = 0, i;

	if (!cid_map)
		return -EINVAL;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	flags |= (dir & 0x01);
	flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req.flags = cpu_to_le32(flags);
	req.port_id = bp->pf.port_id;

	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc)
		return rc;

	if (resp.queue_cfg_info)
		ibdev_warn(&rdev->ibdev,
			   "Asymmetric cos queue configuration detected on device, QoS may not be fully functional\n");
	qcfgmap = &resp.pri0_cos_queue_id;
	tmp_map = (u8 *)cid_map;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		tmp_map[i] = qcfgmap[i];

	return rc;
}

static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
					struct bnxt_re_qp *qp)
{
	return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
	       (qp == rdev->gsi_ctx.gsi_sqp);
}

static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
{
	int mask = IB_QP_STATE;
	struct ib_qp_attr qp_attr;
	struct bnxt_re_qp *qp;

	qp_attr.qp_state = IB_QPS_ERR;
	mutex_lock(&rdev->qp_lock);
	list_for_each_entry(qp, &rdev->qp_list, list) {
		/* Modify the state of all QPs except QP1/Shadow QP */
		if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
			if (qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_RESET &&
			    qp->qplib_qp.state !=
			    CMDQ_MODIFY_QP_NEW_STATE_ERR) {
				bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
						       1, IB_EVENT_QP_FATAL);
				bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
						  NULL);
			}
		}
	}
	mutex_unlock(&rdev->qp_lock);
}

static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
{
	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
	struct bnxt_qplib_gid gid;
	u16 gid_idx, index;
	int rc = 0;

	if (!ib_device_try_get(&rdev->ibdev))
		return 0;

	if (!sgid_tbl) {
		ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
		rc = -EINVAL;
		goto out;
	}

	for (index = 0; index < sgid_tbl->active; index++) {
		gid_idx = sgid_tbl->hw_id[index];

		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
			    sizeof(bnxt_qplib_gid_zero)))
			continue;
		/* Only the VLAN enable setting of non-VLAN GIDs needs to be
		 * modified here; for VLAN GIDs it is already set while
		 * adding the GID.
		 */
		if (sgid_tbl->vlan[index])
			continue;

		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));

		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
					    rdev->qplib_res.netdev->dev_addr);
	}
out:
	ib_device_put(&rdev->ibdev);
	return rc;
}

static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
	u32 prio_map = 0, tmp_map = 0;
	struct net_device *netdev;
	struct dcb_app app;

	netdev = rdev->netdev;

	memset(&app, 0, sizeof(app));
	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
	app.protocol = ETH_P_IBOE;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map = tmp_map;

	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
	app.protocol = ROCE_V2_UDP_DPORT;
	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
	prio_map |= tmp_map;

	return prio_map;
}

static void bnxt_re_parse_cid_map(u8 prio_map, u8 *cid_map, u16 *cosq)
{
	u16 prio;
	u8 id;

	for (prio = 0, id = 0; prio < 8; prio++) {
		if (prio_map & (1 << prio)) {
			cosq[id] = cid_map[prio];
			id++;
			if (id == 2) /* Max 2 tcs supported */
				break;
		}
	}
}

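/*
 * QoS programming flow: derive the RoCE priority mask from the DCB
 * application table, query the firmware for the priority-to-CoS queue
 * mapping, then program the CoS queues into the RoCE block.
 */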
static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
{
	u8 prio_map = 0;
	u64 cid_map;
	int rc;

	/* Get the priority mask for RoCE */
	prio_map = bnxt_re_get_priority_mask(rdev);

	if (prio_map == rdev->cur_prio_map)
		return 0;
	rdev->cur_prio_map = prio_map;
	/* Get cosq id for this priority */
	rc = bnxt_re_query_hwrm_pri2cos(rdev, 0, &cid_map);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no cos for p_mask %x\n", prio_map);
		return rc;
	}
	/* Parse CoS IDs for app priority */
	bnxt_re_parse_cid_map(prio_map, (u8 *)&cid_map, rdev->cosq);

	/* Config BONO. */
	rc = bnxt_qplib_map_tc2cos(&rdev->qplib_res, rdev->cosq);
	if (rc) {
		ibdev_warn(&rdev->ibdev, "no tc for cos{%x, %x}\n",
			   rdev->cosq[0], rdev->cosq[1]);
		return rc;
	}

	/* The actual priorities are not programmed here since that is
	 * already done by the L2 driver; just enable or disable priority
	 * VLAN tagging.
	 */
	if ((prio_map == 0 && rdev->qplib_res.prio) ||
	    (prio_map != 0 && !rdev->qplib_res.prio)) {
		rdev->qplib_res.prio = prio_map ? true : false;

		bnxt_re_update_gid(rdev);
	}

	return 0;
}

static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
	struct bnxt_en_dev *en_dev = rdev->en_dev;
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg;
	int rc = 0;

	memset(&fw_msg, 0, sizeof(fw_msg));
	bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
			      HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
	rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
			  rc);
		return;
	}
	/* Pack major/minor/build/patch into one 64-bit interface version */
	rdev->qplib_ctx.hwrm_intf_ver =
		(u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
		(u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
		(u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
		le16_to_cpu(resp.hwrm_intf_patch);
}

static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
	int rc = 0;
	u32 event;

	/* Register ib dev */
	rc = bnxt_re_register_ib(rdev);
	if (rc) {
		pr_err("Failed to register with IB: %#x\n", rc);
		return rc;
	}
	dev_info(rdev_to_dev(rdev), "Device registered successfully");
	ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
			 &rdev->active_width);
	set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

	event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, event);

	return rc;
}

static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
{
	u8 type;
	int rc;

	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
		cancel_delayed_work_sync(&rdev->worker);

	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
			       &rdev->flags))
		bnxt_re_cleanup_res(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
		bnxt_re_free_res(rdev);

	if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
		rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to deinitialize RCFW: %#x", rc);
		bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
		bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
		bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
		type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
		bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
		bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
	}
	if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
		rc = bnxt_re_free_msix(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to free MSI-X vectors: %#x", rc);
	}

	bnxt_re_destroy_chip_ctx(rdev);
	if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
		rc = bnxt_re_unregister_netdev(rdev);
		if (rc)
			ibdev_warn(&rdev->ibdev,
				   "Failed to unregister with netdev: %#x", rc);
	}
}

/* Worker for polling periodic events. Currently used for QoS programming. */
static void bnxt_re_worker(struct work_struct *work)
{
	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
						worker.work);

	bnxt_re_setup_qos(rdev);
	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}

static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_re_ring_attr rattr;
	u32 db_offt;
	int vid;
	u8 type;
	int rc;

	/* Register a new RoCE device instance with the netdev */
	memset(&rattr, 0, sizeof(rattr));
	rc = bnxt_re_register_netdev(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to register with netdev: %#x\n", rc);
		return -EINVAL;
	}
	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);

	rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
		return -EINVAL;
	}

	/* Check whether VF or PF */
	bnxt_re_get_sriov_func_type(rdev);

	rc = bnxt_re_request_msix(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to get MSI-X vectors: %#x\n", rc);
		rc = -EINVAL;
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);

	bnxt_re_query_hwrm_intf_version(rdev);

	/* Establish RCFW Communication Channel to initialize the context
	 * memory for the function and all child VFs
	 */
	rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw,
					   &rdev->qplib_ctx,
					   BNXT_RE_MAX_QPC_COUNT);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate RCFW Channel: %#x\n", rc);
		goto fail;
	}

	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	creq = &rdev->rcfw.creq;
	rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
	rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
	rattr.type = type;
	rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
	rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
	rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
	rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
		goto free_rcfw;
	}
	db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
	vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
	rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
					    vid, db_offt, rdev->is_virtfn,
					    &bnxt_re_aeq_handler);
	if (rc) {
		ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
			  rc);
		goto free_ring;
	}

	rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
				     rdev->is_virtfn);
	if (rc)
		goto disable_rcfw;

	bnxt_re_set_resource_limits(rdev);

	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
				  bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate QPLIB context: %#x\n", rc);
		goto disable_rcfw;
	}
	rc = bnxt_re_net_stats_ctx_alloc(rdev,
					 rdev->qplib_ctx.stats.dma_map,
					 &rdev->qplib_ctx.stats.fw_id);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate stats context: %#x\n", rc);
		goto free_ctx;
	}

	rc = bnxt_qplib_init_rcfw(&rdev->rcfw, &rdev->qplib_ctx,
				  rdev->is_virtfn);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize RCFW: %#x\n", rc);
		goto free_sctx;
	}
	set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);

	/* Resources based on the 'new' device caps */
	rc = bnxt_re_alloc_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to allocate resources: %#x\n", rc);
		goto fail;
	}
	set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
	rc = bnxt_re_init_res(rdev);
	if (rc) {
		ibdev_err(&rdev->ibdev,
			  "Failed to initialize resources: %#x\n", rc);
		goto fail;
	}

	set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);

	if (!rdev->is_virtfn) {
		rc = bnxt_re_setup_qos(rdev);
		if (rc)
			ibdev_info(&rdev->ibdev,
				   "RoCE priority not yet configured\n");

		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
	}

	return 0;
free_sctx:
	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
free_rcfw:
	bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
	bnxt_re_dev_uninit(rdev);

	return rc;
}

1540static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
1541{
1542 struct net_device *netdev = rdev->netdev;
1543
1544 bnxt_re_dev_remove(rdev);
1545
1546 if (netdev)
1547 dev_put(netdev);
1548}
1549
1550static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
1551{
1552 struct bnxt_en_dev *en_dev;
1553 int rc = 0;
1554
1555 if (!is_bnxt_re_dev(netdev))
1556 return -ENODEV;
1557
1558 en_dev = bnxt_re_dev_probe(netdev);
1559 if (IS_ERR(en_dev)) {
1560 if (en_dev != ERR_PTR(-ENODEV))
1561 ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
1562 ROCE_DRV_MODULE_NAME);
1563 rc = PTR_ERR(en_dev);
1564 goto exit;
1565 }
1566 *rdev = bnxt_re_dev_add(netdev, en_dev);
1567 if (!*rdev) {
1568 rc = -ENOMEM;
1569 dev_put(netdev);
1570 goto exit;
1571 }
1572exit:
1573 return rc;
1574}
1575
1576static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
1577{
1578 bnxt_re_dev_uninit(rdev);
1579 pci_dev_put(rdev->en_dev->pdev);
1580 bnxt_re_dev_unreg(rdev);
1581}
1582
static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
			      struct net_device *netdev, u8 wqe_mode)
{
	int rc;

	rc = bnxt_re_dev_reg(rdev, netdev);
	if (rc == -ENODEV)
		return rc;
	if (rc) {
		pr_err("Failed to register with the device %s: %#x\n",
		       netdev->name, rc);
		return rc;
	}

	pci_dev_get((*rdev)->en_dev->pdev);
	rc = bnxt_re_dev_init(*rdev, wqe_mode);
	if (rc) {
		pci_dev_put((*rdev)->en_dev->pdev);
		bnxt_re_dev_unreg(*rdev);
	}

	return rc;
}

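/*
 * IB core ->dealloc_driver hook: performs the driver-specific teardown
 * when the device is unregistered.
 */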
static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
{
	struct bnxt_re_dev *rdev =
		container_of(ib_dev, struct bnxt_re_dev, ibdev);

	dev_info(rdev_to_dev(rdev), "Unregistering Device\n");

	rtnl_lock();
	bnxt_re_remove_device(rdev);
	rtnl_unlock();
}

/* Handle all deferred netdev events queued from the notifier */
static void bnxt_re_task(struct work_struct *work)
{
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;

	re_work = container_of(work, struct bnxt_re_work, work);
	rdev = re_work->rdev;

	if (re_work->event == NETDEV_REGISTER) {
		rc = bnxt_re_ib_init(rdev);
		if (rc) {
			ibdev_err(&rdev->ibdev,
				  "Failed to register with IB: %#x\n", rc);
			rtnl_lock();
			bnxt_re_remove_device(rdev);
			rtnl_unlock();
		}
		goto exit;
	}

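	/*
	 * All other events need a live IB device; ib_device_try_get()
	 * fails once unregistration has started.
	 */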
	if (!ib_device_try_get(&rdev->ibdev))
		goto exit;

	switch (re_work->event) {
	case NETDEV_UP:
		bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
				       IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		bnxt_re_dev_stop(rdev);
		break;
	case NETDEV_CHANGE:
		if (!netif_carrier_ok(rdev->netdev))
			bnxt_re_dev_stop(rdev);
		else
			bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
					       IB_EVENT_PORT_ACTIVE);
		ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
				 &rdev->active_width);
		break;
	default:
		break;
	}
	ib_device_put(&rdev->ibdev);
exit:
	put_device(&rdev->ibdev.dev);
	kfree(re_work);
}

/*
 * "Notifier chain callback can be invoked for the same chain from
 * different CPUs at the same time".
 *
 * When the netdev is already present, our call to
 * register_netdevice_notifier() takes the rtnl_lock() before replaying
 * NETDEV_REGISTER and (if the device is up) NETDEV_UP events.
 *
 * When the netdev is not yet present, however, the notifier chain may
 * be invoked from different CPUs simultaneously; that case is
 * serialized by the netdev_mutex.
 */
static int bnxt_re_netdev_event(struct notifier_block *notifier,
				unsigned long event, void *ptr)
{
	struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
	struct bnxt_re_work *re_work;
	struct bnxt_re_dev *rdev;
	int rc = 0;
	bool sch_work = false;
	bool release = true;

	real_dev = rdma_vlan_dev_real_dev(netdev);
	if (!real_dev)
		real_dev = netdev;

	rdev = bnxt_re_from_netdev(real_dev);
	if (!rdev && event != NETDEV_REGISTER)
		return NOTIFY_OK;

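	/* Only events on the real device are handled; events that arrive
	 * on a VLAN upper device are dropped here.
	 */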
	if (real_dev != netdev)
		goto exit;

	switch (event) {
	case NETDEV_REGISTER:
		if (rdev)
			break;
		rc = bnxt_re_add_device(&rdev, real_dev,
					BNXT_QPLIB_WQE_MODE_STATIC);
		if (!rc)
			sch_work = true;
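		/* bnxt_re_from_netdev() found no device, so no IB device
		 * reference was taken above; nothing to release at exit.
		 */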
		release = false;
		break;

	case NETDEV_UNREGISTER:
		ib_unregister_device_queued(&rdev->ibdev);
		break;

	default:
		sch_work = true;
		break;
	}
	if (sch_work) {
		/* Allocate for the deferred task */
		re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
		if (re_work) {
			get_device(&rdev->ibdev.dev);
			re_work->rdev = rdev;
			re_work->event = event;
			re_work->vlan_dev = (real_dev == netdev ?
					     NULL : netdev);
			INIT_WORK(&re_work->work, bnxt_re_task);
			queue_work(bnxt_re_wq, &re_work->work);
		}
	}

exit:
	if (rdev && release)
		ib_device_put(&rdev->ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block bnxt_re_netdev_notifier = {
	.notifier_call = bnxt_re_netdev_event
};

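/*
 * Module init: create the workqueue that backs the deferred event tasks,
 * then subscribe to netdev notifications.
 */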
static int __init bnxt_re_mod_init(void)
{
	int rc;

	pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);

	bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
	if (!bnxt_re_wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&bnxt_re_dev_list);

	rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (rc) {
		pr_err("%s: Cannot register to netdevice_notifier\n",
		       ROCE_DRV_MODULE_NAME);
		goto err_netdev;
	}
	return 0;

err_netdev:
	destroy_workqueue(bnxt_re_wq);

	return rc;
}

static void __exit bnxt_re_mod_exit(void)
{
	struct bnxt_re_dev *rdev;

	unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
	if (bnxt_re_wq)
		destroy_workqueue(bnxt_re_wq);
	list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
		/* VF devices must be removed before the PF.  Queue the
		 * VF unregistrations here so that the VFs are gone by the
		 * time ib_unregister_driver() tears down the PF.
		 */
		if (rdev->is_virtfn)
			ib_unregister_device_queued(&rdev->ibdev);
	}
	ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
}

module_init(bnxt_re_mod_init);
module_exit(bnxt_re_mod_exit);