/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

/* PBL */
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

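/* A minimal usage sketch (illustration only, not called by the driver;
 * the page count is a made-up value): describe a kernel-owned PBL via
 * bnxt_qplib_sg_info and let __alloc_pbl() back each page with its own
 * dma_alloc_coherent() allocation.
 */
static int __maybe_unused example_alloc_kernel_pbl(struct bnxt_qplib_res *res,
						   struct bnxt_qplib_pbl *pbl)
{
	struct bnxt_qplib_sg_info sginfo = {};

	sginfo.pgsize = PAGE_SIZE;	/* one DMA block per CPU page */
	sginfo.pgshft = PAGE_SHIFT;
	sginfo.npages = 4;		/* hypothetical: a 4-page table */
	return __alloc_pbl(res, pbl, &sginfo);	/* no umem: kernel path */
}
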
/* HWQ */
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
						hwq_attr->sginfo->pgsize);
		hwq->is_user = true;
	}

	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
		goto done;
	}

	if (npages >= MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
				/* For MR it is expected that we supply only
				 * 1 contiguous page, i.e. only 1 entry in the
				 * PDL that will contain all the PBLs for the
				 * user supplied memory region
				 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = hwq->depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

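/*
 * Worked example of the level selection above (illustrative numbers,
 * assuming MAX_PBL_LVL_0_PGS == 1 and MAX_PBL_LVL_1_PGS == 512): with
 * pg_size = 4K, depth = 65536 and stride = 16, npages = (65536 * 16) /
 * 4096 = 256. That is more than one page but no more than 512, so the
 * else branch runs: a single PBL page at PBL_LVL_0 holds the 256 PTEs
 * and the queue pages themselves sit at PBL_LVL_1.
 */
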
/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

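/*
 * Layout note for the mapping above (a sketch of the arithmetic, not
 * new behavior): ring i owns a fixed slice of PDE entries starting at
 * j = i * MAX_TQM_ALLOC_BLK_SIZE. A PBL_LVL_2 ring publishes one entry
 * per PBL_LVL_1 page into its slice; smaller rings publish just the
 * single PBL_LVL_0 page address at the start of the slice.
 */
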
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *     QPC ctx - holds QP states
 *     MRW ctx - holds memory region and window
 *     SRQ ctx - holds shared RQ states
 *     CQ ctx - holds completion queue states
 *     TQM ctx - holds Tx Queue Manager context
 *     TIM ctx - holds timer context
 *     Depending on the size of the table requested, either a 1-page Buffer
 *     List or a 1-to-2-stage indirection Page Directory List + 1 PBL is
 *     used instead.
 *     Tables might be employed as follows:
 *     For 0      < ctx size <= 1 PAGE, 0 levels of indirection are used
 *     For 1 PAGE < ctx size <= 512 entries, 1 level of indirection is used
 *     For 512    < ctx size <= MAX, 2 levels of indirection are used
 * Returns:
 *     0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

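/* A minimal caller sketch (illustration only; every count below is a
 * made-up value): populate the requested table sizes in bnxt_qplib_ctx
 * and let bnxt_qplib_alloc_ctx() size each backing HWQ from them.
 */
static int __maybe_unused example_alloc_ctx(struct bnxt_qplib_res *res,
					    struct bnxt_qplib_ctx *ctx)
{
	ctx->qpc_count = 64;	/* hypothetical QP count */
	ctx->mrw_count = 64;	/* hypothetical MR/MW count */
	ctx->srqc_count = 16;	/* hypothetical SRQ count */
	ctx->cq_count = 64;	/* hypothetical CQ count */
	/* not a VF, not a P5 chip: allocate the host-backed tables */
	return bnxt_qplib_alloc_ctx(res, ctx, false, false);
}
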
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
{
	struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
	u32 bit_num;
	int rc = 0;

	mutex_lock(&res->pd_tbl_lock);
	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max) {
		rc = -ENOMEM;
		goto exit;
	}

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
exit:
	mutex_unlock(&res->pd_tbl_lock);
	return rc;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	int rc = 0;

	mutex_lock(&res->pd_tbl_lock);
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		rc = -EINVAL;
		goto exit;
	}
	pd->id = 0;
exit:
	mutex_unlock(&res->pd_tbl_lock);
	return rc;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);
	mutex_init(&res->pd_tbl_lock);

	return 0;
}

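/*
 * Note on the bitmap convention shared by the PD and DPI tables: a set
 * bit means "free" and a cleared bit means "in use", hence the 0xFF
 * initialization above. find_first_bit() thus returns the first free
 * id, clear_bit() claims it, and test_and_set_bit() on release doubles
 * as a double-free check.
 */
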
/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
	struct bnxt_qplib_reg_desc *reg;
	u32 bit_num;
	u64 umaddr;

	reg = &dpit->wcreg;
	mutex_lock(&res->dpi_tbl_lock);

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max) {
		mutex_unlock(&res->dpi_tbl_lock);
		return -ENOMEM;
	}

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->bit = bit_num;
	dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;

	umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
	dpi->umdbr = umaddr;

	switch (type) {
	case BNXT_QPLIB_DPI_TYPE_KERNEL:
		/* privileged dbr was already mapped, just initialize it. */
		dpi->umdbr = dpit->ucreg.bar_base +
			     dpit->ucreg.offset + bit_num * PAGE_SIZE;
		dpi->dbr = dpit->priv_db;
		dpi->dpi = dpi->bit;
		break;
	case BNXT_QPLIB_DPI_TYPE_WC:
		dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
		break;
	default:
		dpi->dbr = ioremap(umaddr, PAGE_SIZE);
		break;
	}

	dpi->type = type;
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}

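/*
 * Worked example of the doorbell math above (all numbers illustrative):
 * with the WC window at BAR offset 8K, PAGE_SIZE = 4K and bit 3 free,
 * umaddr = bar_base + 8K + 3 * 4K. A user or WC DPI gets that one page
 * ioremap()ed (write-combined for BNXT_QPLIB_DPI_TYPE_WC); a kernel DPI
 * instead reuses the already-mapped privileged doorbell at priv_db.
 */
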
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi)
{
	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;

	mutex_lock(&res->dpi_tbl_lock);
	if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
		pci_iounmap(res->pdev, dpi->dbr);

	if (test_and_set_bit(dpi->bit, dpit->tbl)) {
		dev_warn(&res->pdev->dev,
			 "Freeing an unused DPI? dpi = %d, bit = %d\n",
			 dpi->dpi, dpi->bit);
		mutex_unlock(&res->dpi_tbl_lock);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->bit] = NULL;
	memset(dpi, 0, sizeof(*dpi));
	mutex_unlock(&res->dpi_tbl_lock);
	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	dpit->tbl = NULL;
	dpit->app_tbl = NULL;
	dpit->max = 0;
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dev_attr *dev_attr)
{
	struct bnxt_qplib_dpi_tbl *dpit;
	struct bnxt_qplib_reg_desc *reg;
	unsigned long bar_len;
	u32 dbr_offset;
	u32 bytes;

	dpit = &res->dpi_tbl;
	reg = &dpit->wcreg;

	if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		/* Offset should come from L2 driver */
		dbr_offset = dev_attr->l2_db_size;
		dpit->ucreg.offset = dbr_offset;
		dpit->wcreg.offset = dbr_offset;
	}

	bar_len = pci_resource_len(res->pdev, reg->bar_id);
	dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
	if (dev_attr->max_dpi)
		dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		return -ENOMEM;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		return -ENOMEM;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);
	mutex_init(&res->dpi_tbl_lock);
	dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;

	return 0;
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
{
	struct bnxt_qplib_reg_desc *reg;

	reg = &res->dpi_tbl.ucreg;
	if (reg->bar_reg)
		pci_iounmap(res->pdev, reg->bar_reg);
	reg->bar_reg = NULL;
	reg->bar_base = 0;
	reg->len = 0;
	reg->bar_id = 0;
}

int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
{
	struct bnxt_qplib_reg_desc *ucreg;
	struct bnxt_qplib_reg_desc *wcreg;

	wcreg = &res->dpi_tbl.wcreg;
	wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
	wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);

	ucreg = &res->dpi_tbl.ucreg;
	ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
	ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
	ucreg->len = ucreg->offset + PAGE_SIZE;
	if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "invalid dbr length %d\n",
			(int)ucreg->len);
		return -EINVAL;
	}
	ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
	if (!ucreg->bar_reg) {
		dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
		return -ENOMEM;
	}

	return 0;
}

int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}