/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Fast Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
#include <rdma/ib_mad.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"

static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);

static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
{
        qp->sq.condition = false;
        qp->sq.send_phantom = false;
        qp->sq.single = false;
}

/* Flush list */
static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_cq *scq, *rcq;

        scq = qp->scq;
        rcq = qp->rcq;

        if (!qp->sq.flushed) {
                dev_dbg(&scq->hwq.pdev->dev,
                        "FP: Adding to SQ Flush list = %p\n", qp);
                bnxt_qplib_cancel_phantom_processing(qp);
                list_add_tail(&qp->sq_flush, &scq->sqf_head);
                qp->sq.flushed = true;
        }
        if (!qp->srq) {
                if (!qp->rq.flushed) {
                        dev_dbg(&rcq->hwq.pdev->dev,
                                "FP: Adding to RQ Flush list = %p\n", qp);
                        list_add_tail(&qp->rq_flush, &rcq->rqf_head);
                        qp->rq.flushed = true;
                }
        }
}

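/* CQ flush-lock helpers: the SCQ flush_lock is always taken first (with
 * IRQs saved) and the RCQ lock second.  When a QP uses one CQ for both
 * send and receive, only a single real lock is taken; the __acquire()/
 * __release() annotations below keep sparse's lock-balance checking happy.
 */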
static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
                                              unsigned long *flags)
        __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
{
        spin_lock_irqsave(&qp->scq->flush_lock, *flags);
        if (qp->scq == qp->rcq)
                __acquire(&qp->rcq->flush_lock);
        else
                spin_lock(&qp->rcq->flush_lock);
}

static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
                                              unsigned long *flags)
        __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
{
        if (qp->scq == qp->rcq)
                __release(&qp->rcq->flush_lock);
        else
                spin_unlock(&qp->rcq->flush_lock);
        spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
}

void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
{
        unsigned long flags;

        bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __bnxt_qplib_add_flush_qp(qp);
        bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
{
        if (qp->sq.flushed) {
                qp->sq.flushed = false;
                list_del(&qp->sq_flush);
        }
        if (!qp->srq) {
                if (qp->rq.flushed) {
                        qp->rq.flushed = false;
                        list_del(&qp->rq_flush);
                }
        }
}

void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
        unsigned long flags;

        bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
        __clean_cq(qp->scq, (u64)(unsigned long)qp);
        qp->sq.hwq.prod = 0;
        qp->sq.hwq.cons = 0;
        __clean_cq(qp->rcq, (u64)(unsigned long)qp);
        qp->rq.hwq.prod = 0;
        qp->rq.hwq.cons = 0;

        __bnxt_qplib_del_flush_qp(qp);
        bnxt_qplib_release_cq_flush_locks(qp, &flags);
}

static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
{
        struct bnxt_qplib_nq_work *nq_work =
                container_of(work, struct bnxt_qplib_nq_work, work);

        struct bnxt_qplib_cq *cq = nq_work->cq;
        struct bnxt_qplib_nq *nq = nq_work->nq;

        if (cq && nq) {
                spin_lock_bh(&cq->compl_lock);
                if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
                        dev_dbg(&nq->pdev->dev,
                                "%s:Trigger cq = %p event nq = %p\n",
                                __func__, cq, nq);
                        nq->cqn_handler(nq, cq);
                }
                spin_unlock_bh(&cq->compl_lock);
        }
        kfree(nq_work);
}

static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;

        if (qp->rq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  rq->max_wqe * qp->rq_hdr_buf_size,
                                  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
        if (qp->sq_hdr_buf)
                dma_free_coherent(&res->pdev->dev,
                                  sq->max_wqe * qp->sq_hdr_buf_size,
                                  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
        qp->rq_hdr_buf = NULL;
        qp->sq_hdr_buf = NULL;
        qp->rq_hdr_buf_map = 0;
        qp->sq_hdr_buf_map = 0;
        qp->sq_hdr_buf_size = 0;
        qp->rq_hdr_buf_size = 0;
}

static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;

        if (qp->sq_hdr_buf_size && sq->max_wqe) {
                qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                        sq->max_wqe * qp->sq_hdr_buf_size,
                                        &qp->sq_hdr_buf_map, GFP_KERNEL);
                if (!qp->sq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "Failed to create sq_hdr_buf\n");
                        goto fail;
                }
        }

        if (qp->rq_hdr_buf_size && rq->max_wqe) {
                qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
                                                    rq->max_wqe *
                                                    qp->rq_hdr_buf_size,
                                                    &qp->rq_hdr_buf_map,
                                                    GFP_KERNEL);
                if (!qp->rq_hdr_buf) {
                        rc = -ENOMEM;
                        dev_err(&res->pdev->dev,
                                "Failed to create rq_hdr_buf\n");
                        goto fail;
                }
        }
        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
        return rc;
}

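/* Scan the NQ ring for CQ notification entries that still reference the
 * given CQ and scrub their handles so they are not acted upon later.
 * Every entry seen for this CQ bumps cq->cnq_events, which
 * __wait_for_all_nqes() below compares against to decide when the CQ
 * has quiesced.
 */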
static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
{
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct nq_base *nqe, **nq_ptr;
        int budget = nq->budget;
        uintptr_t q_handle;
        u16 type;

        spin_lock_bh(&hwq->lock);
        /* Service the NQ until empty */
        while (budget--) {
                nq_ptr = (struct nq_base **)hwq->pbl_ptr;
                nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
                if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
                        break;

                /*
                 * The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
                switch (type) {
                case NQ_BASE_TYPE_CQ_NOTIFICATION:
                {
                        struct nq_cn *nqcne = (struct nq_cn *)nqe;

                        q_handle = le32_to_cpu(nqcne->cq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
                                                     << 32;
                        if ((unsigned long)cq == q_handle) {
                                nqcne->cq_handle_low = 0;
                                nqcne->cq_handle_high = 0;
                                cq->cnq_events++;
                        }
                        break;
                }
                default:
                        break;
                }
                bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
                                         1, &nq->nq_db.dbinfo.flags);
        }
        spin_unlock_bh(&hwq->lock);
}

/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
 * this CQ.
 */
static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
{
        u32 retry_cnt = 100;

        while (retry_cnt--) {
                if (cnq_events == cq->cnq_events)
                        return;
                usleep_range(50, 100);
                clean_nq(cq->nq, cq);
        }
}

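/* NQ tasklet handler: consume up to nq->budget notification entries per
 * run.  The valid bit is tested before dma_rmb() so no other field of an
 * entry is read until hardware is known to have written it; the NQ
 * doorbell is rung once at the end to re-arm the queue.
 */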
static void bnxt_qplib_service_nq(struct tasklet_struct *t)
{
        struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        struct bnxt_qplib_cq *cq;
        int budget = nq->budget;
        struct nq_base *nqe;
        uintptr_t q_handle;
        u32 hw_polled = 0;
        u16 type;

        spin_lock_bh(&hwq->lock);
        /* Service the NQ until empty */
        while (budget--) {
                nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
                if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
                        break;

                /*
                 * The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();

                type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
                switch (type) {
                case NQ_BASE_TYPE_CQ_NOTIFICATION:
                {
                        struct nq_cn *nqcne = (struct nq_cn *)nqe;

                        q_handle = le32_to_cpu(nqcne->cq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
                                                     << 32;
                        cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
                        if (!cq)
                                break;
                        cq->toggle = (le16_to_cpu(nqe->info10_type) &
                                      NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
                        cq->dbinfo.toggle = cq->toggle;
                        bnxt_qplib_armen_db(&cq->dbinfo,
                                            DBC_DBC_TYPE_CQ_ARMENA);
                        spin_lock_bh(&cq->compl_lock);
                        atomic_set(&cq->arm_state, 0);
                        if (nq->cqn_handler(nq, cq))
                                dev_warn(&nq->pdev->dev,
                                         "cqn - type 0x%x not handled\n", type);
                        cq->cnq_events++;
                        spin_unlock_bh(&cq->compl_lock);
                        break;
                }
                case NQ_BASE_TYPE_SRQ_EVENT:
                {
                        struct bnxt_qplib_srq *srq;
                        struct nq_srq_event *nqsrqe =
                                                (struct nq_srq_event *)nqe;

                        q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
                        q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
                                                     << 32;
                        srq = (struct bnxt_qplib_srq *)q_handle;
                        bnxt_qplib_armen_db(&srq->dbinfo,
                                            DBC_DBC_TYPE_SRQ_ARMENA);
                        if (nq->srqn_handler(nq,
                                             (struct bnxt_qplib_srq *)q_handle,
                                             nqsrqe->event))
                                dev_warn(&nq->pdev->dev,
                                         "SRQ event 0x%x not handled\n",
                                         nqsrqe->event);
                        break;
                }
                case NQ_BASE_TYPE_DBQ_EVENT:
                        break;
                default:
                        dev_warn(&nq->pdev->dev,
                                 "nqe with type = 0x%x not handled\n", type);
                        break;
                }
                hw_polled++;
                bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
                                         1, &nq->nq_db.dbinfo.flags);
        }
        if (hw_polled)
                bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
        spin_unlock_bh(&hwq->lock);
}

/* bnxt_re_synchronize_nq - self-poll the notification queue.
 * @nq - notification queue pointer
 *
 * Poll all pending entries of the given notification queue.
 * This is useful for synchronizing notification entries while resources
 * are going away.
 */
void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
{
        int budget = nq->budget;

        nq->budget = nq->hwq.max_elements;
        bnxt_qplib_service_nq(&nq->nq_tasklet);
        nq->budget = budget;
}

static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
{
        struct bnxt_qplib_nq *nq = dev_instance;
        struct bnxt_qplib_hwq *hwq = &nq->hwq;
        u32 sw_cons;

        /* Prefetch the NQ element */
        sw_cons = HWQ_CMP(hwq->cons, hwq);
        prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

        /* Fan out to CPU affinitized kthreads? */
        tasklet_schedule(&nq->nq_tasklet);

        return IRQ_HANDLED;
}

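/* Teardown order matters here: mask the NQ doorbell so no new events
 * arrive, synchronize with an IRQ handler that may already be running,
 * and only then release the vector and stop the tasklet.
 */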
void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
{
        if (!nq->requested)
                return;

        nq->requested = false;
        /* Mask h/w interrupt */
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
        /* Sync with last running IRQ handler */
        synchronize_irq(nq->msix_vec);
        irq_set_affinity_hint(nq->msix_vec, NULL);
        free_irq(nq->msix_vec, nq);
        kfree(nq->name);
        nq->name = NULL;

        if (kill)
                tasklet_kill(&nq->nq_tasklet);
        tasklet_disable(&nq->nq_tasklet);
}

void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
{
        if (nq->cqn_wq) {
                destroy_workqueue(nq->cqn_wq);
                nq->cqn_wq = NULL;
        }

        /* Make sure the HW is stopped! */
        bnxt_qplib_nq_stop_irq(nq, true);

        if (nq->nq_db.reg.bar_reg) {
                iounmap(nq->nq_db.reg.bar_reg);
                nq->nq_db.reg.bar_reg = NULL;
        }

        nq->cqn_handler = NULL;
        nq->srqn_handler = NULL;
        nq->msix_vec = 0;
}

int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
                            int msix_vector, bool need_init)
{
        struct bnxt_qplib_res *res = nq->res;
        int rc;

        if (nq->requested)
                return -EFAULT;

        nq->msix_vec = msix_vector;
        if (need_init)
                tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
        else
                tasklet_enable(&nq->nq_tasklet);

        nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
                             nq_indx, pci_name(res->pdev));
        if (!nq->name)
                return -ENOMEM;
        rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
        if (rc) {
                kfree(nq->name);
                nq->name = NULL;
                tasklet_disable(&nq->nq_tasklet);
                return rc;
        }

        cpumask_clear(&nq->mask);
        cpumask_set_cpu(nq_indx, &nq->mask);
        rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
        if (rc) {
                dev_warn(&nq->pdev->dev,
                         "set affinity failed; vector: %d nq_idx: %d\n",
                         nq->msix_vec, nq_indx);
        }
        nq->requested = true;
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);

        return rc;
}

static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
{
        resource_size_t reg_base;
        struct bnxt_qplib_nq_db *nq_db;
        struct pci_dev *pdev;

        pdev = nq->pdev;
        nq_db = &nq->nq_db;

        nq_db->dbinfo.flags = 0;
        nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
        nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
        if (!nq_db->reg.bar_base) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
                        nq_db->reg.bar_id);
                return -ENOMEM;
        }

        reg_base = nq_db->reg.bar_base + reg_offt;
        /* Unconditionally map 8 bytes to support 57500 series */
        nq_db->reg.len = 8;
        nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
        if (!nq_db->reg.bar_reg) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
                        nq_db->reg.bar_id);
                return -ENOMEM;
        }

        nq_db->dbinfo.db = nq_db->reg.bar_reg;
        nq_db->dbinfo.hwq = &nq->hwq;
        nq_db->dbinfo.xid = nq->ring_id;

        return 0;
}

int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
                         int nq_idx, int msix_vector, int bar_reg_offset,
                         cqn_handler_t cqn_handler,
                         srqn_handler_t srqn_handler)
{
        int rc;

        nq->pdev = pdev;
        nq->cqn_handler = cqn_handler;
        nq->srqn_handler = srqn_handler;

        /* Have a task to schedule CQ notifiers in post send case */
        nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
        if (!nq->cqn_wq)
                return -ENOMEM;

        rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
        if (rc)
                goto fail;

        rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
        if (rc) {
                dev_err(&nq->pdev->dev,
                        "Failed to request irq for nq-idx %d\n", nq_idx);
                goto fail;
        }

        return 0;
fail:
        bnxt_qplib_disable_nq(nq);
        return rc;
}

void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
{
        if (nq->hwq.max_elements) {
                bnxt_qplib_free_hwq(nq->res, &nq->hwq);
                nq->hwq.max_elements = 0;
        }
}

int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};

        nq->pdev = res->pdev;
        nq->res = res;
        if (!nq->hwq.max_elements ||
            nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
                nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;

        sginfo.pgsize = PAGE_SIZE;
        sginfo.pgshft = PAGE_SHIFT;
        hwq_attr.res = res;
        hwq_attr.sginfo = &sginfo;
        hwq_attr.depth = nq->hwq.max_elements;
        hwq_attr.stride = sizeof(struct nq_base);
        hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
        if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
                dev_err(&nq->pdev->dev, "FP NQ allocation failed");
                return -ENOMEM;
        }
        nq->budget = 8;
        return 0;
}

/* SRQ */
void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_destroy_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_destroy_srq req = {};
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_DESTROY_SRQ,
                                 sizeof(req));

        /* Configure the request */
        req.srq_cid = cpu_to_le32(srq->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        kfree(srq->swq);
        if (rc)
                return;
        bnxt_qplib_free_hwq(res, &srq->hwq);
}

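/* Create an SRQ in firmware and set the software queue up as a free
 * list: swq entries are chained through next_idx, bounded by start_idx
 * and last_idx, so receive buffers can be reposted in any order.
 */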
int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct creq_create_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_create_srq req = {};
        struct bnxt_qplib_pbl *pbl;
        u16 pg_sz_lvl;
        int rc, idx;

        hwq_attr.res = res;
        hwq_attr.sginfo = &srq->sg_info;
        hwq_attr.depth = srq->max_wqe;
        hwq_attr.stride = srq->wqe_size;
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
        if (rc)
                return rc;

        srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
                           GFP_KERNEL);
        if (!srq->swq) {
                rc = -ENOMEM;
                goto fail;
        }
        srq->dbinfo.flags = 0;
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_SRQ,
                                 sizeof(req));

        /* Configure the request */
        req.dpi = cpu_to_le32(srq->dpi->dpi);
        req.srq_handle = cpu_to_le64((uintptr_t)srq);

        req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
        pbl = &srq->hwq.pbl[PBL_LVL_0];
        pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
                     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
        pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
                     CMDQ_CREATE_SRQ_LVL_SFT;
        req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
        req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        req.pd_id = cpu_to_le32(srq->pd->id);
        req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        spin_lock_init(&srq->lock);
        srq->start_idx = 0;
        srq->last_idx = srq->hwq.max_elements - 1;
        for (idx = 0; idx < srq->hwq.max_elements; idx++)
                srq->swq[idx].next_idx = idx + 1;
        srq->swq[srq->last_idx].next_idx = -1;

        srq->id = le32_to_cpu(resp.xid);
        srq->dbinfo.hwq = &srq->hwq;
        srq->dbinfo.xid = srq->id;
        srq->dbinfo.db = srq->dpi->dbr;
        srq->dbinfo.max_slot = 1;
        srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
        if (srq->threshold)
                bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
        srq->arm_req = false;

        return 0;
fail:
        bnxt_qplib_free_hwq(res, &srq->hwq);
        kfree(srq->swq);

        return rc;
}

int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
        u32 count;

        count = __bnxt_qplib_get_avail(srq_hwq);
        if (count > srq->threshold) {
                srq->arm_req = false;
                bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
        } else {
                /* Deferred arming */
                srq->arm_req = true;
        }

        return 0;
}

int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
                         struct bnxt_qplib_srq *srq)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_query_srq_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_rcfw_sbuf sbuf;
        struct creq_query_srq_resp_sb *sb;
        struct cmdq_query_srq req = {};
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_QUERY_SRQ,
                                 sizeof(req));

        /* Configure the request */
        sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
        sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
                                     &sbuf.dma_addr, GFP_KERNEL);
        if (!sbuf.sb)
                return -ENOMEM;
        req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
        req.srq_cid = cpu_to_le32(srq->id);
        sb = sbuf.sb;
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (!rc)
                srq->threshold = le16_to_cpu(sb->srq_limit);
        dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
                          sbuf.sb, sbuf.dma_addr);

        return rc;
}

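/* Post a receive WQE to the SRQ: pop a slot off the swq free list under
 * the queue lock, build the hardware WQE (wr_id[0] carries the swq index
 * back in the completion), ring the producer doorbell, and re-arm the
 * SRQ if a deferred arm was requested and space is above the threshold.
 */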
int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
                             struct bnxt_qplib_swqe *wqe)
{
        struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
        struct rq_wqe *srqe;
        struct sq_sge *hw_sge;
        u32 count = 0;
        int i, next;

        spin_lock(&srq_hwq->lock);
        if (srq->start_idx == srq->last_idx) {
                dev_err(&srq_hwq->pdev->dev,
                        "FP: SRQ (0x%x) is full!\n", srq->id);
                spin_unlock(&srq_hwq->lock);
                return -EINVAL;
        }
        next = srq->start_idx;
        srq->start_idx = srq->swq[next].next_idx;
        spin_unlock(&srq_hwq->lock);

        srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
        memset(srqe, 0, srq->wqe_size);
        /* Calculate wqe_size16 and data_len */
        for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
             i < wqe->num_sge; i++, hw_sge++) {
                hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
                hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
                hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
        }
        srqe->wqe_type = wqe->type;
        srqe->flags = wqe->flags;
        srqe->wqe_size = wqe->num_sge +
                        ((offsetof(typeof(*srqe), data) + 15) >> 4);
        srqe->wr_id[0] = cpu_to_le32((u32)next);
        srq->swq[next].wr_id = wqe->wr_id;

        bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);

        spin_lock(&srq_hwq->lock);
        count = __bnxt_qplib_get_avail(srq_hwq);
        spin_unlock(&srq_hwq->lock);
        /* Ring DB */
        bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
        if (srq->arm_req && count > srq->threshold) {
                srq->arm_req = false;
                bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
        }

        return 0;
}

/* QP */

static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
{
        int indx;

        que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
        if (!que->swq)
                return -ENOMEM;

        que->swq_start = 0;
        que->swq_last = que->max_wqe - 1;
        for (indx = 0; indx < que->max_wqe; indx++)
                que->swq[indx].next_idx = indx + 1;
        que->swq[que->swq_last].next_idx = 0; /* Make it circular */
        que->swq_last = 0;

        return 0;
}

int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_create_qp1_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        struct cmdq_create_qp1 req = {};
        struct bnxt_qplib_pbl *pbl;
        u32 qp_flags = 0;
        u8 pg_sz_lvl;
        u32 tbl_indx;
        int rc;

        sq->dbinfo.flags = 0;
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_QP1,
                                 sizeof(req));
        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        hwq_attr.res = res;
        hwq_attr.sginfo = &sq->sg_info;
        hwq_attr.stride = sizeof(struct sq_sge);
        hwq_attr.depth = bnxt_qplib_get_depth(sq);
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
                return rc;

        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
                goto fail_sq;

        req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
                     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
        pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
        req.sq_pg_size_sq_lvl = pg_sz_lvl;
        req.sq_fwo_sq_sge =
                cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
                            CMDQ_CREATE_QP1_SQ_SGE_SFT);
        req.scq_cid = cpu_to_le32(qp->scq->id);

        /* RQ */
        if (rq->max_wqe) {
                rq->dbinfo.flags = 0;
                hwq_attr.res = res;
                hwq_attr.sginfo = &rq->sg_info;
                hwq_attr.stride = sizeof(struct sq_sge);
                hwq_attr.depth = bnxt_qplib_get_depth(rq);
                hwq_attr.type = HWQ_TYPE_QUEUE;
                rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
                if (rc)
                        goto sq_swq;
                rc = bnxt_qplib_alloc_init_swq(rq);
                if (rc)
                        goto fail_rq;
                req.rq_size = cpu_to_le32(rq->max_wqe);
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
                             CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
                pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
                req.rq_pg_size_rq_lvl = pg_sz_lvl;
                req.rq_fwo_rq_sge =
                        cpu_to_le16((rq->max_sge &
                                     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
                                    CMDQ_CREATE_QP1_RQ_SGE_SFT);
        }
        req.rcq_cid = cpu_to_le32(qp->rcq->id);
        /* Header buffer - allow hdr_buf pass in */
        rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
        if (rc) {
                rc = -ENOMEM;
                goto rq_rwq;
        }
        qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
        req.qp_flags = cpu_to_le32(qp_flags);
        req.pd_id = cpu_to_le32(qp->pd->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        qp->cctx = res->cctx;
        sq->dbinfo.hwq = &sq->hwq;
        sq->dbinfo.xid = qp->id;
        sq->dbinfo.db = qp->dpi->dbr;
        sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
        if (rq->max_wqe) {
                rq->dbinfo.hwq = &rq->hwq;
                rq->dbinfo.xid = qp->id;
                rq->dbinfo.db = qp->dpi->dbr;
                rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
        }
        tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
        rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
        rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

        return 0;

fail:
        bnxt_qplib_free_qp_hdr_buf(res, qp);
rq_rwq:
        kfree(rq->swq);
fail_rq:
        bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
        kfree(sq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
        return rc;
}

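/* The PSN/MSN search entries live in the "pad" area allocated after the
 * SQ itself (the aux portion of the hwq).  Record the first entry's
 * offset within its page so bnxt_qplib_pull_psn_buff() can index
 * entries relative to it.
 */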
static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
{
        struct bnxt_qplib_hwq *hwq;
        struct bnxt_qplib_q *sq;
        u64 fpsne, psn_pg;
        u16 indx_pad = 0;

        sq = &qp->sq;
        hwq = &sq->hwq;
        /* First psn entry */
        fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
        if (!IS_ALIGNED(fpsne, PAGE_SIZE))
                indx_pad = (fpsne & ~PAGE_MASK) / size;
        hwq->pad_pgofft = indx_pad;
        hwq->pad_pg = (u64 *)psn_pg;
        hwq->pad_stride = size;
}

int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct bnxt_qplib_hwq_attr hwq_attr = {};
        struct bnxt_qplib_sg_info sginfo = {};
        struct creq_create_qp_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_q *rq = &qp->rq;
        struct cmdq_create_qp req = {};
        int rc, req_size, psn_sz = 0;
        struct bnxt_qplib_hwq *xrrq;
        struct bnxt_qplib_pbl *pbl;
        u32 qp_flags = 0;
        u8 pg_sz_lvl;
        u32 tbl_indx;
        u16 nsge;

        if (res->dattr)
                qp->dev_cap_flags = res->dattr->dev_cap_flags;

        sq->dbinfo.flags = 0;
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_QP,
                                 sizeof(req));

        /* General */
        req.type = qp->type;
        req.dpi = cpu_to_le32(qp->dpi->dpi);
        req.qp_handle = cpu_to_le64(qp->qp_handle);

        /* SQ */
        if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
                psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
                         sizeof(struct sq_psn_search_ext) :
                         sizeof(struct sq_psn_search);

                if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
                        psn_sz = sizeof(struct sq_msn_search);
                        qp->msn = 0;
                }
        }

        hwq_attr.res = res;
        hwq_attr.sginfo = &sq->sg_info;
        hwq_attr.stride = sizeof(struct sq_sge);
        hwq_attr.depth = bnxt_qplib_get_depth(sq);
        hwq_attr.aux_stride = psn_sz;
        hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
        /* Update msn tbl size */
        if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
                hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
                qp->msn_tbl_sz = hwq_attr.aux_depth;
                qp->msn = 0;
        }

        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
                return rc;

        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
                goto fail_sq;

        if (psn_sz)
                bnxt_qplib_init_psn_ptr(qp, psn_sz);

        req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
        pbl = &sq->hwq.pbl[PBL_LVL_0];
        req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
        pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
                     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
        pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
        req.sq_pg_size_sq_lvl = pg_sz_lvl;
        req.sq_fwo_sq_sge =
                cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
                             CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
        req.scq_cid = cpu_to_le32(qp->scq->id);

        /* RQ */
        if (!qp->srq) {
                rq->dbinfo.flags = 0;
                hwq_attr.res = res;
                hwq_attr.sginfo = &rq->sg_info;
                hwq_attr.stride = sizeof(struct sq_sge);
                hwq_attr.depth = bnxt_qplib_get_depth(rq);
                hwq_attr.aux_stride = 0;
                hwq_attr.aux_depth = 0;
                hwq_attr.type = HWQ_TYPE_QUEUE;
                rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
                if (rc)
                        goto sq_swq;
                rc = bnxt_qplib_alloc_init_swq(rq);
                if (rc)
                        goto fail_rq;

                req.rq_size = cpu_to_le32(rq->max_wqe);
                pbl = &rq->hwq.pbl[PBL_LVL_0];
                req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
                pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
                             CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
                pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
                req.rq_pg_size_rq_lvl = pg_sz_lvl;
                nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
                        6 : rq->max_sge;
                req.rq_fwo_rq_sge =
                        cpu_to_le16(((nsge &
                                      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
                                     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
        } else {
                /* SRQ */
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
                req.srq_cid = cpu_to_le32(qp->srq->id);
        }
        req.rcq_cid = cpu_to_le32(qp->rcq->id);

        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
        qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
        if (qp->sig_type)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
        if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
        if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
                qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;

        req.qp_flags = cpu_to_le32(qp_flags);

        /* ORRQ and IRRQ */
        if (psn_sz) {
                xrrq = &qp->orrq;
                xrrq->max_elements =
                        ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
                sginfo.pgsize = req_size;
                sginfo.pgshft = PAGE_SHIFT;

                hwq_attr.res = res;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.depth = xrrq->max_elements;
                hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
                hwq_attr.aux_stride = 0;
                hwq_attr.aux_depth = 0;
                hwq_attr.type = HWQ_TYPE_CTX;
                rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
                if (rc)
                        goto rq_swq;
                pbl = &xrrq->pbl[PBL_LVL_0];
                req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);

                xrrq = &qp->irrq;
                xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
                                                qp->max_dest_rd_atomic);
                req_size = xrrq->max_elements *
                           BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
                req_size &= ~(PAGE_SIZE - 1);
                sginfo.pgsize = req_size;
                hwq_attr.depth = xrrq->max_elements;
                hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
                rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
                if (rc)
                        goto fail_orrq;

                pbl = &xrrq->pbl[PBL_LVL_0];
                req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
        }
        req.pd_id = cpu_to_le32(qp->pd->id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto fail;

        qp->id = le32_to_cpu(resp.xid);
        qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
        INIT_LIST_HEAD(&qp->sq_flush);
        INIT_LIST_HEAD(&qp->rq_flush);
        qp->cctx = res->cctx;
        sq->dbinfo.hwq = &sq->hwq;
        sq->dbinfo.xid = qp->id;
        sq->dbinfo.db = qp->dpi->dbr;
        sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
        if (rq->max_wqe) {
                rq->dbinfo.hwq = &rq->hwq;
                rq->dbinfo.xid = qp->id;
                rq->dbinfo.db = qp->dpi->dbr;
                rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
        }
        tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
        rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
        rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;

        return 0;
fail:
        bnxt_qplib_free_hwq(res, &qp->irrq);
fail_orrq:
        bnxt_qplib_free_hwq(res, &qp->orrq);
rq_swq:
        kfree(rq->swq);
fail_rq:
        bnxt_qplib_free_hwq(res, &rq->hwq);
sq_swq:
        kfree(sq->swq);
fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
        return rc;
}

static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                /* INIT->RTR, configure the path_mtu to the default
                 * 2048 if not being requested
                 */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
                        qp->path_mtu =
                                CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
                }
                qp->modify_flags &=
                        ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
                /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
                if (qp->max_dest_rd_atomic < 1)
                        qp->max_dest_rd_atomic = 1;
                qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
                /* Bono FW 20.6.5 requires SGID_INDEX configuration */
                if (!(qp->modify_flags &
                    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
                        qp->modify_flags |=
                                CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
                        qp->ah.sgid_index = 0;
                }
                break;
        default:
                break;
        }
}

static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
{
        switch (qp->state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                /* Bono FW requires the max_rd_atomic to be >= 1 */
                if (qp->max_rd_atomic < 1)
                        qp->max_rd_atomic = 1;
                /* Bono FW does not allow PKEY_INDEX,
                 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
                 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
                 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
                 * modification
                 */
                qp->modify_flags &=
                        ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
                          CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
                          CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
                          CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
                          CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
                          CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
                          CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
                          CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
                break;
        default:
                break;
        }
}

static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
{
        switch (qp->cur_qp_state) {
        case CMDQ_MODIFY_QP_NEW_STATE_RESET:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_INIT:
                __modify_flags_from_init_state(qp);
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_RTR:
                __modify_flags_from_rtr_state(qp);
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_RTS:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_SQD:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_SQE:
                break;
        case CMDQ_MODIFY_QP_NEW_STATE_ERR:
                break;
        default:
                break;
        }
}

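/* Issue MODIFY_QP to firmware.  The modify mask is filtered first,
 * based on the current->new state transition (__filter_modify_flags),
 * so attributes the firmware rejects for a given transition are never
 * carried in the request.
 */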
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_modify_qp_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_modify_qp req = {};
        u32 temp32[4];
        u32 bmask;
        int rc;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_MODIFY_QP,
                                 sizeof(req));

        /* Filter out the qp_attr_mask based on the state->new transition */
        __filter_modify_flags(qp);
        bmask = qp->modify_flags;
        req.modify_mask = cpu_to_le32(qp->modify_flags);
        req.qp_cid = cpu_to_le32(qp->id);
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
                req.network_type_en_sqd_async_notify_new_state =
                                (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
                                (qp->en_sqd_async_notify ?
                                        CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
        }
        req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
                req.access = qp->access;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
                req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
                req.qkey = cpu_to_le32(qp->qkey);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
                memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
                req.dgid[0] = cpu_to_le32(temp32[0]);
                req.dgid[1] = cpu_to_le32(temp32[1]);
                req.dgid[2] = cpu_to_le32(temp32[2]);
                req.dgid[3] = cpu_to_le32(temp32[3]);
        }
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
                req.flow_label = cpu_to_le32(qp->ah.flow_label);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
                req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
                                             [qp->ah.sgid_index]);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
                req.hop_limit = qp->ah.hop_limit;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
                req.traffic_class = qp->ah.traffic_class;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
                memcpy(req.dest_mac, qp->ah.dmac, 6);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
                req.path_mtu_pingpong_push_enable |= qp->path_mtu;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
                req.timeout = qp->timeout;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
                req.retry_cnt = qp->retry_cnt;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
                req.rnr_retry = qp->rnr_retry;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
                req.min_rnr_timer = qp->min_rnr_timer;

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
                req.rq_psn = cpu_to_le32(qp->rq.psn);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
                req.sq_psn = cpu_to_le32(qp->sq.psn);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
                req.max_rd_atomic =
                        ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);

        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
                req.max_dest_rd_atomic =
                        IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);

        req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
        req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
        req.sq_sge = cpu_to_le16(qp->sq.max_sge);
        req.rq_sge = cpu_to_le16(qp->rq.max_sge);
        req.max_inline_data = cpu_to_le32(qp->max_inline_data);
        if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
                req.dest_qp_id = cpu_to_le32(qp->dest_qpn);

        req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);

        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                return rc;
        qp->cur_qp_state = qp->state;
        return 0;
}

int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_query_qp_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct bnxt_qplib_rcfw_sbuf sbuf;
        struct creq_query_qp_resp_sb *sb;
        struct cmdq_query_qp req = {};
        u32 temp32[4];
        int i, rc;

        sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
        sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
                                     &sbuf.dma_addr, GFP_KERNEL);
        if (!sbuf.sb)
                return -ENOMEM;
        sb = sbuf.sb;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_QUERY_QP,
                                 sizeof(req));

        req.qp_cid = cpu_to_le32(qp->id);
        req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc)
                goto bail;
        /* Extract the context from the side buffer */
        qp->state = sb->en_sqd_async_notify_state &
                        CREQ_QUERY_QP_RESP_SB_STATE_MASK;
        qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
                                  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
        qp->access = sb->access;
        qp->pkey_index = le16_to_cpu(sb->pkey);
        qp->qkey = le32_to_cpu(sb->qkey);

        temp32[0] = le32_to_cpu(sb->dgid[0]);
        temp32[1] = le32_to_cpu(sb->dgid[1]);
        temp32[2] = le32_to_cpu(sb->dgid[2]);
        temp32[3] = le32_to_cpu(sb->dgid[3]);
        memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));

        qp->ah.flow_label = le32_to_cpu(sb->flow_label);

        qp->ah.sgid_index = 0;
        for (i = 0; i < res->sgid_tbl.max; i++) {
                if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
                        qp->ah.sgid_index = i;
                        break;
                }
        }
        if (i == res->sgid_tbl.max)
                dev_warn(&res->pdev->dev, "SGID not found??\n");

        qp->ah.hop_limit = sb->hop_limit;
        qp->ah.traffic_class = sb->traffic_class;
        memcpy(qp->ah.dmac, sb->dest_mac, 6);
        qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
                          CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
                         CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
        qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
                        CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
                       CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
        qp->timeout = sb->timeout;
        qp->retry_cnt = sb->retry_cnt;
        qp->rnr_retry = sb->rnr_retry;
        qp->min_rnr_timer = sb->min_rnr_timer;
        qp->rq.psn = le32_to_cpu(sb->rq_psn);
        qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
        qp->sq.psn = le32_to_cpu(sb->sq_psn);
        qp->max_dest_rd_atomic =
                        IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
        qp->sq.max_wqe = qp->sq.hwq.max_elements;
        qp->rq.max_wqe = qp->rq.hwq.max_elements;
        qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
        qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
        qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
        qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
        memcpy(qp->smac, sb->src_mac, 6);
        qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
        dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
                          sbuf.sb, sbuf.dma_addr);
        return rc;
}

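/* Peek every valid CQE in the ring without consuming it and zero the
 * qp_handle of entries belonging to the given QP, so later polling
 * cannot hand back completions for a QP that is being cleaned up.
 */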
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
{
        struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
        u32 peek_flags, peek_cons;
        struct cq_base *hw_cqe;
        int i;

        peek_flags = cq->dbinfo.flags;
        peek_cons = cq_hwq->cons;
        for (i = 0; i < cq_hwq->max_elements; i++) {
                hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
                if (!CQE_CMP_VALID(hw_cqe, peek_flags))
                        continue;
                /*
                 * The valid test of the entry must be done first before
                 * reading any further.
                 */
                dma_rmb();
                switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
                case CQ_BASE_CQE_TYPE_REQ:
                case CQ_BASE_CQE_TYPE_TERMINAL:
                {
                        struct cq_req *cqe = (struct cq_req *)hw_cqe;

                        if (qp == le64_to_cpu(cqe->qp_handle))
                                cqe->qp_handle = 0;
                        break;
                }
                case CQ_BASE_CQE_TYPE_RES_RC:
                case CQ_BASE_CQE_TYPE_RES_UD:
                case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
                {
                        struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;

                        if (qp == le64_to_cpu(cqe->qp_handle))
                                cqe->qp_handle = 0;
                        break;
                }
                default:
                        break;
                }
                bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
                                         1, &peek_flags);
        }
}

int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
                          struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_destroy_qp_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_destroy_qp req = {};
        u32 tbl_indx;
        int rc;

        tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
        rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
        rcfw->qp_tbl[tbl_indx].qp_handle = NULL;

        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_DESTROY_QP,
                                 sizeof(req));

        req.qp_cid = cpu_to_le32(qp->id);
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
                                sizeof(resp), 0);
        rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
        if (rc) {
                rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
                rcfw->qp_tbl[tbl_indx].qp_handle = qp;
                return rc;
        }

        return 0;
}

void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
                            struct bnxt_qplib_qp *qp)
{
        bnxt_qplib_free_qp_hdr_buf(res, qp);
        bnxt_qplib_free_hwq(res, &qp->sq.hwq);
        kfree(qp->sq.swq);

        bnxt_qplib_free_hwq(res, &qp->rq.hwq);
        kfree(qp->rq.swq);

        if (qp->irrq.max_elements)
                bnxt_qplib_free_hwq(res, &qp->irrq);
        if (qp->orrq.max_elements)
                bnxt_qplib_free_hwq(res, &qp->orrq);
}

void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge)
{
        struct bnxt_qplib_q *sq = &qp->sq;
        u32 sw_prod;

        memset(sge, 0, sizeof(*sge));

        if (qp->sq_hdr_buf) {
                sw_prod = sq->swq_start;
                sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
                                         sw_prod * qp->sq_hdr_buf_size);
                sge->lkey = 0xFFFFFFFF;
                sge->size = qp->sq_hdr_buf_size;
                return qp->sq_hdr_buf + sw_prod * sge->size;
        }
        return NULL;
}

u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *rq = &qp->rq;

        return rq->swq_start;
}

dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
{
        return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
}

void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
                                struct bnxt_qplib_sge *sge)
{
        struct bnxt_qplib_q *rq = &qp->rq;
        u32 sw_prod;

        memset(sge, 0, sizeof(*sge));

        if (qp->rq_hdr_buf) {
                sw_prod = rq->swq_start;
                sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
                                         sw_prod * qp->rq_hdr_buf_size);
                sge->lkey = 0xFFFFFFFF;
                sge->size = qp->rq_hdr_buf_size;
                return qp->rq_hdr_buf + sw_prod * sge->size;
        }
        return NULL;
}

/* Fill the MSN table into the next psn row */
static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
                                       struct bnxt_qplib_swqe *wqe,
                                       struct bnxt_qplib_swq *swq)
{
        struct sq_msn_search *msns;
        u32 start_psn, next_psn;
        u16 start_idx;

        msns = (struct sq_msn_search *)swq->psn_search;
        msns->start_idx_next_psn_start_psn = 0;

        start_psn = swq->start_psn;
        next_psn = swq->next_psn;
        start_idx = swq->slot_idx;
        msns->start_idx_next_psn_start_psn |=
                bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
        qp->msn++;
        qp->msn %= qp->msn_tbl_sz;
}

static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
                                       struct bnxt_qplib_swqe *wqe,
                                       struct bnxt_qplib_swq *swq)
{
        struct sq_psn_search_ext *psns_ext;
        struct sq_psn_search *psns;
        u32 flg_npsn;
        u32 op_spsn;

        if (!swq->psn_search)
                return;
        /* Handle MSN differently on cap flags */
        if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
                bnxt_qplib_fill_msn_search(qp, wqe, swq);
                return;
        }
        psns = swq->psn_search;
        psns_ext = swq->psn_ext;

        op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
                    SQ_PSN_SEARCH_START_PSN_MASK);
        op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
                     SQ_PSN_SEARCH_OPCODE_MASK);
        flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
                     SQ_PSN_SEARCH_NEXT_PSN_MASK);

        if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
                psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
                psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
                psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
        } else {
                psns->opcode_start_psn = cpu_to_le32(op_spsn);
                psns->flags_next_psn = cpu_to_le32(flg_npsn);
        }
}

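/* Copy inline data from the SGL straight into the SQ, packing it into
 * consecutive 16-byte slots (struct sq_sge sized units).  Returns the
 * total number of bytes copied, or -ENOMEM if the payload would exceed
 * the QP's max_inline_data.
 */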
static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
                                 struct bnxt_qplib_swqe *wqe,
                                 u16 *idx)
{
        struct bnxt_qplib_hwq *hwq;
        int len, t_len, offt;
        bool pull_dst = true;
        void *il_dst = NULL;
        void *il_src = NULL;
        int t_cplen, cplen;
        int indx;

        hwq = &qp->sq.hwq;
        t_len = 0;
        for (indx = 0; indx < wqe->num_sge; indx++) {
                len = wqe->sg_list[indx].size;
                il_src = (void *)wqe->sg_list[indx].addr;
                t_len += len;
                if (t_len > qp->max_inline_data)
                        return -ENOMEM;
                while (len) {
                        if (pull_dst) {
                                pull_dst = false;
                                il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
                                (*idx)++;
                                t_cplen = 0;
                                offt = 0;
                        }
                        cplen = min_t(int, len, sizeof(struct sq_sge));
                        cplen = min_t(int, cplen,
                                      (sizeof(struct sq_sge) - offt));
                        memcpy(il_dst, il_src, cplen);
                        t_cplen += cplen;
                        il_src += cplen;
                        il_dst += cplen;
                        offt += cplen;
                        len -= cplen;
                        if (t_cplen == sizeof(struct sq_sge))
                                pull_dst = true;
                }
        }

        return t_len;
}

static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
                               struct bnxt_qplib_sge *ssge,
                               u16 nsge, u16 *idx)
{
        struct sq_sge *dsge;
        int indx, len = 0;

        for (indx = 0; indx < nsge; indx++, (*idx)++) {
                dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
                dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
                dsge->l_key = cpu_to_le32(ssge[indx].lkey);
                dsge->size = cpu_to_le32(ssge[indx].size);
                len += ssge[indx].size;
        }

        return len;
}

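/* Work out how many 16-byte slots a WQE needs: the send header plus
 * either the SGL or the inline payload aligned up to an SGE.  In static
 * WQE mode every WQE is charged a fixed 8 slots (128 bytes) regardless
 * of its actual size.
 */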
static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
                                     struct bnxt_qplib_swqe *wqe,
                                     u16 *wqe_sz, u16 *qdf, u8 mode)
{
        u32 ilsize, bytes;
        u16 nsge;
        u16 slot;

        nsge = wqe->num_sge;
        /* Though named sq_send_hdr, the header size is the same for the RQ. */
        bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
        if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
                ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
                bytes = ALIGN(ilsize, sizeof(struct sq_sge));
                bytes += sizeof(struct sq_send_hdr);
        }

        *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
        slot = bytes >> 4;
        *wqe_sz = slot;
        if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
                slot = 8;
        return slot;
}

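/* Point swq->psn_search/psn_ext at this WQE's entry in the PSN/MSN pad
 * area.  With HW retransmission the MSN table is indexed by qp->msn
 * (modulo the table size) rather than by the WQE's slot position.
 */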
static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
                                     struct bnxt_qplib_swq *swq, bool hw_retx)
{
        struct bnxt_qplib_hwq *hwq;
        u32 pg_num, pg_indx;
        void *buff;
        u32 tail;

        hwq = &sq->hwq;
        if (!hwq->pad_pg)
                return;
        tail = swq->slot_idx / sq->dbinfo.max_slot;
        if (hw_retx) {
                /* For HW retx use qp msn index */
                tail = qp->msn;
                tail %= qp->msn_tbl_sz;
        }
        pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
        pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
        buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
        swq->psn_ext = buff;
        swq->psn_search = buff;
}

void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
{
        struct bnxt_qplib_q *sq = &qp->sq;

        bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
}

int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
                         struct bnxt_qplib_swqe *wqe)
{
        struct bnxt_qplib_nq_work *nq_work = NULL;
        int i, rc = 0, data_len = 0, pkt_num = 0;
        struct bnxt_qplib_q *sq = &qp->sq;
        struct bnxt_qplib_hwq *hwq;
        struct bnxt_qplib_swq *swq;
        bool sch_handler = false;
        u16 wqe_sz, qdf = 0;
        bool msn_update;
        void *base_hdr;
        void *ext_hdr;
        __le32 temp32;
        u32 wqe_idx;
        u32 slots;
        u16 idx;

        hwq = &sq->hwq;
        if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
            qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
                dev_err(&hwq->pdev->dev,
                        "QPLIB: FP: QP (0x%x) is in the 0x%x state",
                        qp->id, qp->state);
                rc = -EINVAL;
                goto done;
        }

        slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
        if (bnxt_qplib_queue_full(sq, slots + qdf)) {
                dev_err(&hwq->pdev->dev,
                        "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
                        hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
                rc = -ENOMEM;
                goto done;
        }

        swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
        bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));

        idx = 0;
        swq->slot_idx = hwq->prod;
        swq->slots = slots;
        swq->wr_id = wqe->wr_id;
        swq->type = wqe->type;
        swq->flags = wqe->flags;
        swq->start_psn = sq->psn & BTH_PSN_MASK;
        if (qp->sig_type)
                swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;

        if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
                sch_handler = true;
                dev_dbg(&hwq->pdev->dev,
                        "%s Error QP. Scheduling for poll_cq\n", __func__);
                goto queue_err;
        }

        base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
        ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
        memset(base_hdr, 0, sizeof(struct sq_sge));
        memset(ext_hdr, 0, sizeof(struct sq_sge));

        if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
                /* Copy the inline data */
                data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
        else
                data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
                                               &idx);
        if (data_len < 0)
                goto queue_err;
        /* Make sure we update MSN table only for wired wqes */
        msn_update = true;
        /* Specifics */
        switch (wqe->type) {
        case BNXT_QPLIB_SWQE_TYPE_SEND:
                if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
                        struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
                        struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
                        /* Assemble info for Raw Ethertype QPs */

                        sqe->wqe_type = wqe->type;
                        sqe->flags = wqe->flags;
                        sqe->wqe_size = wqe_sz;
                        sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
                        sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
                        sqe->length = cpu_to_le32(data_len);
                        ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
                           SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
                           SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);

                        break;
                }
                fallthrough;
        case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
        case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
        {
                struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
                struct sq_send_hdr *sqe = base_hdr;

                sqe->wqe_type = wqe->type;
                sqe->flags = wqe->flags;
                sqe->wqe_size = wqe_sz;
                sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
                if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
                    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
                        sqe->q_key = cpu_to_le32(wqe->send.q_key);
                        sqe->length = cpu_to_le32(data_len);
                        sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
                        ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
                                                      SQ_SEND_DST_QP_MASK);
                        ext_sqe->avid = cpu_to_le32(wqe->send.avid &
                                                    SQ_SEND_AVID_MASK);
                        msn_update = false;
                } else {
                        sqe->length = cpu_to_le32(data_len);
                        if (qp->mtu)
                                pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
                        if (!pkt_num)
                                pkt_num = 1;
                        sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
                }
                break;
        }
        case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
        case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
        case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
        {
                struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
                struct sq_rdma_hdr *sqe = base_hdr;

                sqe->wqe_type = wqe->type;
                sqe->flags = wqe->flags;
                sqe->wqe_size = wqe_sz;
                sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
                sqe->length = cpu_to_le32((u32)data_len);
                ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
                ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
                if (qp->mtu)
                        pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
                if (!pkt_num)
                        pkt_num = 1;
                sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
                break;
        }
        case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
        case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
        {
                struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
                struct sq_atomic_hdr *sqe = base_hdr;

                sqe->wqe_type = wqe->type;
                sqe->flags = wqe->flags;
                sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
                sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
                ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
                ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
                if (qp->mtu)
                        pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
                if (!pkt_num)
                        pkt_num = 1;
                sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
                break;
        }
        case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
        {
                struct sq_localinvalidate *sqe = base_hdr;

                sqe->wqe_type = wqe->type;
                sqe->flags = wqe->flags;
                sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
                msn_update = false;
                break;
        }
        case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
        {
                struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
                struct sq_fr_pmr_hdr *sqe = base_hdr;

                sqe->wqe_type = wqe->type;
                sqe->flags = wqe->flags;
                sqe->access_cntl = wqe->frmr.access_cntl |
                                   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
                sqe->zero_based_page_size_log =
                        (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
                        SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
                        (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
                sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
                temp32 = cpu_to_le32(wqe->frmr.length);
                memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
                sqe->numlevels_pbl_page_size_log =
                        ((wqe->frmr.pbl_pg_sz_log <<
                          SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
                         SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
                        ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
                         SQ_FR_PMR_NUMLEVELS_MASK);

                for (i = 0; i < wqe->frmr.page_list_len; i++)
                        wqe->frmr.pbl_ptr[i] = cpu_to_le64(
                                                wqe->frmr.page_list[i] |
                                                PTU_PTE_VALID);
                ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1983 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1984 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1985 msn_update = false;
1986
1987 break;
1988 }
1989 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1990 {
1991 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1992 struct sq_bind_hdr *sqe = base_hdr;
1993
1994 sqe->wqe_type = wqe->type;
1995 sqe->flags = wqe->flags;
1996 sqe->access_cntl = wqe->bind.access_cntl;
1997 sqe->mw_type_zero_based = wqe->bind.mw_type |
1998 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1999 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2000 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2001 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2002 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2003 msn_update = false;
2004 break;
2005 }
2006 default:
2007 /* Bad wqe, return error */
2008 rc = -EINVAL;
2009 goto done;
2010 }
2011 if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
2012 swq->next_psn = sq->psn & BTH_PSN_MASK;
2013 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2014 }
2015queue_err:
2016 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2017 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2018 qp->wqe_cnt++;
2019done:
2020 if (sch_handler) {
2021 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2022 if (nq_work) {
2023 nq_work->cq = qp->scq;
2024 nq_work->nq = qp->scq->nq;
2025 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2026 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2027 } else {
2028 dev_err(&hwq->pdev->dev,
2029 "FP: Failed to allocate SQ nq_work!\n");
2030 rc = -ENOMEM;
2031 }
2032 }
2033 return rc;
2034}
2035
2036void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2037{
2038 struct bnxt_qplib_q *rq = &qp->rq;
2039
2040 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2041}
2042
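/*
 * bnxt_qplib_post_recv() - post one SWQE on the receive queue.
 *
 * RQ WQEs have a fixed slot footprint (rq->dbinfo.max_slot), so the
 * queue-full check is simpler than on the SQ. A request with no SGEs is
 * padded with a single zero-length SGE so the hardware always sees at
 * least one data segment. As on the SQ, posting on an ERR-state QP skips
 * the hardware queue and schedules the NQ worker so the WQE is completed
 * through the flush path instead.
 */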
int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
			 struct bnxt_qplib_swqe *wqe)
{
	struct bnxt_qplib_nq_work *nq_work = NULL;
	struct bnxt_qplib_q *rq = &qp->rq;
	struct rq_wqe_hdr *base_hdr;
	struct rq_ext_hdr *ext_hdr;
	struct bnxt_qplib_hwq *hwq;
	struct bnxt_qplib_swq *swq;
	bool sch_handler = false;
	u16 wqe_sz, idx;
	u32 wqe_idx;
	int rc = 0;

	hwq = &rq->hwq;
	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) is in the 0x%x state\n",
			qp->id, qp->state);
		rc = -EINVAL;
		goto done;
	}

	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
		dev_err(&hwq->pdev->dev,
			"FP: QP (0x%x) RQ is full!\n", qp->id);
		rc = -EINVAL;
		goto done;
	}

	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
	swq->wr_id = wqe->wr_id;
	swq->slots = rq->dbinfo.max_slot;

	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
		sch_handler = true;
		dev_dbg(&hwq->pdev->dev,
			"%s: Error QP. Scheduling for poll_cq\n", __func__);
		goto queue_err;
	}

	idx = 0;
	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
	memset(base_hdr, 0, sizeof(struct sq_sge));
	memset(ext_hdr, 0, sizeof(struct sq_sge));
	wqe_sz = (sizeof(struct rq_wqe_hdr) +
		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
	if (!wqe->num_sge) {
		struct sq_sge *sge;

		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
		sge->size = 0;
		wqe_sz++;
	}
	base_hdr->wqe_type = wqe->type;
	base_hdr->flags = wqe->flags;
	base_hdr->wqe_size = wqe_sz;
	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
queue_err:
	bnxt_qplib_swq_mod_start(rq, wqe_idx);
	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
done:
	if (sch_handler) {
		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
		if (nq_work) {
			nq_work->cq = qp->rcq;
			nq_work->nq = qp->rcq->nq;
			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
		} else {
			dev_err(&hwq->pdev->dev,
				"FP: Failed to allocate RQ nq_work!\n");
			rc = -ENOMEM;
		}
	}

	return rc;
}

/* CQ */
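
/*
 * CQ creation allocates the hardware queue first, then issues CREATE_CQ
 * over the RCFW channel carrying the queue's base PBL address, page size
 * and indirection level. The response returns the CQ id used for
 * doorbells; ARMENA is written once here so the CQ can subsequently be
 * armed.
 */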
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct creq_create_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz_lvl;
	int rc;

	if (!cq->dpi) {
		dev_err(&rcfw->pdev->dev,
			"FP: CREATE_CQ failed due to NULL DPI\n");
		return -EINVAL;
	}

	cq->dbinfo.flags = 0;
	hwq_attr.res = res;
	hwq_attr.depth = cq->max_wqe;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	hwq_attr.sginfo = &cq->sg_info;
	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
	if (rc)
		return rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_CQ,
				 sizeof(req));

	req.dpi = cpu_to_le32(cq->dpi->dpi);
	req.cq_handle = cpu_to_le64(cq->cq_handle);
	req.cq_size = cpu_to_le32(cq->max_wqe);
	pbl = &cq->hwq.pbl[PBL_LVL_0];
	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
	req.cq_fco_cnq_id = cpu_to_le32(
			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	cq->id = le32_to_cpu(resp.xid);
	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
	init_waitqueue_head(&cq->waitq);
	INIT_LIST_HEAD(&cq->sqf_head);
	INIT_LIST_HEAD(&cq->rqf_head);
	spin_lock_init(&cq->compl_lock);
	spin_lock_init(&cq->flush_lock);

	cq->dbinfo.hwq = &cq->hwq;
	cq->dbinfo.xid = cq->id;
	cq->dbinfo.db = cq->dpi->dbr;
	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
	cq->dbinfo.flags = 0;
	cq->dbinfo.toggle = 0;

	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);

	return 0;

fail:
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return rc;
}

void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_cq *cq)
{
	bnxt_qplib_free_hwq(res, &cq->hwq);
	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
	/* Reset only the cons bit in the flags */
	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
}

int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
			 int new_cqes)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_resize_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_resize_cq req = {};
	struct bnxt_qplib_pbl *pbl;
	u32 pg_sz, lvl, new_sz;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_RESIZE_CQ,
				 sizeof(req));
	hwq_attr.sginfo = &cq->sg_info;
	hwq_attr.res = res;
	hwq_attr.depth = new_cqes;
	hwq_attr.stride = sizeof(struct cq_base);
	hwq_attr.type = HWQ_TYPE_QUEUE;
	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
	if (rc)
		return rc;

	req.cq_cid = cpu_to_le32(cq->id);
	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
	       CMDQ_RESIZE_CQ_LVL_MASK;
	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}

int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_cq_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_cq req = {};
	u16 total_cnq_events;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_CQ,
				 sizeof(req));

	req.cq_cid = cpu_to_le32(cq->id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
	__wait_for_all_nqes(cq, total_cnq_events);
	bnxt_qplib_free_hwq(res, &cq->hwq);
	return 0;
}

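/*
 * Flush helpers: once a QP has moved to the error state, every
 * outstanding SWQE between swq_start and swq_last must still be
 * completed to the consumer. __flush_sq() and __flush_rq() fabricate
 * CQEs with FLUSHED_ERR status in software, consuming the caller's CQE
 * budget; -EAGAIN tells the caller to retry with more budget.
 */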
static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int rc = 0;

	/* Now complete all outstanding SQEs with FLUSHED_ERR */
	start = sq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = sq->swq_last;
		if (start == last)
			break;
		/* Skip the FENCE WQE completions */
		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
			bnxt_qplib_cancel_phantom_processing(qp);
			goto skip_compl;
		}
		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->wr_id = sq->swq[last].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[last].type;
		cqe++;
		(*budget)--;
skip_compl:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && sq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
		      struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_cqe *cqe;
	u32 start, last;
	int opcode = 0;
	int rc = 0;

	switch (qp->type) {
	case CMDQ_CREATE_QP1_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
		break;
	case CMDQ_CREATE_QP_TYPE_RC:
		opcode = CQ_BASE_CQE_TYPE_RES_RC;
		break;
	case CMDQ_CREATE_QP_TYPE_UD:
	case CMDQ_CREATE_QP_TYPE_GSI:
		opcode = CQ_BASE_CQE_TYPE_RES_UD;
		break;
	}

	/* Flush the rest of the RQ */
	start = rq->swq_start;
	cqe = *pcqe;
	while (*budget) {
		last = rq->swq_last;
		if (last == start)
			break;
		memset(cqe, 0, sizeof(*cqe));
		cqe->status =
		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = opcode;
		cqe->qp_handle = (unsigned long)qp;
		cqe->wr_id = rq->swq[last].wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 rq->swq[last].slots, &rq->dbinfo.flags);
		rq->swq_last = rq->swq[last].next_idx;
	}
	*pcqe = cqe;
	if (!*budget && rq->swq_last != start)
		/* Out of budget */
		rc = -EAGAIN;

	return rc;
}

void bnxt_qplib_mark_qp_error(void *qp_handle)
{
	struct bnxt_qplib_qp *qp = qp_handle;

	if (!qp)
		return;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
	bnxt_qplib_cancel_phantom_processing(qp);
}

/* Note: an SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 * A CQE is tracked from sw_cq_cons to max_element, but is valid only
 * if VALID=1.
 */
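
/*
 * do_wa9060() - workaround for a hardware completion erratum.
 *
 * If the SWQE's psn_search entry is marked (bit 31 of flags_next_psn),
 * completion is deferred: the mark is cleared, sq->condition and
 * sq->send_phantom are set, and the CQ is re-armed so polling resumes
 * when more CQEs arrive. While the condition holds, subsequent valid
 * CQEs are peeked for a REQ completion whose SWQE carries
 * BNXT_QPLIB_FENCE_WRID - the "phantom" - before completion is allowed
 * to continue in single-step mode (sq->single).
 */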
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
				     & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done first
				 * before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				    CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe = (struct cq_req *)
							 peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)
						 le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu(
						  peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	u32 cqe_sq_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}
	/* We must walk the sq's swq and fabricate CQEs for all previously
	 * signaled SWQEs (due to CQE aggregation) from the current sq cons
	 * up to cqe_sq_cons.
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
				      cqe_sq_cons)) {
				*lib_qp = qp;
				goto out;
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}

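/*
 * RC receive completions carry either an SRQ tag or an RQ wr_id index
 * in srq_or_rq_wr_id. SRQ tags index srq->swq directly and the entry is
 * recycled via bnxt_qplib_release_srqe(); RQ completions must match
 * rq->swq_last because the RQ completes strictly in order. Any error
 * status moves the QP to ERR and queues it on the CQ flush list.
 */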
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(
			hwcqe->src_qp_high_srq_or_rq_wr_id) &
			CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Add workaround for the length misdetection */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, 6);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}
static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* A terminal CQE can also aggregate prior successful CQEs, so all
	 * CQEs from the current sq cons up to the reported cqe_cons must be
	 * completed with status OK.
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* A terminal CQE requires all posted RQEs to be completed with
	 * FLUSHED_ERR, from the current rq->cons to the rq->prod, regardless
	 * of the rq_cons_idx the terminal CQE indicates.
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

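/*
 * Drain this CQ's flush lists under flush_lock, fabricating FLUSHED_ERR
 * completions for every QP queued on sqf_head/rqf_head. Returns how
 * many of the caller's num_cqes entries were consumed.
 */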
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

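/*
 * bnxt_qplib_poll_cq() - harvest up to num_cqes completions.
 *
 * Classic valid-bit polling: a CQE belongs to software only when its
 * toggle matches the current epoch in cq->dbinfo.flags, and dma_rmb()
 * orders that validity check before any other field is read. Each CQE
 * type is dispatched to its handler; -EAGAIN ends the loop when the
 * budget runs out mid-aggregation, and a CUT_OFF CQE ends processing
 * for a CQ that is being resized.
 */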
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;

	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* From the device's respective CQE format to qplib_wc */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)
							  hw_cqe, &cqe,
							  &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud
					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
					 &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1
					(cq, (struct cq_res_raweth_qp1 *)
					 hw_cqe, &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal
					(cq, (struct cq_terminal *)hw_cqe,
					 &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff
					(cq, (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%x\n",
				hw_cqe->cqe_type_toggle &
				CQ_BASE_CQE_TYPE_MASK);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Use cq->arm_state to track whether the CQ notification handler
	 * should be invoked.
	 */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}
1/*
2 * Broadcom NetXtreme-E RoCE driver.
3 *
4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 *
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the
22 * distribution.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Description: Fast Path Operators
37 */
38
39#define dev_fmt(fmt) "QPLIB: " fmt
40
41#include <linux/interrupt.h>
42#include <linux/spinlock.h>
43#include <linux/sched.h>
44#include <linux/slab.h>
45#include <linux/pci.h>
46#include <linux/delay.h>
47#include <linux/prefetch.h>
48#include <linux/if_ether.h>
49#include <rdma/ib_mad.h>
50
51#include "roce_hsi.h"
52
53#include "qplib_res.h"
54#include "qplib_rcfw.h"
55#include "qplib_sp.h"
56#include "qplib_fp.h"
57#include <rdma/ib_addr.h>
58#include "bnxt_ulp.h"
59#include "bnxt_re.h"
60#include "ib_verbs.h"
61
62static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
63
64static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
65{
66 qp->sq.condition = false;
67 qp->sq.send_phantom = false;
68 qp->sq.single = false;
69}
70
71/* Flush list */
72static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
73{
74 struct bnxt_qplib_cq *scq, *rcq;
75
76 scq = qp->scq;
77 rcq = qp->rcq;
78
79 if (!qp->sq.flushed) {
80 dev_dbg(&scq->hwq.pdev->dev,
81 "FP: Adding to SQ Flush list = %p\n", qp);
82 bnxt_qplib_cancel_phantom_processing(qp);
83 list_add_tail(&qp->sq_flush, &scq->sqf_head);
84 qp->sq.flushed = true;
85 }
86 if (!qp->srq) {
87 if (!qp->rq.flushed) {
88 dev_dbg(&rcq->hwq.pdev->dev,
89 "FP: Adding to RQ Flush list = %p\n", qp);
90 list_add_tail(&qp->rq_flush, &rcq->rqf_head);
91 qp->rq.flushed = true;
92 }
93 }
94}
95
96static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
97 unsigned long *flags)
98 __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
99{
100 spin_lock_irqsave(&qp->scq->flush_lock, *flags);
101 if (qp->scq == qp->rcq)
102 __acquire(&qp->rcq->flush_lock);
103 else
104 spin_lock(&qp->rcq->flush_lock);
105}
106
107static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
108 unsigned long *flags)
109 __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
110{
111 if (qp->scq == qp->rcq)
112 __release(&qp->rcq->flush_lock);
113 else
114 spin_unlock(&qp->rcq->flush_lock);
115 spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
116}
117
118void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
119{
120 unsigned long flags;
121
122 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
123 __bnxt_qplib_add_flush_qp(qp);
124 bnxt_qplib_release_cq_flush_locks(qp, &flags);
125}
126
127static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
128{
129 if (qp->sq.flushed) {
130 qp->sq.flushed = false;
131 list_del(&qp->sq_flush);
132 }
133 if (!qp->srq) {
134 if (qp->rq.flushed) {
135 qp->rq.flushed = false;
136 list_del(&qp->rq_flush);
137 }
138 }
139}
140
141void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
142{
143 unsigned long flags;
144
145 bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
146 __clean_cq(qp->scq, (u64)(unsigned long)qp);
147 qp->sq.hwq.prod = 0;
148 qp->sq.hwq.cons = 0;
149 __clean_cq(qp->rcq, (u64)(unsigned long)qp);
150 qp->rq.hwq.prod = 0;
151 qp->rq.hwq.cons = 0;
152
153 __bnxt_qplib_del_flush_qp(qp);
154 bnxt_qplib_release_cq_flush_locks(qp, &flags);
155}
156
157static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
158{
159 struct bnxt_qplib_nq_work *nq_work =
160 container_of(work, struct bnxt_qplib_nq_work, work);
161
162 struct bnxt_qplib_cq *cq = nq_work->cq;
163 struct bnxt_qplib_nq *nq = nq_work->nq;
164
165 if (cq && nq) {
166 spin_lock_bh(&cq->compl_lock);
167 if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
168 dev_dbg(&nq->pdev->dev,
169 "%s:Trigger cq = %p event nq = %p\n",
170 __func__, cq, nq);
171 nq->cqn_handler(nq, cq);
172 }
173 spin_unlock_bh(&cq->compl_lock);
174 }
175 kfree(nq_work);
176}
177
178static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
179 struct bnxt_qplib_qp *qp)
180{
181 struct bnxt_qplib_q *rq = &qp->rq;
182 struct bnxt_qplib_q *sq = &qp->sq;
183
184 if (qp->rq_hdr_buf)
185 dma_free_coherent(&res->pdev->dev,
186 rq->max_wqe * qp->rq_hdr_buf_size,
187 qp->rq_hdr_buf, qp->rq_hdr_buf_map);
188 if (qp->sq_hdr_buf)
189 dma_free_coherent(&res->pdev->dev,
190 sq->max_wqe * qp->sq_hdr_buf_size,
191 qp->sq_hdr_buf, qp->sq_hdr_buf_map);
192 qp->rq_hdr_buf = NULL;
193 qp->sq_hdr_buf = NULL;
194 qp->rq_hdr_buf_map = 0;
195 qp->sq_hdr_buf_map = 0;
196 qp->sq_hdr_buf_size = 0;
197 qp->rq_hdr_buf_size = 0;
198}
199
200static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
201 struct bnxt_qplib_qp *qp)
202{
203 struct bnxt_qplib_q *rq = &qp->rq;
204 struct bnxt_qplib_q *sq = &qp->sq;
205 int rc = 0;
206
207 if (qp->sq_hdr_buf_size && sq->max_wqe) {
208 qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
209 sq->max_wqe * qp->sq_hdr_buf_size,
210 &qp->sq_hdr_buf_map, GFP_KERNEL);
211 if (!qp->sq_hdr_buf) {
212 rc = -ENOMEM;
213 dev_err(&res->pdev->dev,
214 "Failed to create sq_hdr_buf\n");
215 goto fail;
216 }
217 }
218
219 if (qp->rq_hdr_buf_size && rq->max_wqe) {
220 qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
221 rq->max_wqe *
222 qp->rq_hdr_buf_size,
223 &qp->rq_hdr_buf_map,
224 GFP_KERNEL);
225 if (!qp->rq_hdr_buf) {
226 rc = -ENOMEM;
227 dev_err(&res->pdev->dev,
228 "Failed to create rq_hdr_buf\n");
229 goto fail;
230 }
231 }
232 return 0;
233
234fail:
235 bnxt_qplib_free_qp_hdr_buf(res, qp);
236 return rc;
237}
238
239static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
240{
241 struct bnxt_qplib_hwq *hwq = &nq->hwq;
242 struct nq_base *nqe, **nq_ptr;
243 int budget = nq->budget;
244 uintptr_t q_handle;
245 u16 type;
246
247 spin_lock_bh(&hwq->lock);
248 /* Service the NQ until empty */
249 while (budget--) {
250 nq_ptr = (struct nq_base **)hwq->pbl_ptr;
251 nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
252 if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
253 break;
254
255 /*
256 * The valid test of the entry must be done first before
257 * reading any further.
258 */
259 dma_rmb();
260
261 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
262 switch (type) {
263 case NQ_BASE_TYPE_CQ_NOTIFICATION:
264 {
265 struct nq_cn *nqcne = (struct nq_cn *)nqe;
266
267 q_handle = le32_to_cpu(nqcne->cq_handle_low);
268 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
269 << 32;
270 if ((unsigned long)cq == q_handle) {
271 nqcne->cq_handle_low = 0;
272 nqcne->cq_handle_high = 0;
273 cq->cnq_events++;
274 }
275 break;
276 }
277 default:
278 break;
279 }
280 bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
281 1, &nq->nq_db.dbinfo.flags);
282 }
283 spin_unlock_bh(&hwq->lock);
284}
285
286/* Wait for receiving all NQEs for this CQ and clean the NQEs associated with
287 * this CQ.
288 */
289static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
290{
291 u32 retry_cnt = 100;
292
293 while (retry_cnt--) {
294 if (cnq_events == cq->cnq_events)
295 return;
296 usleep_range(50, 100);
297 clean_nq(cq->nq, cq);
298 }
299}
300
301static void bnxt_qplib_service_nq(struct tasklet_struct *t)
302{
303 struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
304 struct bnxt_qplib_hwq *hwq = &nq->hwq;
305 struct bnxt_qplib_cq *cq;
306 int budget = nq->budget;
307 struct nq_base *nqe;
308 uintptr_t q_handle;
309 u32 hw_polled = 0;
310 u16 type;
311
312 spin_lock_bh(&hwq->lock);
313 /* Service the NQ until empty */
314 while (budget--) {
315 nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
316 if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
317 break;
318
319 /*
320 * The valid test of the entry must be done first before
321 * reading any further.
322 */
323 dma_rmb();
324
325 type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
326 switch (type) {
327 case NQ_BASE_TYPE_CQ_NOTIFICATION:
328 {
329 struct nq_cn *nqcne = (struct nq_cn *)nqe;
330 struct bnxt_re_cq *cq_p;
331
332 q_handle = le32_to_cpu(nqcne->cq_handle_low);
333 q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
334 << 32;
335 cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
336 if (!cq)
337 break;
338 cq->toggle = (le16_to_cpu(nqe->info10_type) &
339 NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
340 cq->dbinfo.toggle = cq->toggle;
341 cq_p = container_of(cq, struct bnxt_re_cq, qplib_cq);
342 if (cq_p->uctx_cq_page)
343 *((u32 *)cq_p->uctx_cq_page) = cq->toggle;
344
345 bnxt_qplib_armen_db(&cq->dbinfo,
346 DBC_DBC_TYPE_CQ_ARMENA);
347 spin_lock_bh(&cq->compl_lock);
348 atomic_set(&cq->arm_state, 0);
349 if (nq->cqn_handler(nq, (cq)))
350 dev_warn(&nq->pdev->dev,
351 "cqn - type 0x%x not handled\n", type);
352 cq->cnq_events++;
353 spin_unlock_bh(&cq->compl_lock);
354 break;
355 }
356 case NQ_BASE_TYPE_SRQ_EVENT:
357 {
358 struct bnxt_qplib_srq *srq;
359 struct bnxt_re_srq *srq_p;
360 struct nq_srq_event *nqsrqe =
361 (struct nq_srq_event *)nqe;
362
363 q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
364 q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
365 << 32;
366 srq = (struct bnxt_qplib_srq *)q_handle;
367 srq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK)
368 >> NQ_CN_TOGGLE_SFT;
369 srq->dbinfo.toggle = srq->toggle;
370 srq_p = container_of(srq, struct bnxt_re_srq, qplib_srq);
371 if (srq_p->uctx_srq_page)
372 *((u32 *)srq_p->uctx_srq_page) = srq->toggle;
373 bnxt_qplib_armen_db(&srq->dbinfo,
374 DBC_DBC_TYPE_SRQ_ARMENA);
375 if (nq->srqn_handler(nq,
376 (struct bnxt_qplib_srq *)q_handle,
377 nqsrqe->event))
378 dev_warn(&nq->pdev->dev,
379 "SRQ event 0x%x not handled\n",
380 nqsrqe->event);
381 break;
382 }
383 case NQ_BASE_TYPE_DBQ_EVENT:
384 break;
385 default:
386 dev_warn(&nq->pdev->dev,
387 "nqe with type = 0x%x not handled\n", type);
388 break;
389 }
390 hw_polled++;
391 bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
392 1, &nq->nq_db.dbinfo.flags);
393 }
394 if (hw_polled)
395 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
396 spin_unlock_bh(&hwq->lock);
397}
398
399/* bnxt_re_synchronize_nq - self polling notification queue.
400 * @nq - notification queue pointer
401 *
402 * This function will start polling entries of a given notification queue
403 * for all pending entries.
404 * This function is useful to synchronize notification entries while resources
405 * are going away.
406 */
407
408void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
409{
410 int budget = nq->budget;
411
412 nq->budget = nq->hwq.max_elements;
413 bnxt_qplib_service_nq(&nq->nq_tasklet);
414 nq->budget = budget;
415}
416
417static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
418{
419 struct bnxt_qplib_nq *nq = dev_instance;
420 struct bnxt_qplib_hwq *hwq = &nq->hwq;
421 u32 sw_cons;
422
423 /* Prefetch the NQ element */
424 sw_cons = HWQ_CMP(hwq->cons, hwq);
425 prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
426
427 /* Fan out to CPU affinitized kthreads? */
428 tasklet_schedule(&nq->nq_tasklet);
429
430 return IRQ_HANDLED;
431}
432
433void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
434{
435 if (!nq->requested)
436 return;
437
438 nq->requested = false;
439 /* Mask h/w interrupt */
440 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
441 /* Sync with last running IRQ handler */
442 synchronize_irq(nq->msix_vec);
443 irq_set_affinity_hint(nq->msix_vec, NULL);
444 free_irq(nq->msix_vec, nq);
445 kfree(nq->name);
446 nq->name = NULL;
447
448 if (kill)
449 tasklet_kill(&nq->nq_tasklet);
450 tasklet_disable(&nq->nq_tasklet);
451}
452
453void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
454{
455 if (nq->cqn_wq) {
456 destroy_workqueue(nq->cqn_wq);
457 nq->cqn_wq = NULL;
458 }
459
460 /* Make sure the HW is stopped! */
461 bnxt_qplib_nq_stop_irq(nq, true);
462
463 if (nq->nq_db.reg.bar_reg) {
464 iounmap(nq->nq_db.reg.bar_reg);
465 nq->nq_db.reg.bar_reg = NULL;
466 }
467
468 nq->cqn_handler = NULL;
469 nq->srqn_handler = NULL;
470 nq->msix_vec = 0;
471}
472
473int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
474 int msix_vector, bool need_init)
475{
476 struct bnxt_qplib_res *res = nq->res;
477 int rc;
478
479 if (nq->requested)
480 return -EFAULT;
481
482 nq->msix_vec = msix_vector;
483 if (need_init)
484 tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
485 else
486 tasklet_enable(&nq->nq_tasklet);
487
488 nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
489 nq_indx, pci_name(res->pdev));
490 if (!nq->name)
491 return -ENOMEM;
492 rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
493 if (rc) {
494 kfree(nq->name);
495 nq->name = NULL;
496 tasklet_disable(&nq->nq_tasklet);
497 return rc;
498 }
499
500 cpumask_clear(&nq->mask);
501 cpumask_set_cpu(nq_indx, &nq->mask);
502 rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
503 if (rc) {
504 dev_warn(&nq->pdev->dev,
505 "set affinity failed; vector: %d nq_idx: %d\n",
506 nq->msix_vec, nq_indx);
507 }
508 nq->requested = true;
509 bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
510
511 return rc;
512}
513
514static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
515{
516 resource_size_t reg_base;
517 struct bnxt_qplib_nq_db *nq_db;
518 struct pci_dev *pdev;
519
520 pdev = nq->pdev;
521 nq_db = &nq->nq_db;
522
523 nq_db->dbinfo.flags = 0;
524 nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
525 nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
526 if (!nq_db->reg.bar_base) {
527 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
528 nq_db->reg.bar_id);
529 return -ENOMEM;
530 }
531
532 reg_base = nq_db->reg.bar_base + reg_offt;
533 /* Unconditionally map 8 bytes to support 57500 series */
534 nq_db->reg.len = 8;
535 nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
536 if (!nq_db->reg.bar_reg) {
537 dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
538 nq_db->reg.bar_id);
539 return -ENOMEM;
540 }
541
542 nq_db->dbinfo.db = nq_db->reg.bar_reg;
543 nq_db->dbinfo.hwq = &nq->hwq;
544 nq_db->dbinfo.xid = nq->ring_id;
545
546 return 0;
547}
548
549int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
550 int nq_idx, int msix_vector, int bar_reg_offset,
551 cqn_handler_t cqn_handler,
552 srqn_handler_t srqn_handler)
553{
554 int rc;
555
556 nq->pdev = pdev;
557 nq->cqn_handler = cqn_handler;
558 nq->srqn_handler = srqn_handler;
559 nq->load = 0;
560
561 /* Have a task to schedule CQ notifiers in post send case */
562 nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
563 if (!nq->cqn_wq)
564 return -ENOMEM;
565
566 rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
567 if (rc)
568 goto fail;
569
570 rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
571 if (rc) {
572 dev_err(&nq->pdev->dev,
573 "Failed to request irq for nq-idx %d\n", nq_idx);
574 goto fail;
575 }
576
577 return 0;
578fail:
579 bnxt_qplib_disable_nq(nq);
580 return rc;
581}
582
583void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
584{
585 if (nq->hwq.max_elements) {
586 bnxt_qplib_free_hwq(nq->res, &nq->hwq);
587 nq->hwq.max_elements = 0;
588 }
589}
590
591int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
592{
593 struct bnxt_qplib_hwq_attr hwq_attr = {};
594 struct bnxt_qplib_sg_info sginfo = {};
595
596 nq->pdev = res->pdev;
597 nq->res = res;
598 if (!nq->hwq.max_elements ||
599 nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
600 nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
601
602 sginfo.pgsize = PAGE_SIZE;
603 sginfo.pgshft = PAGE_SHIFT;
604 hwq_attr.res = res;
605 hwq_attr.sginfo = &sginfo;
606 hwq_attr.depth = nq->hwq.max_elements;
607 hwq_attr.stride = sizeof(struct nq_base);
608 hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
609 if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
610 dev_err(&nq->pdev->dev, "FP NQ allocation failed");
611 return -ENOMEM;
612 }
613 nq->budget = 8;
614 return 0;
615}
616
617/* SRQ */
618void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
619 struct bnxt_qplib_srq *srq)
620{
621 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
622 struct creq_destroy_srq_resp resp = {};
623 struct bnxt_qplib_cmdqmsg msg = {};
624 struct cmdq_destroy_srq req = {};
625 int rc;
626
627 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
628 CMDQ_BASE_OPCODE_DESTROY_SRQ,
629 sizeof(req));
630
631 /* Configure the request */
632 req.srq_cid = cpu_to_le32(srq->id);
633
634 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
635 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
636 kfree(srq->swq);
637 if (rc)
638 return;
639 bnxt_qplib_free_hwq(res, &srq->hwq);
640}
641
642int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
643 struct bnxt_qplib_srq *srq)
644{
645 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
646 struct bnxt_qplib_hwq_attr hwq_attr = {};
647 struct creq_create_srq_resp resp = {};
648 struct bnxt_qplib_cmdqmsg msg = {};
649 struct cmdq_create_srq req = {};
650 struct bnxt_qplib_pbl *pbl;
651 u16 pg_sz_lvl;
652 int rc, idx;
653
654 hwq_attr.res = res;
655 hwq_attr.sginfo = &srq->sg_info;
656 hwq_attr.depth = srq->max_wqe;
657 hwq_attr.stride = srq->wqe_size;
658 hwq_attr.type = HWQ_TYPE_QUEUE;
659 rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
660 if (rc)
661 return rc;
662 srq->dbinfo.flags = 0;
663 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
664 CMDQ_BASE_OPCODE_CREATE_SRQ,
665 sizeof(req));
666
667 /* Configure the request */
668 req.dpi = cpu_to_le32(srq->dpi->dpi);
669 req.srq_handle = cpu_to_le64((uintptr_t)srq);
670
671 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
672 pbl = &srq->hwq.pbl[PBL_LVL_0];
673 pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
674 CMDQ_CREATE_SRQ_PG_SIZE_SFT);
675 pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
676 CMDQ_CREATE_SRQ_LVL_SFT;
677 req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
678 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
679 req.pd_id = cpu_to_le32(srq->pd->id);
680 req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
681
682 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
683 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
684 if (rc)
685 goto fail;
686
687 spin_lock_init(&srq->lock);
688 srq->start_idx = 0;
689 srq->last_idx = srq->hwq.max_elements - 1;
690 if (!srq->hwq.is_user) {
691 srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
692 GFP_KERNEL);
693 if (!srq->swq) {
694 rc = -ENOMEM;
695 goto fail;
696 }
697 for (idx = 0; idx < srq->hwq.max_elements; idx++)
698 srq->swq[idx].next_idx = idx + 1;
699 srq->swq[srq->last_idx].next_idx = -1;
700 }
701
702 srq->id = le32_to_cpu(resp.xid);
703 srq->dbinfo.hwq = &srq->hwq;
704 srq->dbinfo.xid = srq->id;
705 srq->dbinfo.db = srq->dpi->dbr;
706 srq->dbinfo.max_slot = 1;
707 srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
708 if (srq->threshold)
709 bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
710 srq->arm_req = false;
711
712 return 0;
713fail:
714 bnxt_qplib_free_hwq(res, &srq->hwq);
715 kfree(srq->swq);
716
717 return rc;
718}
719
720int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
721 struct bnxt_qplib_srq *srq)
722{
723 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
724 u32 count;
725
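	/* If the SRQ already has more free entries than the new threshold,
	 * arm it now; otherwise record the request and let
	 * bnxt_qplib_post_srq_recv() arm it once enough buffers are posted.
	 */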
726 count = __bnxt_qplib_get_avail(srq_hwq);
727 if (count > srq->threshold) {
728 srq->arm_req = false;
729 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
730 } else {
731 /* Deferred arming */
732 srq->arm_req = true;
733 }
734
735 return 0;
736}
737
738int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
739 struct bnxt_qplib_srq *srq)
740{
741 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
742 struct creq_query_srq_resp resp = {};
743 struct bnxt_qplib_cmdqmsg msg = {};
744 struct bnxt_qplib_rcfw_sbuf sbuf;
745 struct creq_query_srq_resp_sb *sb;
746 struct cmdq_query_srq req = {};
747 int rc;
748
749 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
750 CMDQ_BASE_OPCODE_QUERY_SRQ,
751 sizeof(req));
752
753 /* Configure the request */
754 sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
755 sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
756 &sbuf.dma_addr, GFP_KERNEL);
757 if (!sbuf.sb)
758 return -ENOMEM;
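	/* resp_size is expressed in CMDQE units */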
759 req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
760 req.srq_cid = cpu_to_le32(srq->id);
761 sb = sbuf.sb;
762 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
763 sizeof(resp), 0);
764 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
765 if (!rc)
766 srq->threshold = le16_to_cpu(sb->srq_limit);
767 dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
768 sbuf.sb, sbuf.dma_addr);
769
770 return rc;
771}
772
773int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
774 struct bnxt_qplib_swqe *wqe)
775{
776 struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
777 struct rq_wqe *srqe;
778 struct sq_sge *hw_sge;
779 u32 count = 0;
780 int i, next;
781
782 spin_lock(&srq_hwq->lock);
783 if (srq->start_idx == srq->last_idx) {
784 dev_err(&srq_hwq->pdev->dev,
785 "FP: SRQ (0x%x) is full!\n", srq->id);
786 spin_unlock(&srq_hwq->lock);
787 return -EINVAL;
788 }
789 next = srq->start_idx;
790 srq->start_idx = srq->swq[next].next_idx;
791 spin_unlock(&srq_hwq->lock);
792
793 srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
794 memset(srqe, 0, srq->wqe_size);
	/* Fill in the hardware SGEs */
796 for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
797 i < wqe->num_sge; i++, hw_sge++) {
798 hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
799 hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
800 hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
801 }
802 srqe->wqe_type = wqe->type;
803 srqe->flags = wqe->flags;
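	/* wqe_size is in 16B slots: the header rounded up to a whole slot
	 * plus one slot per SGE (sizeof(struct sq_sge) == 16).
	 */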
804 srqe->wqe_size = wqe->num_sge +
805 ((offsetof(typeof(*srqe), data) + 15) >> 4);
806 srqe->wr_id[0] = cpu_to_le32((u32)next);
807 srq->swq[next].wr_id = wqe->wr_id;
808
809 bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
810
811 spin_lock(&srq_hwq->lock);
812 count = __bnxt_qplib_get_avail(srq_hwq);
813 spin_unlock(&srq_hwq->lock);
814 /* Ring DB */
815 bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
	if (srq->arm_req && count > srq->threshold) {
817 srq->arm_req = false;
818 bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
819 }
820
821 return 0;
822}
823
824/* QP */
825
826static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
827{
828 int indx;
829
830 que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
831 if (!que->swq)
832 return -ENOMEM;
833
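	/* next_idx links the entries into a ring; swq_start is the next
	 * entry to hand out and swq_last the oldest outstanding one, so
	 * start == last means the queue is empty.
	 */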
834 que->swq_start = 0;
835 que->swq_last = que->max_sw_wqe - 1;
836 for (indx = 0; indx < que->max_sw_wqe; indx++)
837 que->swq[indx].next_idx = indx + 1;
838 que->swq[que->swq_last].next_idx = 0; /* Make it circular */
839 que->swq_last = 0;
840
841 return 0;
842}
843
844int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
845{
846 struct bnxt_qplib_hwq_attr hwq_attr = {};
847 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
848 struct creq_create_qp1_resp resp = {};
849 struct bnxt_qplib_cmdqmsg msg = {};
850 struct bnxt_qplib_q *sq = &qp->sq;
851 struct bnxt_qplib_q *rq = &qp->rq;
852 struct cmdq_create_qp1 req = {};
853 struct bnxt_qplib_pbl *pbl;
854 u32 qp_flags = 0;
855 u8 pg_sz_lvl;
856 u32 tbl_indx;
857 int rc;
858
859 sq->dbinfo.flags = 0;
860 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
861 CMDQ_BASE_OPCODE_CREATE_QP1,
862 sizeof(req));
863 /* General */
864 req.type = qp->type;
865 req.dpi = cpu_to_le32(qp->dpi->dpi);
866 req.qp_handle = cpu_to_le64(qp->qp_handle);
867
868 /* SQ */
869 hwq_attr.res = res;
870 hwq_attr.sginfo = &sq->sg_info;
871 hwq_attr.stride = sizeof(struct sq_sge);
872 hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
873 hwq_attr.type = HWQ_TYPE_QUEUE;
874 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
875 if (rc)
876 return rc;
877
878 rc = bnxt_qplib_alloc_init_swq(sq);
879 if (rc)
880 goto fail_sq;
881
882 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
883 pbl = &sq->hwq.pbl[PBL_LVL_0];
884 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
885 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
886 CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
887 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
888 req.sq_pg_size_sq_lvl = pg_sz_lvl;
889 req.sq_fwo_sq_sge =
890 cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
891 CMDQ_CREATE_QP1_SQ_SGE_SFT);
892 req.scq_cid = cpu_to_le32(qp->scq->id);
893
894 /* RQ */
895 if (rq->max_wqe) {
896 rq->dbinfo.flags = 0;
897 hwq_attr.res = res;
898 hwq_attr.sginfo = &rq->sg_info;
899 hwq_attr.stride = sizeof(struct sq_sge);
900 hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
901 hwq_attr.type = HWQ_TYPE_QUEUE;
902 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
903 if (rc)
904 goto sq_swq;
905 rc = bnxt_qplib_alloc_init_swq(rq);
906 if (rc)
907 goto fail_rq;
908 req.rq_size = cpu_to_le32(rq->max_wqe);
909 pbl = &rq->hwq.pbl[PBL_LVL_0];
910 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
911 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
912 CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
913 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
914 req.rq_pg_size_rq_lvl = pg_sz_lvl;
915 req.rq_fwo_rq_sge =
916 cpu_to_le16((rq->max_sge &
917 CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
918 CMDQ_CREATE_QP1_RQ_SGE_SFT);
919 }
920 req.rcq_cid = cpu_to_le32(qp->rcq->id);
	/* Header buffer - allow hdr_buf to be passed in */
922 rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
923 if (rc) {
924 rc = -ENOMEM;
925 goto rq_rwq;
926 }
927 qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
928 req.qp_flags = cpu_to_le32(qp_flags);
929 req.pd_id = cpu_to_le32(qp->pd->id);
930
931 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
932 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
933 if (rc)
934 goto fail;
935
936 qp->id = le32_to_cpu(resp.xid);
937 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
938 qp->cctx = res->cctx;
939 sq->dbinfo.hwq = &sq->hwq;
940 sq->dbinfo.xid = qp->id;
941 sq->dbinfo.db = qp->dpi->dbr;
942 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
943 if (rq->max_wqe) {
944 rq->dbinfo.hwq = &rq->hwq;
945 rq->dbinfo.xid = qp->id;
946 rq->dbinfo.db = qp->dpi->dbr;
947 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
948 }
949 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
950 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
951 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
952
953 return 0;
954
955fail:
956 bnxt_qplib_free_qp_hdr_buf(res, qp);
957rq_rwq:
958 kfree(rq->swq);
959fail_rq:
960 bnxt_qplib_free_hwq(res, &rq->hwq);
961sq_swq:
962 kfree(sq->swq);
963fail_sq:
964 bnxt_qplib_free_hwq(res, &sq->hwq);
965 return rc;
966}
967
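/* The PSN/MSN search entries live in the pages that follow the SQ
 * elements (the "pad" pages). Record where the first entry lands so
 * later lookups can index it directly. As an illustration, assuming
 * PAGE_SIZE is 4096 and an 8-byte entry whose first instance starts
 * 0x40 bytes into a page, indx_pad works out to 0x40 / 8 = 8.
 */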
968static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
969{
970 struct bnxt_qplib_hwq *hwq;
971 struct bnxt_qplib_q *sq;
972 u64 fpsne, psn_pg;
973 u16 indx_pad = 0;
974
975 sq = &qp->sq;
976 hwq = &sq->hwq;
977 /* First psn entry */
978 fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
979 if (!IS_ALIGNED(fpsne, PAGE_SIZE))
980 indx_pad = (fpsne & ~PAGE_MASK) / size;
981 hwq->pad_pgofft = indx_pad;
982 hwq->pad_pg = (u64 *)psn_pg;
983 hwq->pad_stride = size;
984}
985
986int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
987{
988 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
989 struct bnxt_qplib_hwq_attr hwq_attr = {};
990 struct bnxt_qplib_sg_info sginfo = {};
991 struct creq_create_qp_resp resp = {};
992 struct bnxt_qplib_cmdqmsg msg = {};
993 struct bnxt_qplib_q *sq = &qp->sq;
994 struct bnxt_qplib_q *rq = &qp->rq;
995 struct cmdq_create_qp req = {};
996 int rc, req_size, psn_sz = 0;
997 struct bnxt_qplib_hwq *xrrq;
998 struct bnxt_qplib_pbl *pbl;
999 u32 qp_flags = 0;
1000 u8 pg_sz_lvl;
1001 u32 tbl_indx;
1002 u16 nsge;
1003
1004 qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
1005 sq->dbinfo.flags = 0;
1006 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1007 CMDQ_BASE_OPCODE_CREATE_QP,
1008 sizeof(req));
1009
1010 /* General */
1011 req.type = qp->type;
1012 req.dpi = cpu_to_le32(qp->dpi->dpi);
1013 req.qp_handle = cpu_to_le64(qp->qp_handle);
1014
1015 /* SQ */
1016 if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1017 psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
1018 sizeof(struct sq_psn_search_ext) :
1019 sizeof(struct sq_psn_search);
1020
1021 if (qp->is_host_msn_tbl) {
1022 psn_sz = sizeof(struct sq_msn_search);
1023 qp->msn = 0;
1024 }
1025 }
1026
1027 hwq_attr.res = res;
1028 hwq_attr.sginfo = &sq->sg_info;
1029 hwq_attr.stride = sizeof(struct sq_sge);
1030 hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
1031 hwq_attr.aux_stride = psn_sz;
1032 hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
1033 : 0;
1034 /* Update msn tbl size */
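	/* The table size must be a power of two. As an example, assuming
	 * bnxt_qplib_set_sq_size() returns 0x300 slots, the table gets
	 * roundup_pow_of_two(0x300) = 0x400 entries in static WQE mode and
	 * 0x400 / 2 = 0x200 entries in variable WQE mode.
	 */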
1035 if (qp->is_host_msn_tbl && psn_sz) {
1036 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1037 hwq_attr.aux_depth =
1038 roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1039 else
1040 hwq_attr.aux_depth =
1041 roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
1042 qp->msn_tbl_sz = hwq_attr.aux_depth;
1043 qp->msn = 0;
1044 }
1045
1046 hwq_attr.type = HWQ_TYPE_QUEUE;
1047 rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1048 if (rc)
1049 return rc;
1050
1051 if (!sq->hwq.is_user) {
1052 rc = bnxt_qplib_alloc_init_swq(sq);
1053 if (rc)
1054 goto fail_sq;
1055
1056 if (psn_sz)
1057 bnxt_qplib_init_psn_ptr(qp, psn_sz);
1058 }
1059 req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1060 pbl = &sq->hwq.pbl[PBL_LVL_0];
1061 req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1062 pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1063 CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1064 pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1065 req.sq_pg_size_sq_lvl = pg_sz_lvl;
1066 req.sq_fwo_sq_sge =
1067 cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1068 CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1069 req.scq_cid = cpu_to_le32(qp->scq->id);
1070
1071 /* RQ */
1072 if (!qp->srq) {
1073 rq->dbinfo.flags = 0;
1074 hwq_attr.res = res;
1075 hwq_attr.sginfo = &rq->sg_info;
1076 hwq_attr.stride = sizeof(struct sq_sge);
1077 hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
1078 hwq_attr.aux_stride = 0;
1079 hwq_attr.aux_depth = 0;
1080 hwq_attr.type = HWQ_TYPE_QUEUE;
1081 rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1082 if (rc)
1083 goto sq_swq;
1084 if (!rq->hwq.is_user) {
1085 rc = bnxt_qplib_alloc_init_swq(rq);
1086 if (rc)
1087 goto fail_rq;
1088 }
1089
1090 req.rq_size = cpu_to_le32(rq->max_wqe);
1091 pbl = &rq->hwq.pbl[PBL_LVL_0];
1092 req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1093 pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1094 CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1095 pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1096 req.rq_pg_size_rq_lvl = pg_sz_lvl;
1097 nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1098 6 : rq->max_sge;
1099 req.rq_fwo_rq_sge =
1100 cpu_to_le16(((nsge &
1101 CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1102 CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1103 } else {
1104 /* SRQ */
1105 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1106 req.srq_cid = cpu_to_le32(qp->srq->id);
1107 }
1108 req.rcq_cid = cpu_to_le32(qp->rcq->id);
1109
1110 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1111 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1112 if (qp->sig_type)
1113 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1114 if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1115 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1116 if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1117 qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1118
1119 req.qp_flags = cpu_to_le32(qp_flags);
1120
1121 /* ORRQ and IRRQ */
1122 if (psn_sz) {
1123 xrrq = &qp->orrq;
1124 xrrq->max_elements =
1125 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1126 req_size = xrrq->max_elements *
1127 BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1128 req_size &= ~(PAGE_SIZE - 1);
1129 sginfo.pgsize = req_size;
1130 sginfo.pgshft = PAGE_SHIFT;
1131
1132 hwq_attr.res = res;
1133 hwq_attr.sginfo = &sginfo;
1134 hwq_attr.depth = xrrq->max_elements;
1135 hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1136 hwq_attr.aux_stride = 0;
1137 hwq_attr.aux_depth = 0;
1138 hwq_attr.type = HWQ_TYPE_CTX;
1139 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1140 if (rc)
1141 goto rq_swq;
1142 pbl = &xrrq->pbl[PBL_LVL_0];
1143 req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1144
1145 xrrq = &qp->irrq;
1146 xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1147 qp->max_dest_rd_atomic);
1148 req_size = xrrq->max_elements *
1149 BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1150 req_size &= ~(PAGE_SIZE - 1);
1151 sginfo.pgsize = req_size;
1152 hwq_attr.depth = xrrq->max_elements;
1153 hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1154 rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1155 if (rc)
1156 goto fail_orrq;
1157
1158 pbl = &xrrq->pbl[PBL_LVL_0];
1159 req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1160 }
1161 req.pd_id = cpu_to_le32(qp->pd->id);
1162
1163 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1164 sizeof(resp), 0);
1165 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1166 if (rc)
1167 goto fail;
1168
1169 qp->id = le32_to_cpu(resp.xid);
1170 qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1171 INIT_LIST_HEAD(&qp->sq_flush);
1172 INIT_LIST_HEAD(&qp->rq_flush);
1173 qp->cctx = res->cctx;
1174 sq->dbinfo.hwq = &sq->hwq;
1175 sq->dbinfo.xid = qp->id;
1176 sq->dbinfo.db = qp->dpi->dbr;
1177 sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1178 if (rq->max_wqe) {
1179 rq->dbinfo.hwq = &rq->hwq;
1180 rq->dbinfo.xid = qp->id;
1181 rq->dbinfo.db = qp->dpi->dbr;
1182 rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1183 }
1184 spin_lock_bh(&rcfw->tbl_lock);
1185 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1186 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1187 rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1188 spin_unlock_bh(&rcfw->tbl_lock);
1189
1190 return 0;
1191fail:
1192 bnxt_qplib_free_hwq(res, &qp->irrq);
1193fail_orrq:
1194 bnxt_qplib_free_hwq(res, &qp->orrq);
1195rq_swq:
1196 kfree(rq->swq);
1197fail_rq:
1198 bnxt_qplib_free_hwq(res, &rq->hwq);
1199sq_swq:
1200 kfree(sq->swq);
1201fail_sq:
1202 bnxt_qplib_free_hwq(res, &sq->hwq);
1203 return rc;
1204}
1205
1206static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1207{
1208 switch (qp->state) {
1209 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1210 /* INIT->RTR, configure the path_mtu to the default
1211 * 2048 if not being requested
1212 */
1213 if (!(qp->modify_flags &
1214 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1215 qp->modify_flags |=
1216 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1217 qp->path_mtu =
1218 CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1219 }
1220 qp->modify_flags &=
1221 ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1223 if (qp->max_dest_rd_atomic < 1)
1224 qp->max_dest_rd_atomic = 1;
1225 qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1226 /* Bono FW 20.6.5 requires SGID_INDEX configuration */
1227 if (!(qp->modify_flags &
1228 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1229 qp->modify_flags |=
1230 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1231 qp->ah.sgid_index = 0;
1232 }
1233 break;
1234 default:
1235 break;
1236 }
1237}
1238
1239static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1240{
1241 switch (qp->state) {
1242 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1243 /* Bono FW requires the max_rd_atomic to be >= 1 */
1244 if (qp->max_rd_atomic < 1)
1245 qp->max_rd_atomic = 1;
1246 /* Bono FW does not allow PKEY_INDEX,
1247 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1248 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1249 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1250 * modification
1251 */
1252 qp->modify_flags &=
1253 ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1254 CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1255 CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1256 CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1257 CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1258 CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1259 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1260 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1261 CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1262 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1263 CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1264 CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1265 break;
1266 default:
1267 break;
1268 }
1269}
1270
1271static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1272{
1273 switch (qp->cur_qp_state) {
1274 case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1275 break;
1276 case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1277 __modify_flags_from_init_state(qp);
1278 break;
1279 case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1280 __modify_flags_from_rtr_state(qp);
1281 break;
1282 case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1283 break;
1284 case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1285 break;
1286 case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1287 break;
1288 case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1289 break;
1290 default:
1291 break;
1292 }
1293}
1294
1295static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
1296 struct bnxt_qplib_qp *qp,
1297 struct cmdq_modify_qp *req)
1298{
1299 u32 mandatory_flags = 0;
1300
1301 if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
1302 mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
1303
1304 if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
1305 qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) {
1306 if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC && qp->srq)
1307 req->flags = cpu_to_le16(CMDQ_MODIFY_QP_FLAGS_SRQ_USED);
1308 mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
1309 }
1310
1311 if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
1312 (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
1313 qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
1314 if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
1315 mandatory_flags |=
1316 CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1317 }
1318
1319 if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
1320 qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
1321 mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
1322
1323 qp->modify_flags |= mandatory_flags;
1324 req->qp_type = qp->type;
1325}
1326
1327static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
1328{
1329 if ((qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
1330 qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR) ||
1331 (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
1332 qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS))
1333 return true;
1334
1335 return false;
1336}
1337
1338int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1339{
1340 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1341 struct creq_modify_qp_resp resp = {};
1342 struct bnxt_qplib_cmdqmsg msg = {};
1343 struct cmdq_modify_qp req = {};
1344 u16 vlan_pcp_vlan_dei_vlan_id;
1345 u32 temp32[4];
1346 u32 bmask;
1347 int rc;
1348
1349 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1350 CMDQ_BASE_OPCODE_MODIFY_QP,
1351 sizeof(req));
1352
1353 /* Filter out the qp_attr_mask based on the state->new transition */
1354 __filter_modify_flags(qp);
1355 if (qp->modify_flags & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1356 /* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
1357 if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
1358 is_optimized_state_transition(qp))
1359 bnxt_set_mandatory_attributes(res, qp, &req);
1360 }
1361 bmask = qp->modify_flags;
1362 req.modify_mask = cpu_to_le32(qp->modify_flags);
1363 req.qp_cid = cpu_to_le32(qp->id);
1364 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1365 req.network_type_en_sqd_async_notify_new_state =
1366 (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1367 (qp->en_sqd_async_notify ?
1368 CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1369 }
1370 req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1371
1372 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1373 req.access = qp->access;
1374
1375 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1376 req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1377
1378 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1379 req.qkey = cpu_to_le32(qp->qkey);
1380
1381 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1382 memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1383 req.dgid[0] = cpu_to_le32(temp32[0]);
1384 req.dgid[1] = cpu_to_le32(temp32[1]);
1385 req.dgid[2] = cpu_to_le32(temp32[2]);
1386 req.dgid[3] = cpu_to_le32(temp32[3]);
1387 }
1388 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1389 req.flow_label = cpu_to_le32(qp->ah.flow_label);
1390
1391 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1392 req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1393 [qp->ah.sgid_index]);
1394
1395 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1396 req.hop_limit = qp->ah.hop_limit;
1397
1398 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1399 req.traffic_class = qp->ah.traffic_class;
1400
1401 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1402 memcpy(req.dest_mac, qp->ah.dmac, 6);
1403
1404 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1405 req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1406
1407 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1408 req.timeout = qp->timeout;
1409
1410 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1411 req.retry_cnt = qp->retry_cnt;
1412
1413 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1414 req.rnr_retry = qp->rnr_retry;
1415
1416 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1417 req.min_rnr_timer = qp->min_rnr_timer;
1418
1419 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1420 req.rq_psn = cpu_to_le32(qp->rq.psn);
1421
1422 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1423 req.sq_psn = cpu_to_le32(qp->sq.psn);
1424
1425 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1426 req.max_rd_atomic =
1427 ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1428
1429 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1430 req.max_dest_rd_atomic =
1431 IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1432
1433 req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1434 req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1435 req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1436 req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1437 req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1438 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1439 req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1440
1441 if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID) {
1442 vlan_pcp_vlan_dei_vlan_id =
1443 ((res->sgid_tbl.tbl[qp->ah.sgid_index].vlan_id <<
1444 CMDQ_MODIFY_QP_VLAN_ID_SFT) &
1445 CMDQ_MODIFY_QP_VLAN_ID_MASK);
1446 vlan_pcp_vlan_dei_vlan_id |=
1447 ((qp->ah.sl << CMDQ_MODIFY_QP_VLAN_PCP_SFT) &
1448 CMDQ_MODIFY_QP_VLAN_PCP_MASK);
1449 req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(vlan_pcp_vlan_dei_vlan_id);
1450 }
1451
1452 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
1453 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1454 if (rc)
1455 return rc;
1456 qp->cur_qp_state = qp->state;
1457 return 0;
1458}
1459
1460int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1461{
1462 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1463 struct creq_query_qp_resp resp = {};
1464 struct bnxt_qplib_cmdqmsg msg = {};
1465 struct bnxt_qplib_rcfw_sbuf sbuf;
1466 struct creq_query_qp_resp_sb *sb;
1467 struct cmdq_query_qp req = {};
1468 u32 temp32[4];
1469 int i, rc;
1470
1471 sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
1472 sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
1473 &sbuf.dma_addr, GFP_KERNEL);
1474 if (!sbuf.sb)
1475 return -ENOMEM;
1476 sb = sbuf.sb;
1477
1478 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1479 CMDQ_BASE_OPCODE_QUERY_QP,
1480 sizeof(req));
1481
1482 req.qp_cid = cpu_to_le32(qp->id);
1483 req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
1484 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
1485 sizeof(resp), 0);
1486 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1487 if (rc)
1488 goto bail;
1489 /* Extract the context from the side buffer */
1490 qp->state = sb->en_sqd_async_notify_state &
1491 CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1492 qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1493 CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
1494 qp->access = sb->access;
1495 qp->pkey_index = le16_to_cpu(sb->pkey);
1496 qp->qkey = le32_to_cpu(sb->qkey);
1497
1498 temp32[0] = le32_to_cpu(sb->dgid[0]);
1499 temp32[1] = le32_to_cpu(sb->dgid[1]);
1500 temp32[2] = le32_to_cpu(sb->dgid[2]);
1501 temp32[3] = le32_to_cpu(sb->dgid[3]);
1502 memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1503
1504 qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1505
1506 qp->ah.sgid_index = 0;
1507 for (i = 0; i < res->sgid_tbl.max; i++) {
1508 if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1509 qp->ah.sgid_index = i;
1510 break;
1511 }
1512 }
1513 if (i == res->sgid_tbl.max)
		dev_warn(&res->pdev->dev, "SGID not found\n");
1515
1516 qp->ah.hop_limit = sb->hop_limit;
1517 qp->ah.traffic_class = sb->traffic_class;
1518 memcpy(qp->ah.dmac, sb->dest_mac, 6);
1519 qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1520 CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1521 CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1522 qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1523 CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1524 CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1525 qp->timeout = sb->timeout;
1526 qp->retry_cnt = sb->retry_cnt;
1527 qp->rnr_retry = sb->rnr_retry;
1528 qp->min_rnr_timer = sb->min_rnr_timer;
1529 qp->rq.psn = le32_to_cpu(sb->rq_psn);
1530 qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1531 qp->sq.psn = le32_to_cpu(sb->sq_psn);
1532 qp->max_dest_rd_atomic =
1533 IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1534 qp->sq.max_wqe = qp->sq.hwq.max_elements;
1535 qp->rq.max_wqe = qp->rq.hwq.max_elements;
1536 qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1537 qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1538 qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1539 qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1540 memcpy(qp->smac, sb->src_mac, 6);
1541 qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1542 qp->port_id = le16_to_cpu(sb->port_id);
1543bail:
1544 dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
1545 sbuf.sb, sbuf.dma_addr);
1546 return rc;
1547}
1548
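/* Walk every CQE in the ring and zero the qp_handle of any valid entry
 * that still references the QP being cleaned up, so later polling does
 * not touch a destroyed QP.
 */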
1549static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1550{
1551 struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1552 u32 peek_flags, peek_cons;
1553 struct cq_base *hw_cqe;
1554 int i;
1555
1556 peek_flags = cq->dbinfo.flags;
1557 peek_cons = cq_hwq->cons;
1558 for (i = 0; i < cq_hwq->max_elements; i++) {
1559 hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
1560 if (!CQE_CMP_VALID(hw_cqe, peek_flags))
1561 continue;
1562 /*
1563 * The valid test of the entry must be done first before
1564 * reading any further.
1565 */
1566 dma_rmb();
1567 switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1568 case CQ_BASE_CQE_TYPE_REQ:
1569 case CQ_BASE_CQE_TYPE_TERMINAL:
1570 {
1571 struct cq_req *cqe = (struct cq_req *)hw_cqe;
1572
1573 if (qp == le64_to_cpu(cqe->qp_handle))
1574 cqe->qp_handle = 0;
1575 break;
1576 }
1577 case CQ_BASE_CQE_TYPE_RES_RC:
1578 case CQ_BASE_CQE_TYPE_RES_UD:
1579 case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1580 {
1581 struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1582
1583 if (qp == le64_to_cpu(cqe->qp_handle))
1584 cqe->qp_handle = 0;
1585 break;
1586 }
1587 default:
1588 break;
1589 }
1590 bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
1591 1, &peek_flags);
1592 }
1593}
1594
1595int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1596 struct bnxt_qplib_qp *qp)
1597{
1598 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1599 struct creq_destroy_qp_resp resp = {};
1600 struct bnxt_qplib_cmdqmsg msg = {};
1601 struct cmdq_destroy_qp req = {};
1602 u32 tbl_indx;
1603 int rc;
1604
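	/* Take the QP out of the id table before issuing DESTROY_QP;
	 * it is restored below if the firmware command fails.
	 */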
1605 spin_lock_bh(&rcfw->tbl_lock);
1606 tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1607 rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1608 rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1609 spin_unlock_bh(&rcfw->tbl_lock);
1610
1611 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1612 CMDQ_BASE_OPCODE_DESTROY_QP,
1613 sizeof(req));
1614
1615 req.qp_cid = cpu_to_le32(qp->id);
1616 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1617 sizeof(resp), 0);
1618 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1619 if (rc) {
1620 spin_lock_bh(&rcfw->tbl_lock);
1621 rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1622 rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1623 spin_unlock_bh(&rcfw->tbl_lock);
1624 return rc;
1625 }
1626
1627 return 0;
1628}
1629
1630void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1631 struct bnxt_qplib_qp *qp)
1632{
1633 bnxt_qplib_free_qp_hdr_buf(res, qp);
1634 bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1635 kfree(qp->sq.swq);
1636
1637 bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1638 kfree(qp->rq.swq);
1639
1640 if (qp->irrq.max_elements)
1641 bnxt_qplib_free_hwq(res, &qp->irrq);
1642 if (qp->orrq.max_elements)
		bnxt_qplib_free_hwq(res, &qp->orrq);
}
1646
1647void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1648 struct bnxt_qplib_sge *sge)
1649{
1650 struct bnxt_qplib_q *sq = &qp->sq;
1651 u32 sw_prod;
1652
1653 memset(sge, 0, sizeof(*sge));
1654
1655 if (qp->sq_hdr_buf) {
1656 sw_prod = sq->swq_start;
1657 sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1658 sw_prod * qp->sq_hdr_buf_size);
1659 sge->lkey = 0xFFFFFFFF;
1660 sge->size = qp->sq_hdr_buf_size;
1661 return qp->sq_hdr_buf + sw_prod * sge->size;
1662 }
1663 return NULL;
1664}
1665
1666u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1667{
1668 struct bnxt_qplib_q *rq = &qp->rq;
1669
1670 return rq->swq_start;
1671}
1672
1673dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1674{
1675 return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1676}
1677
1678void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1679 struct bnxt_qplib_sge *sge)
1680{
1681 struct bnxt_qplib_q *rq = &qp->rq;
1682 u32 sw_prod;
1683
1684 memset(sge, 0, sizeof(*sge));
1685
1686 if (qp->rq_hdr_buf) {
1687 sw_prod = rq->swq_start;
1688 sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1689 sw_prod * qp->rq_hdr_buf_size);
1690 sge->lkey = 0xFFFFFFFF;
1691 sge->size = qp->rq_hdr_buf_size;
1692 return qp->rq_hdr_buf + sw_prod * sge->size;
1693 }
1694 return NULL;
1695}
1696
/* Fill the MSN table entry in the next PSN row */
1698static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1699 struct bnxt_qplib_swqe *wqe,
1700 struct bnxt_qplib_swq *swq)
1701{
1702 struct sq_msn_search *msns;
1703 u32 start_psn, next_psn;
1704 u16 start_idx;
1705
1706 msns = (struct sq_msn_search *)swq->psn_search;
1707 msns->start_idx_next_psn_start_psn = 0;
1708
1709 start_psn = swq->start_psn;
1710 next_psn = swq->next_psn;
1711 start_idx = swq->slot_idx;
1712 msns->start_idx_next_psn_start_psn |=
1713 bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1714 qp->msn++;
1715 qp->msn %= qp->msn_tbl_sz;
1716}
1717
1718static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1719 struct bnxt_qplib_swqe *wqe,
1720 struct bnxt_qplib_swq *swq)
1721{
1722 struct sq_psn_search_ext *psns_ext;
1723 struct sq_psn_search *psns;
1724 u32 flg_npsn;
1725 u32 op_spsn;
1726
1727 if (!swq->psn_search)
1728 return;
	/* Use the host MSN table when the device capability flags report it */
1730 if (qp->is_host_msn_tbl) {
1731 bnxt_qplib_fill_msn_search(qp, wqe, swq);
1732 return;
1733 }
	psns = swq->psn_search;
1736 psns_ext = swq->psn_ext;
1737
1738 op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1739 SQ_PSN_SEARCH_START_PSN_MASK);
1740 op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1741 SQ_PSN_SEARCH_OPCODE_MASK);
1742 flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1743 SQ_PSN_SEARCH_NEXT_PSN_MASK);
1744
1745 if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1746 psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1747 psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1748 psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1749 } else {
1750 psns->opcode_start_psn = cpu_to_le32(op_spsn);
1751 psns->flags_next_psn = cpu_to_le32(flg_npsn);
1752 }
1753}
1754
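/* Copy an inline payload straight into the SQ ring, packing the source
 * SGEs into successive 16B slots (one struct sq_sge each). Returns the
 * total number of bytes copied, or -ENOMEM if the payload exceeds
 * max_inline_data.
 */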
1755static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1756 struct bnxt_qplib_swqe *wqe,
1757 u16 *idx)
1758{
1759 struct bnxt_qplib_hwq *hwq;
1760 int len, t_len, offt;
1761 bool pull_dst = true;
1762 void *il_dst = NULL;
1763 void *il_src = NULL;
1764 int t_cplen, cplen;
1765 int indx;
1766
1767 hwq = &qp->sq.hwq;
1768 t_len = 0;
1769 for (indx = 0; indx < wqe->num_sge; indx++) {
1770 len = wqe->sg_list[indx].size;
1771 il_src = (void *)wqe->sg_list[indx].addr;
1772 t_len += len;
1773 if (t_len > qp->max_inline_data)
1774 return -ENOMEM;
1775 while (len) {
1776 if (pull_dst) {
1777 pull_dst = false;
1778 il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1779 (*idx)++;
1780 t_cplen = 0;
1781 offt = 0;
1782 }
1783 cplen = min_t(int, len, sizeof(struct sq_sge));
1784 cplen = min_t(int, cplen,
1785 (sizeof(struct sq_sge) - offt));
1786 memcpy(il_dst, il_src, cplen);
1787 t_cplen += cplen;
1788 il_src += cplen;
1789 il_dst += cplen;
1790 offt += cplen;
1791 len -= cplen;
1792 if (t_cplen == sizeof(struct sq_sge))
1793 pull_dst = true;
1794 }
1795 }
1796
1797 return t_len;
1798}
1799
1800static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1801 struct bnxt_qplib_sge *ssge,
1802 u16 nsge, u16 *idx)
1803{
1804 struct sq_sge *dsge;
1805 int indx, len = 0;
1806
1807 for (indx = 0; indx < nsge; indx++, (*idx)++) {
1808 dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1809 dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1810 dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1811 dsge->size = cpu_to_le32(ssge[indx].size);
1812 len += ssge[indx].size;
1813 }
1814
1815 return len;
1816}
1817
1818static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1819 struct bnxt_qplib_swqe *wqe,
1820 u16 *wqe_sz, u16 *qdf, u8 mode)
1821{
1822 u32 ilsize, bytes;
1823 u16 nsge;
1824 u16 slot;
1825
1826 nsge = wqe->num_sge;
	/* Using sq_send_hdr here is a slight misnomer; the RQ header size is the same */
1828 bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1829 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1830 ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1831 bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1832 bytes += sizeof(struct sq_send_hdr);
1833 }
1834
1835 *qdf = __xlate_qfd(qp->sq.q_full_delta, bytes);
1836 slot = bytes >> 4;
1837 *wqe_sz = slot;
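	/* A slot is 16 bytes (one struct sq_sge); static WQE mode always
	 * consumes a fixed 8 slots (128B) per WQE regardless of the
	 * calculated size.
	 */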
1838 if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1839 slot = 8;
1840 return slot;
1841}
1842
1843static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1844 struct bnxt_qplib_swq *swq, bool hw_retx)
1845{
1846 struct bnxt_qplib_hwq *hwq;
1847 u32 pg_num, pg_indx;
1848 void *buff;
1849 u32 tail;
1850
1851 hwq = &sq->hwq;
1852 if (!hwq->pad_pg)
1853 return;
1854 tail = swq->slot_idx / sq->dbinfo.max_slot;
1855 if (hw_retx) {
1856 /* For HW retx use qp msn index */
1857 tail = qp->msn;
1858 tail %= qp->msn_tbl_sz;
1859 }
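	/* Each pad page holds PAGE_SIZE / pad_stride search entries;
	 * translate the entry index into a (page, offset) pair, accounting
	 * for the unaligned start recorded in pad_pgofft.
	 */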
1860 pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1861 pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1862 buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1863 swq->psn_ext = buff;
1864 swq->psn_search = buff;
1865}
1866
1867void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1868{
1869 struct bnxt_qplib_q *sq = &qp->sq;
1870
1871 bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1872}
1873
1874int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1875 struct bnxt_qplib_swqe *wqe)
1876{
1877 struct bnxt_qplib_nq_work *nq_work = NULL;
1878 int i, rc = 0, data_len = 0, pkt_num = 0;
1879 struct bnxt_qplib_q *sq = &qp->sq;
1880 struct bnxt_qplib_hwq *hwq;
1881 struct bnxt_qplib_swq *swq;
1882 bool sch_handler = false;
1883 u16 wqe_sz, qdf = 0;
1884 bool msn_update;
1885 void *base_hdr;
1886 void *ext_hdr;
1887 __le32 temp32;
1888 u32 wqe_idx;
1889 u32 slots;
1890 u16 idx;
1891
1892 hwq = &sq->hwq;
1893 if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1894 qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1895 dev_err(&hwq->pdev->dev,
1896 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
1897 qp->id, qp->state);
1898 rc = -EINVAL;
1899 goto done;
1900 }
1901
1902 slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1903 if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1904 dev_err(&hwq->pdev->dev,
1905 "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1906 hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1907 rc = -ENOMEM;
1908 goto done;
1909 }
1910
1911 swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1912 bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
1913
1914 idx = 0;
1915 swq->slot_idx = hwq->prod;
1916 swq->slots = slots;
1917 swq->wr_id = wqe->wr_id;
1918 swq->type = wqe->type;
1919 swq->flags = wqe->flags;
1920 swq->start_psn = sq->psn & BTH_PSN_MASK;
1921 if (qp->sig_type)
1922 swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1923
1924 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1925 sch_handler = true;
1926 dev_dbg(&hwq->pdev->dev,
1927 "%s Error QP. Scheduling for poll_cq\n", __func__);
1928 goto queue_err;
1929 }
1930
1931 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1932 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1933 memset(base_hdr, 0, sizeof(struct sq_sge));
1934 memset(ext_hdr, 0, sizeof(struct sq_sge));
1935
1936 if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1937 /* Copy the inline data */
1938 data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1939 else
1940 data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1941 &idx);
1942 if (data_len < 0)
1943 goto queue_err;
	/* Update the MSN table only for WQEs that generate wire traffic */
1945 msn_update = true;
1946 /* Specifics */
1947 switch (wqe->type) {
1948 case BNXT_QPLIB_SWQE_TYPE_SEND:
1949 if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1950 struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1951 struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1952 /* Assemble info for Raw Ethertype QPs */
1953
1954 sqe->wqe_type = wqe->type;
1955 sqe->flags = wqe->flags;
1956 sqe->wqe_size = wqe_sz;
1957 sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1958 sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1959 sqe->length = cpu_to_le32(data_len);
1960 ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1961 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1962 SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1963
1964 break;
1965 }
1966 fallthrough;
1967 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1968 case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1969 {
1970 struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1971 struct sq_send_hdr *sqe = base_hdr;
1972
1973 sqe->wqe_type = wqe->type;
1974 sqe->flags = wqe->flags;
1975 sqe->wqe_size = wqe_sz;
1976 sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1977 if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1978 qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1979 sqe->q_key = cpu_to_le32(wqe->send.q_key);
1980 sqe->length = cpu_to_le32(data_len);
1981 sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1982 ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1983 SQ_SEND_DST_QP_MASK);
1984 ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1985 SQ_SEND_AVID_MASK);
1986 msn_update = false;
1987 } else {
1988 sqe->length = cpu_to_le32(data_len);
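			/* Advance the PSN by the number of MTU-sized packets
			 * this WQE produces (at least one); the RDMA and
			 * atomic cases below do the same.
			 */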
1989 if (qp->mtu)
1990 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1991 if (!pkt_num)
1992 pkt_num = 1;
1993 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1994 }
1995 break;
1996 }
1997 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1998 case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1999 case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
2000 {
2001 struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
2002 struct sq_rdma_hdr *sqe = base_hdr;
2003
2004 sqe->wqe_type = wqe->type;
2005 sqe->flags = wqe->flags;
2006 sqe->wqe_size = wqe_sz;
2007 sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
2008 sqe->length = cpu_to_le32((u32)data_len);
2009 ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
2010 ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
2011 if (qp->mtu)
2012 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2013 if (!pkt_num)
2014 pkt_num = 1;
2015 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2016 break;
2017 }
2018 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
2019 case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
2020 {
2021 struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
2022 struct sq_atomic_hdr *sqe = base_hdr;
2023
2024 sqe->wqe_type = wqe->type;
2025 sqe->flags = wqe->flags;
2026 sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
2027 sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
2028 ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
2029 ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
2030 if (qp->mtu)
2031 pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
2032 if (!pkt_num)
2033 pkt_num = 1;
2034 sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
2035 break;
2036 }
2037 case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
2038 {
2039 struct sq_localinvalidate *sqe = base_hdr;
2040
2041 sqe->wqe_type = wqe->type;
2042 sqe->flags = wqe->flags;
2043 sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
2044 msn_update = false;
2045 break;
2046 }
2047 case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
2048 {
2049 struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
2050 struct sq_fr_pmr_hdr *sqe = base_hdr;
2051
2052 sqe->wqe_type = wqe->type;
2053 sqe->flags = wqe->flags;
2054 sqe->access_cntl = wqe->frmr.access_cntl |
2055 SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
2056 sqe->zero_based_page_size_log =
2057 (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
2058 SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
2059 (wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
2060 sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
2061 temp32 = cpu_to_le32(wqe->frmr.length);
2062 memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
2063 sqe->numlevels_pbl_page_size_log =
2064 ((wqe->frmr.pbl_pg_sz_log <<
2065 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
2066 SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
2067 ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
2068 SQ_FR_PMR_NUMLEVELS_MASK);
2069
2070 for (i = 0; i < wqe->frmr.page_list_len; i++)
2071 wqe->frmr.pbl_ptr[i] = cpu_to_le64(
2072 wqe->frmr.page_list[i] |
2073 PTU_PTE_VALID);
2074 ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
2075 ext_sqe->va = cpu_to_le64(wqe->frmr.va);
2076 msn_update = false;
2077
2078 break;
2079 }
2080 case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
2081 {
2082 struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
2083 struct sq_bind_hdr *sqe = base_hdr;
2084
2085 sqe->wqe_type = wqe->type;
2086 sqe->flags = wqe->flags;
2087 sqe->access_cntl = wqe->bind.access_cntl;
2088 sqe->mw_type_zero_based = wqe->bind.mw_type |
2089 (wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
2090 sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2091 sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2092 ext_sqe->va = cpu_to_le64(wqe->bind.va);
2093 ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2094 msn_update = false;
2095 break;
2096 }
2097 default:
2098 /* Bad wqe, return error */
2099 rc = -EINVAL;
2100 goto done;
2101 }
2102 if (!qp->is_host_msn_tbl || msn_update) {
2103 swq->next_psn = sq->psn & BTH_PSN_MASK;
2104 bnxt_qplib_fill_psn_search(qp, wqe, swq);
2105 }
2106queue_err:
2107 bnxt_qplib_swq_mod_start(sq, wqe_idx);
2108 bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2109 qp->wqe_cnt++;
2110done:
2111 if (sch_handler) {
2112 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2113 if (nq_work) {
2114 nq_work->cq = qp->scq;
2115 nq_work->nq = qp->scq->nq;
2116 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2117 queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2118 } else {
2119 dev_err(&hwq->pdev->dev,
2120 "FP: Failed to allocate SQ nq_work!\n");
2121 rc = -ENOMEM;
2122 }
2123 }
2124 return rc;
2125}
2126
2127void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2128{
2129 struct bnxt_qplib_q *rq = &qp->rq;
2130
2131 bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2132}
2133
2134int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2135 struct bnxt_qplib_swqe *wqe)
2136{
2137 struct bnxt_qplib_nq_work *nq_work = NULL;
2138 struct bnxt_qplib_q *rq = &qp->rq;
2139 struct rq_wqe_hdr *base_hdr;
2140 struct rq_ext_hdr *ext_hdr;
2141 struct bnxt_qplib_hwq *hwq;
2142 struct bnxt_qplib_swq *swq;
2143 bool sch_handler = false;
2144 u16 wqe_sz, idx;
2145 u32 wqe_idx;
2146 int rc = 0;
2147
2148 hwq = &rq->hwq;
2149 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2150 dev_err(&hwq->pdev->dev,
2151 "QPLIB: FP: QP (0x%x) is in the 0x%x state",
2152 qp->id, qp->state);
2153 rc = -EINVAL;
2154 goto done;
2155 }
2156
2157 if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2158 dev_err(&hwq->pdev->dev,
2159 "FP: QP (0x%x) RQ is full!\n", qp->id);
2160 rc = -EINVAL;
2161 goto done;
2162 }
2163
2164 swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2165 swq->wr_id = wqe->wr_id;
2166 swq->slots = rq->dbinfo.max_slot;
2167
2168 if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2169 sch_handler = true;
2170 dev_dbg(&hwq->pdev->dev,
2171 "%s: Error QP. Scheduling for poll_cq\n", __func__);
2172 goto queue_err;
2173 }
2174
2175 idx = 0;
2176 base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2177 ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2178 memset(base_hdr, 0, sizeof(struct sq_sge));
2179 memset(ext_hdr, 0, sizeof(struct sq_sge));
2180 wqe_sz = (sizeof(struct rq_wqe_hdr) +
2181 wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2182 bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2183 if (!wqe->num_sge) {
2184 struct sq_sge *sge;
2185
2186 sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2187 sge->size = 0;
2188 wqe_sz++;
2189 }
2190 base_hdr->wqe_type = wqe->type;
2191 base_hdr->flags = wqe->flags;
2192 base_hdr->wqe_size = wqe_sz;
2193 base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2194queue_err:
2195 bnxt_qplib_swq_mod_start(rq, wqe_idx);
2196 bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2197done:
2198 if (sch_handler) {
2199 nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2200 if (nq_work) {
2201 nq_work->cq = qp->rcq;
2202 nq_work->nq = qp->rcq->nq;
2203 INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2204 queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2205 } else {
2206 dev_err(&hwq->pdev->dev,
2207 "FP: Failed to allocate RQ nq_work!\n");
2208 rc = -ENOMEM;
2209 }
2210 }
2211
2212 return rc;
2213}
2214
2215/* CQ */
2216int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2217{
2218 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2219 struct bnxt_qplib_hwq_attr hwq_attr = {};
2220 struct creq_create_cq_resp resp = {};
2221 struct bnxt_qplib_cmdqmsg msg = {};
2222 struct cmdq_create_cq req = {};
2223 struct bnxt_qplib_pbl *pbl;
2224 u32 coalescing = 0;
2225 u32 pg_sz_lvl;
2226 int rc;
2227
2228 if (!cq->dpi) {
2229 dev_err(&rcfw->pdev->dev,
2230 "FP: CREATE_CQ failed due to NULL DPI\n");
2231 return -EINVAL;
2232 }
2233
2234 cq->dbinfo.flags = 0;
2235 hwq_attr.res = res;
2236 hwq_attr.depth = cq->max_wqe;
2237 hwq_attr.stride = sizeof(struct cq_base);
2238 hwq_attr.type = HWQ_TYPE_QUEUE;
2239 hwq_attr.sginfo = &cq->sg_info;
2240 rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2241 if (rc)
2242 return rc;
2243
2244 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2245 CMDQ_BASE_OPCODE_CREATE_CQ,
2246 sizeof(req));
2247
2248 req.dpi = cpu_to_le32(cq->dpi->dpi);
2249 req.cq_handle = cpu_to_le64(cq->cq_handle);
2250 req.cq_size = cpu_to_le32(cq->max_wqe);
2251
2252 if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
2253 req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
2254 coalescing |= ((cq->coalescing->buf_maxtime <<
2255 CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
2256 CMDQ_CREATE_CQ_BUF_MAXTIME_MASK);
2257 coalescing |= ((cq->coalescing->normal_maxbuf <<
2258 CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
2259 CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK);
2260 coalescing |= ((cq->coalescing->during_maxbuf <<
2261 CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
2262 CMDQ_CREATE_CQ_DURING_MAXBUF_MASK);
2263 if (cq->coalescing->en_ring_idle_mode)
2264 coalescing |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2265 else
2266 coalescing &= ~CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
2267 req.coalescing = cpu_to_le32(coalescing);
2268 }
2269
2270 pbl = &cq->hwq.pbl[PBL_LVL_0];
2271 pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2272 CMDQ_CREATE_CQ_PG_SIZE_SFT);
2273 pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2274 req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2275 req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2276 req.cq_fco_cnq_id = cpu_to_le32(
2277 (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2278 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2279 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2280 sizeof(resp), 0);
2281 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2282 if (rc)
2283 goto fail;
2284
2285 cq->id = le32_to_cpu(resp.xid);
2286 cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2287 init_waitqueue_head(&cq->waitq);
2288 INIT_LIST_HEAD(&cq->sqf_head);
2289 INIT_LIST_HEAD(&cq->rqf_head);
2290 spin_lock_init(&cq->compl_lock);
2291 spin_lock_init(&cq->flush_lock);
2292
2293 cq->dbinfo.hwq = &cq->hwq;
2294 cq->dbinfo.xid = cq->id;
2295 cq->dbinfo.db = cq->dpi->dbr;
2296 cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2297 cq->dbinfo.flags = 0;
2298 cq->dbinfo.toggle = 0;
2299
2300 bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2301
2302 return 0;
2303
2304fail:
2305 bnxt_qplib_free_hwq(res, &cq->hwq);
2306 return rc;
2307}
2308
2309void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2310 struct bnxt_qplib_cq *cq)
2311{
2312 bnxt_qplib_free_hwq(res, &cq->hwq);
2313 memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2314 /* Reset only the cons bit in the flags */
2315 cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2316}
2317
2318int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2319 int new_cqes)
2320{
2321 struct bnxt_qplib_hwq_attr hwq_attr = {};
2322 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2323 struct creq_resize_cq_resp resp = {};
2324 struct bnxt_qplib_cmdqmsg msg = {};
2325 struct cmdq_resize_cq req = {};
2326 struct bnxt_qplib_pbl *pbl;
2327 u32 pg_sz, lvl, new_sz;
2328 int rc;
2329
2330 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2331 CMDQ_BASE_OPCODE_RESIZE_CQ,
2332 sizeof(req));
2333 hwq_attr.sginfo = &cq->sg_info;
2334 hwq_attr.res = res;
2335 hwq_attr.depth = new_cqes;
2336 hwq_attr.stride = sizeof(struct cq_base);
2337 hwq_attr.type = HWQ_TYPE_QUEUE;
2338 rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2339 if (rc)
2340 return rc;
2341
2342 req.cq_cid = cpu_to_le32(cq->id);
2343 pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2344 pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2345 lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2346 CMDQ_RESIZE_CQ_LVL_MASK;
2347 new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2348 CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2349 req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2350 req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2351
2352 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2353 sizeof(resp), 0);
2354 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2355 return rc;
2356}
2357
2358int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2359{
2360 struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2361 struct creq_destroy_cq_resp resp = {};
2362 struct bnxt_qplib_cmdqmsg msg = {};
2363 struct cmdq_destroy_cq req = {};
2364 u16 total_cnq_events;
2365 int rc;
2366
2367 bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2368 CMDQ_BASE_OPCODE_DESTROY_CQ,
2369 sizeof(req));
2370
2371 req.cq_cid = cpu_to_le32(cq->id);
2372 bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2373 sizeof(resp), 0);
2374 rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2375 if (rc)
2376 return rc;
2377 total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2378 __wait_for_all_nqes(cq, total_cnq_events);
2379 bnxt_qplib_free_hwq(res, &cq->hwq);
2380 return 0;
2381}
2382
2383static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2384 struct bnxt_qplib_cqe **pcqe, int *budget)
2385{
2386 struct bnxt_qplib_cqe *cqe;
2387 u32 start, last;
2388 int rc = 0;
2389
2390 /* Now complete all outstanding SQEs with FLUSHED_ERR */
2391 start = sq->swq_start;
2392 cqe = *pcqe;
2393 while (*budget) {
2394 last = sq->swq_last;
2395 if (start == last)
2396 break;
2397 /* Skip the FENCE WQE completions */
2398 if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2399 bnxt_qplib_cancel_phantom_processing(qp);
2400 goto skip_compl;
2401 }
2402 memset(cqe, 0, sizeof(*cqe));
2403 cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2404 cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2405 cqe->qp_handle = (u64)(unsigned long)qp;
2406 cqe->wr_id = sq->swq[last].wr_id;
2407 cqe->src_qp = qp->id;
2408 cqe->type = sq->swq[last].type;
2409 cqe++;
2410 (*budget)--;
2411skip_compl:
2412 bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2413 sq->swq[last].slots, &sq->dbinfo.flags);
2414 sq->swq_last = sq->swq[last].next_idx;
2415 }
2416 *pcqe = cqe;
2417 if (!(*budget) && sq->swq_last != start)
2418 /* Out of budget */
2419 rc = -EAGAIN;
2420
2421 return rc;
2422}
2423
2424static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2425 struct bnxt_qplib_cqe **pcqe, int *budget)
2426{
2427 struct bnxt_qplib_cqe *cqe;
2428 u32 start, last;
2429 int opcode = 0;
2430 int rc = 0;
2431
2432 switch (qp->type) {
2433 case CMDQ_CREATE_QP1_TYPE_GSI:
2434 opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2435 break;
2436 case CMDQ_CREATE_QP_TYPE_RC:
2437 opcode = CQ_BASE_CQE_TYPE_RES_RC;
2438 break;
2439 case CMDQ_CREATE_QP_TYPE_UD:
2440 case CMDQ_CREATE_QP_TYPE_GSI:
2441 opcode = CQ_BASE_CQE_TYPE_RES_UD;
2442 break;
2443 }
2444
2445 /* Flush the rest of the RQ */
2446 start = rq->swq_start;
2447 cqe = *pcqe;
2448 while (*budget) {
2449 last = rq->swq_last;
2450 if (last == start)
2451 break;
2452 memset(cqe, 0, sizeof(*cqe));
2453 cqe->status =
2454 CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2455 cqe->opcode = opcode;
2456 cqe->qp_handle = (unsigned long)qp;
2457 cqe->wr_id = rq->swq[last].wr_id;
2458 cqe++;
2459 (*budget)--;
2460 bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2461 rq->swq[last].slots, &rq->dbinfo.flags);
2462 rq->swq_last = rq->swq[last].next_idx;
2463 }
2464 *pcqe = cqe;
2465 if (!*budget && rq->swq_last != start)
2466 /* Out of budget */
2467 rc = -EAGAIN;
2468
2469 return rc;
2470}
2471
2472void bnxt_qplib_mark_qp_error(void *qp_handle)
2473{
2474 struct bnxt_qplib_qp *qp = qp_handle;
2475
2476 if (!qp)
2477 return;
2478
2479 /* Must block new posting of SQ and RQ */
2480 qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2481 bnxt_qplib_cancel_phantom_processing(qp);
2482}
2483
/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 * CQEs are tracked from sw_cq_cons up to max_elements, but are valid
 * only when the VALID bit is set.
 */
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
{
	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
	struct bnxt_qplib_q *sq = &qp->sq;
	struct cq_req *peek_req_hwcqe;
	struct bnxt_qplib_qp *peek_qp;
	struct bnxt_qplib_q *peek_sq;
	struct bnxt_qplib_swq *swq;
	struct cq_base *peek_hwcqe;
	int i, rc = 0;

	/* Normal mode */
	/* Check for the psn_search marking before completing */
	swq = &sq->swq[swq_last];
	if (swq->psn_search &&
	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
		/* Unmark */
		swq->psn_search->flags_next_psn = cpu_to_le32
			(le32_to_cpu(swq->psn_search->flags_next_psn)
			 & ~0x80000000);
		dev_dbg(&cq->hwq.pdev->dev,
			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		sq->condition = true;
		sq->send_phantom = true;

		/* TODO: Only ARM if the previous SQE is ARMALL */
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
		rc = -EAGAIN;
		goto out;
	}
	if (sq->condition) {
		/* Peek at the completions */
		peek_flags = cq->dbinfo.flags;
		peek_sw_cq_cons = cq_cons;
		i = cq->hwq.max_elements;
		while (i--) {
			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
						       peek_sw_cq_cons, NULL);
			/* If the next hwcqe is VALID */
			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
				/*
				 * The valid test of the entry must be done
				 * first before reading any further.
				 */
				dma_rmb();
				/* If the next hwcqe is a REQ */
				if ((peek_hwcqe->cqe_type_toggle &
				     CQ_BASE_CQE_TYPE_MASK) ==
				    CQ_BASE_CQE_TYPE_REQ) {
					peek_req_hwcqe =
						(struct cq_req *)peek_hwcqe;
					peek_qp = (struct bnxt_qplib_qp *)
						((unsigned long)le64_to_cpu
						 (peek_req_hwcqe->qp_handle));
					peek_sq = &peek_qp->sq;
					peek_sq_cons_idx =
						((le16_to_cpu
						  (peek_req_hwcqe->sq_cons_idx)
						  - 1) % sq->max_wqe);
					/* If the hwcqe's sq's wr_id matches */
					if (peek_sq == sq &&
					    sq->swq[peek_sq_cons_idx].wr_id ==
					    BNXT_QPLIB_FENCE_WRID) {
						/*
						 * Unbreak only if the phantom
						 * comes back
						 */
						dev_dbg(&cq->hwq.pdev->dev,
							"FP: Got Phantom CQE\n");
						sq->condition = false;
						sq->single = true;
						rc = 0;
						goto out;
					}
				}
				/* Valid but not the phantom, so keep looping */
			} else {
				/* Not valid yet, just exit and wait */
				rc = -EINVAL;
				goto out;
			}
			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
						 &peek_sw_cq_cons,
						 1, &peek_flags);
		}
		dev_err(&cq->hwq.pdev->dev,
			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
			cq_cons, qp->id, swq_last, cqe_sq_cons);
		rc = -EINVAL;
	}
out:
	return rc;
}

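/* Map a hardware slot index, as reported by an error CQE on a
 * variable-size-WQE SQ, back to a software consumer index by walking the
 * swq entries from swq_start to swq_last. Returns -1 if no posted WQE
 * occupies @cqe_slot.
 */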
static int bnxt_qplib_get_cqe_sq_cons(struct bnxt_qplib_q *sq, u32 cqe_slot)
{
	struct bnxt_qplib_hwq *sq_hwq;
	struct bnxt_qplib_swq *swq;
	int cqe_sq_cons = -1;
	u32 start, last;

	sq_hwq = &sq->hwq;

	start = sq->swq_start;
	last = sq->swq_last;

	while (last != start) {
		swq = &sq->swq[last];
		if (swq->slot_idx == cqe_slot) {
			cqe_sq_cons = swq->next_idx;
			dev_err(&sq_hwq->pdev->dev, "%s: Found cons wqe = %d slot = %d\n",
				__func__, cqe_sq_cons, cqe_slot);
			break;
		}

		last = swq->next_idx;
	}
	return cqe_sq_cons;
}

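/* Process a REQ (send) completion. The hardware aggregates successful
 * completions, so a single REQ CQE retires every SWQE from the current SQ
 * consumer up to (but excluding) the reported sq_cons_idx; a qplib CQE is
 * fabricated for each signaled SWQE, subject to *budget.
 */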
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
				     struct cq_req *hwcqe,
				     struct bnxt_qplib_cqe **pcqe, int *budget,
				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{
	struct bnxt_qplib_swq *swq;
	struct bnxt_qplib_cqe *cqe;
	u32 cqe_sq_cons, slot_num;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq;
	int cqe_cons;
	int rc = 0;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: Process Req qp is NULL\n");
		return -EINVAL;
	}
	sq = &qp->sq;

	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto done;
	}

	if (__is_err_cqe_for_var_wqe(qp, hwcqe->status)) {
		slot_num = le16_to_cpu(hwcqe->sq_cons_idx);
		cqe_cons = bnxt_qplib_get_cqe_sq_cons(sq, slot_num);
		if (cqe_cons < 0) {
			dev_err(&cq->hwq.pdev->dev, "%s: Wrong SQ cons cqe_slot_indx = %d\n",
				__func__, slot_num);
			goto done;
		}
		cqe_sq_cons = cqe_cons;
		dev_err(&cq->hwq.pdev->dev, "%s: cqe_sq_cons = %d swq_last = %d swq_start = %d\n",
			__func__, cqe_sq_cons, sq->swq_last, sq->swq_start);
	}

	/* Walk the SQ's swq to fabricate CQEs for all previously signaled
	 * SWQEs (the hardware aggregates them into this one CQE), from the
	 * current SQ consumer up to cqe_sq_cons.
	 */
	cqe = *pcqe;
	while (*budget) {
		if (sq->swq_last == cqe_sq_cons)
			/* Done */
			break;

		swq = &sq->swq[sq->swq_last];
		memset(cqe, 0, sizeof(*cqe));
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (u64)(unsigned long)qp;
		cqe->src_qp = qp->id;
		cqe->wr_id = swq->wr_id;
		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
			goto skip;
		cqe->type = swq->type;

		/* For the last CQE, check for status. For errors, regardless
		 * of the request being signaled or not, it must complete with
		 * the hwcqe error status
		 */
		if (swq->next_idx == cqe_sq_cons &&
		    hwcqe->status != CQ_REQ_STATUS_OK) {
			cqe->status = hwcqe->status;
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
				sq->swq_last, cqe->wr_id, cqe->status);
			cqe++;
			(*budget)--;
			bnxt_qplib_mark_qp_error(qp);
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		} else {
			/* Before we complete, do WA 9060 */
			if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
				if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
					      cqe_sq_cons)) {
					*lib_qp = qp;
					goto out;
				}
			}
			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
				cqe->status = CQ_REQ_STATUS_OK;
				cqe++;
				(*budget)--;
			}
		}
skip:
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 swq->slots, &sq->dbinfo.flags);
		sq->swq_last = swq->next_idx;
		if (sq->single)
			break;
	}
out:
	*pcqe = cqe;
	if (sq->swq_last != cqe_sq_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto done;
	}
	/*
	 * Back to normal completion mode only after it has completed all of
	 * the WC for this CQE
	 */
	sq->single = false;
done:
	return rc;
}

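/* Return SRQ element @tag to the tail of the SRQ's software free list and
 * advance the hardware consumer index, once the receive completion that
 * consumed the element has been reported.
 */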
static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
{
	spin_lock(&srq->hwq.lock);
	srq->swq[srq->last_idx].next_idx = (int)tag;
	srq->last_idx = (int)tag;
	srq->swq[srq->last_idx].next_idx = -1;
	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
	spin_unlock(&srq->hwq.lock);
}

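/* Process an RC receive completion. The wr_id index selects either an SRQ
 * element (released back to the free list) or the RQ entry at rq->swq_last;
 * an index that does not match the expected consumer is rejected with
 * -EINVAL. An error status moves the QP to the error state and onto the
 * CQ flush list.
 */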
static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
					struct cq_res_rc *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}

	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le32_to_cpu(hwcqe->length);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
		    CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (wr_id_idx != rq->swq_last)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

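/* Process a UD receive completion. Besides the common receive fields,
 * this recovers the source QP number (split between src_qp_low and the
 * high bits of src_qp_high_srq_or_rq_wr_id), the CFA metadata and the
 * source MAC address.
 */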
static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
					struct cq_res_ud *hwcqe,
					struct bnxt_qplib_cqe **pcqe,
					int *budget)
{
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->status = hwcqe->status;
	cqe->qp_handle = (u64)(unsigned long)qp;
	/* FIXME: Endianness fix needed for smac */
	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
		    & CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
		      ((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
			CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);

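	/* Note: the RC flag constant is reused for the SRQ test below; the
	 * SRQ flag appears to occupy the same bit position in RC and UD
	 * CQEs.
	 */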
	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq)
			return -EINVAL;

		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}

		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}

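/* The CQ is empty when the entry at the current consumer index fails the
 * valid-bit test: the expected phase bit (tracked in dbinfo.flags) flips
 * on every wrap of the ring, so CQE_CMP_VALID() matches only entries the
 * hardware has written in the current pass.
 */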
bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
{
	struct cq_base *hw_cqe;
	bool rc = true;

	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
	/* Check for Valid bit. If the CQE is valid, return false */
	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
	return rc;
}

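/* Process a raw-Ethernet/QP1 (GSI) receive completion. In addition to the
 * common receive fields this carries the raweth_qp1_* flags and metadata
 * the consumer needs to parse the raw frame.
 */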
static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
						struct cq_res_raweth_qp1 *hwcqe,
						struct bnxt_qplib_cqe **pcqe,
						int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *rq;
	struct bnxt_qplib_srq *srq;
	struct bnxt_qplib_cqe *cqe;
	u32 wr_id_idx;

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp) {
		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
		return -EINVAL;
	}
	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		return 0;
	}
	cqe = *pcqe;
	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
	cqe->flags = le16_to_cpu(hwcqe->flags);
	cqe->qp_handle = (u64)(unsigned long)qp;

	wr_id_idx =
		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
		& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
	cqe->src_qp = qp->id;
	if (qp->id == 1 && !cqe->length) {
		/* Work around a zero length reported for QP1: assume a full
		 * 296-byte frame (40-byte GRH + 256-byte MAD).
		 */
		cqe->length = 296;
	} else {
		cqe->length = le16_to_cpu(hwcqe->length);
	}
	cqe->pkey_index = qp->pkey_index;
	memcpy(cqe->smac, qp->smac, ETH_ALEN);

	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);

	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
		srq = qp->srq;
		if (!srq) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: SRQ used but not defined??\n");
			return -EINVAL;
		}
		if (wr_id_idx >= srq->hwq.max_elements) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
				wr_id_idx, srq->hwq.max_elements);
			return -EINVAL;
		}
		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
		bnxt_qplib_release_srqe(srq, wr_id_idx);
		cqe++;
		(*budget)--;
		*pcqe = cqe;
	} else {
		struct bnxt_qplib_swq *swq;

		rq = &qp->rq;
		if (wr_id_idx > (rq->max_wqe - 1)) {
			dev_err(&cq->hwq.pdev->dev,
				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
				wr_id_idx, rq->max_wqe);
			return -EINVAL;
		}
		if (rq->swq_last != wr_id_idx)
			return -EINVAL;
		swq = &rq->swq[rq->swq_last];
		cqe->wr_id = swq->wr_id;
		cqe++;
		(*budget)--;
		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
					 swq->slots, &rq->dbinfo.flags);
		rq->swq_last = swq->next_idx;
		*pcqe = cqe;

		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
			/* Add qp to flush list of the CQ */
			bnxt_qplib_add_flush_qp(qp);
		}
	}

	return 0;
}


static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
					  struct cq_terminal *hwcqe,
					  struct bnxt_qplib_cqe **pcqe,
					  int *budget)
{
	struct bnxt_qplib_qp *qp;
	struct bnxt_qplib_q *sq, *rq;
	struct bnxt_qplib_cqe *cqe;
	u32 swq_last = 0, cqe_cons;
	int rc = 0;

	/* Check the Status */
	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
		dev_warn(&cq->hwq.pdev->dev,
			 "FP: CQ Process Terminal Error status = 0x%x\n",
			 hwcqe->status);

	qp = (struct bnxt_qplib_qp *)((unsigned long)
				      le64_to_cpu(hwcqe->qp_handle));
	if (!qp)
		return -EINVAL;

	/* Must block new posting of SQ and RQ */
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;

	sq = &qp->sq;
	rq = &qp->rq;

	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
	if (cqe_cons == 0xFFFF)
		goto do_rq;
	cqe_cons %= sq->max_sw_wqe;

	if (qp->sq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		goto sq_done;
	}

	/* A terminal CQE may also aggregate earlier successful completions,
	 * so complete every signaled SWQE from the current SQ consumer up to
	 * cqe_cons with status OK.
	 */
	cqe = *pcqe;
	while (*budget) {
		swq_last = sq->swq_last;
		if (swq_last == cqe_cons)
			break;
		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
			memset(cqe, 0, sizeof(*cqe));
			cqe->status = CQ_REQ_STATUS_OK;
			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
			cqe->qp_handle = (u64)(unsigned long)qp;
			cqe->src_qp = qp->id;
			cqe->wr_id = sq->swq[swq_last].wr_id;
			cqe->type = sq->swq[swq_last].type;
			cqe++;
			(*budget)--;
		}
		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
		sq->swq_last = sq->swq[swq_last].next_idx;
	}
	*pcqe = cqe;
	if (!(*budget) && swq_last != cqe_cons) {
		/* Out of budget */
		rc = -EAGAIN;
		goto sq_done;
	}
sq_done:
	if (rc)
		return rc;
do_rq:
	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
	if (cqe_cons == 0xFFFF) {
		goto done;
	} else if (cqe_cons > rq->max_wqe - 1) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
			cqe_cons, rq->max_wqe);
		rc = -EINVAL;
		goto done;
	}

	if (qp->rq.flushed) {
		dev_dbg(&cq->hwq.pdev->dev,
			"%s: QP in Flush QP = %p\n", __func__, qp);
		rc = 0;
		goto done;
	}

	/* A terminal CQE requires every posted RQE to be completed with
	 * FLUSHED_ERR, from the current rq->cons all the way to rq->prod,
	 * regardless of the rq_cons_idx the terminal CQE reports.
	 */

	/* Add qp to flush list of the CQ */
	bnxt_qplib_add_flush_qp(qp);
done:
	return rc;
}

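/* A cutoff CQE marks the point at which firmware stops posting
 * completions to the old CQ ring during a CQ resize: clear the
 * resize-in-progress flag and wake up the waiter in the resize path.
 */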
static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
					struct cq_cutoff *hwcqe)
{
	/* Check the Status */
	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
		dev_err(&cq->hwq.pdev->dev,
			"FP: CQ Process Cutoff Error status = 0x%x\n",
			hwcqe->status);
		return -EINVAL;
	}
	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	wake_up_interruptible(&cq->waitq);

	return 0;
}

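/* Drain this CQ's SQ and RQ flush lists, fabricating up to @num_cqes
 * FLUSHED_ERR completions into @cqe while holding flush_lock. Returns the
 * number of CQEs produced.
 */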
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
				  struct bnxt_qplib_cqe *cqe,
				  int num_cqes)
{
	struct bnxt_qplib_qp *qp = NULL;
	int budget = num_cqes;
	unsigned long flags;

	spin_lock_irqsave(&cq->flush_lock, flags);
	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
		__flush_sq(&qp->sq, qp, &cqe, &budget);
	}

	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
		__flush_rq(&qp->rq, qp, &cqe, &budget);
	}
	spin_unlock_irqrestore(&cq->flush_lock, flags);

	return num_cqes - budget;
}

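/* Poll up to @num_cqes completions from @cq into @cqe, returning the
 * number of CQEs written. If processing is interrupted by a phantom-WQE
 * search (WA 9060), *lib_qp is set so the caller can revisit that QP.
 *
 * Illustrative caller sketch (the real consumer is the verbs layer;
 * locking and ib_wc translation are omitted here):
 *
 *	struct bnxt_qplib_cqe cqes[16];
 *	struct bnxt_qplib_qp *lib_qp = NULL;
 *	int polled;
 *
 *	polled = bnxt_qplib_poll_cq(cq, cqes, ARRAY_SIZE(cqes), &lib_qp);
 *	// translate cqes[0..polled - 1] to ib_wc entries; if lib_qp is
 *	// set, poll that QP's send queue again
 */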
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
{
	struct cq_base *hw_cqe;
	int budget, rc = 0;
	u32 hw_polled = 0;
	u8 type;

	budget = num_cqes;

	while (budget) {
		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);

		/* Check for Valid bit */
		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
			break;

		/*
		 * The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		/* Translate the device's CQE format into a bnxt_qplib_cqe */
		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
		switch (type) {
		case CQ_BASE_CQE_TYPE_REQ:
			rc = bnxt_qplib_cq_process_req(cq,
						       (struct cq_req *)hw_cqe,
						       &cqe, &budget,
						       cq->hwq.cons, lib_qp);
			break;
		case CQ_BASE_CQE_TYPE_RES_RC:
			rc = bnxt_qplib_cq_process_res_rc(cq,
							  (struct cq_res_rc *)hw_cqe,
							  &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_UD:
			rc = bnxt_qplib_cq_process_res_ud(cq,
							  (struct cq_res_ud *)hw_cqe,
							  &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
			rc = bnxt_qplib_cq_process_res_raweth_qp1(cq,
								  (struct cq_res_raweth_qp1 *)hw_cqe,
								  &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_TERMINAL:
			rc = bnxt_qplib_cq_process_terminal(cq,
							    (struct cq_terminal *)hw_cqe,
							    &cqe, &budget);
			break;
		case CQ_BASE_CQE_TYPE_CUT_OFF:
			bnxt_qplib_cq_process_cutoff(cq,
						     (struct cq_cutoff *)hw_cqe);
			/* Done processing this CQ */
			goto exit;
		default:
			dev_err(&cq->hwq.pdev->dev,
				"process_cq unknown type 0x%x\n", type);
			rc = -EINVAL;
			break;
		}
		if (rc < 0) {
			if (rc == -EAGAIN)
				break;
			/* Error while processing the CQE, just skip to the
			 * next one
			 */
			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
				dev_err(&cq->hwq.pdev->dev,
					"process_cqe error rc = 0x%x\n", rc);
		}
		hw_polled++;
		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
					 1, &cq->dbinfo.flags);
	}
	if (hw_polled)
		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
exit:
	return num_cqes - budget;
}

void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
{
	cq->dbinfo.toggle = cq->toggle;
	if (arm_type)
		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
	/* Use cq->arm_state to track whether the CQ handler needs to run */
	atomic_set(&cq->arm_state, 1);
}

void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
{
	flush_workqueue(qp->scq->nq->cqn_wq);
	if (qp->scq != qp->rcq)
		flush_workqueue(qp->rcq->nq->cqn_wq);
}