v6.8
   1/*
   2 * Broadcom NetXtreme-E RoCE driver.
   3 *
   4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
   5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * BSD license below:
  12 *
  13 * Redistribution and use in source and binary forms, with or without
  14 * modification, are permitted provided that the following conditions
  15 * are met:
  16 *
  17 * 1. Redistributions of source code must retain the above copyright
  18 *    notice, this list of conditions and the following disclaimer.
  19 * 2. Redistributions in binary form must reproduce the above copyright
  20 *    notice, this list of conditions and the following disclaimer in
  21 *    the documentation and/or other materials provided with the
  22 *    distribution.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35 *
  36 * Description: Fast Path Operators
  37 */
  38
  39#define dev_fmt(fmt) "QPLIB: " fmt
  40
  41#include <linux/interrupt.h>
  42#include <linux/spinlock.h>
  43#include <linux/sched.h>
  44#include <linux/slab.h>
  45#include <linux/pci.h>
  46#include <linux/delay.h>
  47#include <linux/prefetch.h>
  48#include <linux/if_ether.h>
  49#include <rdma/ib_mad.h>
  50
  51#include "roce_hsi.h"
  52
  53#include "qplib_res.h"
  54#include "qplib_rcfw.h"
  55#include "qplib_sp.h"
  56#include "qplib_fp.h"
  57
  58static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
  59
  60static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
  61{
  62	qp->sq.condition = false;
  63	qp->sq.send_phantom = false;
  64	qp->sq.single = false;
  65}
  66
  67/* Flush list */
  68static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  69{
  70	struct bnxt_qplib_cq *scq, *rcq;
  71
  72	scq = qp->scq;
  73	rcq = qp->rcq;
  74
  75	if (!qp->sq.flushed) {
  76		dev_dbg(&scq->hwq.pdev->dev,
  77			"FP: Adding to SQ Flush list = %p\n", qp);
  78		bnxt_qplib_cancel_phantom_processing(qp);
  79		list_add_tail(&qp->sq_flush, &scq->sqf_head);
  80		qp->sq.flushed = true;
  81	}
  82	if (!qp->srq) {
  83		if (!qp->rq.flushed) {
  84			dev_dbg(&rcq->hwq.pdev->dev,
  85				"FP: Adding to RQ Flush list = %p\n", qp);
  86			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
  87			qp->rq.flushed = true;
  88		}
  89	}
  90}
  91
  92static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
  93				       unsigned long *flags)
  94	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
  95{
  96	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
  97	if (qp->scq == qp->rcq)
  98		__acquire(&qp->rcq->flush_lock);
  99	else
 100		spin_lock(&qp->rcq->flush_lock);
 101}
 102
 103static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
 104				       unsigned long *flags)
 105	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 106{
 107	if (qp->scq == qp->rcq)
 108		__release(&qp->rcq->flush_lock);
 109	else
 110		spin_unlock(&qp->rcq->flush_lock);
 111	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 112}
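/*
 * Lock-ordering note: the send CQ's flush_lock is always taken first and
 * released last.  When a QP uses one CQ for both send and receive
 * (qp->scq == qp->rcq) only a single real spinlock is taken; the
 * __acquire()/__release() annotations keep sparse's context tracking
 * balanced in that case.  A minimal usage sketch, mirroring
 * bnxt_qplib_add_flush_qp() below:
 *
 *	unsigned long flags;
 *
 *	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 *	... manipulate the per-CQ flush lists ...
 *	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 */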
 113
 114void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 115{
 116	unsigned long flags;
 117
 118	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 119	__bnxt_qplib_add_flush_qp(qp);
 120	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 121}
 122
 123static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
 124{
 125	if (qp->sq.flushed) {
 126		qp->sq.flushed = false;
 127		list_del(&qp->sq_flush);
 128	}
 129	if (!qp->srq) {
 130		if (qp->rq.flushed) {
 131			qp->rq.flushed = false;
 132			list_del(&qp->rq_flush);
 133		}
 134	}
 135}
 136
 137void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 138{
 139	unsigned long flags;
 140
 141	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 142	__clean_cq(qp->scq, (u64)(unsigned long)qp);
 143	qp->sq.hwq.prod = 0;
 144	qp->sq.hwq.cons = 0;
 145	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
 146	qp->rq.hwq.prod = 0;
 147	qp->rq.hwq.cons = 0;
 148
 149	__bnxt_qplib_del_flush_qp(qp);
 150	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 151}
 152
 153static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
 154{
 155	struct bnxt_qplib_nq_work *nq_work =
 156			container_of(work, struct bnxt_qplib_nq_work, work);
 157
 158	struct bnxt_qplib_cq *cq = nq_work->cq;
 159	struct bnxt_qplib_nq *nq = nq_work->nq;
 160
 161	if (cq && nq) {
 162		spin_lock_bh(&cq->compl_lock);
 163		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
 164			dev_dbg(&nq->pdev->dev,
  165				"%s: Trigger cq = %p event nq = %p\n",
 166				__func__, cq, nq);
 167			nq->cqn_handler(nq, cq);
 168		}
 169		spin_unlock_bh(&cq->compl_lock);
 170	}
 171	kfree(nq_work);
 172}
 173
 174static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
 175				       struct bnxt_qplib_qp *qp)
 176{
 177	struct bnxt_qplib_q *rq = &qp->rq;
 178	struct bnxt_qplib_q *sq = &qp->sq;
 179
 180	if (qp->rq_hdr_buf)
 181		dma_free_coherent(&res->pdev->dev,
 182				  rq->max_wqe * qp->rq_hdr_buf_size,
 183				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
 184	if (qp->sq_hdr_buf)
 185		dma_free_coherent(&res->pdev->dev,
 186				  sq->max_wqe * qp->sq_hdr_buf_size,
 187				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
 188	qp->rq_hdr_buf = NULL;
 189	qp->sq_hdr_buf = NULL;
 190	qp->rq_hdr_buf_map = 0;
 191	qp->sq_hdr_buf_map = 0;
 192	qp->sq_hdr_buf_size = 0;
 193	qp->rq_hdr_buf_size = 0;
 194}
 195
 196static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
 197				       struct bnxt_qplib_qp *qp)
 198{
 199	struct bnxt_qplib_q *rq = &qp->rq;
 200	struct bnxt_qplib_q *sq = &qp->sq;
 201	int rc = 0;
 202
 203	if (qp->sq_hdr_buf_size && sq->max_wqe) {
 204		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
 205					sq->max_wqe * qp->sq_hdr_buf_size,
 206					&qp->sq_hdr_buf_map, GFP_KERNEL);
 207		if (!qp->sq_hdr_buf) {
 208			rc = -ENOMEM;
 209			dev_err(&res->pdev->dev,
 210				"Failed to create sq_hdr_buf\n");
 211			goto fail;
 212		}
 213	}
 214
 215	if (qp->rq_hdr_buf_size && rq->max_wqe) {
 216		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
 217						    rq->max_wqe *
 218						    qp->rq_hdr_buf_size,
 219						    &qp->rq_hdr_buf_map,
 220						    GFP_KERNEL);
 221		if (!qp->rq_hdr_buf) {
 222			rc = -ENOMEM;
 223			dev_err(&res->pdev->dev,
 224				"Failed to create rq_hdr_buf\n");
 225			goto fail;
 226		}
 227	}
 228	return 0;
 229
 230fail:
 231	bnxt_qplib_free_qp_hdr_buf(res, qp);
 232	return rc;
 233}
 234
 235static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq)
 236{
 237	struct bnxt_qplib_hwq *hwq = &nq->hwq;
 238	struct nq_base *nqe, **nq_ptr;
 239	int budget = nq->budget;
 240	uintptr_t q_handle;
 241	u16 type;
 242
 243	spin_lock_bh(&hwq->lock);
 244	/* Service the NQ until empty */
 245	while (budget--) {
 246		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
 247		nqe = &nq_ptr[NQE_PG(hwq->cons)][NQE_IDX(hwq->cons)];
 248		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
 249			break;
 250
 251		/*
 252		 * The valid test of the entry must be done first before
 253		 * reading any further.
 254		 */
 255		dma_rmb();
 256
 257		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
 258		switch (type) {
 259		case NQ_BASE_TYPE_CQ_NOTIFICATION:
 260		{
 261			struct nq_cn *nqcne = (struct nq_cn *)nqe;
 262
 263			q_handle = le32_to_cpu(nqcne->cq_handle_low);
 264			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
 265						     << 32;
 266			if ((unsigned long)cq == q_handle) {
 267				nqcne->cq_handle_low = 0;
 268				nqcne->cq_handle_high = 0;
 269				cq->cnq_events++;
 270			}
 271			break;
 272		}
 273		default:
 274			break;
 275		}
 276		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
 277					 1, &nq->nq_db.dbinfo.flags);
 278	}
 279	spin_unlock_bh(&hwq->lock);
 280}
 281
  282/* Wait until all NQEs for this CQ have been received, then clean up the
  283 * NQEs still associated with this CQ.
  284 */
 285static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
 286{
 287	u32 retry_cnt = 100;
 288
 289	while (retry_cnt--) {
 290		if (cnq_events == cq->cnq_events)
 291			return;
 292		usleep_range(50, 100);
 293		clean_nq(cq->nq, cq);
 294	}
 295}
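/*
 * The wait above is bounded: at most 100 passes of a 50-100us sleep
 * (roughly 5-10ms in total), with clean_nq() called on each pass to
 * consume any notification entries still in flight for this CQ.
 */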
 296
 297static void bnxt_qplib_service_nq(struct tasklet_struct *t)
 298{
 299	struct bnxt_qplib_nq *nq = from_tasklet(nq, t, nq_tasklet);
 300	struct bnxt_qplib_hwq *hwq = &nq->hwq;
 301	struct bnxt_qplib_cq *cq;
 302	int budget = nq->budget;
 303	struct nq_base *nqe;
 304	uintptr_t q_handle;
 305	u32 hw_polled = 0;
 306	u16 type;
 307
 308	spin_lock_bh(&hwq->lock);
 309	/* Service the NQ until empty */
 310	while (budget--) {
 311		nqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
 312		if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
 313			break;
 314
 315		/*
 316		 * The valid test of the entry must be done first before
 317		 * reading any further.
 318		 */
 319		dma_rmb();
 320
 321		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
 322		switch (type) {
 323		case NQ_BASE_TYPE_CQ_NOTIFICATION:
 324		{
 325			struct nq_cn *nqcne = (struct nq_cn *)nqe;
 326
 327			q_handle = le32_to_cpu(nqcne->cq_handle_low);
 328			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
 329						     << 32;
 330			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
 331			if (!cq)
 332				break;
 333			cq->toggle = (le16_to_cpu(nqe->info10_type) &
 334					NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
 335			cq->dbinfo.toggle = cq->toggle;
 336			bnxt_qplib_armen_db(&cq->dbinfo,
 337					    DBC_DBC_TYPE_CQ_ARMENA);
 338			spin_lock_bh(&cq->compl_lock);
 339			atomic_set(&cq->arm_state, 0);
 340			if (nq->cqn_handler(nq, (cq)))
 341				dev_warn(&nq->pdev->dev,
 342					 "cqn - type 0x%x not handled\n", type);
 343			cq->cnq_events++;
 344			spin_unlock_bh(&cq->compl_lock);
 345			break;
 346		}
 347		case NQ_BASE_TYPE_SRQ_EVENT:
 348		{
 349			struct bnxt_qplib_srq *srq;
 350			struct nq_srq_event *nqsrqe =
 351						(struct nq_srq_event *)nqe;
 352
 353			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
 354			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
 355				     << 32;
 356			srq = (struct bnxt_qplib_srq *)q_handle;
 357			bnxt_qplib_armen_db(&srq->dbinfo,
 358					    DBC_DBC_TYPE_SRQ_ARMENA);
 359			if (nq->srqn_handler(nq,
 360					     (struct bnxt_qplib_srq *)q_handle,
 361					     nqsrqe->event))
 362				dev_warn(&nq->pdev->dev,
 363					 "SRQ event 0x%x not handled\n",
 364					 nqsrqe->event);
 365			break;
 366		}
 367		case NQ_BASE_TYPE_DBQ_EVENT:
 368			break;
 369		default:
 370			dev_warn(&nq->pdev->dev,
 371				 "nqe with type = 0x%x not handled\n", type);
 372			break;
 373		}
 374		hw_polled++;
 375		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
 376					 1, &nq->nq_db.dbinfo.flags);
 377	}
 378	if (hw_polled)
 379		bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
 380	spin_unlock_bh(&hwq->lock);
 381}
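/*
 * The consumption pattern above (shared with clean_nq()) is: test the
 * NQE valid bit, issue dma_rmb() so no payload reads are reordered ahead
 * of that test, dispatch on the entry type, then advance the consumer
 * index.  The NQ doorbell is rung once at the end, and only if at least
 * one entry was actually polled.
 */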
 382
  383/* bnxt_re_synchronize_nq - self-poll the notification queue.
  384 * @nq      -     notification queue pointer
  385 *
  386 * This function polls a given notification queue for all of its
  387 * pending entries.
  388 * It is useful for synchronizing notification entries while resources
  389 * are going away.
  390 */
 391
 392void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq)
 393{
 394	int budget = nq->budget;
 395
 396	nq->budget = nq->hwq.max_elements;
 397	bnxt_qplib_service_nq(&nq->nq_tasklet);
 398	nq->budget = budget;
 399}
 400
 401static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 402{
 403	struct bnxt_qplib_nq *nq = dev_instance;
 404	struct bnxt_qplib_hwq *hwq = &nq->hwq;
 405	u32 sw_cons;
 406
 407	/* Prefetch the NQ element */
 408	sw_cons = HWQ_CMP(hwq->cons, hwq);
 409	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
 410
 411	/* Fan out to CPU affinitized kthreads? */
 412	tasklet_schedule(&nq->nq_tasklet);
 413
 414	return IRQ_HANDLED;
 415}
 416
 417void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
 418{
 419	if (!nq->requested)
 420		return;
 421
 422	nq->requested = false;
 423	/* Mask h/w interrupt */
 424	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
 425	/* Sync with last running IRQ handler */
 426	synchronize_irq(nq->msix_vec);
 427	irq_set_affinity_hint(nq->msix_vec, NULL);
 428	free_irq(nq->msix_vec, nq);
 429	kfree(nq->name);
 430	nq->name = NULL;
 431
 432	if (kill)
 433		tasklet_kill(&nq->nq_tasklet);
 434	tasklet_disable(&nq->nq_tasklet);
 435}
 436
 437void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 438{
 439	if (nq->cqn_wq) {
 440		destroy_workqueue(nq->cqn_wq);
 441		nq->cqn_wq = NULL;
 442	}
 443
 444	/* Make sure the HW is stopped! */
 445	bnxt_qplib_nq_stop_irq(nq, true);
 446
 447	if (nq->nq_db.reg.bar_reg) {
 448		iounmap(nq->nq_db.reg.bar_reg);
 449		nq->nq_db.reg.bar_reg = NULL;
 450	}
 451
 452	nq->cqn_handler = NULL;
 453	nq->srqn_handler = NULL;
 454	nq->msix_vec = 0;
 455}
 456
 457int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
 458			    int msix_vector, bool need_init)
 459{
 460	struct bnxt_qplib_res *res = nq->res;
 461	int rc;
 462
 463	if (nq->requested)
 464		return -EFAULT;
 465
 466	nq->msix_vec = msix_vector;
 467	if (need_init)
 468		tasklet_setup(&nq->nq_tasklet, bnxt_qplib_service_nq);
 469	else
 470		tasklet_enable(&nq->nq_tasklet);
 471
 472	nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
 473			     nq_indx, pci_name(res->pdev));
 474	if (!nq->name)
 475		return -ENOMEM;
 476	rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
 477	if (rc) {
 478		kfree(nq->name);
 479		nq->name = NULL;
 480		tasklet_disable(&nq->nq_tasklet);
 481		return rc;
 482	}
 483
 484	cpumask_clear(&nq->mask);
 485	cpumask_set_cpu(nq_indx, &nq->mask);
 486	rc = irq_set_affinity_hint(nq->msix_vec, &nq->mask);
 487	if (rc) {
 488		dev_warn(&nq->pdev->dev,
 489			 "set affinity failed; vector: %d nq_idx: %d\n",
 490			 nq->msix_vec, nq_indx);
 491	}
 492	nq->requested = true;
 493	bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
 494
 495	return rc;
 496}
 497
 498static int bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq,  u32 reg_offt)
 499{
 500	resource_size_t reg_base;
 501	struct bnxt_qplib_nq_db *nq_db;
 502	struct pci_dev *pdev;
 503
 504	pdev = nq->pdev;
 505	nq_db = &nq->nq_db;
 506
 507	nq_db->dbinfo.flags = 0;
 508	nq_db->reg.bar_id = NQ_CONS_PCI_BAR_REGION;
 509	nq_db->reg.bar_base = pci_resource_start(pdev, nq_db->reg.bar_id);
 510	if (!nq_db->reg.bar_base) {
 511		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
 512			nq_db->reg.bar_id);
 513		return -ENOMEM;
 514	}
 515
 516	reg_base = nq_db->reg.bar_base + reg_offt;
 517	/* Unconditionally map 8 bytes to support 57500 series */
 518	nq_db->reg.len = 8;
 519	nq_db->reg.bar_reg = ioremap(reg_base, nq_db->reg.len);
 520	if (!nq_db->reg.bar_reg) {
 521		dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
 522			nq_db->reg.bar_id);
 523		return -ENOMEM;
 524	}
 525
 526	nq_db->dbinfo.db = nq_db->reg.bar_reg;
 527	nq_db->dbinfo.hwq = &nq->hwq;
 528	nq_db->dbinfo.xid = nq->ring_id;
 529
 530	return 0;
 531}
 532
 533int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 534			 int nq_idx, int msix_vector, int bar_reg_offset,
 535			 cqn_handler_t cqn_handler,
 536			 srqn_handler_t srqn_handler)
 537{
 538	int rc;
 539
 540	nq->pdev = pdev;
 541	nq->cqn_handler = cqn_handler;
 542	nq->srqn_handler = srqn_handler;
 543
 544	/* Have a task to schedule CQ notifiers in post send case */
 545	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
 546	if (!nq->cqn_wq)
 547		return -ENOMEM;
 548
 549	rc = bnxt_qplib_map_nq_db(nq, bar_reg_offset);
 550	if (rc)
 551		goto fail;
 552
 553	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
 554	if (rc) {
 555		dev_err(&nq->pdev->dev,
 556			"Failed to request irq for nq-idx %d\n", nq_idx);
 557		goto fail;
 558	}
 559
 560	return 0;
 561fail:
 562	bnxt_qplib_disable_nq(nq);
 563	return rc;
 564}
 565
 566void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
 567{
 568	if (nq->hwq.max_elements) {
 569		bnxt_qplib_free_hwq(nq->res, &nq->hwq);
 570		nq->hwq.max_elements = 0;
 571	}
 572}
 573
 574int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq)
 575{
 576	struct bnxt_qplib_hwq_attr hwq_attr = {};
 577	struct bnxt_qplib_sg_info sginfo = {};
 578
 579	nq->pdev = res->pdev;
 580	nq->res = res;
 581	if (!nq->hwq.max_elements ||
 582	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
 583		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
 584
 585	sginfo.pgsize = PAGE_SIZE;
 586	sginfo.pgshft = PAGE_SHIFT;
 587	hwq_attr.res = res;
 588	hwq_attr.sginfo = &sginfo;
 589	hwq_attr.depth = nq->hwq.max_elements;
 590	hwq_attr.stride = sizeof(struct nq_base);
 591	hwq_attr.type = bnxt_qplib_get_hwq_type(nq->res);
 592	if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
 593		dev_err(&nq->pdev->dev, "FP NQ allocation failed");
 594		return -ENOMEM;
 595	}
 596	nq->budget = 8;
 597	return 0;
 598}
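/*
 * Illustrative bring-up order (a sketch only, based on the signatures in
 * this file; error handling elided): allocate the NQ hardware queue
 * first, then enable it with the caller's notification callbacks:
 *
 *	rc = bnxt_qplib_alloc_nq(res, nq);
 *	if (!rc)
 *		rc = bnxt_qplib_enable_nq(pdev, nq, nq_idx, msix_vector,
 *					  bar_reg_offset, cqn_handler,
 *					  srqn_handler);
 *
 * Teardown mirrors this: bnxt_qplib_disable_nq() followed by
 * bnxt_qplib_free_nq().
 */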
 599
 600/* SRQ */
 601void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
 602			   struct bnxt_qplib_srq *srq)
 603{
 604	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 605	struct creq_destroy_srq_resp resp = {};
 606	struct bnxt_qplib_cmdqmsg msg = {};
 607	struct cmdq_destroy_srq req = {};
 608	int rc;
 609
 610	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 611				 CMDQ_BASE_OPCODE_DESTROY_SRQ,
 612				 sizeof(req));
 613
 614	/* Configure the request */
 615	req.srq_cid = cpu_to_le32(srq->id);
 616
 617	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 618	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 619	kfree(srq->swq);
 620	if (rc)
 621		return;
 622	bnxt_qplib_free_hwq(res, &srq->hwq);
 623}
 624
 625int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 626			  struct bnxt_qplib_srq *srq)
 627{
 628	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 629	struct bnxt_qplib_hwq_attr hwq_attr = {};
 630	struct creq_create_srq_resp resp = {};
 631	struct bnxt_qplib_cmdqmsg msg = {};
 632	struct cmdq_create_srq req = {};
 633	struct bnxt_qplib_pbl *pbl;
 634	u16 pg_sz_lvl;
 635	int rc, idx;
 636
 637	hwq_attr.res = res;
 638	hwq_attr.sginfo = &srq->sg_info;
 639	hwq_attr.depth = srq->max_wqe;
 640	hwq_attr.stride = srq->wqe_size;
 641	hwq_attr.type = HWQ_TYPE_QUEUE;
 642	rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
 643	if (rc)
 644		return rc;
 645
 646	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
 647			   GFP_KERNEL);
 648	if (!srq->swq) {
 649		rc = -ENOMEM;
 650		goto fail;
 651	}
 652	srq->dbinfo.flags = 0;
 653	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 654				 CMDQ_BASE_OPCODE_CREATE_SRQ,
 655				 sizeof(req));
 656
 657	/* Configure the request */
 658	req.dpi = cpu_to_le32(srq->dpi->dpi);
 659	req.srq_handle = cpu_to_le64((uintptr_t)srq);
 660
 661	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
 662	pbl = &srq->hwq.pbl[PBL_LVL_0];
 663	pg_sz_lvl = ((u16)bnxt_qplib_base_pg_size(&srq->hwq) <<
 664		     CMDQ_CREATE_SRQ_PG_SIZE_SFT);
 665	pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK) <<
 666		      CMDQ_CREATE_SRQ_LVL_SFT;
 667	req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
 668	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 669	req.pd_id = cpu_to_le32(srq->pd->id);
 670	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
 671
 672	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 673	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 674	if (rc)
 675		goto fail;
 676
 677	spin_lock_init(&srq->lock);
 678	srq->start_idx = 0;
 679	srq->last_idx = srq->hwq.max_elements - 1;
 680	for (idx = 0; idx < srq->hwq.max_elements; idx++)
 681		srq->swq[idx].next_idx = idx + 1;
 682	srq->swq[srq->last_idx].next_idx = -1;
 683
 684	srq->id = le32_to_cpu(resp.xid);
 685	srq->dbinfo.hwq = &srq->hwq;
 686	srq->dbinfo.xid = srq->id;
 687	srq->dbinfo.db = srq->dpi->dbr;
 688	srq->dbinfo.max_slot = 1;
 689	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
 690	if (srq->threshold)
 691		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
 692	srq->arm_req = false;
 693
 694	return 0;
 695fail:
 696	bnxt_qplib_free_hwq(res, &srq->hwq);
 697	kfree(srq->swq);
 698
 699	return rc;
 700}
 701
 702int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
 703			  struct bnxt_qplib_srq *srq)
 704{
 705	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 706	u32 count;
 707
 708	count = __bnxt_qplib_get_avail(srq_hwq);
 709	if (count > srq->threshold) {
 710		srq->arm_req = false;
 711		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
 712	} else {
 713		/* Deferred arming */
 714		srq->arm_req = true;
 715	}
 716
 717	return 0;
 718}
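/*
 * SRQ arming is deferred while the queue is shallow: if the number of
 * available entries has not yet risen above srq->threshold, only arm_req
 * is set here, and the SRQ_ARM doorbell is rung later from
 * bnxt_qplib_post_srq_recv() once posting lifts the count above the
 * threshold.
 */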
 719
 720int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 721			 struct bnxt_qplib_srq *srq)
 722{
 723	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 724	struct creq_query_srq_resp resp = {};
 725	struct bnxt_qplib_cmdqmsg msg = {};
 726	struct bnxt_qplib_rcfw_sbuf sbuf;
 727	struct creq_query_srq_resp_sb *sb;
 728	struct cmdq_query_srq req = {};
 729	int rc;
 730
 731	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 732				 CMDQ_BASE_OPCODE_QUERY_SRQ,
 733				 sizeof(req));
 734
 735	/* Configure the request */
 736	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
 737	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
 738				     &sbuf.dma_addr, GFP_KERNEL);
 739	if (!sbuf.sb)
 740		return -ENOMEM;
 741	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
 742	req.srq_cid = cpu_to_le32(srq->id);
 743	sb = sbuf.sb;
 744	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
 745				sizeof(resp), 0);
 746	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 747	if (!rc)
 748		srq->threshold = le16_to_cpu(sb->srq_limit);
 749	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
 750			  sbuf.sb, sbuf.dma_addr);
 751
 752	return rc;
 753}
 754
 755int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
 756			     struct bnxt_qplib_swqe *wqe)
 757{
 758	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 759	struct rq_wqe *srqe;
 760	struct sq_sge *hw_sge;
 761	u32 count = 0;
 762	int i, next;
 763
 764	spin_lock(&srq_hwq->lock);
 765	if (srq->start_idx == srq->last_idx) {
 766		dev_err(&srq_hwq->pdev->dev,
 767			"FP: SRQ (0x%x) is full!\n", srq->id);
 768		spin_unlock(&srq_hwq->lock);
 769		return -EINVAL;
 770	}
 771	next = srq->start_idx;
 772	srq->start_idx = srq->swq[next].next_idx;
 773	spin_unlock(&srq_hwq->lock);
 774
 775	srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
 776	memset(srqe, 0, srq->wqe_size);
  777	/* Calculate wqe_size and data_len */
 778	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
 779	     i < wqe->num_sge; i++, hw_sge++) {
 780		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
 781		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
 782		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
 783	}
 784	srqe->wqe_type = wqe->type;
 785	srqe->flags = wqe->flags;
 786	srqe->wqe_size = wqe->num_sge +
 787			((offsetof(typeof(*srqe), data) + 15) >> 4);
 788	srqe->wr_id[0] = cpu_to_le32((u32)next);
 789	srq->swq[next].wr_id = wqe->wr_id;
 790
 791	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
 792
 793	spin_lock(&srq_hwq->lock);
 794	count = __bnxt_qplib_get_avail(srq_hwq);
 795	spin_unlock(&srq_hwq->lock);
 796	/* Ring DB */
 797	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
  798	if (srq->arm_req && count > srq->threshold) {
 799		srq->arm_req = false;
 800		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
 801	}
 802
 803	return 0;
 804}
 805
 806/* QP */
 807
 808static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
 809{
 810	int indx;
 811
 812	que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
 813	if (!que->swq)
 814		return -ENOMEM;
 815
 816	que->swq_start = 0;
 817	que->swq_last = que->max_wqe - 1;
 818	for (indx = 0; indx < que->max_wqe; indx++)
 819		que->swq[indx].next_idx = indx + 1;
 820	que->swq[que->swq_last].next_idx = 0; /* Make it circular */
 821	que->swq_last = 0;
 822
 823	return 0;
 824}
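/*
 * The swq array doubles as a free list: next_idx chains each software
 * WQE to its successor and the final entry points back to index 0, so
 * swq_start/swq_last can walk the ring without modulo arithmetic.
 */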
 825
 826int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 827{
 828	struct bnxt_qplib_hwq_attr hwq_attr = {};
 829	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 830	struct creq_create_qp1_resp resp = {};
 831	struct bnxt_qplib_cmdqmsg msg = {};
 832	struct bnxt_qplib_q *sq = &qp->sq;
 833	struct bnxt_qplib_q *rq = &qp->rq;
 834	struct cmdq_create_qp1 req = {};
 835	struct bnxt_qplib_pbl *pbl;
 836	u32 qp_flags = 0;
 837	u8 pg_sz_lvl;
 838	u32 tbl_indx;
 839	int rc;
 840
 841	sq->dbinfo.flags = 0;
 842	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 843				 CMDQ_BASE_OPCODE_CREATE_QP1,
 844				 sizeof(req));
 845	/* General */
 846	req.type = qp->type;
 847	req.dpi = cpu_to_le32(qp->dpi->dpi);
 848	req.qp_handle = cpu_to_le64(qp->qp_handle);
 849
 850	/* SQ */
 851	hwq_attr.res = res;
 852	hwq_attr.sginfo = &sq->sg_info;
 853	hwq_attr.stride = sizeof(struct sq_sge);
 854	hwq_attr.depth = bnxt_qplib_get_depth(sq);
 855	hwq_attr.type = HWQ_TYPE_QUEUE;
 856	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
 857	if (rc)
 858		return rc;
 859
 860	rc = bnxt_qplib_alloc_init_swq(sq);
 861	if (rc)
 862		goto fail_sq;
 863
 864	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
 865	pbl = &sq->hwq.pbl[PBL_LVL_0];
 866	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 867	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
 868		     CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT);
 869	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK);
 870	req.sq_pg_size_sq_lvl = pg_sz_lvl;
 871	req.sq_fwo_sq_sge =
 872		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
 873			     CMDQ_CREATE_QP1_SQ_SGE_SFT);
 874	req.scq_cid = cpu_to_le32(qp->scq->id);
 875
 876	/* RQ */
 877	if (rq->max_wqe) {
 878		rq->dbinfo.flags = 0;
 879		hwq_attr.res = res;
 880		hwq_attr.sginfo = &rq->sg_info;
 881		hwq_attr.stride = sizeof(struct sq_sge);
 882		hwq_attr.depth = bnxt_qplib_get_depth(rq);
 883		hwq_attr.type = HWQ_TYPE_QUEUE;
 884		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
 885		if (rc)
 886			goto sq_swq;
 887		rc = bnxt_qplib_alloc_init_swq(rq);
 888		if (rc)
 889			goto fail_rq;
 890		req.rq_size = cpu_to_le32(rq->max_wqe);
 891		pbl = &rq->hwq.pbl[PBL_LVL_0];
 892		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 893		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
 894			     CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT);
 895		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK);
 896		req.rq_pg_size_rq_lvl = pg_sz_lvl;
 897		req.rq_fwo_rq_sge =
 898			cpu_to_le16((rq->max_sge &
 899				     CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
 900				    CMDQ_CREATE_QP1_RQ_SGE_SFT);
 901	}
 902	req.rcq_cid = cpu_to_le32(qp->rcq->id);
  903	/* Header buffer - allow hdr_buf to be passed in */
 904	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
 905	if (rc) {
 906		rc = -ENOMEM;
 907		goto rq_rwq;
 908	}
 909	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
 910	req.qp_flags = cpu_to_le32(qp_flags);
 911	req.pd_id = cpu_to_le32(qp->pd->id);
 912
 913	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 914	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 915	if (rc)
 916		goto fail;
 917
 918	qp->id = le32_to_cpu(resp.xid);
 919	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 920	qp->cctx = res->cctx;
 921	sq->dbinfo.hwq = &sq->hwq;
 922	sq->dbinfo.xid = qp->id;
 923	sq->dbinfo.db = qp->dpi->dbr;
 924	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
 925	if (rq->max_wqe) {
 926		rq->dbinfo.hwq = &rq->hwq;
 927		rq->dbinfo.xid = qp->id;
 928		rq->dbinfo.db = qp->dpi->dbr;
 929		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
 930	}
 931	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
 932	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
 933	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
 934
 935	return 0;
 936
 937fail:
 938	bnxt_qplib_free_qp_hdr_buf(res, qp);
 939rq_rwq:
 940	kfree(rq->swq);
 941fail_rq:
 942	bnxt_qplib_free_hwq(res, &rq->hwq);
 943sq_swq:
 944	kfree(sq->swq);
 945fail_sq:
 946	bnxt_qplib_free_hwq(res, &sq->hwq);
 947	return rc;
 948}
 949
 950static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
 951{
 952	struct bnxt_qplib_hwq *hwq;
 953	struct bnxt_qplib_q *sq;
 954	u64 fpsne, psn_pg;
 955	u16 indx_pad = 0;
 956
 957	sq = &qp->sq;
 958	hwq = &sq->hwq;
 959	/* First psn entry */
 960	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
 961	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
 962		indx_pad = (fpsne & ~PAGE_MASK) / size;
 963	hwq->pad_pgofft = indx_pad;
 964	hwq->pad_pg = (u64 *)psn_pg;
 965	hwq->pad_stride = size;
 966}
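/*
 * The PSN/MSN search entries live in the same hardware queue as the SQ,
 * starting at element hwq->depth (one past the WQE space).  When that
 * first entry is not page aligned, pad_pgofft records how many
 * "size"-byte entries into the page it starts, so that
 * bnxt_qplib_pull_psn_buff() below can index the pad pages directly.
 */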
 967
 968int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 969{
 970	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 971	struct bnxt_qplib_hwq_attr hwq_attr = {};
 972	struct bnxt_qplib_sg_info sginfo = {};
 973	struct creq_create_qp_resp resp = {};
 974	struct bnxt_qplib_cmdqmsg msg = {};
 975	struct bnxt_qplib_q *sq = &qp->sq;
 976	struct bnxt_qplib_q *rq = &qp->rq;
 977	struct cmdq_create_qp req = {};
 978	int rc, req_size, psn_sz = 0;
 979	struct bnxt_qplib_hwq *xrrq;
 980	struct bnxt_qplib_pbl *pbl;
 981	u32 qp_flags = 0;
 982	u8 pg_sz_lvl;
 983	u32 tbl_indx;
 984	u16 nsge;
 985
 986	if (res->dattr)
 987		qp->dev_cap_flags = res->dattr->dev_cap_flags;
 988
 989	sq->dbinfo.flags = 0;
 990	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 991				 CMDQ_BASE_OPCODE_CREATE_QP,
 992				 sizeof(req));
 993
 994	/* General */
 995	req.type = qp->type;
 996	req.dpi = cpu_to_le32(qp->dpi->dpi);
 997	req.qp_handle = cpu_to_le64(qp->qp_handle);
 998
 999	/* SQ */
1000	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1001		psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
1002			 sizeof(struct sq_psn_search_ext) :
1003			 sizeof(struct sq_psn_search);
1004
1005		if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
1006			psn_sz = sizeof(struct sq_msn_search);
1007			qp->msn = 0;
1008		}
1009	}
1010
1011	hwq_attr.res = res;
1012	hwq_attr.sginfo = &sq->sg_info;
1013	hwq_attr.stride = sizeof(struct sq_sge);
1014	hwq_attr.depth = bnxt_qplib_get_depth(sq);
1015	hwq_attr.aux_stride = psn_sz;
1016	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
1017	/* Update msn tbl size */
1018	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
1019		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1020		qp->msn_tbl_sz = hwq_attr.aux_depth;
1021		qp->msn = 0;
1022	}
1023
1024	hwq_attr.type = HWQ_TYPE_QUEUE;
1025	rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
1026	if (rc)
1027		return rc;
1028
1029	rc = bnxt_qplib_alloc_init_swq(sq);
1030	if (rc)
1031		goto fail_sq;
1032
1033	if (psn_sz)
1034		bnxt_qplib_init_psn_ptr(qp, psn_sz);
1035
1036	req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1037	pbl = &sq->hwq.pbl[PBL_LVL_0];
1038	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1039	pg_sz_lvl = (bnxt_qplib_base_pg_size(&sq->hwq) <<
1040		     CMDQ_CREATE_QP_SQ_PG_SIZE_SFT);
1041	pg_sz_lvl |= (sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK);
1042	req.sq_pg_size_sq_lvl = pg_sz_lvl;
1043	req.sq_fwo_sq_sge =
1044		cpu_to_le16(((sq->max_sge & CMDQ_CREATE_QP_SQ_SGE_MASK) <<
1045			     CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1046	req.scq_cid = cpu_to_le32(qp->scq->id);
1047
1048	/* RQ */
1049	if (!qp->srq) {
1050		rq->dbinfo.flags = 0;
1051		hwq_attr.res = res;
1052		hwq_attr.sginfo = &rq->sg_info;
1053		hwq_attr.stride = sizeof(struct sq_sge);
1054		hwq_attr.depth = bnxt_qplib_get_depth(rq);
1055		hwq_attr.aux_stride = 0;
1056		hwq_attr.aux_depth = 0;
1057		hwq_attr.type = HWQ_TYPE_QUEUE;
1058		rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
1059		if (rc)
1060			goto sq_swq;
1061		rc = bnxt_qplib_alloc_init_swq(rq);
1062		if (rc)
1063			goto fail_rq;
1064
1065		req.rq_size = cpu_to_le32(rq->max_wqe);
1066		pbl = &rq->hwq.pbl[PBL_LVL_0];
1067		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1068		pg_sz_lvl = (bnxt_qplib_base_pg_size(&rq->hwq) <<
1069			     CMDQ_CREATE_QP_RQ_PG_SIZE_SFT);
1070		pg_sz_lvl |= (rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK);
1071		req.rq_pg_size_rq_lvl = pg_sz_lvl;
1072		nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1073			6 : rq->max_sge;
1074		req.rq_fwo_rq_sge =
1075			cpu_to_le16(((nsge &
1076				      CMDQ_CREATE_QP_RQ_SGE_MASK) <<
1077				     CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1078	} else {
1079		/* SRQ */
1080		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
1081		req.srq_cid = cpu_to_le32(qp->srq->id);
1082	}
1083	req.rcq_cid = cpu_to_le32(qp->rcq->id);
1084
1085	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
1086	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
1087	if (qp->sig_type)
1088		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
1089	if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
1090		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
1091	if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
1092		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
1093
1094	req.qp_flags = cpu_to_le32(qp_flags);
1095
1096	/* ORRQ and IRRQ */
1097	if (psn_sz) {
1098		xrrq = &qp->orrq;
1099		xrrq->max_elements =
1100			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1101		req_size = xrrq->max_elements *
1102			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1103		req_size &= ~(PAGE_SIZE - 1);
1104		sginfo.pgsize = req_size;
1105		sginfo.pgshft = PAGE_SHIFT;
1106
1107		hwq_attr.res = res;
1108		hwq_attr.sginfo = &sginfo;
1109		hwq_attr.depth = xrrq->max_elements;
1110		hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
1111		hwq_attr.aux_stride = 0;
1112		hwq_attr.aux_depth = 0;
1113		hwq_attr.type = HWQ_TYPE_CTX;
1114		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1115		if (rc)
1116			goto rq_swq;
1117		pbl = &xrrq->pbl[PBL_LVL_0];
1118		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1119
1120		xrrq = &qp->irrq;
1121		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1122						qp->max_dest_rd_atomic);
1123		req_size = xrrq->max_elements *
1124			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1125		req_size &= ~(PAGE_SIZE - 1);
1126		sginfo.pgsize = req_size;
1127		hwq_attr.depth =  xrrq->max_elements;
1128		hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
1129		rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
1130		if (rc)
1131			goto fail_orrq;
1132
1133		pbl = &xrrq->pbl[PBL_LVL_0];
1134		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1135	}
1136	req.pd_id = cpu_to_le32(qp->pd->id);
1137
1138	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1139				sizeof(resp), 0);
1140	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1141	if (rc)
1142		goto fail;
1143
1144	qp->id = le32_to_cpu(resp.xid);
1145	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1146	INIT_LIST_HEAD(&qp->sq_flush);
1147	INIT_LIST_HEAD(&qp->rq_flush);
1148	qp->cctx = res->cctx;
1149	sq->dbinfo.hwq = &sq->hwq;
1150	sq->dbinfo.xid = qp->id;
1151	sq->dbinfo.db = qp->dpi->dbr;
1152	sq->dbinfo.max_slot = bnxt_qplib_set_sq_max_slot(qp->wqe_mode);
1153	if (rq->max_wqe) {
1154		rq->dbinfo.hwq = &rq->hwq;
1155		rq->dbinfo.xid = qp->id;
1156		rq->dbinfo.db = qp->dpi->dbr;
1157		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
1158	}
1159	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1160	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1161	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1162
1163	return 0;
1164fail:
1165	bnxt_qplib_free_hwq(res, &qp->irrq);
1166fail_orrq:
1167	bnxt_qplib_free_hwq(res, &qp->orrq);
1168rq_swq:
1169	kfree(rq->swq);
1170fail_rq:
1171	bnxt_qplib_free_hwq(res, &rq->hwq);
1172sq_swq:
1173	kfree(sq->swq);
1174fail_sq:
1175	bnxt_qplib_free_hwq(res, &sq->hwq);
1176	return rc;
1177}
1178
1179static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1180{
1181	switch (qp->state) {
1182	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1183		/* INIT->RTR, configure the path_mtu to the default
1184		 * 2048 if not being requested
1185		 */
1186		if (!(qp->modify_flags &
1187		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1188			qp->modify_flags |=
1189				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1190			qp->path_mtu =
1191				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1192		}
1193		qp->modify_flags &=
1194			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
 1195		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1196		if (qp->max_dest_rd_atomic < 1)
1197			qp->max_dest_rd_atomic = 1;
1198		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1199		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1200		if (!(qp->modify_flags &
1201		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1202			qp->modify_flags |=
1203				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1204			qp->ah.sgid_index = 0;
1205		}
1206		break;
1207	default:
1208		break;
1209	}
1210}
1211
1212static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1213{
1214	switch (qp->state) {
1215	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1216		/* Bono FW requires the max_rd_atomic to be >= 1 */
1217		if (qp->max_rd_atomic < 1)
1218			qp->max_rd_atomic = 1;
1219		/* Bono FW does not allow PKEY_INDEX,
1220		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1221		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1222		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1223		 * modification
1224		 */
1225		qp->modify_flags &=
1226			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1227			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1228			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1229			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1230			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1231			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1232			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1233			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1234			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1235			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1236			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1237			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1238		break;
1239	default:
1240		break;
1241	}
1242}
1243
1244static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1245{
1246	switch (qp->cur_qp_state) {
1247	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1248		break;
1249	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1250		__modify_flags_from_init_state(qp);
1251		break;
1252	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1253		__modify_flags_from_rtr_state(qp);
1254		break;
1255	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1256		break;
1257	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1258		break;
1259	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1260		break;
1261	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1262		break;
1263	default:
1264		break;
1265	}
1266}
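/*
 * Only the INIT->RTR and RTR->RTS transitions above need their modify
 * masks massaged for firmware quirks; from every other current state the
 * caller's qp->modify_flags pass through unchanged.
 */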
1267
1268int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1269{
1270	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1271	struct creq_modify_qp_resp resp = {};
1272	struct bnxt_qplib_cmdqmsg msg = {};
1273	struct cmdq_modify_qp req = {};
1274	u32 temp32[4];
1275	u32 bmask;
1276	int rc;
1277
1278	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1279				 CMDQ_BASE_OPCODE_MODIFY_QP,
1280				 sizeof(req));
1281
1282	/* Filter out the qp_attr_mask based on the state->new transition */
1283	__filter_modify_flags(qp);
1284	bmask = qp->modify_flags;
1285	req.modify_mask = cpu_to_le32(qp->modify_flags);
1286	req.qp_cid = cpu_to_le32(qp->id);
1287	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1288		req.network_type_en_sqd_async_notify_new_state =
1289				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1290				(qp->en_sqd_async_notify ?
1291					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1292	}
1293	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1294
1295	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1296		req.access = qp->access;
1297
1298	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
1299		req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
1300
1301	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1302		req.qkey = cpu_to_le32(qp->qkey);
1303
1304	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1305		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1306		req.dgid[0] = cpu_to_le32(temp32[0]);
1307		req.dgid[1] = cpu_to_le32(temp32[1]);
1308		req.dgid[2] = cpu_to_le32(temp32[2]);
1309		req.dgid[3] = cpu_to_le32(temp32[3]);
1310	}
1311	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1312		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1313
1314	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1315		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1316					     [qp->ah.sgid_index]);
1317
1318	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1319		req.hop_limit = qp->ah.hop_limit;
1320
1321	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1322		req.traffic_class = qp->ah.traffic_class;
1323
1324	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1325		memcpy(req.dest_mac, qp->ah.dmac, 6);
1326
1327	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1328		req.path_mtu_pingpong_push_enable |= qp->path_mtu;
1329
1330	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1331		req.timeout = qp->timeout;
1332
1333	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1334		req.retry_cnt = qp->retry_cnt;
1335
1336	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1337		req.rnr_retry = qp->rnr_retry;
1338
1339	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1340		req.min_rnr_timer = qp->min_rnr_timer;
1341
1342	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1343		req.rq_psn = cpu_to_le32(qp->rq.psn);
1344
1345	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1346		req.sq_psn = cpu_to_le32(qp->sq.psn);
1347
1348	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1349		req.max_rd_atomic =
1350			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1351
1352	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1353		req.max_dest_rd_atomic =
1354			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1355
1356	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1357	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1358	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1359	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1360	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1361	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1362		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1363
1364	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1365
1366	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),  sizeof(resp), 0);
1367	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1368	if (rc)
1369		return rc;
1370	qp->cur_qp_state = qp->state;
1371	return 0;
1372}
1373
1374int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1375{
1376	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1377	struct creq_query_qp_resp resp = {};
1378	struct bnxt_qplib_cmdqmsg msg = {};
1379	struct bnxt_qplib_rcfw_sbuf sbuf;
1380	struct creq_query_qp_resp_sb *sb;
1381	struct cmdq_query_qp req = {};
1382	u32 temp32[4];
1383	int i, rc;
1384
1385	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
1386	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
1387				     &sbuf.dma_addr, GFP_KERNEL);
1388	if (!sbuf.sb)
1389		return -ENOMEM;
1390	sb = sbuf.sb;
1391
1392	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1393				 CMDQ_BASE_OPCODE_QUERY_QP,
1394				 sizeof(req));
1395
1396	req.qp_cid = cpu_to_le32(qp->id);
1397	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
1398	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
1399				sizeof(resp), 0);
1400	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1401	if (rc)
1402		goto bail;
1403	/* Extract the context from the side buffer */
1404	qp->state = sb->en_sqd_async_notify_state &
1405			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1406	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1407				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
1408	qp->access = sb->access;
1409	qp->pkey_index = le16_to_cpu(sb->pkey);
1410	qp->qkey = le32_to_cpu(sb->qkey);
1411
1412	temp32[0] = le32_to_cpu(sb->dgid[0]);
1413	temp32[1] = le32_to_cpu(sb->dgid[1]);
1414	temp32[2] = le32_to_cpu(sb->dgid[2]);
1415	temp32[3] = le32_to_cpu(sb->dgid[3]);
1416	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1417
1418	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1419
1420	qp->ah.sgid_index = 0;
1421	for (i = 0; i < res->sgid_tbl.max; i++) {
1422		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1423			qp->ah.sgid_index = i;
1424			break;
1425		}
1426	}
1427	if (i == res->sgid_tbl.max)
1428		dev_warn(&res->pdev->dev, "SGID not found??\n");
1429
1430	qp->ah.hop_limit = sb->hop_limit;
1431	qp->ah.traffic_class = sb->traffic_class;
1432	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1433	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1434				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1435				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1436	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1437				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1438				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1439	qp->timeout = sb->timeout;
1440	qp->retry_cnt = sb->retry_cnt;
1441	qp->rnr_retry = sb->rnr_retry;
1442	qp->min_rnr_timer = sb->min_rnr_timer;
1443	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1444	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1445	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1446	qp->max_dest_rd_atomic =
1447			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1448	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1449	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1450	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1451	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1452	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1453	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1454	memcpy(qp->smac, sb->src_mac, 6);
1455	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1456bail:
1457	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
1458			  sbuf.sb, sbuf.dma_addr);
1459	return rc;
1460}
1461
1462static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1463{
1464	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1465	u32 peek_flags, peek_cons;
1466	struct cq_base *hw_cqe;
1467	int i;
1468
1469	peek_flags = cq->dbinfo.flags;
1470	peek_cons = cq_hwq->cons;
1471	for (i = 0; i < cq_hwq->max_elements; i++) {
1472		hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
1473		if (!CQE_CMP_VALID(hw_cqe, peek_flags))
1474			continue;
1475		/*
1476		 * The valid test of the entry must be done first before
1477		 * reading any further.
1478		 */
1479		dma_rmb();
1480		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1481		case CQ_BASE_CQE_TYPE_REQ:
1482		case CQ_BASE_CQE_TYPE_TERMINAL:
1483		{
1484			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1485
1486			if (qp == le64_to_cpu(cqe->qp_handle))
1487				cqe->qp_handle = 0;
1488			break;
1489		}
1490		case CQ_BASE_CQE_TYPE_RES_RC:
1491		case CQ_BASE_CQE_TYPE_RES_UD:
1492		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1493		{
1494			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1495
1496			if (qp == le64_to_cpu(cqe->qp_handle))
1497				cqe->qp_handle = 0;
1498			break;
1499		}
1500		default:
1501			break;
1502		}
1503		bnxt_qplib_hwq_incr_cons(cq_hwq->max_elements, &peek_cons,
1504					 1, &peek_flags);
1505	}
1506}
1507
1508int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1509			  struct bnxt_qplib_qp *qp)
1510{
1511	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1512	struct creq_destroy_qp_resp resp = {};
1513	struct bnxt_qplib_cmdqmsg msg = {};
1514	struct cmdq_destroy_qp req = {};
1515	u32 tbl_indx;
1516	int rc;
1517
1518	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
1519	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1520	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
1521
1522	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
1523				 CMDQ_BASE_OPCODE_DESTROY_QP,
1524				 sizeof(req));
1525
1526	req.qp_cid = cpu_to_le32(qp->id);
1527	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1528				sizeof(resp), 0);
1529	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1530	if (rc) {
1531		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
1532		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
1533		return rc;
1534	}
1535
1536	return 0;
1537}
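/*
 * Note that the qp_tbl entry is invalidated before the DESTROY_QP command
 * is issued and restored if the command fails, so the QP remains
 * trackable on error.  On success, the remaining host resources are
 * expected to be released separately via bnxt_qplib_free_qp_res().
 */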
1538
1539void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1540			    struct bnxt_qplib_qp *qp)
1541{
1542	bnxt_qplib_free_qp_hdr_buf(res, qp);
1543	bnxt_qplib_free_hwq(res, &qp->sq.hwq);
1544	kfree(qp->sq.swq);
1545
1546	bnxt_qplib_free_hwq(res, &qp->rq.hwq);
1547	kfree(qp->rq.swq);
1548
1549	if (qp->irrq.max_elements)
1550		bnxt_qplib_free_hwq(res, &qp->irrq);
1551	if (qp->orrq.max_elements)
1552		bnxt_qplib_free_hwq(res, &qp->orrq);
1553
1554}
1555
1556void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1557				struct bnxt_qplib_sge *sge)
1558{
1559	struct bnxt_qplib_q *sq = &qp->sq;
1560	u32 sw_prod;
1561
1562	memset(sge, 0, sizeof(*sge));
1563
1564	if (qp->sq_hdr_buf) {
1565		sw_prod = sq->swq_start;
1566		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1567					 sw_prod * qp->sq_hdr_buf_size);
1568		sge->lkey = 0xFFFFFFFF;
1569		sge->size = qp->sq_hdr_buf_size;
1570		return qp->sq_hdr_buf + sw_prod * sge->size;
1571	}
1572	return NULL;
1573}
1574
1575u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1576{
1577	struct bnxt_qplib_q *rq = &qp->rq;
1578
1579	return rq->swq_start;
1580}
1581
1582dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1583{
1584	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1585}
1586
1587void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1588				struct bnxt_qplib_sge *sge)
1589{
1590	struct bnxt_qplib_q *rq = &qp->rq;
1591	u32 sw_prod;
1592
1593	memset(sge, 0, sizeof(*sge));
1594
1595	if (qp->rq_hdr_buf) {
1596		sw_prod = rq->swq_start;
1597		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1598					 sw_prod * qp->rq_hdr_buf_size);
1599		sge->lkey = 0xFFFFFFFF;
1600		sge->size = qp->rq_hdr_buf_size;
1601		return qp->rq_hdr_buf + sw_prod * sge->size;
1602	}
1603	return NULL;
1604}
1605
 1606/* Fill the MSN table entry in the next PSN row */
1607static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
1608				       struct bnxt_qplib_swqe *wqe,
1609				       struct bnxt_qplib_swq *swq)
1610{
1611	struct sq_msn_search *msns;
1612	u32 start_psn, next_psn;
1613	u16 start_idx;
1614
1615	msns = (struct sq_msn_search *)swq->psn_search;
1616	msns->start_idx_next_psn_start_psn = 0;
1617
1618	start_psn = swq->start_psn;
1619	next_psn = swq->next_psn;
1620	start_idx = swq->slot_idx;
1621	msns->start_idx_next_psn_start_psn |=
1622		bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
1623	qp->msn++;
1624	qp->msn %= qp->msn_tbl_sz;
1625}
1626
1627static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
1628				       struct bnxt_qplib_swqe *wqe,
1629				       struct bnxt_qplib_swq *swq)
1630{
1631	struct sq_psn_search_ext *psns_ext;
1632	struct sq_psn_search *psns;
1633	u32 flg_npsn;
1634	u32 op_spsn;
1635
1636	if (!swq->psn_search)
1637		return;
 1638	/* Handle the MSN table differently based on capability flags */
1639	if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
1640		bnxt_qplib_fill_msn_search(qp, wqe, swq);
1641		return;
1642	}
 1643	psns = swq->psn_search;
1645	psns_ext = swq->psn_ext;
1646
1647	op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1648		    SQ_PSN_SEARCH_START_PSN_MASK);
1649	op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1650		     SQ_PSN_SEARCH_OPCODE_MASK);
1651	flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1652		     SQ_PSN_SEARCH_NEXT_PSN_MASK);
1653
1654	if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
1655		psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
1656		psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
1657		psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
1658	} else {
1659		psns->opcode_start_psn = cpu_to_le32(op_spsn);
1660		psns->flags_next_psn = cpu_to_le32(flg_npsn);
1661	}
1662}
1663
1664static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
1665				 struct bnxt_qplib_swqe *wqe,
1666				 u16 *idx)
1667{
1668	struct bnxt_qplib_hwq *hwq;
1669	int len, t_len, offt;
1670	bool pull_dst = true;
1671	void *il_dst = NULL;
1672	void *il_src = NULL;
1673	int t_cplen, cplen;
1674	int indx;
1675
1676	hwq = &qp->sq.hwq;
1677	t_len = 0;
1678	for (indx = 0; indx < wqe->num_sge; indx++) {
1679		len = wqe->sg_list[indx].size;
1680		il_src = (void *)wqe->sg_list[indx].addr;
1681		t_len += len;
1682		if (t_len > qp->max_inline_data)
1683			return -ENOMEM;
1684		while (len) {
1685			if (pull_dst) {
1686				pull_dst = false;
1687				il_dst = bnxt_qplib_get_prod_qe(hwq, *idx);
1688				(*idx)++;
1689				t_cplen = 0;
1690				offt = 0;
1691			}
1692			cplen = min_t(int, len, sizeof(struct sq_sge));
1693			cplen = min_t(int, cplen,
1694					(sizeof(struct sq_sge) - offt));
1695			memcpy(il_dst, il_src, cplen);
1696			t_cplen += cplen;
1697			il_src += cplen;
1698			il_dst += cplen;
1699			offt += cplen;
1700			len -= cplen;
1701			if (t_cplen == sizeof(struct sq_sge))
1702				pull_dst = true;
1703		}
1704	}
1705
1706	return t_len;
1707}
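/*
 * Inline payloads are packed into the same 16-byte slots
 * (sizeof(struct sq_sge)) that would otherwise hold scatter/gather
 * entries: a fresh slot is pulled whenever the previous one fills, and
 * the return value is the total byte count copied, or -ENOMEM once the
 * running total exceeds qp->max_inline_data.
 */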
1708
1709static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
1710			       struct bnxt_qplib_sge *ssge,
1711			       u16 nsge, u16 *idx)
1712{
1713	struct sq_sge *dsge;
1714	int indx, len = 0;
1715
1716	for (indx = 0; indx < nsge; indx++, (*idx)++) {
1717		dsge = bnxt_qplib_get_prod_qe(hwq, *idx);
1718		dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
1719		dsge->l_key = cpu_to_le32(ssge[indx].lkey);
1720		dsge->size = cpu_to_le32(ssge[indx].size);
1721		len += ssge[indx].size;
1722	}
1723
1724	return len;
1725}
1726
1727static u16 bnxt_qplib_required_slots(struct bnxt_qplib_qp *qp,
1728				     struct bnxt_qplib_swqe *wqe,
1729				     u16 *wqe_sz, u16 *qdf, u8 mode)
1730{
1731	u32 ilsize, bytes;
1732	u16 nsge;
1733	u16 slot;
1734
1735	nsge = wqe->num_sge;
 1736	/* Using sq_send_hdr here is a slight misnomer; the RQ header size is the same. */
1737	bytes = sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
1738	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1739		ilsize = bnxt_qplib_calc_ilsize(wqe, qp->max_inline_data);
1740		bytes = ALIGN(ilsize, sizeof(struct sq_sge));
1741		bytes += sizeof(struct sq_send_hdr);
1742	}
1743
1744	*qdf =  __xlate_qfd(qp->sq.q_full_delta, bytes);
1745	slot = bytes >> 4;
1746	*wqe_sz = slot;
1747	if (mode == BNXT_QPLIB_WQE_MODE_STATIC)
1748		slot = 8;
1749	return slot;
1750}
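/*
 * Worked example (slot size is sizeof(struct sq_sge), i.e. 16 bytes, and
 * assuming the 32-byte struct sq_send_hdr): a send WQE with two SGEs
 * needs 32 + 2 * 16 = 64 bytes, so *wqe_sz is 4 slots.  In static WQE
 * mode the queue is still charged a fixed 8 slots (a 128-byte stride)
 * regardless of the actual WQE size.
 */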
1751
1752static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
1753				     struct bnxt_qplib_swq *swq, bool hw_retx)
1754{
1755	struct bnxt_qplib_hwq *hwq;
1756	u32 pg_num, pg_indx;
1757	void *buff;
1758	u32 tail;
1759
1760	hwq = &sq->hwq;
1761	if (!hwq->pad_pg)
1762		return;
1763	tail = swq->slot_idx / sq->dbinfo.max_slot;
1764	if (hw_retx) {
1765		/* For HW retx use qp msn index */
1766		tail = qp->msn;
1767		tail %= qp->msn_tbl_sz;
1768	}
1769	pg_num = (tail + hwq->pad_pgofft) / (PAGE_SIZE / hwq->pad_stride);
1770	pg_indx = (tail + hwq->pad_pgofft) % (PAGE_SIZE / hwq->pad_stride);
1771	buff = (void *)(hwq->pad_pg[pg_num] + pg_indx * hwq->pad_stride);
1772	swq->psn_ext = buff;
1773	swq->psn_search = buff;
1774}
1775
1776void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1777{
1778	struct bnxt_qplib_q *sq = &qp->sq;
1779
1780	bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
1781}
1782
1783int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1784			 struct bnxt_qplib_swqe *wqe)
1785{
1786	struct bnxt_qplib_nq_work *nq_work = NULL;
1787	int i, rc = 0, data_len = 0, pkt_num = 0;
1788	struct bnxt_qplib_q *sq = &qp->sq;
1789	struct bnxt_qplib_hwq *hwq;
1790	struct bnxt_qplib_swq *swq;
1791	bool sch_handler = false;
1792	u16 wqe_sz, qdf = 0;
1793	bool msn_update;
1794	void *base_hdr;
1795	void *ext_hdr;
1796	__le32 temp32;
1797	u32 wqe_idx;
1798	u32 slots;
1799	u16 idx;
1800
1801	hwq = &sq->hwq;
1802	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
1803	    qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1804		dev_err(&hwq->pdev->dev,
1805			"FP: QP (0x%x) is in the 0x%x state\n",
1806			qp->id, qp->state);
1807		rc = -EINVAL;
1808		goto done;
1809	}
1810
1811	slots = bnxt_qplib_required_slots(qp, wqe, &wqe_sz, &qdf, qp->wqe_mode);
1812	if (bnxt_qplib_queue_full(sq, slots + qdf)) {
1813		dev_err(&hwq->pdev->dev,
1814			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1815			hwq->prod, hwq->cons, hwq->depth, sq->q_full_delta);
1816		rc = -ENOMEM;
1817		goto done;
1818	}
1819
1820	swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
1821	bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
1822
1823	idx = 0;
1824	swq->slot_idx = hwq->prod;
1825	swq->slots = slots;
1826	swq->wr_id = wqe->wr_id;
1827	swq->type = wqe->type;
1828	swq->flags = wqe->flags;
1829	swq->start_psn = sq->psn & BTH_PSN_MASK;
1830	if (qp->sig_type)
1831		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1832
1833	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1834		sch_handler = true;
1835		dev_dbg(&hwq->pdev->dev,
1836			"%s: Error QP. Scheduling for poll_cq\n", __func__);
1837		goto queue_err;
1838	}
1839
1840	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1841	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
1842	memset(base_hdr, 0, sizeof(struct sq_sge));
1843	memset(ext_hdr, 0, sizeof(struct sq_sge));
1844
1845	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
1846		/* Copy the inline data */
1847		data_len = bnxt_qplib_put_inline(qp, wqe, &idx);
1848	else
1849		data_len = bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge,
1850					       &idx);
1851	if (data_len < 0)
1852		goto queue_err;
1853	/* Make sure we update the MSN table only for WQEs that go on the wire */
1854	msn_update = true;
1855	/* Specifics */
1856	switch (wqe->type) {
1857	case BNXT_QPLIB_SWQE_TYPE_SEND:
1858		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1859			struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
1860			struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
1861			/* Assemble info for Raw Ethertype QPs */
1862
1863			sqe->wqe_type = wqe->type;
1864			sqe->flags = wqe->flags;
1865			sqe->wqe_size = wqe_sz;
1866			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1867			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1868			sqe->length = cpu_to_le32(data_len);
1869			ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1870				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1871				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1872
1873			break;
1874		}
1875		fallthrough;
1876	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1877	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1878	{
1879		struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
1880		struct sq_send_hdr *sqe = base_hdr;
1881
1882		sqe->wqe_type = wqe->type;
1883		sqe->flags = wqe->flags;
1884		sqe->wqe_size = wqe_sz;
1885		sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
1886		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1887		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1888			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1889			sqe->length = cpu_to_le32(data_len);
1890			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1891			ext_sqe->dst_qp = cpu_to_le32(wqe->send.dst_qp &
1892						      SQ_SEND_DST_QP_MASK);
1893			ext_sqe->avid = cpu_to_le32(wqe->send.avid &
1894						    SQ_SEND_AVID_MASK);
1895			msn_update = false;
1896		} else {
1897			sqe->length = cpu_to_le32(data_len);
1898			if (qp->mtu)
1899				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1900			if (!pkt_num)
1901				pkt_num = 1;
1902			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1903		}
1904		break;
1905	}
1906	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1907	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1908	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1909	{
1910		struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
1911		struct sq_rdma_hdr *sqe = base_hdr;
1912
1913		sqe->wqe_type = wqe->type;
1914		sqe->flags = wqe->flags;
1915		sqe->wqe_size = wqe_sz;
1916		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1917		sqe->length = cpu_to_le32((u32)data_len);
1918		ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1919		ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1920		if (qp->mtu)
1921			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1922		if (!pkt_num)
1923			pkt_num = 1;
1924		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1925		break;
1926	}
1927	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1928	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1929	{
1930		struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
1931		struct sq_atomic_hdr *sqe = base_hdr;
1932
1933		sqe->wqe_type = wqe->type;
1934		sqe->flags = wqe->flags;
1935		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1936		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1937		ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1938		ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1939		if (qp->mtu)
1940			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1941		if (!pkt_num)
1942			pkt_num = 1;
1943		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1944		break;
1945	}
1946	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1947	{
1948		struct sq_localinvalidate *sqe = base_hdr;
1949
1950		sqe->wqe_type = wqe->type;
1951		sqe->flags = wqe->flags;
1952		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1953		msn_update = false;
1954		break;
1955	}
1956	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1957	{
1958		struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
1959		struct sq_fr_pmr_hdr *sqe = base_hdr;
1960
1961		sqe->wqe_type = wqe->type;
1962		sqe->flags = wqe->flags;
1963		sqe->access_cntl = wqe->frmr.access_cntl |
1964				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1965		sqe->zero_based_page_size_log =
1966			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1967			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1968			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1969		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1970		temp32 = cpu_to_le32(wqe->frmr.length);
1971		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1972		sqe->numlevels_pbl_page_size_log =
1973			((wqe->frmr.pbl_pg_sz_log <<
1974					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1975					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1976			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1977					SQ_FR_PMR_NUMLEVELS_MASK);
1978
1979		for (i = 0; i < wqe->frmr.page_list_len; i++)
1980			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1981						wqe->frmr.page_list[i] |
1982						PTU_PTE_VALID);
1983		ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1984		ext_sqe->va = cpu_to_le64(wqe->frmr.va);
1985		msn_update = false;
1986
1987		break;
1988	}
1989	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1990	{
1991		struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
1992		struct sq_bind_hdr *sqe = base_hdr;
1993
1994		sqe->wqe_type = wqe->type;
1995		sqe->flags = wqe->flags;
1996		sqe->access_cntl = wqe->bind.access_cntl;
1997		sqe->mw_type_zero_based = wqe->bind.mw_type |
1998			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1999		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
2000		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
2001		ext_sqe->va = cpu_to_le64(wqe->bind.va);
2002		ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
2003		msn_update = false;
2004		break;
2005	}
2006	default:
2007		/* Bad wqe, return error */
2008		rc = -EINVAL;
2009		goto done;
2010	}
2011	if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
2012		swq->next_psn = sq->psn & BTH_PSN_MASK;
2013		bnxt_qplib_fill_psn_search(qp, wqe, swq);
2014	}
2015queue_err:
2016	bnxt_qplib_swq_mod_start(sq, wqe_idx);
2017	bnxt_qplib_hwq_incr_prod(&sq->dbinfo, hwq, swq->slots);
2018	qp->wqe_cnt++;
2019done:
2020	if (sch_handler) {
2021		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2022		if (nq_work) {
2023			nq_work->cq = qp->scq;
2024			nq_work->nq = qp->scq->nq;
2025			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2026			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
2027		} else {
2028			dev_err(&hwq->pdev->dev,
2029				"FP: Failed to allocate SQ nq_work!\n");
2030			rc = -ENOMEM;
2031		}
2032	}
2033	return rc;
2034}
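/*
 * Hedged usage sketch (names and locking are the caller's, as in the
 * bnxt_re verbs layer, which also batches the doorbell over a chain of
 * work requests rather than ringing per WQE as shown here): a staged
 * WQE only becomes visible to hardware once the SQ producer doorbell
 * is rung.
 */
static int example_post_one_send(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *swqe)
{
	int rc;

	rc = bnxt_qplib_post_send(qp, swqe);	/* stage the WQE in the SQ */
	if (rc)
		return rc;	/* e.g. -ENOMEM when the SQ is full */

	bnxt_qplib_post_send_db(qp);	/* ring the SQ producer doorbell */
	return 0;
}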
2035
2036void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
2037{
2038	struct bnxt_qplib_q *rq = &qp->rq;
2039
2040	bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
2041}
2042
2043int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
2044			 struct bnxt_qplib_swqe *wqe)
2045{
2046	struct bnxt_qplib_nq_work *nq_work = NULL;
2047	struct bnxt_qplib_q *rq = &qp->rq;
2048	struct rq_wqe_hdr *base_hdr;
2049	struct rq_ext_hdr *ext_hdr;
2050	struct bnxt_qplib_hwq *hwq;
2051	struct bnxt_qplib_swq *swq;
2052	bool sch_handler = false;
2053	u16 wqe_sz, idx;
2054	u32 wqe_idx;
2055	int rc = 0;
2056
2057	hwq = &rq->hwq;
2058	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2059		dev_err(&hwq->pdev->dev,
2060			"FP: QP (0x%x) is in the 0x%x state\n",
2061			qp->id, qp->state);
2062		rc = -EINVAL;
2063		goto done;
2064	}
2065
2066	if (bnxt_qplib_queue_full(rq, rq->dbinfo.max_slot)) {
2067		dev_err(&hwq->pdev->dev,
2068			"FP: QP (0x%x) RQ is full!\n", qp->id);
2069		rc = -EINVAL;
2070		goto done;
2071	}
2072
2073	swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
2074	swq->wr_id = wqe->wr_id;
2075	swq->slots = rq->dbinfo.max_slot;
2076
2077	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2078		sch_handler = true;
2079		dev_dbg(&hwq->pdev->dev,
2080			"%s: Error QP. Scheduling for poll_cq\n", __func__);
2081		goto queue_err;
2082	}
2083
2084	idx = 0;
2085	base_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2086	ext_hdr = bnxt_qplib_get_prod_qe(hwq, idx++);
2087	memset(base_hdr, 0, sizeof(struct sq_sge));
2088	memset(ext_hdr, 0, sizeof(struct sq_sge));
2089	wqe_sz = (sizeof(struct rq_wqe_hdr) +
2090		  wqe->num_sge * sizeof(struct sq_sge)) >> 4;
2091	bnxt_qplib_put_sges(hwq, wqe->sg_list, wqe->num_sge, &idx);
2092	if (!wqe->num_sge) {
2093		struct sq_sge *sge;
2094
2095		sge = bnxt_qplib_get_prod_qe(hwq, idx++);
2096		sge->size = 0;
2097		wqe_sz++;
2098	}
2099	base_hdr->wqe_type = wqe->type;
2100	base_hdr->flags = wqe->flags;
2101	base_hdr->wqe_size = wqe_sz;
2102	base_hdr->wr_id[0] = cpu_to_le32(wqe_idx);
2103queue_err:
2104	bnxt_qplib_swq_mod_start(rq, wqe_idx);
2105	bnxt_qplib_hwq_incr_prod(&rq->dbinfo, hwq, swq->slots);
2106done:
2107	if (sch_handler) {
2108		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
2109		if (nq_work) {
2110			nq_work->cq = qp->rcq;
2111			nq_work->nq = qp->rcq->nq;
2112			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
2113			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
2114		} else {
2115			dev_err(&hwq->pdev->dev,
2116				"FP: Failed to allocate RQ nq_work!\n");
2117			rc = -ENOMEM;
2118		}
2119	}
2120
2121	return rc;
2122}
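/*
 * Hedged usage sketch, mirroring the send side: post the RQE, then ring
 * the RQ producer doorbell. The real verbs layer typically rings once
 * per chain of work requests, not per WQE.
 */
static int example_post_one_recv(struct bnxt_qplib_qp *qp,
				 struct bnxt_qplib_swqe *swqe)
{
	int rc;

	rc = bnxt_qplib_post_recv(qp, swqe);	/* stage the WQE in the RQ */
	if (!rc)
		bnxt_qplib_post_recv_db(qp);	/* make it visible to HW */
	return rc;
}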
2123
2124/* CQ */
2125int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2126{
2127	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2128	struct bnxt_qplib_hwq_attr hwq_attr = {};
2129	struct creq_create_cq_resp resp = {};
2130	struct bnxt_qplib_cmdqmsg msg = {};
2131	struct cmdq_create_cq req = {};
2132	struct bnxt_qplib_pbl *pbl;
2133	u32 pg_sz_lvl;
2134	int rc;
2135
2136	if (!cq->dpi) {
2137		dev_err(&rcfw->pdev->dev,
2138			"FP: CREATE_CQ failed due to NULL DPI\n");
2139		return -EINVAL;
2140	}
2141
2142	cq->dbinfo.flags = 0;
2143	hwq_attr.res = res;
2144	hwq_attr.depth = cq->max_wqe;
2145	hwq_attr.stride = sizeof(struct cq_base);
2146	hwq_attr.type = HWQ_TYPE_QUEUE;
2147	hwq_attr.sginfo = &cq->sg_info;
2148	rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
2149	if (rc)
2150		return rc;
2151
2152	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2153				 CMDQ_BASE_OPCODE_CREATE_CQ,
2154				 sizeof(req));
2155
2156	req.dpi = cpu_to_le32(cq->dpi->dpi);
2157	req.cq_handle = cpu_to_le64(cq->cq_handle);
2158	req.cq_size = cpu_to_le32(cq->max_wqe);
2159	pbl = &cq->hwq.pbl[PBL_LVL_0];
2160	pg_sz_lvl = (bnxt_qplib_base_pg_size(&cq->hwq) <<
2161		     CMDQ_CREATE_CQ_PG_SIZE_SFT);
2162	pg_sz_lvl |= (cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK);
2163	req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
2164	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2165	req.cq_fco_cnq_id = cpu_to_le32(
2166			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
2167			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
2168	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2169				sizeof(resp), 0);
2170	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2171	if (rc)
2172		goto fail;
2173
2174	cq->id = le32_to_cpu(resp.xid);
2175	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
2176	init_waitqueue_head(&cq->waitq);
2177	INIT_LIST_HEAD(&cq->sqf_head);
2178	INIT_LIST_HEAD(&cq->rqf_head);
2179	spin_lock_init(&cq->compl_lock);
2180	spin_lock_init(&cq->flush_lock);
2181
2182	cq->dbinfo.hwq = &cq->hwq;
2183	cq->dbinfo.xid = cq->id;
2184	cq->dbinfo.db = cq->dpi->dbr;
2185	cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
2186	cq->dbinfo.flags = 0;
2187	cq->dbinfo.toggle = 0;
2188
2189	bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
2190
2191	return 0;
2192
2193fail:
2194	bnxt_qplib_free_hwq(res, &cq->hwq);
2195	return rc;
2196}
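/*
 * Hedged setup sketch: these are the fields bnxt_qplib_create_cq()
 * consumes above. The dpi and CNQ ring id are placeholders for whatever
 * the caller allocated, and cq->sg_info is assumed to be prepared already.
 */
static int example_create_cq(struct bnxt_qplib_res *res,
			     struct bnxt_qplib_cq *cq,
			     struct bnxt_qplib_dpi *dpi,
			     u32 cnq_ring_id, u32 depth)
{
	cq->dpi = dpi;			/* doorbell page; must be non-NULL */
	cq->cnq_hw_ring_id = cnq_ring_id;	/* notification queue to bind */
	cq->max_wqe = depth;			/* requested CQE depth */
	cq->cq_handle = (u64)(unsigned long)cq;	/* echoed back in NQ events */

	return bnxt_qplib_create_cq(res, cq);
}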
2197
2198void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
2199				   struct bnxt_qplib_cq *cq)
2200{
2201	bnxt_qplib_free_hwq(res, &cq->hwq);
2202	memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
2203	/* Reset only the cons bit in the flags */
2204	cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
2205}
2206
2207int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
2208			 int new_cqes)
2209{
2210	struct bnxt_qplib_hwq_attr hwq_attr = {};
2211	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2212	struct creq_resize_cq_resp resp = {};
2213	struct bnxt_qplib_cmdqmsg msg = {};
2214	struct cmdq_resize_cq req = {};
2215	struct bnxt_qplib_pbl *pbl;
2216	u32 pg_sz, lvl, new_sz;
2217	int rc;
2218
2219	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2220				 CMDQ_BASE_OPCODE_RESIZE_CQ,
2221				 sizeof(req));
2222	hwq_attr.sginfo = &cq->sg_info;
2223	hwq_attr.res = res;
2224	hwq_attr.depth = new_cqes;
2225	hwq_attr.stride = sizeof(struct cq_base);
2226	hwq_attr.type = HWQ_TYPE_QUEUE;
2227	rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
2228	if (rc)
2229		return rc;
2230
2231	req.cq_cid = cpu_to_le32(cq->id);
2232	pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
2233	pg_sz = bnxt_qplib_base_pg_size(&cq->resize_hwq);
2234	lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
2235				       CMDQ_RESIZE_CQ_LVL_MASK;
2236	new_sz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
2237		  CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
2238	req.new_cq_size_pg_size_lvl = cpu_to_le32(new_sz | pg_sz | lvl);
2239	req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
2240
2241	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2242				sizeof(resp), 0);
2243	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2244	return rc;
2245}
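/*
 * Hedged ordering sketch: a resize is asynchronous. After
 * bnxt_qplib_resize_cq() the hardware signals the switch-over with a
 * CQ_BASE_CQE_TYPE_CUT_OFF CQE; bnxt_qplib_cq_process_cutoff() (further
 * below) clears CQ_FLAGS_RESIZE_IN_PROG and wakes cq->waitq, after which
 * the old ring can be retired with bnxt_qplib_resize_cq_complete().
 * The sequencing below is illustrative; the real verbs layer may differ.
 */
static int example_resize_cq(struct bnxt_qplib_res *res,
			     struct bnxt_qplib_cq *cq, int new_cqes)
{
	int rc;

	set_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
	rc = bnxt_qplib_resize_cq(res, cq, new_cqes);
	if (rc) {
		clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
		return rc;
	}

	/* wait for the cutoff CQE to confirm the switch-over */
	wait_event_interruptible(cq->waitq,
				 !test_bit(CQ_FLAGS_RESIZE_IN_PROG,
					   &cq->flags));
	bnxt_qplib_resize_cq_complete(res, cq);
	return 0;
}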
2246
2247int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
2248{
2249	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
2250	struct creq_destroy_cq_resp resp = {};
2251	struct bnxt_qplib_cmdqmsg msg = {};
2252	struct cmdq_destroy_cq req = {};
2253	u16 total_cnq_events;
2254	int rc;
2255
2256	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
2257				 CMDQ_BASE_OPCODE_DESTROY_CQ,
2258				 sizeof(req));
2259
2260	req.cq_cid = cpu_to_le32(cq->id);
2261	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
2262				sizeof(resp), 0);
2263	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
2264	if (rc)
2265		return rc;
2266	total_cnq_events = le16_to_cpu(resp.total_cnq_events);
2267	__wait_for_all_nqes(cq, total_cnq_events);
2268	bnxt_qplib_free_hwq(res, &cq->hwq);
2269	return 0;
2270}
2271
2272static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2273		      struct bnxt_qplib_cqe **pcqe, int *budget)
2274{
2275	struct bnxt_qplib_cqe *cqe;
2276	u32 start, last;
2277	int rc = 0;
2278
2279	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2280	start = sq->swq_start;
2281	cqe = *pcqe;
2282	while (*budget) {
2283		last = sq->swq_last;
2284		if (start == last)
2285			break;
2286		/* Skip the FENCE WQE completions */
2287		if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
2288			bnxt_qplib_cancel_phantom_processing(qp);
2289			goto skip_compl;
2290		}
2291		memset(cqe, 0, sizeof(*cqe));
2292		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2293		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2294		cqe->qp_handle = (u64)(unsigned long)qp;
2295		cqe->wr_id = sq->swq[last].wr_id;
2296		cqe->src_qp = qp->id;
2297		cqe->type = sq->swq[last].type;
2298		cqe++;
2299		(*budget)--;
2300skip_compl:
2301		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2302					 sq->swq[last].slots, &sq->dbinfo.flags);
2303		sq->swq_last = sq->swq[last].next_idx;
2304	}
2305	*pcqe = cqe;
2306	if (!(*budget) && sq->swq_last != start)
2307		/* Out of budget */
2308		rc = -EAGAIN;
2309
2310	return rc;
2311}
2312
2313static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2314		      struct bnxt_qplib_cqe **pcqe, int *budget)
2315{
2316	struct bnxt_qplib_cqe *cqe;
2317	u32 start, last;
2318	int opcode = 0;
2319	int rc = 0;
2320
2321	switch (qp->type) {
2322	case CMDQ_CREATE_QP1_TYPE_GSI:
2323		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2324		break;
2325	case CMDQ_CREATE_QP_TYPE_RC:
2326		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2327		break;
2328	case CMDQ_CREATE_QP_TYPE_UD:
2329	case CMDQ_CREATE_QP_TYPE_GSI:
2330		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2331		break;
2332	}
2333
2334	/* Flush the rest of the RQ */
2335	start = rq->swq_start;
2336	cqe = *pcqe;
2337	while (*budget) {
2338		last = rq->swq_last;
2339		if (last == start)
2340			break;
2341		memset(cqe, 0, sizeof(*cqe));
2342		cqe->status =
2343		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2344		cqe->opcode = opcode;
2345		cqe->qp_handle = (unsigned long)qp;
2346		cqe->wr_id = rq->swq[last].wr_id;
2347		cqe++;
2348		(*budget)--;
2349		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2350					 rq->swq[last].slots, &rq->dbinfo.flags);
2351		rq->swq_last = rq->swq[last].next_idx;
2352	}
2353	*pcqe = cqe;
2354	if (!*budget && rq->swq_last != start)
2355		/* Out of budget */
2356		rc = -EAGAIN;
2357
2358	return rc;
2359}
2360
2361void bnxt_qplib_mark_qp_error(void *qp_handle)
2362{
2363	struct bnxt_qplib_qp *qp = qp_handle;
2364
2365	if (!qp)
2366		return;
2367
2368	/* Must block new posting of SQ and RQ */
2369	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2370	bnxt_qplib_cancel_phantom_processing(qp);
2371}
2372
2373/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
2374 *       CQE is tracked from sw_cq_cons to max_elements but valid only if VALID=1
2375 */
2376static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2377		     u32 cq_cons, u32 swq_last, u32 cqe_sq_cons)
2378{
2379	u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
2380	struct bnxt_qplib_q *sq = &qp->sq;
2381	struct cq_req *peek_req_hwcqe;
2382	struct bnxt_qplib_qp *peek_qp;
2383	struct bnxt_qplib_q *peek_sq;
2384	struct bnxt_qplib_swq *swq;
2385	struct cq_base *peek_hwcqe;
2386	int i, rc = 0;
2387
2388	/* Normal mode */
2389	/* Check for the psn_search marking before completing */
2390	swq = &sq->swq[swq_last];
2391	if (swq->psn_search &&
2392	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2393		/* Unmark */
2394		swq->psn_search->flags_next_psn = cpu_to_le32
2395			(le32_to_cpu(swq->psn_search->flags_next_psn)
2396				     & ~0x80000000);
2397		dev_dbg(&cq->hwq.pdev->dev,
2398			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2399			cq_cons, qp->id, swq_last, cqe_sq_cons);
2400		sq->condition = true;
2401		sq->send_phantom = true;
2402
2403		/* TODO: Only ARM if the previous SQE is ARMALL */
2404		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
2405		rc = -EAGAIN;
2406		goto out;
2407	}
2408	if (sq->condition) {
2409		/* Peek at the completions */
2410		peek_flags = cq->dbinfo.flags;
2411		peek_sw_cq_cons = cq_cons;
2412		i = cq->hwq.max_elements;
2413		while (i--) {
2414			peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
2415						       peek_sw_cq_cons, NULL);
2416			/* If the next hwcqe is VALID */
2417			if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
2418				/*
2419				 * The valid test of the entry must be done first before
2420				 * reading any further.
2421				 */
2422				dma_rmb();
2423				/* If the next hwcqe is a REQ */
2424				if ((peek_hwcqe->cqe_type_toggle &
2425				    CQ_BASE_CQE_TYPE_MASK) ==
2426				    CQ_BASE_CQE_TYPE_REQ) {
2427					peek_req_hwcqe = (struct cq_req *)
2428							 peek_hwcqe;
2429					peek_qp = (struct bnxt_qplib_qp *)
2430						((unsigned long)
2431						 le64_to_cpu
2432						 (peek_req_hwcqe->qp_handle));
2433					peek_sq = &peek_qp->sq;
2434					peek_sq_cons_idx =
2435						((le16_to_cpu(
2436						  peek_req_hwcqe->sq_cons_idx)
2437						  - 1) % sq->max_wqe);
2438					/* If the hwcqe's sq's wr_id matches */
2439					if (peek_sq == sq &&
2440					    sq->swq[peek_sq_cons_idx].wr_id ==
2441					    BNXT_QPLIB_FENCE_WRID) {
2442						/*
2443						 *  Unbreak only if the phantom
2444						 *  comes back
2445						 */
2446						dev_dbg(&cq->hwq.pdev->dev,
2447							"FP: Got Phantom CQE\n");
2448						sq->condition = false;
2449						sq->single = true;
2450						rc = 0;
2451						goto out;
2452					}
2453				}
2454				/* Valid but not the phantom, so keep looping */
2455			} else {
2456				/* Not valid yet, just exit and wait */
2457				rc = -EINVAL;
2458				goto out;
2459			}
2460			bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements,
2461						 &peek_sw_cq_cons,
2462						 1, &peek_flags);
2463		}
2464		dev_err(&cq->hwq.pdev->dev,
2465			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2466			cq_cons, qp->id, swq_last, cqe_sq_cons);
2467		rc = -EINVAL;
2468	}
2469out:
2470	return rc;
2471}
2472
2473static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2474				     struct cq_req *hwcqe,
2475				     struct bnxt_qplib_cqe **pcqe, int *budget,
2476				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2477{
2478	struct bnxt_qplib_swq *swq;
2479	struct bnxt_qplib_cqe *cqe;
2480	struct bnxt_qplib_qp *qp;
2481	struct bnxt_qplib_q *sq;
2482	u32 cqe_sq_cons;
2483	int rc = 0;
2484
2485	qp = (struct bnxt_qplib_qp *)((unsigned long)
2486				      le64_to_cpu(hwcqe->qp_handle));
2487	if (!qp) {
2488		dev_err(&cq->hwq.pdev->dev,
2489			"FP: Process Req qp is NULL\n");
2490		return -EINVAL;
2491	}
2492	sq = &qp->sq;
2493
2494	cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
2495	if (qp->sq.flushed) {
2496		dev_dbg(&cq->hwq.pdev->dev,
2497			"%s: QP in Flush QP = %p\n", __func__, qp);
2498		goto done;
2499	}
2500	/* We must walk the sq's swq to fabricate CQEs for all previously
2501	 * signaled SWQEs (the hardware aggregates CQEs) from the current
2502	 * sq cons up to cqe_sq_cons
2503	 */
2504	cqe = *pcqe;
2505	while (*budget) {
2506		if (sq->swq_last == cqe_sq_cons)
2507			/* Done */
2508			break;
2509
2510		swq = &sq->swq[sq->swq_last];
2511		memset(cqe, 0, sizeof(*cqe));
2512		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2513		cqe->qp_handle = (u64)(unsigned long)qp;
2514		cqe->src_qp = qp->id;
2515		cqe->wr_id = swq->wr_id;
2516		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2517			goto skip;
2518		cqe->type = swq->type;
2519
2520		/* For the last CQE, check for status.  For errors, regardless
2521		 * of the request being signaled or not, it must complete with
2522		 * the hwcqe error status
2523		 */
2524		if (swq->next_idx == cqe_sq_cons &&
2525		    hwcqe->status != CQ_REQ_STATUS_OK) {
2526			cqe->status = hwcqe->status;
2527			dev_err(&cq->hwq.pdev->dev,
2528				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2529				sq->swq_last, cqe->wr_id, cqe->status);
2530			cqe++;
2531			(*budget)--;
2532			bnxt_qplib_mark_qp_error(qp);
2533			/* Add qp to flush list of the CQ */
2534			bnxt_qplib_add_flush_qp(qp);
2535		} else {
2536			/* Before we complete, do WA 9060 */
2537			if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2538				      cqe_sq_cons)) {
2539				*lib_qp = qp;
2540				goto out;
2541			}
2542			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2543				cqe->status = CQ_REQ_STATUS_OK;
2544				cqe++;
2545				(*budget)--;
2546			}
2547		}
2548skip:
2549		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2550					 swq->slots, &sq->dbinfo.flags);
2551		sq->swq_last = swq->next_idx;
2552		if (sq->single)
2553			break;
2554	}
2555out:
2556	*pcqe = cqe;
2557	if (sq->swq_last != cqe_sq_cons) {
2558		/* Out of budget */
2559		rc = -EAGAIN;
2560		goto done;
2561	}
2562	/*
2563	 * Back to normal completion mode only after it has completed all of
2564	 * the WC for this CQE
2565	 */
2566	sq->single = false;
2567done:
2568	return rc;
2569}
2570
2571static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2572{
2573	spin_lock(&srq->hwq.lock);
2574	srq->swq[srq->last_idx].next_idx = (int)tag;
2575	srq->last_idx = (int)tag;
2576	srq->swq[srq->last_idx].next_idx = -1;
2577	bnxt_qplib_hwq_incr_cons(srq->hwq.max_elements, &srq->hwq.cons,
2578				 srq->dbinfo.max_slot, &srq->dbinfo.flags);
2579	spin_unlock(&srq->hwq.lock);
2580}
2581
2582static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2583					struct cq_res_rc *hwcqe,
2584					struct bnxt_qplib_cqe **pcqe,
2585					int *budget)
2586{
2587	struct bnxt_qplib_srq *srq;
2588	struct bnxt_qplib_cqe *cqe;
2589	struct bnxt_qplib_qp *qp;
2590	struct bnxt_qplib_q *rq;
2591	u32 wr_id_idx;
2592
2593	qp = (struct bnxt_qplib_qp *)((unsigned long)
2594				      le64_to_cpu(hwcqe->qp_handle));
2595	if (!qp) {
2596		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2597		return -EINVAL;
2598	}
2599	if (qp->rq.flushed) {
2600		dev_dbg(&cq->hwq.pdev->dev,
2601			"%s: QP in Flush QP = %p\n", __func__, qp);
2602		return 0;
2603	}
2604
2605	cqe = *pcqe;
2606	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2607	cqe->length = le32_to_cpu(hwcqe->length);
2608	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2609	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2610	cqe->flags = le16_to_cpu(hwcqe->flags);
2611	cqe->status = hwcqe->status;
2612	cqe->qp_handle = (u64)(unsigned long)qp;
2613
2614	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2615				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2616	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2617		srq = qp->srq;
2618		if (!srq)
2619			return -EINVAL;
2620		if (wr_id_idx >= srq->hwq.max_elements) {
2621			dev_err(&cq->hwq.pdev->dev,
2622				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2623				wr_id_idx, srq->hwq.max_elements);
2624			return -EINVAL;
2625		}
2626		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2627		bnxt_qplib_release_srqe(srq, wr_id_idx);
2628		cqe++;
2629		(*budget)--;
2630		*pcqe = cqe;
2631	} else {
2632		struct bnxt_qplib_swq *swq;
2633
2634		rq = &qp->rq;
2635		if (wr_id_idx > (rq->max_wqe - 1)) {
2636			dev_err(&cq->hwq.pdev->dev,
2637				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2638				wr_id_idx, rq->max_wqe);
2639			return -EINVAL;
2640		}
2641		if (wr_id_idx != rq->swq_last)
2642			return -EINVAL;
2643		swq = &rq->swq[rq->swq_last];
2644		cqe->wr_id = swq->wr_id;
2645		cqe++;
2646		(*budget)--;
2647		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2648					 swq->slots, &rq->dbinfo.flags);
2649		rq->swq_last = swq->next_idx;
2650		*pcqe = cqe;
2651
2652		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2653			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2654			/* Add qp to flush list of the CQ */
2655			bnxt_qplib_add_flush_qp(qp);
2656		}
2657	}
2658
2659	return 0;
2660}
2661
2662static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2663					struct cq_res_ud *hwcqe,
2664					struct bnxt_qplib_cqe **pcqe,
2665					int *budget)
2666{
2667	struct bnxt_qplib_srq *srq;
2668	struct bnxt_qplib_cqe *cqe;
2669	struct bnxt_qplib_qp *qp;
2670	struct bnxt_qplib_q *rq;
2671	u32 wr_id_idx;
2672
2673	qp = (struct bnxt_qplib_qp *)((unsigned long)
2674				      le64_to_cpu(hwcqe->qp_handle));
2675	if (!qp) {
2676		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2677		return -EINVAL;
2678	}
2679	if (qp->rq.flushed) {
2680		dev_dbg(&cq->hwq.pdev->dev,
2681			"%s: QP in Flush QP = %p\n", __func__, qp);
2682		return 0;
2683	}
2684	cqe = *pcqe;
2685	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2686	cqe->length = le16_to_cpu(hwcqe->length) & CQ_RES_UD_LENGTH_MASK;
2687	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2688	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2689	cqe->flags = le16_to_cpu(hwcqe->flags);
2690	cqe->status = hwcqe->status;
2691	cqe->qp_handle = (u64)(unsigned long)qp;
2692	/* FIXME: Endianness fix needed for smac */
2693	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2694	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2695				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2696	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2697				  ((le32_to_cpu(
2698				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2699				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2700
2701	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2702		srq = qp->srq;
2703		if (!srq)
2704			return -EINVAL;
2705
2706		if (wr_id_idx >= srq->hwq.max_elements) {
2707			dev_err(&cq->hwq.pdev->dev,
2708				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2709				wr_id_idx, srq->hwq.max_elements);
2710			return -EINVAL;
2711		}
2712		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2713		bnxt_qplib_release_srqe(srq, wr_id_idx);
2714		cqe++;
2715		(*budget)--;
2716		*pcqe = cqe;
2717	} else {
2718		struct bnxt_qplib_swq *swq;
2719
2720		rq = &qp->rq;
2721		if (wr_id_idx > (rq->max_wqe - 1)) {
2722			dev_err(&cq->hwq.pdev->dev,
2723				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2724				wr_id_idx, rq->max_wqe);
2725			return -EINVAL;
2726		}
2727
2728		if (rq->swq_last != wr_id_idx)
2729			return -EINVAL;
2730		swq = &rq->swq[rq->swq_last];
2731		cqe->wr_id = swq->wr_id;
2732		cqe++;
2733		(*budget)--;
2734		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2735					 swq->slots, &rq->dbinfo.flags);
2736		rq->swq_last = swq->next_idx;
2737		*pcqe = cqe;
2738
2739		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2740			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2741			/* Add qp to flush list of the CQ */
2742			bnxt_qplib_add_flush_qp(qp);
2743		}
2744	}
2745
2746	return 0;
2747}
2748
2749bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2750{
2751	struct cq_base *hw_cqe;
2752	bool rc = true;
2753
2754	hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
2755	/* Check for Valid bit. If the CQE is valid, return false */
2756	rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
2757	return rc;
2758}
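/*
 * Hedged usage sketch: bnxt_qplib_is_cq_empty() is a cheap peek -- it
 * only reads the valid bit at the current consumer index and never
 * advances the queue. A caller typically arms the CQ only when nothing
 * is pending, so no completion event is lost; bnxt_qplib_req_notify_cq()
 * is defined further below in this file.
 */
static bool example_arm_if_idle(struct bnxt_qplib_cq *cq)
{
	if (!bnxt_qplib_is_cq_empty(cq))
		return false;		/* completions pending; poll first */

	bnxt_qplib_req_notify_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
	return true;
}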
2759
2760static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2761						struct cq_res_raweth_qp1 *hwcqe,
2762						struct bnxt_qplib_cqe **pcqe,
2763						int *budget)
2764{
2765	struct bnxt_qplib_qp *qp;
2766	struct bnxt_qplib_q *rq;
2767	struct bnxt_qplib_srq *srq;
2768	struct bnxt_qplib_cqe *cqe;
2769	u32 wr_id_idx;
2770
2771	qp = (struct bnxt_qplib_qp *)((unsigned long)
2772				      le64_to_cpu(hwcqe->qp_handle));
2773	if (!qp) {
2774		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2775		return -EINVAL;
2776	}
2777	if (qp->rq.flushed) {
2778		dev_dbg(&cq->hwq.pdev->dev,
2779			"%s: QP in Flush QP = %p\n", __func__, qp);
2780		return 0;
2781	}
2782	cqe = *pcqe;
2783	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2784	cqe->flags = le16_to_cpu(hwcqe->flags);
2785	cqe->qp_handle = (u64)(unsigned long)qp;
2786
2787	wr_id_idx =
2788		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2789				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2790	cqe->src_qp = qp->id;
2791	if (qp->id == 1 && !cqe->length) {
2792		/* Add workaround for the length misdetection */
2793		cqe->length = 296;
2794	} else {
2795		cqe->length = le16_to_cpu(hwcqe->length);
2796	}
2797	cqe->pkey_index = qp->pkey_index;
2798	memcpy(cqe->smac, qp->smac, 6);
2799
2800	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2801	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2802	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2803
2804	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2805		srq = qp->srq;
2806		if (!srq) {
2807			dev_err(&cq->hwq.pdev->dev,
2808				"FP: SRQ used but not defined?\n");
2809			return -EINVAL;
2810		}
2811		if (wr_id_idx >= srq->hwq.max_elements) {
2812			dev_err(&cq->hwq.pdev->dev,
2813				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2814				wr_id_idx, srq->hwq.max_elements);
2815			return -EINVAL;
2816		}
2817		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2818		bnxt_qplib_release_srqe(srq, wr_id_idx);
2819		cqe++;
2820		(*budget)--;
2821		*pcqe = cqe;
2822	} else {
2823		struct bnxt_qplib_swq *swq;
2824
2825		rq = &qp->rq;
2826		if (wr_id_idx > (rq->max_wqe - 1)) {
2827			dev_err(&cq->hwq.pdev->dev,
2828				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2829				wr_id_idx, rq->max_wqe);
2830			return -EINVAL;
2831		}
2832		if (rq->swq_last != wr_id_idx)
2833			return -EINVAL;
2834		swq = &rq->swq[rq->swq_last];
2835		cqe->wr_id = swq->wr_id;
2836		cqe++;
2837		(*budget)--;
2838		bnxt_qplib_hwq_incr_cons(rq->hwq.max_elements, &rq->hwq.cons,
2839					 swq->slots, &rq->dbinfo.flags);
2840		rq->swq_last = swq->next_idx;
2841		*pcqe = cqe;
2842
2843		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2844			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2845			/* Add qp to flush list of the CQ */
2846			bnxt_qplib_add_flush_qp(qp);
2847		}
2848	}
2849
2850	return 0;
2851}
2852
2853static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2854					  struct cq_terminal *hwcqe,
2855					  struct bnxt_qplib_cqe **pcqe,
2856					  int *budget)
2857{
2858	struct bnxt_qplib_qp *qp;
2859	struct bnxt_qplib_q *sq, *rq;
2860	struct bnxt_qplib_cqe *cqe;
2861	u32 swq_last = 0, cqe_cons;
2862	int rc = 0;
2863
2864	/* Check the Status */
2865	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2866		dev_warn(&cq->hwq.pdev->dev,
2867			 "FP: CQ Process Terminal Error status = 0x%x\n",
2868			 hwcqe->status);
2869
2870	qp = (struct bnxt_qplib_qp *)((unsigned long)
2871				      le64_to_cpu(hwcqe->qp_handle));
2872	if (!qp)
2873		return -EINVAL;
2874
2875	/* Must block new posting of SQ and RQ */
2876	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2877
2878	sq = &qp->sq;
2879	rq = &qp->rq;
2880
2881	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2882	if (cqe_cons == 0xFFFF)
2883		goto do_rq;
2884	cqe_cons %= sq->max_wqe;
2885
2886	if (qp->sq.flushed) {
2887		dev_dbg(&cq->hwq.pdev->dev,
2888			"%s: QP in Flush QP = %p\n", __func__, qp);
2889		goto sq_done;
2890	}
2891
2892	/* A terminal CQE can also aggregate successful CQEs that preceded it,
2893	 * so we must complete all CQEs from the current sq cons up to
2894	 * cqe_cons with status OK
2895	 */
2896	cqe = *pcqe;
2897	while (*budget) {
2898		swq_last = sq->swq_last;
2899		if (swq_last == cqe_cons)
2900			break;
2901		if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2902			memset(cqe, 0, sizeof(*cqe));
2903			cqe->status = CQ_REQ_STATUS_OK;
2904			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2905			cqe->qp_handle = (u64)(unsigned long)qp;
2906			cqe->src_qp = qp->id;
2907			cqe->wr_id = sq->swq[swq_last].wr_id;
2908			cqe->type = sq->swq[swq_last].type;
2909			cqe++;
2910			(*budget)--;
2911		}
2912		bnxt_qplib_hwq_incr_cons(sq->hwq.max_elements, &sq->hwq.cons,
2913					 sq->swq[swq_last].slots, &sq->dbinfo.flags);
2914		sq->swq_last = sq->swq[swq_last].next_idx;
2915	}
2916	*pcqe = cqe;
2917	if (!(*budget) && swq_last != cqe_cons) {
2918		/* Out of budget */
2919		rc = -EAGAIN;
2920		goto sq_done;
2921	}
2922sq_done:
2923	if (rc)
2924		return rc;
2925do_rq:
2926	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2927	if (cqe_cons == 0xFFFF) {
2928		goto done;
2929	} else if (cqe_cons > rq->max_wqe - 1) {
2930		dev_err(&cq->hwq.pdev->dev,
2931			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2932			cqe_cons, rq->max_wqe);
2933		rc = -EINVAL;
2934		goto done;
2935	}
2936
2937	if (qp->rq.flushed) {
2938		dev_dbg(&cq->hwq.pdev->dev,
2939			"%s: QP in Flush QP = %p\n", __func__, qp);
2940		rc = 0;
2941		goto done;
2942	}
2943
2944	/* A terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
2945	 * from the current rq->cons to the rq->prod, regardless of the
2946	 * rq_cons_idx the terminal CQE indicates
2947	 */
2948
2949	/* Add qp to flush list of the CQ */
2950	bnxt_qplib_add_flush_qp(qp);
2951done:
2952	return rc;
2953}
2954
2955static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2956					struct cq_cutoff *hwcqe)
2957{
2958	/* Check the Status */
2959	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2960		dev_err(&cq->hwq.pdev->dev,
2961			"FP: CQ Process Cutoff Error status = 0x%x\n",
2962			hwcqe->status);
2963		return -EINVAL;
2964	}
2965	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2966	wake_up_interruptible(&cq->waitq);
2967
2968	return 0;
2969}
2970
2971int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2972				  struct bnxt_qplib_cqe *cqe,
2973				  int num_cqes)
2974{
2975	struct bnxt_qplib_qp *qp = NULL;
2976	u32 budget = num_cqes;
2977	unsigned long flags;
2978
2979	spin_lock_irqsave(&cq->flush_lock, flags);
2980	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2981		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2982		__flush_sq(&qp->sq, qp, &cqe, &budget);
2983	}
2984
2985	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2986		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2987		__flush_rq(&qp->rq, qp, &cqe, &budget);
2988	}
2989	spin_unlock_irqrestore(&cq->flush_lock, flags);
2990
2991	return num_cqes - budget;
2992}
2993
2994int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2995		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2996{
2997	struct cq_base *hw_cqe;
2998	int budget, rc = 0;
2999	u32 hw_polled = 0;
3000	u8 type;
3001
3002	budget = num_cqes;
3003
3004	while (budget) {
3005		hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
3006
3007		/* Check for Valid bit */
3008		if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
3009			break;
3010
3011		/*
3012		 * The valid test of the entry must be done first before
3013		 * reading any further.
3014		 */
3015		dma_rmb();
3016		/* Translate from the device's CQE format to qplib_wc */
3017		type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
3018		switch (type) {
3019		case CQ_BASE_CQE_TYPE_REQ:
3020			rc = bnxt_qplib_cq_process_req(cq,
3021						       (struct cq_req *)hw_cqe,
3022						       &cqe, &budget,
3023						       cq->hwq.cons, lib_qp);
3024			break;
3025		case CQ_BASE_CQE_TYPE_RES_RC:
3026			rc = bnxt_qplib_cq_process_res_rc(cq,
3027							  (struct cq_res_rc *)
3028							  hw_cqe, &cqe,
3029							  &budget);
3030			break;
3031		case CQ_BASE_CQE_TYPE_RES_UD:
3032			rc = bnxt_qplib_cq_process_res_ud
3033					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
3034					 &budget);
3035			break;
3036		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
3037			rc = bnxt_qplib_cq_process_res_raweth_qp1
3038					(cq, (struct cq_res_raweth_qp1 *)
3039					 hw_cqe, &cqe, &budget);
3040			break;
3041		case CQ_BASE_CQE_TYPE_TERMINAL:
3042			rc = bnxt_qplib_cq_process_terminal
3043					(cq, (struct cq_terminal *)hw_cqe,
3044					 &cqe, &budget);
3045			break;
3046		case CQ_BASE_CQE_TYPE_CUT_OFF:
3047			bnxt_qplib_cq_process_cutoff
3048					(cq, (struct cq_cutoff *)hw_cqe);
3049			/* Done processing this CQ */
3050			goto exit;
3051		default:
3052			dev_err(&cq->hwq.pdev->dev,
3053				"process_cq unknown type 0x%lx\n",
3054				hw_cqe->cqe_type_toggle &
3055				CQ_BASE_CQE_TYPE_MASK);
3056			rc = -EINVAL;
3057			break;
3058		}
3059		if (rc < 0) {
3060			if (rc == -EAGAIN)
3061				break;
3062			/* Error while processing the CQE, just skip to the
3063			 * next one
3064			 */
3065			if (type != CQ_BASE_CQE_TYPE_TERMINAL)
3066				dev_err(&cq->hwq.pdev->dev,
3067					"process_cqe error rc = 0x%x\n", rc);
3068		}
3069		hw_polled++;
3070		bnxt_qplib_hwq_incr_cons(cq->hwq.max_elements, &cq->hwq.cons,
3071					 1, &cq->dbinfo.flags);
3072
3073	}
3074	if (hw_polled)
3075		bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
3076exit:
3077	return num_cqes - budget;
3078}
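/*
 * Hedged polling sketch, loosely modeled on a verbs-layer poll_cq: drain
 * fabricated flush completions for errored QPs first, then live hardware
 * CQEs, up to the caller's budget. The function name is illustrative,
 * not the driver's real entry point.
 */
static int example_poll_cq(struct bnxt_qplib_cq *cq,
			   struct bnxt_qplib_cqe *wc, int nwc)
{
	struct bnxt_qplib_qp *lib_qp = NULL;
	int n;

	n = bnxt_qplib_process_flush_list(cq, wc, nwc);
	if (n < nwc)
		n += bnxt_qplib_poll_cq(cq, wc + n, nwc - n, &lib_qp);

	/* lib_qp is set when WA 9060 wants a phantom send on that QP */
	return n;	/* number of wc[] entries filled */
}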
3079
3080void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
3081{
3082	cq->dbinfo.toggle = cq->toggle;
3083	if (arm_type)
3084		bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
3085	/* Use cq->arm_state to track whether the cq handler needs to run */
3086	atomic_set(&cq->arm_state, 1);
3087}
3088
3089void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
3090{
3091	flush_workqueue(qp->scq->nq->cqn_wq);
3092	if (qp->scq != qp->rcq)
3093		flush_workqueue(qp->rcq->nq->cqn_wq);
3094}
v5.4
   1/*
   2 * Broadcom NetXtreme-E RoCE driver.
   3 *
   4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
   5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * BSD license below:
  12 *
  13 * Redistribution and use in source and binary forms, with or without
  14 * modification, are permitted provided that the following conditions
  15 * are met:
  16 *
  17 * 1. Redistributions of source code must retain the above copyright
  18 *    notice, this list of conditions and the following disclaimer.
  19 * 2. Redistributions in binary form must reproduce the above copyright
  20 *    notice, this list of conditions and the following disclaimer in
  21 *    the documentation and/or other materials provided with the
  22 *    distribution.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35 *
  36 * Description: Fast Path Operators
  37 */
  38
  39#define dev_fmt(fmt) "QPLIB: " fmt
  40
  41#include <linux/interrupt.h>
  42#include <linux/spinlock.h>
  43#include <linux/sched.h>
  44#include <linux/slab.h>
  45#include <linux/pci.h>
 
  46#include <linux/prefetch.h>
  47#include <linux/if_ether.h>
 
  48
  49#include "roce_hsi.h"
  50
  51#include "qplib_res.h"
  52#include "qplib_rcfw.h"
  53#include "qplib_sp.h"
  54#include "qplib_fp.h"
  55
  56static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq);
  57static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
  58static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type);
  59
  60static void bnxt_qplib_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
  61{
  62	qp->sq.condition = false;
  63	qp->sq.send_phantom = false;
  64	qp->sq.single = false;
  65}
  66
  67/* Flush list */
  68static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
  69{
  70	struct bnxt_qplib_cq *scq, *rcq;
  71
  72	scq = qp->scq;
  73	rcq = qp->rcq;
  74
  75	if (!qp->sq.flushed) {
  76		dev_dbg(&scq->hwq.pdev->dev,
  77			"FP: Adding to SQ Flush list = %p\n", qp);
  78		bnxt_qplib_cancel_phantom_processing(qp);
  79		list_add_tail(&qp->sq_flush, &scq->sqf_head);
  80		qp->sq.flushed = true;
  81	}
  82	if (!qp->srq) {
  83		if (!qp->rq.flushed) {
  84			dev_dbg(&rcq->hwq.pdev->dev,
  85				"FP: Adding to RQ Flush list = %p\n", qp);
  86			list_add_tail(&qp->rq_flush, &rcq->rqf_head);
  87			qp->rq.flushed = true;
  88		}
  89	}
  90}
  91
  92static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp,
  93				       unsigned long *flags)
  94	__acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
  95{
  96	spin_lock_irqsave(&qp->scq->flush_lock, *flags);
  97	if (qp->scq == qp->rcq)
  98		__acquire(&qp->rcq->flush_lock);
  99	else
 100		spin_lock(&qp->rcq->flush_lock);
 101}
 102
 103static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp,
 104				       unsigned long *flags)
 105	__releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
 106{
 107	if (qp->scq == qp->rcq)
 108		__release(&qp->rcq->flush_lock);
 109	else
 110		spin_unlock(&qp->rcq->flush_lock);
 111	spin_unlock_irqrestore(&qp->scq->flush_lock, *flags);
 112}
 113
 114void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
 115{
 116	unsigned long flags;
 117
 118	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 119	__bnxt_qplib_add_flush_qp(qp);
 120	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 121}
 122
 123static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
 124{
 125	if (qp->sq.flushed) {
 126		qp->sq.flushed = false;
 127		list_del(&qp->sq_flush);
 128	}
 129	if (!qp->srq) {
 130		if (qp->rq.flushed) {
 131			qp->rq.flushed = false;
 132			list_del(&qp->rq_flush);
 133		}
 134	}
 135}
 136
 137void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
 138{
 139	unsigned long flags;
 140
 141	bnxt_qplib_acquire_cq_flush_locks(qp, &flags);
 142	__clean_cq(qp->scq, (u64)(unsigned long)qp);
 143	qp->sq.hwq.prod = 0;
 144	qp->sq.hwq.cons = 0;
 145	__clean_cq(qp->rcq, (u64)(unsigned long)qp);
 146	qp->rq.hwq.prod = 0;
 147	qp->rq.hwq.cons = 0;
 148
 149	__bnxt_qplib_del_flush_qp(qp);
 150	bnxt_qplib_release_cq_flush_locks(qp, &flags);
 151}
 152
 153static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
 154{
 155	struct bnxt_qplib_nq_work *nq_work =
 156			container_of(work, struct bnxt_qplib_nq_work, work);
 157
 158	struct bnxt_qplib_cq *cq = nq_work->cq;
 159	struct bnxt_qplib_nq *nq = nq_work->nq;
 160
 161	if (cq && nq) {
 162		spin_lock_bh(&cq->compl_lock);
 163		if (atomic_read(&cq->arm_state) && nq->cqn_handler) {
 164			dev_dbg(&nq->pdev->dev,
 165				"%s:Trigger cq  = %p event nq = %p\n",
 166				__func__, cq, nq);
 167			nq->cqn_handler(nq, cq);
 168		}
 169		spin_unlock_bh(&cq->compl_lock);
 170	}
 171	kfree(nq_work);
 172}
 173
 174static void bnxt_qplib_free_qp_hdr_buf(struct bnxt_qplib_res *res,
 175				       struct bnxt_qplib_qp *qp)
 176{
 177	struct bnxt_qplib_q *rq = &qp->rq;
 178	struct bnxt_qplib_q *sq = &qp->sq;
 179
 180	if (qp->rq_hdr_buf)
 181		dma_free_coherent(&res->pdev->dev,
 182				  rq->hwq.max_elements * qp->rq_hdr_buf_size,
 183				  qp->rq_hdr_buf, qp->rq_hdr_buf_map);
 184	if (qp->sq_hdr_buf)
 185		dma_free_coherent(&res->pdev->dev,
 186				  sq->hwq.max_elements * qp->sq_hdr_buf_size,
 187				  qp->sq_hdr_buf, qp->sq_hdr_buf_map);
 188	qp->rq_hdr_buf = NULL;
 189	qp->sq_hdr_buf = NULL;
 190	qp->rq_hdr_buf_map = 0;
 191	qp->sq_hdr_buf_map = 0;
 192	qp->sq_hdr_buf_size = 0;
 193	qp->rq_hdr_buf_size = 0;
 194}
 195
 196static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
 197				       struct bnxt_qplib_qp *qp)
 198{
 199	struct bnxt_qplib_q *rq = &qp->rq;
 200	struct bnxt_qplib_q *sq = &qp->sq;
 201	int rc = 0;
 202
 203	if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
 204		qp->sq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
 205					sq->hwq.max_elements *
 206					qp->sq_hdr_buf_size,
 207					&qp->sq_hdr_buf_map, GFP_KERNEL);
 208		if (!qp->sq_hdr_buf) {
 209			rc = -ENOMEM;
 210			dev_err(&res->pdev->dev,
 211				"Failed to create sq_hdr_buf\n");
 212			goto fail;
 213		}
 214	}
 215
 216	if (qp->rq_hdr_buf_size && rq->hwq.max_elements) {
 217		qp->rq_hdr_buf = dma_alloc_coherent(&res->pdev->dev,
 218						    rq->hwq.max_elements *
 219						    qp->rq_hdr_buf_size,
 220						    &qp->rq_hdr_buf_map,
 221						    GFP_KERNEL);
 222		if (!qp->rq_hdr_buf) {
 223			rc = -ENOMEM;
 224			dev_err(&res->pdev->dev,
 225				"Failed to create rq_hdr_buf\n");
 226			goto fail;
 227		}
 228	}
 229	return 0;
 230
 231fail:
 232	bnxt_qplib_free_qp_hdr_buf(res, qp);
 233	return rc;
 234}
 235
 236static void bnxt_qplib_service_nq(unsigned long data)
 237{
 238	struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
 239	struct bnxt_qplib_hwq *hwq = &nq->hwq;
 240	struct nq_base *nqe, **nq_ptr;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 241	struct bnxt_qplib_cq *cq;
 242	int num_cqne_processed = 0;
 243	int num_srqne_processed = 0;
 244	u32 sw_cons, raw_cons;
 245	u16 type;
 246	int budget = nq->budget;
 
 247	uintptr_t q_handle;
 248	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
 
 249
 
 250	/* Service the NQ until empty */
 251	raw_cons = hwq->cons;
 252	while (budget--) {
 253		sw_cons = HWQ_CMP(raw_cons, hwq);
 254		nq_ptr = (struct nq_base **)hwq->pbl_ptr;
 255		nqe = &nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)];
 256		if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
 257			break;
 258
 259		/*
 260		 * The valid test of the entry must be done first before
 261		 * reading any further.
 262		 */
 263		dma_rmb();
 264
 265		type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
 266		switch (type) {
 267		case NQ_BASE_TYPE_CQ_NOTIFICATION:
 268		{
 269			struct nq_cn *nqcne = (struct nq_cn *)nqe;
 270
 271			q_handle = le32_to_cpu(nqcne->cq_handle_low);
 272			q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high)
 273						     << 32;
 274			cq = (struct bnxt_qplib_cq *)(unsigned long)q_handle;
 275			bnxt_qplib_arm_cq_enable(cq);
 
 
 
 
 
 
 276			spin_lock_bh(&cq->compl_lock);
 277			atomic_set(&cq->arm_state, 0);
 278			if (!nq->cqn_handler(nq, (cq)))
 279				num_cqne_processed++;
 280			else
 281				dev_warn(&nq->pdev->dev,
 282					 "cqn - type 0x%x not handled\n", type);
 
 283			spin_unlock_bh(&cq->compl_lock);
 284			break;
 285		}
 286		case NQ_BASE_TYPE_SRQ_EVENT:
 287		{
 
 288			struct nq_srq_event *nqsrqe =
 289						(struct nq_srq_event *)nqe;
 290
 291			q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
 292			q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high)
 293				     << 32;
 294			bnxt_qplib_arm_srq((struct bnxt_qplib_srq *)q_handle,
 295					   DBC_DBC_TYPE_SRQ_ARMENA);
 296			if (!nq->srqn_handler(nq,
 297					      (struct bnxt_qplib_srq *)q_handle,
 298					      nqsrqe->event))
 299				num_srqne_processed++;
 300			else
 301				dev_warn(&nq->pdev->dev,
 302					 "SRQ event 0x%x not handled\n",
 303					 nqsrqe->event);
 304			break;
 305		}
 306		case NQ_BASE_TYPE_DBQ_EVENT:
 307			break;
 308		default:
 309			dev_warn(&nq->pdev->dev,
 310				 "nqe with type = 0x%x not handled\n", type);
 311			break;
 312		}
 313		raw_cons++;
 314	}
 315	if (hwq->cons != raw_cons) {
 316		hwq->cons = raw_cons;
 317		bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, hwq->cons,
 318					    hwq->max_elements, nq->ring_id,
 319					    gen_p5);
 320	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 321}
 322
 323static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
 324{
 325	struct bnxt_qplib_nq *nq = dev_instance;
 326	struct bnxt_qplib_hwq *hwq = &nq->hwq;
 327	struct nq_base **nq_ptr;
 328	u32 sw_cons;
 329
 330	/* Prefetch the NQ element */
 331	sw_cons = HWQ_CMP(hwq->cons, hwq);
 332	nq_ptr = (struct nq_base **)nq->hwq.pbl_ptr;
 333	prefetch(&nq_ptr[NQE_PG(sw_cons)][NQE_IDX(sw_cons)]);
 334
 335	/* Fan out to CPU affinitized kthreads? */
 336	tasklet_schedule(&nq->worker);
 337
 338	return IRQ_HANDLED;
 339}
 340
 341void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
 342{
 343	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
 344	tasklet_disable(&nq->worker);
 
 
 345	/* Mask h/w interrupt */
 346	bnxt_qplib_ring_nq_db(nq->bar_reg_iomem, nq->hwq.cons,
 347			      nq->hwq.max_elements, nq->ring_id, gen_p5);
 348	/* Sync with last running IRQ handler */
 349	synchronize_irq(nq->vector);
 
 
 
 
 
 350	if (kill)
 351		tasklet_kill(&nq->worker);
 352	if (nq->requested) {
 353		irq_set_affinity_hint(nq->vector, NULL);
 354		free_irq(nq->vector, nq);
 355		nq->requested = false;
 356	}
 357}
 358
 359void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
 360{
 361	if (nq->cqn_wq) {
 362		destroy_workqueue(nq->cqn_wq);
 363		nq->cqn_wq = NULL;
 364	}
 365
 366	/* Make sure the HW is stopped! */
 367	if (nq->requested)
 368		bnxt_qplib_nq_stop_irq(nq, true);
 369
 370	if (nq->bar_reg_iomem)
 371		iounmap(nq->bar_reg_iomem);
 372	nq->bar_reg_iomem = NULL;
 
 373
 374	nq->cqn_handler = NULL;
 375	nq->srqn_handler = NULL;
 376	nq->vector = 0;
 377}
 378
 379int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
 380			    int msix_vector, bool need_init)
 381{
 382	bool gen_p5 = bnxt_qplib_is_chip_gen_p5(nq->res->cctx);
 383	int rc;
 384
 385	if (nq->requested)
 386		return -EFAULT;
 387
 388	nq->vector = msix_vector;
 389	if (need_init)
 390		tasklet_init(&nq->worker, bnxt_qplib_service_nq,
 391			     (unsigned long)nq);
 392	else
 393		tasklet_enable(&nq->worker);
 394
 395	snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
 396	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
 397	if (rc)
 
 
 
 
 
 
 398		return rc;
 
 399
 400	cpumask_clear(&nq->mask);
 401	cpumask_set_cpu(nq_indx, &nq->mask);
 402	rc = irq_set_affinity_hint(nq->vector, &nq->mask);
 403	if (rc) {
 404		dev_warn(&nq->pdev->dev,
 405			 "set affinity failed; vector: %d nq_idx: %d\n",
 406			 nq->vector, nq_indx);
 407	}
 408	nq->requested = true;
 409	bnxt_qplib_ring_nq_db_rearm(nq->bar_reg_iomem, nq->hwq.cons,
 410				    nq->hwq.max_elements, nq->ring_id, gen_p5);
 411
 412	return rc;
 413}
 414
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 415int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 416			 int nq_idx, int msix_vector, int bar_reg_offset,
 417			 int (*cqn_handler)(struct bnxt_qplib_nq *nq,
 418					    struct bnxt_qplib_cq *),
 419			 int (*srqn_handler)(struct bnxt_qplib_nq *nq,
 420					     struct bnxt_qplib_srq *,
 421					     u8 event))
 422{
 423	resource_size_t nq_base;
 424	int rc = -1;
 425
 426	if (cqn_handler)
 427		nq->cqn_handler = cqn_handler;
 428
 429	if (srqn_handler)
 430		nq->srqn_handler = srqn_handler;
 431
 432	/* Have a task to schedule CQ notifiers in post send case */
 433	nq->cqn_wq  = create_singlethread_workqueue("bnxt_qplib_nq");
 434	if (!nq->cqn_wq)
 435		return -ENOMEM;
 436
 437	nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
 438	nq->bar_reg_off = bar_reg_offset;
 439	nq_base = pci_resource_start(pdev, nq->bar_reg);
 440	if (!nq_base) {
 441		rc = -ENOMEM;
 442		goto fail;
 443	}
 444	/* Unconditionally map 8 bytes to support 57500 series */
 445	nq->bar_reg_iomem = ioremap_nocache(nq_base + nq->bar_reg_off, 8);
 446	if (!nq->bar_reg_iomem) {
 447		rc = -ENOMEM;
 448		goto fail;
 449	}
 450
 451	rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
 452	if (rc) {
 453		dev_err(&nq->pdev->dev,
 454			"Failed to request irq for nq-idx %d\n", nq_idx);
 455		goto fail;
 456	}
 457
 458	return 0;
 459fail:
 460	bnxt_qplib_disable_nq(nq);
 461	return rc;
 462}
 463
 464void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq)
 465{
 466	if (nq->hwq.max_elements) {
 467		bnxt_qplib_free_hwq(nq->pdev, &nq->hwq);
 468		nq->hwq.max_elements = 0;
 469	}
 470}
 471
 472int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq)
 473{
 474	u8 hwq_type;
 
 475
 476	nq->pdev = pdev;
 
 477	if (!nq->hwq.max_elements ||
 478	    nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
 479		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
 480	hwq_type = bnxt_qplib_get_hwq_type(nq->res);
 481	if (bnxt_qplib_alloc_init_hwq(nq->pdev, &nq->hwq, NULL,
 482				      &nq->hwq.max_elements,
 483				      BNXT_QPLIB_MAX_NQE_ENTRY_SIZE, 0,
 484				      PAGE_SIZE, hwq_type))
 
 
 
 
 
 485		return -ENOMEM;
 486
 487	nq->budget = 8;
 488	return 0;
 489}
 490
 491/* SRQ */
 492static void bnxt_qplib_arm_srq(struct bnxt_qplib_srq *srq, u32 arm_type)
 493{
 494	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 495	void __iomem *db;
 496	u32 sw_prod;
 497	u64 val = 0;
 498
 499	/* Ring DB */
 500	sw_prod = (arm_type == DBC_DBC_TYPE_SRQ_ARM) ?
 501		   srq->threshold : HWQ_CMP(srq_hwq->prod, srq_hwq);
 502	db = (arm_type == DBC_DBC_TYPE_SRQ_ARMENA) ? srq->dbr_base :
 503						     srq->dpi->dbr;
 504	val = ((srq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
 505	val <<= 32;
 506	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
 507	writeq(val, db);
 508}
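
/*
 * Illustrative note (layout inferred from the code above, not a
 * documented ABI statement): the 64-bit doorbell packs the SRQ id and
 * doorbell type into the high dword and the ring index into the low
 * dword.  For example, srq->id = 5 with arm_type = DBC_DBC_TYPE_SRQ
 * and sw_prod = 7 would produce
 *
 *	val = ((((5 << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
 *		DBC_DBC_TYPE_SRQ) << 32) |
 *	      ((7 << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK);
 */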
 509
 510void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
 511			   struct bnxt_qplib_srq *srq)
 512{
 513	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 514	struct cmdq_destroy_srq req;
 515	struct creq_destroy_srq_resp resp;
 516	u16 cmd_flags = 0;
 517	int rc;
 518
 519	RCFW_CMD_PREP(req, DESTROY_SRQ, cmd_flags);
 520
 521	/* Configure the request */
 522	req.srq_cid = cpu_to_le32(srq->id);
 523
 524	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
 525					  (struct creq_base *)&resp, NULL, 0);
 526	kfree(srq->swq);
 527	if (rc)
 528		return;
 529	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
 530}
 531
 532int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 533			  struct bnxt_qplib_srq *srq)
 534{
 535	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 536	struct cmdq_create_srq req;
 537	struct creq_create_srq_resp resp;
 538	struct bnxt_qplib_pbl *pbl;
 539	u16 cmd_flags = 0;
 540	int rc, idx;
 541
 542	srq->hwq.max_elements = srq->max_wqe;
 543	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &srq->hwq, &srq->sg_info,
 544				       &srq->hwq.max_elements,
 545				       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
 546				       PAGE_SIZE, HWQ_TYPE_QUEUE);
 547	if (rc)
 548		goto exit;
 549
 550	srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
 551			   GFP_KERNEL);
 552	if (!srq->swq) {
 553		rc = -ENOMEM;
 554		goto fail;
 555	}
 556
 557	RCFW_CMD_PREP(req, CREATE_SRQ, cmd_flags);
 558
 559	/* Configure the request */
 560	req.dpi = cpu_to_le32(srq->dpi->dpi);
 561	req.srq_handle = cpu_to_le64((uintptr_t)srq);
 562
 563	req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
 564	pbl = &srq->hwq.pbl[PBL_LVL_0];
 565	req.pg_size_lvl = cpu_to_le16((((u16)srq->hwq.level &
 566				      CMDQ_CREATE_SRQ_LVL_MASK) <<
 567				      CMDQ_CREATE_SRQ_LVL_SFT) |
 568				      (pbl->pg_size == ROCE_PG_SIZE_4K ?
 569				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K :
 570				       pbl->pg_size == ROCE_PG_SIZE_8K ?
 571				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8K :
 572				       pbl->pg_size == ROCE_PG_SIZE_64K ?
 573				       CMDQ_CREATE_SRQ_PG_SIZE_PG_64K :
 574				       pbl->pg_size == ROCE_PG_SIZE_2M ?
 575				       CMDQ_CREATE_SRQ_PG_SIZE_PG_2M :
 576				       pbl->pg_size == ROCE_PG_SIZE_8M ?
 577				       CMDQ_CREATE_SRQ_PG_SIZE_PG_8M :
 578				       pbl->pg_size == ROCE_PG_SIZE_1G ?
 579				       CMDQ_CREATE_SRQ_PG_SIZE_PG_1G :
 580				       CMDQ_CREATE_SRQ_PG_SIZE_PG_4K));
 581	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 582	req.pd_id = cpu_to_le32(srq->pd->id);
 583	req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
 584
 585	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 586					  (void *)&resp, NULL, 0);
 587	if (rc)
 588		goto fail;
 589
 590	spin_lock_init(&srq->lock);
 591	srq->start_idx = 0;
 592	srq->last_idx = srq->hwq.max_elements - 1;
 593	for (idx = 0; idx < srq->hwq.max_elements; idx++)
 594		srq->swq[idx].next_idx = idx + 1;
 595	srq->swq[srq->last_idx].next_idx = -1;
 596
 597	srq->id = le32_to_cpu(resp.xid);
 598	srq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
 599	if (srq->threshold)
 600		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARMENA);
 601	srq->arm_req = false;
 602
 603	return 0;
 604fail:
 605	bnxt_qplib_free_hwq(res->pdev, &srq->hwq);
 606	kfree(srq->swq);
 607exit:
 608	return rc;
 609}
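
/*
 * Illustrative sketch (hypothetical helpers, not upstream code): the
 * swq array initialized above is threaded into a singly linked free
 * list.  bnxt_qplib_post_srq_recv() pops an index from start_idx, and
 * the completion path would push a finished index back at last_idx:
 */
static inline int bnxt_qplib_srq_swq_pop(struct bnxt_qplib_srq *srq)
{
	int idx = srq->start_idx;

	srq->start_idx = srq->swq[idx].next_idx;
	return idx;
}

static inline void bnxt_qplib_srq_swq_push(struct bnxt_qplib_srq *srq, int idx)
{
	srq->swq[srq->last_idx].next_idx = idx;
	srq->last_idx = idx;
	srq->swq[idx].next_idx = -1;
}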
 610
 611int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
 612			  struct bnxt_qplib_srq *srq)
 613{
 614	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 615	u32 sw_prod, sw_cons, count = 0;
 616
 617	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
 618	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
 619
 620	count = sw_prod > sw_cons ? sw_prod - sw_cons :
 621				    srq_hwq->max_elements - sw_cons + sw_prod;
 622	if (count > srq->threshold) {
 623		srq->arm_req = false;
 624		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
 625	} else {
 626		/* Deferred arming */
 627		srq->arm_req = true;
 628	}
 629
 630	return 0;
 631}
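
/*
 * Illustrative sketch (assumption): the occupancy computed above is
 * standard wrap-aware ring arithmetic.  With max_elements = 1024,
 * sw_prod = 10 and sw_cons = 1020 it yields 1024 - 1020 + 10 = 14
 * outstanding entries despite the wrap.
 */
static inline u32 bnxt_qplib_srq_count(u32 sw_prod, u32 sw_cons, u32 depth)
{
	return sw_prod > sw_cons ? sw_prod - sw_cons :
				   depth - sw_cons + sw_prod;
}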
 632
 633int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 634			 struct bnxt_qplib_srq *srq)
 635{
 636	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 637	struct cmdq_query_srq req;
 638	struct creq_query_srq_resp resp;
 639	struct bnxt_qplib_rcfw_sbuf *sbuf;
 640	struct creq_query_srq_resp_sb *sb;
 641	u16 cmd_flags = 0;
 642	int rc = 0;
 643
 644	RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
 645	req.srq_cid = cpu_to_le32(srq->id);
 646
 647	/* Configure the request */
 648	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
 649	if (!sbuf)
 650		return -ENOMEM;
 651	sb = sbuf->sb;
 652	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
 653					  (void *)sbuf, 0);
 654	srq->threshold = le16_to_cpu(sb->srq_limit);
 655	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
 656
 657	return rc;
 658}
 659
 660int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
 661			     struct bnxt_qplib_swqe *wqe)
 662{
 663	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 664	struct rq_wqe *srqe, **srqe_ptr;
 665	struct sq_sge *hw_sge;
 666	u32 sw_prod, sw_cons, count = 0;
 667	int i, rc = 0, next;
 668
 669	spin_lock(&srq_hwq->lock);
 670	if (srq->start_idx == srq->last_idx) {
 671		dev_err(&srq_hwq->pdev->dev,
 672			"FP: SRQ (0x%x) is full!\n", srq->id);
 673		rc = -EINVAL;
 674		spin_unlock(&srq_hwq->lock);
 675		goto done;
 676	}
 677	next = srq->start_idx;
 678	srq->start_idx = srq->swq[next].next_idx;
 679	spin_unlock(&srq_hwq->lock);
 680
 681	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
 682	srqe_ptr = (struct rq_wqe **)srq_hwq->pbl_ptr;
 683	srqe = &srqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
 684	memset(srqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
  685	/* Copy the ULP SG list into the hardware SRQE */
 686	for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
 687	     i < wqe->num_sge; i++, hw_sge++) {
 688		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
 689		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
 690		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
 691	}
 692	srqe->wqe_type = wqe->type;
 693	srqe->flags = wqe->flags;
 694	srqe->wqe_size = wqe->num_sge +
 695			((offsetof(typeof(*srqe), data) + 15) >> 4);
 696	srqe->wr_id[0] = cpu_to_le32((u32)next);
 697	srq->swq[next].wr_id = wqe->wr_id;
 698
 699	srq_hwq->prod++;
 700
 701	spin_lock(&srq_hwq->lock);
 702	sw_prod = HWQ_CMP(srq_hwq->prod, srq_hwq);
 703	/* retaining srq_hwq->cons for this logic
 704	 * actually the lock is only required to
 705	 * read srq_hwq->cons.
 706	 */
 707	sw_cons = HWQ_CMP(srq_hwq->cons, srq_hwq);
 708	count = sw_prod > sw_cons ? sw_prod - sw_cons :
 709				    srq_hwq->max_elements - sw_cons + sw_prod;
 710	spin_unlock(&srq_hwq->lock);
 711	/* Ring DB */
 712	bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ);
  713	if (srq->arm_req && count > srq->threshold) {
 714		srq->arm_req = false;
 715		bnxt_qplib_arm_srq(srq, DBC_DBC_TYPE_SRQ_ARM);
 716	}
 717done:
 718	return rc;
 719}
 720
 721/* QP */
 722int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 723{
 724	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 725	struct cmdq_create_qp1 req;
 726	struct creq_create_qp1_resp resp;
 727	struct bnxt_qplib_pbl *pbl;
 728	struct bnxt_qplib_q *sq = &qp->sq;
 729	struct bnxt_qplib_q *rq = &qp->rq;
 730	int rc;
 731	u16 cmd_flags = 0;
 732	u32 qp_flags = 0;
 733
 734	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
 735
 736	/* General */
 737	req.type = qp->type;
 738	req.dpi = cpu_to_le32(qp->dpi->dpi);
 739	req.qp_handle = cpu_to_le64(qp->qp_handle);
 740
 741	/* SQ */
 742	sq->hwq.max_elements = sq->max_wqe;
 743	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
 744				       &sq->hwq.max_elements,
 745				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE, 0,
 746				       PAGE_SIZE, HWQ_TYPE_QUEUE);
 747	if (rc)
 748		goto exit;
 749
 750	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
 751	if (!sq->swq) {
 752		rc = -ENOMEM;
 753		goto fail_sq;
 754	}
 755	pbl = &sq->hwq.pbl[PBL_LVL_0];
 756	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 757	req.sq_pg_size_sq_lvl =
 758		((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
 759				<<  CMDQ_CREATE_QP1_SQ_LVL_SFT) |
 760		(pbl->pg_size == ROCE_PG_SIZE_4K ?
 761				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K :
 762		 pbl->pg_size == ROCE_PG_SIZE_8K ?
 763				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8K :
 764		 pbl->pg_size == ROCE_PG_SIZE_64K ?
 765				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_64K :
 766		 pbl->pg_size == ROCE_PG_SIZE_2M ?
 767				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_2M :
 768		 pbl->pg_size == ROCE_PG_SIZE_8M ?
 769				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_8M :
 770		 pbl->pg_size == ROCE_PG_SIZE_1G ?
 771				CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_1G :
 772		 CMDQ_CREATE_QP1_SQ_PG_SIZE_PG_4K);
 773
 774	if (qp->scq)
 775		req.scq_cid = cpu_to_le32(qp->scq->id);
 776
 777	qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
 778
 779	/* RQ */
 780	if (rq->max_wqe) {
 781		rq->hwq.max_elements = qp->rq.max_wqe;
 782		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq, NULL,
 783					       &rq->hwq.max_elements,
 784					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
 785					       PAGE_SIZE, HWQ_TYPE_QUEUE);
 786		if (rc)
 787			goto fail_sq;
 788
 789		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
 790				  GFP_KERNEL);
 791		if (!rq->swq) {
 792			rc = -ENOMEM;
 793			goto fail_rq;
 794		}
 795		pbl = &rq->hwq.pbl[PBL_LVL_0];
 796		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 797		req.rq_pg_size_rq_lvl =
 798			((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
 799			 CMDQ_CREATE_QP1_RQ_LVL_SFT) |
 800				(pbl->pg_size == ROCE_PG_SIZE_4K ?
 801					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K :
 802				 pbl->pg_size == ROCE_PG_SIZE_8K ?
 803					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8K :
 804				 pbl->pg_size == ROCE_PG_SIZE_64K ?
 805					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_64K :
 806				 pbl->pg_size == ROCE_PG_SIZE_2M ?
 807					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_2M :
 808				 pbl->pg_size == ROCE_PG_SIZE_8M ?
 809					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_8M :
 810				 pbl->pg_size == ROCE_PG_SIZE_1G ?
 811					CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_1G :
 812				 CMDQ_CREATE_QP1_RQ_PG_SIZE_PG_4K);
 813		if (qp->rcq)
 814			req.rcq_cid = cpu_to_le32(qp->rcq->id);
 815	}
 816
  817	/* Header buffer - allow the hdr_buf to be passed in */
 818	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
 819	if (rc) {
 820		rc = -ENOMEM;
 821		goto fail;
 822	}
 823	req.qp_flags = cpu_to_le32(qp_flags);
 824	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
 825	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
 826
 827	req.sq_fwo_sq_sge =
 828		cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
 829			    CMDQ_CREATE_QP1_SQ_SGE_SFT);
 830	req.rq_fwo_rq_sge =
 831		cpu_to_le16((rq->max_sge & CMDQ_CREATE_QP1_RQ_SGE_MASK) <<
 832			    CMDQ_CREATE_QP1_RQ_SGE_SFT);
 833
 834	req.pd_id = cpu_to_le32(qp->pd->id);
 835
 836	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 837					  (void *)&resp, NULL, 0);
 838	if (rc)
 839		goto fail;
 840
 841	qp->id = le32_to_cpu(resp.xid);
 842	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
 843	rcfw->qp_tbl[qp->id].qp_id = qp->id;
 844	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
 845
 846	return 0;
 847
 848fail:
 849	bnxt_qplib_free_qp_hdr_buf(res, qp);
 850fail_rq:
 851	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
 852	kfree(rq->swq);
 853fail_sq:
 854	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
 855	kfree(sq->swq);
 856exit:
 857	return rc;
 858}
 859
 860int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 861{
 862	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 863	unsigned long int psn_search, poff = 0;
 864	struct sq_psn_search **psn_search_ptr;
 865	struct bnxt_qplib_q *sq = &qp->sq;
 866	struct bnxt_qplib_q *rq = &qp->rq;
 867	int i, rc, req_size, psn_sz = 0;
 868	struct sq_send **hw_sq_send_ptr;
 869	struct creq_create_qp_resp resp;
 870	struct bnxt_qplib_hwq *xrrq;
 871	u16 cmd_flags = 0, max_ssge;
 872	struct cmdq_create_qp req;
 873	struct bnxt_qplib_pbl *pbl;
 874	u32 qp_flags = 0;
 875	u16 max_rsge;
 876
 877	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
 878
 879	/* General */
 880	req.type = qp->type;
 881	req.dpi = cpu_to_le32(qp->dpi->dpi);
 882	req.qp_handle = cpu_to_le64(qp->qp_handle);
 883
 884	/* SQ */
 885	if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
 886		psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
 887			 sizeof(struct sq_psn_search_ext) :
 888			 sizeof(struct sq_psn_search);
 889	}
 890	sq->hwq.max_elements = sq->max_wqe;
 891	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
 892				       &sq->hwq.max_elements,
 893				       BNXT_QPLIB_MAX_SQE_ENTRY_SIZE,
 894				       psn_sz,
 895				       PAGE_SIZE, HWQ_TYPE_QUEUE);
 896	if (rc)
 897		goto exit;
 898
 899	sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
 900	if (!sq->swq) {
 901		rc = -ENOMEM;
 902		goto fail_sq;
 903	}
 904	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
 905	if (psn_sz) {
 906		psn_search_ptr = (struct sq_psn_search **)
 907				  &hw_sq_send_ptr[get_sqe_pg
 908					(sq->hwq.max_elements)];
 909		psn_search = (unsigned long int)
 910			      &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
 911			      [get_sqe_idx(sq->hwq.max_elements)];
 912		if (psn_search & ~PAGE_MASK) {
 913			/* If the psn_search does not start on a page boundary,
 914			 * then calculate the offset
 915			 */
 916			poff = (psn_search & ~PAGE_MASK) /
 917				BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE;
 918		}
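		/*
		 * Worked example (illustrative; assumes an 8-byte
		 * sq_psn_search entry): a PSN area starting 0x40 bytes
		 * into a page gives poff = 0x40 / 8 = 8, so entry i is
		 * looked up at index (i + 8) in the pages below.
		 */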
 919		for (i = 0; i < sq->hwq.max_elements; i++) {
 920			sq->swq[i].psn_search =
 921				&psn_search_ptr[get_psne_pg(i + poff)]
 922					       [get_psne_idx(i + poff)];
  923			/* psn_ext is used only on P5 chips. */
 924			sq->swq[i].psn_ext =
 925				(struct sq_psn_search_ext *)
 926				&psn_search_ptr[get_psne_pg(i + poff)]
 927					       [get_psne_idx(i + poff)];
 928		}
 929	}
 930	pbl = &sq->hwq.pbl[PBL_LVL_0];
 931	req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 932	req.sq_pg_size_sq_lvl =
 933		((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
 934				 <<  CMDQ_CREATE_QP_SQ_LVL_SFT) |
 935		(pbl->pg_size == ROCE_PG_SIZE_4K ?
 936				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K :
 937		 pbl->pg_size == ROCE_PG_SIZE_8K ?
 938				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8K :
 939		 pbl->pg_size == ROCE_PG_SIZE_64K ?
 940				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_64K :
 941		 pbl->pg_size == ROCE_PG_SIZE_2M ?
 942				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_2M :
 943		 pbl->pg_size == ROCE_PG_SIZE_8M ?
 944				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_8M :
 945		 pbl->pg_size == ROCE_PG_SIZE_1G ?
 946				CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
 947		 CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
 948
 949	if (qp->scq)
 950		req.scq_cid = cpu_to_le32(qp->scq->id);
 951
 952	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
 953	qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
 954	if (qp->sig_type)
 955		qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
 956
 957	/* RQ */
 958	if (rq->max_wqe) {
 959		rq->hwq.max_elements = rq->max_wqe;
 960		rc = bnxt_qplib_alloc_init_hwq(res->pdev, &rq->hwq,
 961					       &rq->sg_info,
 962					       &rq->hwq.max_elements,
 963					       BNXT_QPLIB_MAX_RQE_ENTRY_SIZE, 0,
 964					       PAGE_SIZE, HWQ_TYPE_QUEUE);
 965		if (rc)
 966			goto fail_sq;
 967
 968		rq->swq = kcalloc(rq->hwq.max_elements, sizeof(*rq->swq),
 969				  GFP_KERNEL);
 970		if (!rq->swq) {
 971			rc = -ENOMEM;
 972			goto fail_rq;
 973		}
 974		pbl = &rq->hwq.pbl[PBL_LVL_0];
 975		req.rq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
 976		req.rq_pg_size_rq_lvl =
 977			((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
 978			 CMDQ_CREATE_QP_RQ_LVL_SFT) |
 979				(pbl->pg_size == ROCE_PG_SIZE_4K ?
 980					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K :
 981				 pbl->pg_size == ROCE_PG_SIZE_8K ?
 982					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8K :
 983				 pbl->pg_size == ROCE_PG_SIZE_64K ?
 984					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_64K :
 985				 pbl->pg_size == ROCE_PG_SIZE_2M ?
 986					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_2M :
 987				 pbl->pg_size == ROCE_PG_SIZE_8M ?
 988					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_8M :
 989				 pbl->pg_size == ROCE_PG_SIZE_1G ?
 990					CMDQ_CREATE_QP_RQ_PG_SIZE_PG_1G :
 991				 CMDQ_CREATE_QP_RQ_PG_SIZE_PG_4K);
 992	} else {
 993		/* SRQ */
 994		if (qp->srq) {
 995			qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
 996			req.srq_cid = cpu_to_le32(qp->srq->id);
 997		}
 998	}
 999
1000	if (qp->rcq)
1001		req.rcq_cid = cpu_to_le32(qp->rcq->id);
1002	req.qp_flags = cpu_to_le32(qp_flags);
1003	req.sq_size = cpu_to_le32(sq->hwq.max_elements);
1004	req.rq_size = cpu_to_le32(rq->hwq.max_elements);
1005	qp->sq_hdr_buf = NULL;
1006	qp->rq_hdr_buf = NULL;
1007
1008	rc = bnxt_qplib_alloc_qp_hdr_buf(res, qp);
1009	if (rc)
1010		goto fail_rq;
1011
 1012	/* CTRL-22434: Irrespective of the requested SGE count on the SQ,
 1013	 * always create the QP with the maximum possible send SGEs if the
 1014	 * requested inline size is greater than 0.
 1015	 */
1016	max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
1017	req.sq_fwo_sq_sge = cpu_to_le16(
1018				((max_ssge & CMDQ_CREATE_QP_SQ_SGE_MASK)
1019				 << CMDQ_CREATE_QP_SQ_SGE_SFT) | 0);
1020	max_rsge = bnxt_qplib_is_chip_gen_p5(res->cctx) ? 6 : rq->max_sge;
1021	req.rq_fwo_rq_sge = cpu_to_le16(
1022				((max_rsge & CMDQ_CREATE_QP_RQ_SGE_MASK)
1023				 << CMDQ_CREATE_QP_RQ_SGE_SFT) | 0);
1024	/* ORRQ and IRRQ */
1025	if (psn_sz) {
1026		xrrq = &qp->orrq;
1027		xrrq->max_elements =
1028			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1029		req_size = xrrq->max_elements *
1030			   BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1031		req_size &= ~(PAGE_SIZE - 1);
1032		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
1033					       &xrrq->max_elements,
1034					       BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE,
1035					       0, req_size, HWQ_TYPE_CTX);
1036		if (rc)
1037			goto fail_buf_free;
1038		pbl = &xrrq->pbl[PBL_LVL_0];
1039		req.orrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1040
1041		xrrq = &qp->irrq;
1042		xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
1043						qp->max_dest_rd_atomic);
1044		req_size = xrrq->max_elements *
1045			   BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
1046		req_size &= ~(PAGE_SIZE - 1);
1047
1048		rc = bnxt_qplib_alloc_init_hwq(res->pdev, xrrq, NULL,
1049					       &xrrq->max_elements,
1050					       BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE,
1051					       0, req_size, HWQ_TYPE_CTX);
1052		if (rc)
1053			goto fail_orrq;
1054
1055		pbl = &xrrq->pbl[PBL_LVL_0];
1056		req.irrq_addr = cpu_to_le64(pbl->pg_map_arr[0]);
1057	}
1058	req.pd_id = cpu_to_le32(qp->pd->id);
1059
1060	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1061					  (void *)&resp, NULL, 0);
1062	if (rc)
1063		goto fail;
1064
1065	qp->id = le32_to_cpu(resp.xid);
1066	qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
1067	qp->cctx = res->cctx;
1068	INIT_LIST_HEAD(&qp->sq_flush);
1069	INIT_LIST_HEAD(&qp->rq_flush);
1070	rcfw->qp_tbl[qp->id].qp_id = qp->id;
1071	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
1072
1073	return 0;
1074
1075fail:
1076	if (qp->irrq.max_elements)
1077		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1078fail_orrq:
1079	if (qp->orrq.max_elements)
1080		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1081fail_buf_free:
1082	bnxt_qplib_free_qp_hdr_buf(res, qp);
1083fail_rq:
1084	bnxt_qplib_free_hwq(res->pdev, &rq->hwq);
1085	kfree(rq->swq);
1086fail_sq:
1087	bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
1088	kfree(sq->swq);
1089exit:
1090	return rc;
1091}
1092
1093static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp)
1094{
1095	switch (qp->state) {
1096	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1097		/* INIT->RTR, configure the path_mtu to the default
1098		 * 2048 if not being requested
1099		 */
1100		if (!(qp->modify_flags &
1101		    CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
1102			qp->modify_flags |=
1103				CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1104			qp->path_mtu =
1105				CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
1106		}
1107		qp->modify_flags &=
1108			~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
 1109		/* Bono FW requires the max_dest_rd_atomic to be >= 1 */
1110		if (qp->max_dest_rd_atomic < 1)
1111			qp->max_dest_rd_atomic = 1;
1112		qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
1113		/* Bono FW 20.6.5 requires SGID_INDEX configuration */
1114		if (!(qp->modify_flags &
1115		    CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
1116			qp->modify_flags |=
1117				CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
1118			qp->ah.sgid_index = 0;
1119		}
1120		break;
1121	default:
1122		break;
1123	}
1124}
1125
1126static void __modify_flags_from_rtr_state(struct bnxt_qplib_qp *qp)
1127{
1128	switch (qp->state) {
1129	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1130		/* Bono FW requires the max_rd_atomic to be >= 1 */
1131		if (qp->max_rd_atomic < 1)
1132			qp->max_rd_atomic = 1;
1133		/* Bono FW does not allow PKEY_INDEX,
1134		 * DGID, FLOW_LABEL, SGID_INDEX, HOP_LIMIT,
1135		 * TRAFFIC_CLASS, DEST_MAC, PATH_MTU, RQ_PSN,
1136		 * MIN_RNR_TIMER, MAX_DEST_RD_ATOMIC, DEST_QP_ID
1137		 * modification
1138		 */
1139		qp->modify_flags &=
1140			~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
1141			  CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
1142			  CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
1143			  CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
1144			  CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
1145			  CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
1146			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
1147			  CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
1148			  CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
1149			  CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
1150			  CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
1151			  CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
1152		break;
1153	default:
1154		break;
1155	}
1156}
1157
1158static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
1159{
1160	switch (qp->cur_qp_state) {
1161	case CMDQ_MODIFY_QP_NEW_STATE_RESET:
1162		break;
1163	case CMDQ_MODIFY_QP_NEW_STATE_INIT:
1164		__modify_flags_from_init_state(qp);
1165		break;
1166	case CMDQ_MODIFY_QP_NEW_STATE_RTR:
1167		__modify_flags_from_rtr_state(qp);
1168		break;
1169	case CMDQ_MODIFY_QP_NEW_STATE_RTS:
1170		break;
1171	case CMDQ_MODIFY_QP_NEW_STATE_SQD:
1172		break;
1173	case CMDQ_MODIFY_QP_NEW_STATE_SQE:
1174		break;
1175	case CMDQ_MODIFY_QP_NEW_STATE_ERR:
1176		break;
1177	default:
1178		break;
1179	}
1180}
1181
1182int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1183{
1184	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1185	struct cmdq_modify_qp req;
1186	struct creq_modify_qp_resp resp;
1187	u16 cmd_flags = 0, pkey;
1188	u32 temp32[4];
1189	u32 bmask;
1190	int rc;
1191
1192	RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
1193
1194	/* Filter out the qp_attr_mask based on the state->new transition */
1195	__filter_modify_flags(qp);
1196	bmask = qp->modify_flags;
1197	req.modify_mask = cpu_to_le32(qp->modify_flags);
1198	req.qp_cid = cpu_to_le32(qp->id);
1199	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
1200		req.network_type_en_sqd_async_notify_new_state =
1201				(qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
1202				(qp->en_sqd_async_notify ?
1203					CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
1204	}
1205	req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
1206
1207	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
1208		req.access = qp->access;
1209
1210	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
1211		if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
1212					 qp->pkey_index, &pkey))
1213			req.pkey = cpu_to_le16(pkey);
1214	}
1215	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
1216		req.qkey = cpu_to_le32(qp->qkey);
1217
1218	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
1219		memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
1220		req.dgid[0] = cpu_to_le32(temp32[0]);
1221		req.dgid[1] = cpu_to_le32(temp32[1]);
1222		req.dgid[2] = cpu_to_le32(temp32[2]);
1223		req.dgid[3] = cpu_to_le32(temp32[3]);
1224	}
1225	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
1226		req.flow_label = cpu_to_le32(qp->ah.flow_label);
1227
1228	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
1229		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
1230					     [qp->ah.sgid_index]);
1231
1232	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
1233		req.hop_limit = qp->ah.hop_limit;
1234
1235	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS)
1236		req.traffic_class = qp->ah.traffic_class;
1237
1238	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC)
1239		memcpy(req.dest_mac, qp->ah.dmac, 6);
1240
1241	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)
1242		req.path_mtu = qp->path_mtu;
1243
1244	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT)
1245		req.timeout = qp->timeout;
1246
1247	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT)
1248		req.retry_cnt = qp->retry_cnt;
1249
1250	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY)
1251		req.rnr_retry = qp->rnr_retry;
1252
1253	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER)
1254		req.min_rnr_timer = qp->min_rnr_timer;
1255
1256	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN)
1257		req.rq_psn = cpu_to_le32(qp->rq.psn);
1258
1259	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN)
1260		req.sq_psn = cpu_to_le32(qp->sq.psn);
1261
1262	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC)
1263		req.max_rd_atomic =
1264			ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
1265
1266	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC)
1267		req.max_dest_rd_atomic =
1268			IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
1269
1270	req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
1271	req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
1272	req.sq_sge = cpu_to_le16(qp->sq.max_sge);
1273	req.rq_sge = cpu_to_le16(qp->rq.max_sge);
1274	req.max_inline_data = cpu_to_le32(qp->max_inline_data);
1275	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
1276		req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
1277
1278	req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
1279
1280	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1281					  (void *)&resp, NULL, 0);
1282	if (rc)
1283		return rc;
1284	qp->cur_qp_state = qp->state;
1285	return 0;
1286}
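
/*
 * Illustrative usage sketch (hypothetical helper, not upstream code): a
 * ULP walks a QP through RESET -> INIT -> RTR -> RTS by setting
 * qp->state plus the relevant qp->modify_flags bits and calling
 * bnxt_qplib_modify_qp() once per transition, e.g. for RESET -> INIT:
 */
static inline int bnxt_qplib_example_qp_to_init(struct bnxt_qplib_res *res,
						struct bnxt_qplib_qp *qp)
{
	qp->state = CMDQ_MODIFY_QP_NEW_STATE_INIT;
	qp->modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_STATE |
			   CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
			   CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
	return bnxt_qplib_modify_qp(res, qp);
}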
1287
1288int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
1289{
1290	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1291	struct cmdq_query_qp req;
1292	struct creq_query_qp_resp resp;
1293	struct bnxt_qplib_rcfw_sbuf *sbuf;
1294	struct creq_query_qp_resp_sb *sb;
1295	u16 cmd_flags = 0;
1296	u32 temp32[4];
1297	int i, rc = 0;
1298
1299	RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
1300
1301	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
1302	if (!sbuf)
1303		return -ENOMEM;
1304	sb = sbuf->sb;
1305
1306	req.qp_cid = cpu_to_le32(qp->id);
1307	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
1308	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
1309					  (void *)sbuf, 0);
1310	if (rc)
1311		goto bail;
1312	/* Extract the context from the side buffer */
1313	qp->state = sb->en_sqd_async_notify_state &
1314			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
1315	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
1316				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
1317				  true : false;
1318	qp->access = sb->access;
1319	qp->pkey_index = le16_to_cpu(sb->pkey);
1320	qp->qkey = le32_to_cpu(sb->qkey);
1321
1322	temp32[0] = le32_to_cpu(sb->dgid[0]);
1323	temp32[1] = le32_to_cpu(sb->dgid[1]);
1324	temp32[2] = le32_to_cpu(sb->dgid[2]);
1325	temp32[3] = le32_to_cpu(sb->dgid[3]);
1326	memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
1327
1328	qp->ah.flow_label = le32_to_cpu(sb->flow_label);
1329
1330	qp->ah.sgid_index = 0;
1331	for (i = 0; i < res->sgid_tbl.max; i++) {
1332		if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
1333			qp->ah.sgid_index = i;
1334			break;
1335		}
1336	}
1337	if (i == res->sgid_tbl.max)
1338		dev_warn(&res->pdev->dev, "SGID not found??\n");
1339
1340	qp->ah.hop_limit = sb->hop_limit;
1341	qp->ah.traffic_class = sb->traffic_class;
1342	memcpy(qp->ah.dmac, sb->dest_mac, 6);
1343	qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1344				CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
1345				CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
1346	qp->path_mtu = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
1347				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) >>
1348				    CREQ_QUERY_QP_RESP_SB_PATH_MTU_SFT;
1349	qp->timeout = sb->timeout;
1350	qp->retry_cnt = sb->retry_cnt;
1351	qp->rnr_retry = sb->rnr_retry;
1352	qp->min_rnr_timer = sb->min_rnr_timer;
1353	qp->rq.psn = le32_to_cpu(sb->rq_psn);
1354	qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
1355	qp->sq.psn = le32_to_cpu(sb->sq_psn);
1356	qp->max_dest_rd_atomic =
1357			IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
1358	qp->sq.max_wqe = qp->sq.hwq.max_elements;
1359	qp->rq.max_wqe = qp->rq.hwq.max_elements;
1360	qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
1361	qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
1362	qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
1363	qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
1364	memcpy(qp->smac, sb->src_mac, 6);
1365	qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1366bail:
1367	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
1368	return rc;
1369}
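
/*
 * Illustrative sketch (hypothetical wrapper, not upstream code): both
 * query verbs above share the side-buffer pattern -- allocate an sbuf
 * sized for the response structure, issue the command with the sbuf
 * attached, copy out, then free.  The caller is assumed to have
 * RCFW_CMD_PREP()ed req and set its resp_size.
 */
static inline int bnxt_qplib_query_via_sbuf(struct bnxt_qplib_rcfw *rcfw,
					    struct cmdq_base *req,
					    struct creq_base *resp,
					    void *out, size_t out_len)
{
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	int rc;

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, out_len);
	if (!sbuf)
		return -ENOMEM;
	rc = bnxt_qplib_rcfw_send_message(rcfw, req, resp, (void *)sbuf, 0);
	if (!rc)
		memcpy(out, sbuf->sb, out_len);
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}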
1370
1371static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
1372{
1373	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1374	struct cq_base *hw_cqe, **hw_cqe_ptr;
1375	int i;
1376
1377	for (i = 0; i < cq_hwq->max_elements; i++) {
1378		hw_cqe_ptr = (struct cq_base **)cq_hwq->pbl_ptr;
1379		hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
1380		if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
1381			continue;
1382		/*
1383		 * The valid test of the entry must be done first before
1384		 * reading any further.
1385		 */
1386		dma_rmb();
1387		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
1388		case CQ_BASE_CQE_TYPE_REQ:
1389		case CQ_BASE_CQE_TYPE_TERMINAL:
1390		{
1391			struct cq_req *cqe = (struct cq_req *)hw_cqe;
1392
1393			if (qp == le64_to_cpu(cqe->qp_handle))
1394				cqe->qp_handle = 0;
1395			break;
1396		}
1397		case CQ_BASE_CQE_TYPE_RES_RC:
1398		case CQ_BASE_CQE_TYPE_RES_UD:
1399		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
1400		{
1401			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
1402
1403			if (qp == le64_to_cpu(cqe->qp_handle))
1404				cqe->qp_handle = 0;
1405			break;
1406		}
1407		default:
1408			break;
1409		}
1410	}
1411}
1412
1413int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
1414			  struct bnxt_qplib_qp *qp)
1415{
1416	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1417	struct cmdq_destroy_qp req;
1418	struct creq_destroy_qp_resp resp;
1419	u16 cmd_flags = 0;
1420	int rc;
1421
1422	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
1423	rcfw->qp_tbl[qp->id].qp_handle = NULL;
1424
1425	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
1426
1427	req.qp_cid = cpu_to_le32(qp->id);
1428	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1429					  (void *)&resp, NULL, 0);
1430	if (rc) {
1431		rcfw->qp_tbl[qp->id].qp_id = qp->id;
1432		rcfw->qp_tbl[qp->id].qp_handle = qp;
1433		return rc;
1434	}
1435
1436	return 0;
1437}
1438
1439void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
1440			    struct bnxt_qplib_qp *qp)
1441{
1442	bnxt_qplib_free_qp_hdr_buf(res, qp);
1443	bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
1444	kfree(qp->sq.swq);
1445
1446	bnxt_qplib_free_hwq(res->pdev, &qp->rq.hwq);
1447	kfree(qp->rq.swq);
1448
1449	if (qp->irrq.max_elements)
1450		bnxt_qplib_free_hwq(res->pdev, &qp->irrq);
1451	if (qp->orrq.max_elements)
1452		bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
1453
1454}
1455
1456void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
1457				struct bnxt_qplib_sge *sge)
1458{
1459	struct bnxt_qplib_q *sq = &qp->sq;
1460	u32 sw_prod;
1461
1462	memset(sge, 0, sizeof(*sge));
1463
1464	if (qp->sq_hdr_buf) {
1465		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1466		sge->addr = (dma_addr_t)(qp->sq_hdr_buf_map +
1467					 sw_prod * qp->sq_hdr_buf_size);
1468		sge->lkey = 0xFFFFFFFF;
1469		sge->size = qp->sq_hdr_buf_size;
1470		return qp->sq_hdr_buf + sw_prod * sge->size;
1471	}
1472	return NULL;
1473}
1474
1475u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
1476{
1477	struct bnxt_qplib_q *rq = &qp->rq;
1478
1479	return HWQ_CMP(rq->hwq.prod, &rq->hwq);
1480}
1481
1482dma_addr_t bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
1483{
1484	return (qp->rq_hdr_buf_map + index * qp->rq_hdr_buf_size);
1485}
1486
1487void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
1488				struct bnxt_qplib_sge *sge)
1489{
1490	struct bnxt_qplib_q *rq = &qp->rq;
1491	u32 sw_prod;
1492
1493	memset(sge, 0, sizeof(*sge));
1494
1495	if (qp->rq_hdr_buf) {
1496		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1497		sge->addr = (dma_addr_t)(qp->rq_hdr_buf_map +
1498					 sw_prod * qp->rq_hdr_buf_size);
1499		sge->lkey = 0xFFFFFFFF;
1500		sge->size = qp->rq_hdr_buf_size;
1501		return qp->rq_hdr_buf + sw_prod * sge->size;
1502	}
1503	return NULL;
1504}
1505
1506void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
1507{
1508	struct bnxt_qplib_q *sq = &qp->sq;
1509	u32 sw_prod;
1510	u64 val = 0;
1511
1512	val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1513	       DBC_DBC_TYPE_SQ);
1514	val <<= 32;
1515	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1516	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1517	/* Flush all the WQE writes to HW */
1518	writeq(val, qp->dpi->dbr);
1519}
1520
1521int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
1522			 struct bnxt_qplib_swqe *wqe)
1523{
1524	struct bnxt_qplib_q *sq = &qp->sq;
1525	struct bnxt_qplib_swq *swq;
1526	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
1527	struct sq_sge *hw_sge;
1528	struct bnxt_qplib_nq_work *nq_work = NULL;
1529	bool sch_handler = false;
1530	u32 sw_prod;
1531	u8 wqe_size16;
1532	int i, rc = 0, data_len = 0, pkt_num = 0;
1533	__le32 temp32;
1534
1535	if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS) {
1536		if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1537			sch_handler = true;
1538			dev_dbg(&sq->hwq.pdev->dev,
1539				"%s Error QP. Scheduling for poll_cq\n",
1540				__func__);
1541			goto queue_err;
1542		}
1543	}
1544
1545	if (bnxt_qplib_queue_full(sq)) {
1546		dev_err(&sq->hwq.pdev->dev,
1547			"prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
1548			sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
1549			sq->q_full_delta);
1550		rc = -ENOMEM;
1551		goto done;
1552	}
1553	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1554	swq = &sq->swq[sw_prod];
1555	swq->wr_id = wqe->wr_id;
1556	swq->type = wqe->type;
1557	swq->flags = wqe->flags;
1558	if (qp->sig_type)
1559		swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1560	swq->start_psn = sq->psn & BTH_PSN_MASK;
1561
1562	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
1563	hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
1564					[get_sqe_idx(sw_prod)];
1565
1566	memset(hw_sq_send_hdr, 0, BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
1567
1568	if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
1569		/* Copy the inline data */
1570		if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
1571			dev_warn(&sq->hwq.pdev->dev,
1572				 "Inline data length > 96 detected\n");
1573			data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
1574		} else {
1575			data_len = wqe->inline_len;
1576		}
1577		memcpy(hw_sq_send_hdr->data, wqe->inline_data, data_len);
1578		wqe_size16 = (data_len + 15) >> 4;
1579	} else {
1580		for (i = 0, hw_sge = (struct sq_sge *)hw_sq_send_hdr->data;
1581		     i < wqe->num_sge; i++, hw_sge++) {
1582			hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1583			hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1584			hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1585			data_len += wqe->sg_list[i].size;
1586		}
1587		/* Each SGE entry = 1 WQE size16 */
1588		wqe_size16 = wqe->num_sge;
 1589		/* HW requires the WQE size to have room for at least one
 1590		 * SGE, even if none was supplied by the ULP.
 1591		 */
1592		if (!wqe->num_sge)
1593			wqe_size16++;
1594	}
1595
1596	/* Specifics */
1597	switch (wqe->type) {
1598	case BNXT_QPLIB_SWQE_TYPE_SEND:
1599		if (qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
1600			/* Assemble info for Raw Ethertype QPs */
1601			struct sq_send_raweth_qp1 *sqe =
1602				(struct sq_send_raweth_qp1 *)hw_sq_send_hdr;
1603
1604			sqe->wqe_type = wqe->type;
1605			sqe->flags = wqe->flags;
1606			sqe->wqe_size = wqe_size16 +
1607				((offsetof(typeof(*sqe), data) + 15) >> 4);
1608			sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
1609			sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
1610			sqe->length = cpu_to_le32(data_len);
1611			sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
1612				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
1613				SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
1614
1615			break;
1616		}
 1617		/* fall through */
1618	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
1619	case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
1620	{
1621		struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
1622
1623		sqe->wqe_type = wqe->type;
1624		sqe->flags = wqe->flags;
1625		sqe->wqe_size = wqe_size16 +
1626				((offsetof(typeof(*sqe), data) + 15) >> 4);
1627		sqe->inv_key_or_imm_data = cpu_to_le32(
1628						wqe->send.inv_key);
1629		if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
1630		    qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
1631			sqe->q_key = cpu_to_le32(wqe->send.q_key);
1632			sqe->dst_qp = cpu_to_le32(
1633					wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
1634			sqe->length = cpu_to_le32(data_len);
1635			sqe->avid = cpu_to_le32(wqe->send.avid &
1636						SQ_SEND_AVID_MASK);
1637			sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
1638		} else {
1639			sqe->length = cpu_to_le32(data_len);
1640			sqe->dst_qp = 0;
1641			sqe->avid = 0;
1642			if (qp->mtu)
1643				pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1644			if (!pkt_num)
1645				pkt_num = 1;
1646			sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1647		}
1648		break;
1649	}
1650	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
1651	case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
1652	case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
1653	{
1654		struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
1655
1656		sqe->wqe_type = wqe->type;
1657		sqe->flags = wqe->flags;
1658		sqe->wqe_size = wqe_size16 +
1659				((offsetof(typeof(*sqe), data) + 15) >> 4);
1660		sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
1661		sqe->length = cpu_to_le32((u32)data_len);
1662		sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
1663		sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
1664		if (qp->mtu)
1665			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1666		if (!pkt_num)
1667			pkt_num = 1;
1668		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1669		break;
1670	}
1671	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
1672	case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
1673	{
1674		struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
1675
1676		sqe->wqe_type = wqe->type;
1677		sqe->flags = wqe->flags;
1678		sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
1679		sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
1680		sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
1681		sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
1682		if (qp->mtu)
1683			pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
1684		if (!pkt_num)
1685			pkt_num = 1;
1686		sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
1687		break;
1688	}
1689	case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
1690	{
1691		struct sq_localinvalidate *sqe =
1692				(struct sq_localinvalidate *)hw_sq_send_hdr;
1693
1694		sqe->wqe_type = wqe->type;
1695		sqe->flags = wqe->flags;
1696		sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
1697
1698		break;
1699	}
1700	case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
1701	{
1702		struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
1703
1704		sqe->wqe_type = wqe->type;
1705		sqe->flags = wqe->flags;
1706		sqe->access_cntl = wqe->frmr.access_cntl |
1707				   SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
1708		sqe->zero_based_page_size_log =
1709			(wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
1710			SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
1711			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
1712		sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
1713		temp32 = cpu_to_le32(wqe->frmr.length);
1714		memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
1715		sqe->numlevels_pbl_page_size_log =
1716			((wqe->frmr.pbl_pg_sz_log <<
1717					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
1718					SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
1719			((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
1720					SQ_FR_PMR_NUMLEVELS_MASK);
1721
1722		for (i = 0; i < wqe->frmr.page_list_len; i++)
1723			wqe->frmr.pbl_ptr[i] = cpu_to_le64(
1724						wqe->frmr.page_list[i] |
1725						PTU_PTE_VALID);
1726		sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
1727		sqe->va = cpu_to_le64(wqe->frmr.va);
1728
1729		break;
1730	}
1731	case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
1732	{
1733		struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
1734
1735		sqe->wqe_type = wqe->type;
1736		sqe->flags = wqe->flags;
1737		sqe->access_cntl = wqe->bind.access_cntl;
1738		sqe->mw_type_zero_based = wqe->bind.mw_type |
1739			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
1740		sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
1741		sqe->l_key = cpu_to_le32(wqe->bind.r_key);
1742		sqe->va = cpu_to_le64(wqe->bind.va);
1743		temp32 = cpu_to_le32(wqe->bind.length);
1744		memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
1745		break;
1746	}
1747	default:
1748		/* Bad wqe, return error */
1749		rc = -EINVAL;
1750		goto done;
1751	}
1752	swq->next_psn = sq->psn & BTH_PSN_MASK;
1753	if (swq->psn_search) {
1754		u32 opcd_spsn;
1755		u32 flg_npsn;
1756
1757		opcd_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
1758			      SQ_PSN_SEARCH_START_PSN_MASK);
1759		opcd_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
1760			       SQ_PSN_SEARCH_OPCODE_MASK);
1761		flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
1762			     SQ_PSN_SEARCH_NEXT_PSN_MASK);
1763		if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
1764			swq->psn_ext->opcode_start_psn =
1765						cpu_to_le32(opcd_spsn);
1766			swq->psn_ext->flags_next_psn =
1767						cpu_to_le32(flg_npsn);
1768		} else {
1769			swq->psn_search->opcode_start_psn =
1770						cpu_to_le32(opcd_spsn);
1771			swq->psn_search->flags_next_psn =
1772						cpu_to_le32(flg_npsn);
1773		}
1774	}
1775queue_err:
1776	if (sch_handler) {
1777		/* Store the ULP info in the software structures */
1778		sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
1779		swq = &sq->swq[sw_prod];
1780		swq->wr_id = wqe->wr_id;
1781		swq->type = wqe->type;
1782		swq->flags = wqe->flags;
1783		if (qp->sig_type)
1784			swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
1785		swq->start_psn = sq->psn & BTH_PSN_MASK;
1786	}
1787	sq->hwq.prod++;
1788	qp->wqe_cnt++;
1789
1790done:
1791	if (sch_handler) {
1792		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1793		if (nq_work) {
1794			nq_work->cq = qp->scq;
1795			nq_work->nq = qp->scq->nq;
1796			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1797			queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
1798		} else {
1799			dev_err(&sq->hwq.pdev->dev,
1800				"FP: Failed to allocate SQ nq_work!\n");
1801			rc = -ENOMEM;
1802		}
1803	}
1804	return rc;
1805}
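
/*
 * Illustrative sketch (assumption): the PSN advance in the send path
 * above is a ceiling division -- one PSN per MTU-sized packet.  For
 * data_len = 5000 and qp->mtu = 2048, pkt_num = 3 and the PSN moves
 * forward by 3, masked to the 24-bit BTH PSN space.
 */
static inline u32 bnxt_qplib_next_psn(u32 psn, u32 data_len, u32 mtu)
{
	u32 pkt_num = mtu ? DIV_ROUND_UP(data_len, mtu) : 0;

	if (!pkt_num)
		pkt_num = 1;
	return (psn + pkt_num) & BTH_PSN_MASK;
}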
1806
1807void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
1808{
1809	struct bnxt_qplib_q *rq = &qp->rq;
1810	u32 sw_prod;
1811	u64 val = 0;
1812
1813	val = (((qp->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1814	       DBC_DBC_TYPE_RQ);
1815	val <<= 32;
1816	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1817	val |= (sw_prod << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
 1818	/* Flush the writes to the HW Rx WQE before ringing the Rx DB */
1819	writeq(val, qp->dpi->dbr);
1820}
1821
1822int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
1823			 struct bnxt_qplib_swqe *wqe)
1824{
1825	struct bnxt_qplib_q *rq = &qp->rq;
1826	struct rq_wqe *rqe, **rqe_ptr;
1827	struct sq_sge *hw_sge;
1828	struct bnxt_qplib_nq_work *nq_work = NULL;
1829	bool sch_handler = false;
1830	u32 sw_prod;
1831	int i, rc = 0;
1832
1833	if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1834		sch_handler = true;
1835		dev_dbg(&rq->hwq.pdev->dev,
1836			"%s: Error QP. Scheduling for poll_cq\n", __func__);
1837		goto queue_err;
1838	}
1839	if (bnxt_qplib_queue_full(rq)) {
1840		dev_err(&rq->hwq.pdev->dev,
1841			"FP: QP (0x%x) RQ is full!\n", qp->id);
1842		rc = -EINVAL;
1843		goto done;
1844	}
1845	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1846	rq->swq[sw_prod].wr_id = wqe->wr_id;
1847
1848	rqe_ptr = (struct rq_wqe **)rq->hwq.pbl_ptr;
1849	rqe = &rqe_ptr[RQE_PG(sw_prod)][RQE_IDX(sw_prod)];
1850
1851	memset(rqe, 0, BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
1852
 1853	/* Copy the ULP SG list into the hardware RQE */
1854	for (i = 0, hw_sge = (struct sq_sge *)rqe->data;
1855	     i < wqe->num_sge; i++, hw_sge++) {
1856		hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
1857		hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
1858		hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
1859	}
1860	rqe->wqe_type = wqe->type;
1861	rqe->flags = wqe->flags;
1862	rqe->wqe_size = wqe->num_sge +
1863			((offsetof(typeof(*rqe), data) + 15) >> 4);
 1864	/* HW requires the WQE size to have room for at least one SGE,
 1865	 * even if none was supplied by the ULP.
 1866	 */
1867	if (!wqe->num_sge)
1868		rqe->wqe_size++;
1869
1870	/* Supply the rqe->wr_id index to the wr_id_tbl for now */
1871	rqe->wr_id[0] = cpu_to_le32(sw_prod);
1872
1873queue_err:
1874	if (sch_handler) {
1875		/* Store the ULP info in the software structures */
1876		sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
1877		rq->swq[sw_prod].wr_id = wqe->wr_id;
1878	}
1879
1880	rq->hwq.prod++;
1881	if (sch_handler) {
1882		nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
1883		if (nq_work) {
1884			nq_work->cq = qp->rcq;
1885			nq_work->nq = qp->rcq->nq;
1886			INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
1887			queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
1888		} else {
1889			dev_err(&rq->hwq.pdev->dev,
1890				"FP: Failed to allocate RQ nq_work!\n");
1891			rc = -ENOMEM;
1892		}
1893	}
1894done:
1895	return rc;
1896}
1897
1898/* CQ */
1899
1900/* Spinlock must be held */
1901static void bnxt_qplib_arm_cq_enable(struct bnxt_qplib_cq *cq)
1902{
1903	u64 val = 0;
1904
1905	val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
1906	       DBC_DBC_TYPE_CQ_ARMENA;
1907	val <<= 32;
1908	/* Flush memory writes before enabling the CQ */
1909	writeq(val, cq->dbr_base);
1910}
1911
1912static void bnxt_qplib_arm_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
1913{
1914	struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
1915	u32 sw_cons;
1916	u64 val = 0;
1917
1918	/* Ring DB */
1919	val = ((cq->id << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | arm_type;
1920	val <<= 32;
1921	sw_cons = HWQ_CMP(cq_hwq->cons, cq_hwq);
1922	val |= (sw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
1923	/* flush memory writes before arming the CQ */
1924	writeq(val, cq->dpi->dbr);
1925}
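
/*
 * Illustrative note (assumption based on the arm types used in this
 * file): DBC_DBC_TYPE_CQ_ARMENA is written once at CQ creation to
 * enable the CQ context, while the consumer-index arm above requests a
 * notification for the next completion -- DBC_DBC_TYPE_CQ_ARMALL for
 * any CQE, DBC_DBC_TYPE_CQ_ARMSE for solicited events only.
 */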
1926
1927int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1928{
1929	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1930	struct cmdq_create_cq req;
1931	struct creq_create_cq_resp resp;
1932	struct bnxt_qplib_pbl *pbl;
1933	u16 cmd_flags = 0;
1934	int rc;
1935
1936	cq->hwq.max_elements = cq->max_wqe;
1937	rc = bnxt_qplib_alloc_init_hwq(res->pdev, &cq->hwq, &cq->sg_info,
1938				       &cq->hwq.max_elements,
1939				       BNXT_QPLIB_MAX_CQE_ENTRY_SIZE, 0,
1940				       PAGE_SIZE, HWQ_TYPE_QUEUE);
1941	if (rc)
1942		goto exit;
1943
1944	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
1945
1946	if (!cq->dpi) {
1947		dev_err(&rcfw->pdev->dev,
1948			"FP: CREATE_CQ failed due to NULL DPI\n");
1949		return -EINVAL;
1950	}
1951	req.dpi = cpu_to_le32(cq->dpi->dpi);
1952	req.cq_handle = cpu_to_le64(cq->cq_handle);
1953
1954	req.cq_size = cpu_to_le32(cq->hwq.max_elements);
1955	pbl = &cq->hwq.pbl[PBL_LVL_0];
1956	req.pg_size_lvl = cpu_to_le32(
1957	    ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
1958						CMDQ_CREATE_CQ_LVL_SFT) |
1959	    (pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_CREATE_CQ_PG_SIZE_PG_4K :
1960	     pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_CREATE_CQ_PG_SIZE_PG_8K :
1961	     pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_CREATE_CQ_PG_SIZE_PG_64K :
1962	     pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_CREATE_CQ_PG_SIZE_PG_2M :
1963	     pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_CREATE_CQ_PG_SIZE_PG_8M :
1964	     pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_CREATE_CQ_PG_SIZE_PG_1G :
1965	     CMDQ_CREATE_CQ_PG_SIZE_PG_4K));
1966
1967	req.pbl = cpu_to_le64(pbl->pg_map_arr[0]);
1968
1969	req.cq_fco_cnq_id = cpu_to_le32(
1970			(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
1971			 CMDQ_CREATE_CQ_CNQ_ID_SFT);
1972
1973	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
1974					  (void *)&resp, NULL, 0);
1975	if (rc)
1976		goto fail;
1977
1978	cq->id = le32_to_cpu(resp.xid);
1979	cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
1980	cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
1981	init_waitqueue_head(&cq->waitq);
1982	INIT_LIST_HEAD(&cq->sqf_head);
1983	INIT_LIST_HEAD(&cq->rqf_head);
1984	spin_lock_init(&cq->compl_lock);
1985	spin_lock_init(&cq->flush_lock);
1986
1987	bnxt_qplib_arm_cq_enable(cq);
1988	return 0;
1989
1990fail:
1991	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
1992exit:
1993	return rc;
1994}
1995
1996int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
1997{
1998	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1999	struct cmdq_destroy_cq req;
2000	struct creq_destroy_cq_resp resp;
2001	u16 cmd_flags = 0;
2002	int rc;
2003
2004	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
2005
2006	req.cq_cid = cpu_to_le32(cq->id);
2007	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
2008					  (void *)&resp, NULL, 0);
2009	if (rc)
2010		return rc;
2011	bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
2012	return 0;
2013}
2014
2015static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
2016		      struct bnxt_qplib_cqe **pcqe, int *budget)
2017{
2018	u32 sw_prod, sw_cons;
2019	struct bnxt_qplib_cqe *cqe;
2020	int rc = 0;
2021
2022	/* Now complete all outstanding SQEs with FLUSHED_ERR */
2023	sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
2024	cqe = *pcqe;
2025	while (*budget) {
2026		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
 2027		if (sw_cons == sw_prod)
 2028			break;
2030		/* Skip the FENCE WQE completions */
2031		if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
2032			bnxt_qplib_cancel_phantom_processing(qp);
2033			goto skip_compl;
2034		}
2035		memset(cqe, 0, sizeof(*cqe));
2036		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
2037		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2038		cqe->qp_handle = (u64)(unsigned long)qp;
2039		cqe->wr_id = sq->swq[sw_cons].wr_id;
2040		cqe->src_qp = qp->id;
2041		cqe->type = sq->swq[sw_cons].type;
2042		cqe++;
2043		(*budget)--;
2044skip_compl:
2045		sq->hwq.cons++;
2046	}
2047	*pcqe = cqe;
2048	if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
2049		/* Out of budget */
2050		rc = -EAGAIN;
2051
2052	return rc;
2053}
2054
2055static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
2056		      struct bnxt_qplib_cqe **pcqe, int *budget)
2057{
2058	struct bnxt_qplib_cqe *cqe;
2059	u32 sw_prod, sw_cons;
2060	int rc = 0;
2061	int opcode = 0;
2062
2063	switch (qp->type) {
2064	case CMDQ_CREATE_QP1_TYPE_GSI:
2065		opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
2066		break;
2067	case CMDQ_CREATE_QP_TYPE_RC:
2068		opcode = CQ_BASE_CQE_TYPE_RES_RC;
2069		break;
2070	case CMDQ_CREATE_QP_TYPE_UD:
2071	case CMDQ_CREATE_QP_TYPE_GSI:
2072		opcode = CQ_BASE_CQE_TYPE_RES_UD;
2073		break;
2074	}
2075
2076	/* Flush the rest of the RQ */
2077	sw_prod = HWQ_CMP(rq->hwq.prod, &rq->hwq);
2078	cqe = *pcqe;
2079	while (*budget) {
2080		sw_cons = HWQ_CMP(rq->hwq.cons, &rq->hwq);
2081		if (sw_cons == sw_prod)
2082			break;
2083		memset(cqe, 0, sizeof(*cqe));
2084		cqe->status =
2085		    CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
2086		cqe->opcode = opcode;
2087		cqe->qp_handle = (unsigned long)qp;
2088		cqe->wr_id = rq->swq[sw_cons].wr_id;
2089		cqe++;
2090		(*budget)--;
2091		rq->hwq.cons++;
2092	}
2093	*pcqe = cqe;
2094	if (!*budget && HWQ_CMP(rq->hwq.cons, &rq->hwq) != sw_prod)
2095		/* Out of budget */
2096		rc = -EAGAIN;
2097
2098	return rc;
2099}
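
/*
 * Illustrative note (assumption): both flush helpers above honor the
 * poll budget contract -- they fabricate FLUSHED_ERR completions into
 * *pcqe until the queue drains or *budget reaches zero, returning
 * -EAGAIN in the latter case so the caller can poll again for the
 * remainder.
 */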
2100
2101void bnxt_qplib_mark_qp_error(void *qp_handle)
2102{
2103	struct bnxt_qplib_qp *qp = qp_handle;
2104
2105	if (!qp)
2106		return;
2107
2108	/* Must block new posting of SQ and RQ */
2109	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2110	bnxt_qplib_cancel_phantom_processing(qp);
2111}
2112
 2113/* Note: SQEs are valid from sw_sq_cons up to cqe_sq_cons (exclusive).
 2114 *       CQEs are tracked from sw_cq_cons to max_elements, valid only if VALID=1.
 2115 */
2116static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
2117		     u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
2118{
2119	struct bnxt_qplib_q *sq = &qp->sq;
2120	struct bnxt_qplib_swq *swq;
2121	u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
2122	struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
2123	struct cq_req *peek_req_hwcqe;
2124	struct bnxt_qplib_qp *peek_qp;
2125	struct bnxt_qplib_q *peek_sq;
2126	int i, rc = 0;
2127
2128	/* Normal mode */
2129	/* Check for the psn_search marking before completing */
2130	swq = &sq->swq[sw_sq_cons];
2131	if (swq->psn_search &&
2132	    le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
2133		/* Unmark */
2134		swq->psn_search->flags_next_psn = cpu_to_le32
2135			(le32_to_cpu(swq->psn_search->flags_next_psn)
2136				     & ~0x80000000);
2137		dev_dbg(&cq->hwq.pdev->dev,
2138			"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
2139			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2140		sq->condition = true;
2141		sq->send_phantom = true;
2142
2143		/* TODO: Only ARM if the previous SQE is ARMALL */
2144		bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ_ARMALL);
2145
2146		rc = -EAGAIN;
2147		goto out;
2148	}
2149	if (sq->condition) {
2150		/* Peek at the completions */
2151		peek_raw_cq_cons = cq->hwq.cons;
2152		peek_sw_cq_cons = cq_cons;
2153		i = cq->hwq.max_elements;
2154		while (i--) {
2155			peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
2156			peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2157			peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
2158						     [CQE_IDX(peek_sw_cq_cons)];
2159			/* If the next hwcqe is VALID */
2160			if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
2161					  cq->hwq.max_elements)) {
2162			/*
2163			 * The valid test of the entry must be done first before
2164			 * reading any further.
2165			 */
2166				dma_rmb();
2167				/* If the next hwcqe is a REQ */
2168				if ((peek_hwcqe->cqe_type_toggle &
2169				    CQ_BASE_CQE_TYPE_MASK) ==
2170				    CQ_BASE_CQE_TYPE_REQ) {
2171					peek_req_hwcqe = (struct cq_req *)
2172							 peek_hwcqe;
2173					peek_qp = (struct bnxt_qplib_qp *)
2174						((unsigned long)
2175						 le64_to_cpu
2176						 (peek_req_hwcqe->qp_handle));
2177					peek_sq = &peek_qp->sq;
 2178					peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
 2179						peek_req_hwcqe->sq_cons_idx) - 1,
 2180						&sq->hwq);
2181					/* If the hwcqe's sq's wr_id matches */
2182					if (peek_sq == sq &&
2183					    sq->swq[peek_sq_cons_idx].wr_id ==
2184					    BNXT_QPLIB_FENCE_WRID) {
2185						/*
2186						 *  Unbreak only if the phantom
2187						 *  comes back
2188						 */
2189						dev_dbg(&cq->hwq.pdev->dev,
2190							"FP: Got Phantom CQE\n");
2191						sq->condition = false;
2192						sq->single = true;
2193						rc = 0;
2194						goto out;
2195					}
2196				}
2197				/* Valid but not the phantom, so keep looping */
2198			} else {
2199				/* Not valid yet, just exit and wait */
2200				rc = -EINVAL;
2201				goto out;
2202			}
2203			peek_sw_cq_cons++;
2204			peek_raw_cq_cons++;
2205		}
2206		dev_err(&cq->hwq.pdev->dev,
2207			"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
2208			cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
2209		rc = -EINVAL;
2210	}
2211out:
2212	return rc;
2213}
2214
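/* Illustrative sketch, not part of the upstream source: do_wa9060()
 * identifies the phantom completion by the reserved fence wr_id stashed
 * in the sq's swq. A hypothetical predicate for the peeked REQ CQE:
 */
static bool __maybe_unused example_is_phantom_cqe(struct bnxt_qplib_q *sq,
						  struct cq_req *req)
{
	u32 idx = HWQ_CMP(le16_to_cpu(req->sq_cons_idx) - 1, &sq->hwq);

	/* The phantom carries BNXT_QPLIB_FENCE_WRID as its wr_id */
	return sq->swq[idx].wr_id == BNXT_QPLIB_FENCE_WRID;
}
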
2215static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
2216				     struct cq_req *hwcqe,
2217				     struct bnxt_qplib_cqe **pcqe, int *budget,
2218				     u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
2219{
2220	struct bnxt_qplib_qp *qp;
2221	struct bnxt_qplib_q *sq;
2222	struct bnxt_qplib_cqe *cqe;
2223	u32 sw_sq_cons, cqe_sq_cons;
2224	struct bnxt_qplib_swq *swq;
2225	int rc = 0;
2226
2227	qp = (struct bnxt_qplib_qp *)((unsigned long)
2228				      le64_to_cpu(hwcqe->qp_handle));
2229	if (!qp) {
2230		dev_err(&cq->hwq.pdev->dev,
2231			"FP: Process Req qp is NULL\n");
2232		return -EINVAL;
2233	}
2234	sq = &qp->sq;
2235
2236	cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
2237	if (cqe_sq_cons > sq->hwq.max_elements) {
2238		dev_err(&cq->hwq.pdev->dev,
2239			"FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2240			cqe_sq_cons, sq->hwq.max_elements);
2241		return -EINVAL;
2242	}
2243
2244	if (qp->sq.flushed) {
2245		dev_dbg(&cq->hwq.pdev->dev,
2246			"%s: QP in Flush QP = %p\n", __func__, qp);
2247		goto done;
2248	}
2249	/* Need to walk the sq's swq to fabricate CQEs for all previously
2250	 * signaled SWQEs from the current sq cons up to cqe_sq_cons,
2251	 * since the hardware may have aggregated their completions
2252	 */
2253	cqe = *pcqe;
2254	while (*budget) {
2255		sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2256		if (sw_sq_cons == cqe_sq_cons)
2257			/* Done */
2258			break;
2259
2260		swq = &sq->swq[sw_sq_cons];
2261		memset(cqe, 0, sizeof(*cqe));
2262		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2263		cqe->qp_handle = (u64)(unsigned long)qp;
2264		cqe->src_qp = qp->id;
2265		cqe->wr_id = swq->wr_id;
2266		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
2267			goto skip;
2268		cqe->type = swq->type;
2269
2270		/* For the last CQE, check the status.  On error, the request
2271		 * must complete with the hwcqe error status whether it was
2272		 * signaled or not
2273		 */
2274		if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
2275		    hwcqe->status != CQ_REQ_STATUS_OK) {
2276			cqe->status = hwcqe->status;
2277			dev_err(&cq->hwq.pdev->dev,
2278				"FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
2279				sw_sq_cons, cqe->wr_id, cqe->status);
2280			cqe++;
2281			(*budget)--;
2282			bnxt_qplib_mark_qp_error(qp);
2283			/* Add qp to flush list of the CQ */
2284			bnxt_qplib_add_flush_qp(qp);
2285		} else {
2286			if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2287				/* Before we complete, do WA 9060 */
2288				if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
2289					      cqe_sq_cons)) {
2290					*lib_qp = qp;
2291					goto out;
2292				}
2293				cqe->status = CQ_REQ_STATUS_OK;
2294				cqe++;
2295				(*budget)--;
2296			}
2297		}
2298skip:
2299		sq->hwq.cons++;
2300		if (sq->single)
2301			break;
2302	}
2303out:
2304	*pcqe = cqe;
2305	if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
2306		/* Out of budget */
2307		rc = -EAGAIN;
2308		goto done;
2309	}
2310	/*
2311	 * Return to normal completion mode only after all WCs for this
2312	 * CQE have been generated
2313	 */
2314	sq->single = false;
2315done:
2316	return rc;
2317}
2318
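/* Illustrative sketch, not part of the upstream source: the loop above
 * fabricates one qplib CQE per signaled SWQE between the software
 * consumer index and the index reported by the aggregated hardware CQE.
 * A hypothetical helper computing the size of that span:
 */
static u32 __maybe_unused example_aggregated_span(struct bnxt_qplib_q *sq,
						  u32 cqe_sq_cons)
{
	/* Ring distance from sq->hwq.cons to cqe_sq_cons, modulo ring size */
	return (cqe_sq_cons + sq->hwq.max_elements -
		HWQ_CMP(sq->hwq.cons, &sq->hwq)) % sq->hwq.max_elements;
}
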
2319static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
2320{
2321	spin_lock(&srq->hwq.lock);
2322	srq->swq[srq->last_idx].next_idx = (int)tag;
2323	srq->last_idx = (int)tag;
2324	srq->swq[srq->last_idx].next_idx = -1;
2325	srq->hwq.cons++; /* Support for SRQE counter */
2326	spin_unlock(&srq->hwq.lock);
2327}
2328
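/* Illustrative sketch, not part of the upstream source: the SRQ swq
 * entries form a singly linked free list threaded through next_idx, and
 * bnxt_qplib_release_srqe() appends the completed tag at the tail
 * (last_idx). The posting side would pop from the head; treating
 * start_idx as that head pointer is an assumption here.
 */
static int __maybe_unused example_srqe_pop(struct bnxt_qplib_srq *srq)
{
	int tag = srq->start_idx;

	/* Unlink the head entry; -1 terminates the free list */
	srq->start_idx = srq->swq[tag].next_idx;
	return tag;
}
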
2329static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
2330					struct cq_res_rc *hwcqe,
2331					struct bnxt_qplib_cqe **pcqe,
2332					int *budget)
2333{
2334	struct bnxt_qplib_qp *qp;
2335	struct bnxt_qplib_q *rq;
2336	struct bnxt_qplib_srq *srq;
2337	struct bnxt_qplib_cqe *cqe;
2338	u32 wr_id_idx;
2339	int rc = 0;
2340
2341	qp = (struct bnxt_qplib_qp *)((unsigned long)
2342				      le64_to_cpu(hwcqe->qp_handle));
2343	if (!qp) {
2344		dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
2345		return -EINVAL;
2346	}
2347	if (qp->rq.flushed) {
2348		dev_dbg(&cq->hwq.pdev->dev,
2349			"%s: QP in Flush QP = %p\n", __func__, qp);
2350		goto done;
2351	}
2352
2353	cqe = *pcqe;
2354	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2355	cqe->length = le32_to_cpu(hwcqe->length);
2356	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
2357	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
2358	cqe->flags = le16_to_cpu(hwcqe->flags);
2359	cqe->status = hwcqe->status;
2360	cqe->qp_handle = (u64)(unsigned long)qp;
2361
2362	wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
2363				CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
2364	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2365		srq = qp->srq;
2366		if (!srq)
2367			return -EINVAL;
2368		if (wr_id_idx >= srq->hwq.max_elements) {
2369			dev_err(&cq->hwq.pdev->dev,
2370				"FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2371				wr_id_idx, srq->hwq.max_elements);
2372			return -EINVAL;
2373		}
2374		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2375		bnxt_qplib_release_srqe(srq, wr_id_idx);
2376		cqe++;
2377		(*budget)--;
2378		*pcqe = cqe;
2379	} else {
2380		rq = &qp->rq;
2381		if (wr_id_idx >= rq->hwq.max_elements) {
2382			dev_err(&cq->hwq.pdev->dev,
2383				"FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
2384				wr_id_idx, rq->hwq.max_elements);
2385			return -EINVAL;
2386		}
2387		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2388		cqe++;
2389		(*budget)--;
2390		rq->hwq.cons++;
2391		*pcqe = cqe;
2392
2393		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2394			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2395			/* Add qp to flush list of the CQ */
2396			bnxt_qplib_add_flush_qp(qp);
2397		}
2398	}
2399
2400done:
2401	return rc;
2402}
2403
2404static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
2405					struct cq_res_ud *hwcqe,
2406					struct bnxt_qplib_cqe **pcqe,
2407					int *budget)
2408{
2409	struct bnxt_qplib_qp *qp;
2410	struct bnxt_qplib_q *rq;
2411	struct bnxt_qplib_srq *srq;
2412	struct bnxt_qplib_cqe *cqe;
2413	u32 wr_id_idx;
2414	int rc = 0;
2415
2416	qp = (struct bnxt_qplib_qp *)((unsigned long)
2417				      le64_to_cpu(hwcqe->qp_handle));
2418	if (!qp) {
2419		dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
2420		return -EINVAL;
2421	}
2422	if (qp->rq.flushed) {
2423		dev_dbg(&cq->hwq.pdev->dev,
2424			"%s: QP in Flush QP = %p\n", __func__, qp);
2425		goto done;
2426	}
2427	cqe = *pcqe;
2428	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2429	cqe->length = (u32)le16_to_cpu(hwcqe->length);
2430	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
2431	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
2432	cqe->flags = le16_to_cpu(hwcqe->flags);
2433	cqe->status = hwcqe->status;
2434	cqe->qp_handle = (u64)(unsigned long)qp;
2435	/* FIXME: Endianness fix needed for smac */
2436	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
2437	wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
2438				& CQ_RES_UD_SRQ_OR_RQ_WR_ID_MASK;
2439	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
2440				  ((le32_to_cpu(
2441				  hwcqe->src_qp_high_srq_or_rq_wr_id) &
2442				 CQ_RES_UD_SRC_QP_HIGH_MASK) >> 8);
2443
2444	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
2445		srq = qp->srq;
2446		if (!srq)
2447			return -EINVAL;
2448
2449		if (wr_id_idx >= srq->hwq.max_elements) {
2450			dev_err(&cq->hwq.pdev->dev,
2451				"FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2452				wr_id_idx, srq->hwq.max_elements);
2453			return -EINVAL;
2454		}
2455		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2456		bnxt_qplib_release_srqe(srq, wr_id_idx);
2457		cqe++;
2458		(*budget)--;
2459		*pcqe = cqe;
2460	} else {
2461		rq = &qp->rq;
2462		if (wr_id_idx >= rq->hwq.max_elements) {
2463			dev_err(&cq->hwq.pdev->dev,
2464				"FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
2465				wr_id_idx, rq->hwq.max_elements);
2466			return -EINVAL;
2467		}
2468
2469		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2470		cqe++;
2471		(*budget)--;
2472		rq->hwq.cons++;
2473		*pcqe = cqe;
2474
2475		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2476			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2477			/* Add qp to flush list of the CQ */
2478			bnxt_qplib_add_flush_qp(qp);
2479		}
2480	}
2481done:
2482	return rc;
2483}
2484
2485bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
2486{
2487	struct cq_base *hw_cqe, **hw_cqe_ptr;
2488	u32 sw_cons, raw_cons;
2489	bool rc = true;
2490
2491	raw_cons = cq->hwq.cons;
2492	sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2493	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2494	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2495
2496	/* Check for Valid bit. If the CQE is valid, return false */
2497	rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
2498	return rc;
2499}
2500
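/* Illustrative sketch, not part of the upstream source: the valid-bit
 * peek above is repeated in bnxt_qplib_poll_cq() below; a hypothetical
 * helper factoring out the pattern could look like this.
 */
static bool __maybe_unused example_peek_cqe_valid(struct bnxt_qplib_cq *cq,
						  u32 raw_cons)
{
	struct cq_base *hw_cqe, **hw_cqe_ptr;
	u32 sw_cons = HWQ_CMP(raw_cons, &cq->hwq);

	hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
	hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
	/* Callers must still issue dma_rmb() before reading the payload */
	return CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements);
}
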
2501static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
2502						struct cq_res_raweth_qp1 *hwcqe,
2503						struct bnxt_qplib_cqe **pcqe,
2504						int *budget)
2505{
2506	struct bnxt_qplib_qp *qp;
2507	struct bnxt_qplib_q *rq;
2508	struct bnxt_qplib_srq *srq;
2509	struct bnxt_qplib_cqe *cqe;
2510	u32 wr_id_idx;
2511	int rc = 0;
2512
2513	qp = (struct bnxt_qplib_qp *)((unsigned long)
2514				      le64_to_cpu(hwcqe->qp_handle));
2515	if (!qp) {
2516		dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
2517		return -EINVAL;
2518	}
2519	if (qp->rq.flushed) {
2520		dev_dbg(&cq->hwq.pdev->dev,
2521			"%s: QP in Flush QP = %p\n", __func__, qp);
2522		goto done;
2523	}
2524	cqe = *pcqe;
2525	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
2526	cqe->flags = le16_to_cpu(hwcqe->flags);
2527	cqe->qp_handle = (u64)(unsigned long)qp;
2528
2529	wr_id_idx =
2530		le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
2531				& CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
2532	cqe->src_qp = qp->id;
2533	if (qp->id == 1 && !cqe->length) {
2534		/* Workaround for QP1 length misdetection */
2535		cqe->length = 296;
2536	} else {
2537		cqe->length = le16_to_cpu(hwcqe->length);
2538	}
2539	cqe->pkey_index = qp->pkey_index;
2540	memcpy(cqe->smac, qp->smac, ETH_ALEN);
2541
2542	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
2543	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
2544	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
2545
2546	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
2547		srq = qp->srq;
2548		if (!srq) {
2549			dev_err(&cq->hwq.pdev->dev,
2550				"FP: SRQ used but not defined??\n");
2551			return -EINVAL;
2552		}
2553		if (wr_id_idx >= srq->hwq.max_elements) {
2554			dev_err(&cq->hwq.pdev->dev,
2555				"FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
2556				wr_id_idx, srq->hwq.max_elements);
2557			return -EINVAL;
2558		}
2559		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
2560		bnxt_qplib_release_srqe(srq, wr_id_idx);
2561		cqe++;
2562		(*budget)--;
2563		*pcqe = cqe;
2564	} else {
2565		rq = &qp->rq;
2566		if (wr_id_idx >= rq->hwq.max_elements) {
2567			dev_err(&cq->hwq.pdev->dev,
2568				"FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
2569				wr_id_idx, rq->hwq.max_elements);
2570			return -EINVAL;
2571		}
2572		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
2573		cqe++;
2574		(*budget)--;
2575		rq->hwq.cons++;
2576		*pcqe = cqe;
2577
2578		if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
2579			qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2580			/* Add qp to flush list of the CQ */
2581			bnxt_qplib_add_flush_qp(qp);
2582		}
2583	}
2584
2585done:
2586	return rc;
2587}
2588
2589static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
2590					  struct cq_terminal *hwcqe,
2591					  struct bnxt_qplib_cqe **pcqe,
2592					  int *budget)
2593{
2594	struct bnxt_qplib_qp *qp;
2595	struct bnxt_qplib_q *sq, *rq;
2596	struct bnxt_qplib_cqe *cqe;
2597	u32 sw_cons = 0, cqe_cons;
2598	int rc = 0;
2599
2600	/* Check the Status */
2601	if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
2602		dev_warn(&cq->hwq.pdev->dev,
2603			 "FP: CQ Process Terminal Error status = 0x%x\n",
2604			 hwcqe->status);
2605
2606	qp = (struct bnxt_qplib_qp *)((unsigned long)
2607				      le64_to_cpu(hwcqe->qp_handle));
2608	if (!qp) {
2609		dev_err(&cq->hwq.pdev->dev,
2610			"FP: CQ Process terminal qp is NULL\n");
2611		return -EINVAL;
2612	}
2613
2614	/* Must block new posting of SQ and RQ */
2615	qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
2616
2617	sq = &qp->sq;
2618	rq = &qp->rq;
2619
2620	cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
2621	if (cqe_cons == 0xFFFF)
2622		goto do_rq;
2623
2624	if (cqe_cons > sq->hwq.max_elements) {
2625		dev_err(&cq->hwq.pdev->dev,
2626			"FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
2627			cqe_cons, sq->hwq.max_elements);
2628		goto do_rq;
2629	}
2630
2631	if (qp->sq.flushed) {
2632		dev_dbg(&cq->hwq.pdev->dev,
2633			"%s: QP in Flush QP = %p\n", __func__, qp);
2634		goto sq_done;
2635	}
2636
2637	/* A terminal CQE can also carry aggregated successful CQEs that
2638	 * preceded it, so we must complete all CQEs from the current sq
2639	 * cons up to cqe_cons with status OK
2640	 */
2641	cqe = *pcqe;
2642	while (*budget) {
2643		sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
2644		if (sw_cons == cqe_cons)
2645			break;
2646		if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
2647			memset(cqe, 0, sizeof(*cqe));
2648			cqe->status = CQ_REQ_STATUS_OK;
2649			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
2650			cqe->qp_handle = (u64)(unsigned long)qp;
2651			cqe->src_qp = qp->id;
2652			cqe->wr_id = sq->swq[sw_cons].wr_id;
2653			cqe->type = sq->swq[sw_cons].type;
2654			cqe++;
2655			(*budget)--;
2656		}
2657		sq->hwq.cons++;
2658	}
2659	*pcqe = cqe;
2660	if (!(*budget) && sw_cons != cqe_cons) {
2661		/* Out of budget */
2662		rc = -EAGAIN;
2663		goto sq_done;
2664	}
2665sq_done:
2666	if (rc)
2667		return rc;
2668do_rq:
2669	cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
2670	if (cqe_cons == 0xFFFF) {
2671		goto done;
2672	} else if (cqe_cons > rq->hwq.max_elements) {
2673		dev_err(&cq->hwq.pdev->dev,
2674			"FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
2675			cqe_cons, rq->hwq.max_elements);
2676		goto done;
2677	}
2678
2679	if (qp->rq.flushed) {
2680		dev_dbg(&cq->hwq.pdev->dev,
2681			"%s: QP in Flush QP = %p\n", __func__, qp);
2682		rc = 0;
2683		goto done;
2684	}
2685
2686	/* A terminal CQE requires all posted RQEs, from the current rq->cons
2687	 * to rq->prod, to complete with FLUSHED_ERR, regardless of what
2688	 * rq->cons the terminal CQE indicates
2689	 */
2690
2691	/* Add qp to flush list of the CQ */
2692	bnxt_qplib_add_flush_qp(qp);
2693done:
2694	return rc;
2695}
2696
2697static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
2698					struct cq_cutoff *hwcqe)
2699{
2700	/* Check the Status */
2701	if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
2702		dev_err(&cq->hwq.pdev->dev,
2703			"FP: CQ Process Cutoff Error status = 0x%x\n",
2704			hwcqe->status);
2705		return -EINVAL;
2706	}
2707	clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
2708	wake_up_interruptible(&cq->waitq);
2709
2710	return 0;
2711}
2712
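/* Illustrative sketch, not part of the upstream source: the cutoff CQE
 * signals that a CQ resize has completed, so a resize caller can sleep
 * on cq->waitq until CQ_FLAGS_RESIZE_IN_PROG is cleared above. A
 * hypothetical waiter:
 */
static void __maybe_unused example_wait_for_resize(struct bnxt_qplib_cq *cq)
{
	/* Woken by bnxt_qplib_cq_process_cutoff() via cq->waitq */
	wait_event_interruptible(cq->waitq,
				 !test_bit(CQ_FLAGS_RESIZE_IN_PROG,
					   &cq->flags));
}
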
2713int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
2714				  struct bnxt_qplib_cqe *cqe,
2715				  int num_cqes)
2716{
2717	struct bnxt_qplib_qp *qp = NULL;
2718	u32 budget = num_cqes;
2719	unsigned long flags;
2720
2721	spin_lock_irqsave(&cq->flush_lock, flags);
2722	list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
2723		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
2724		__flush_sq(&qp->sq, qp, &cqe, &budget);
2725	}
2726
2727	list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
2728		dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
2729		__flush_rq(&qp->rq, qp, &cqe, &budget);
2730	}
2731	spin_unlock_irqrestore(&cq->flush_lock, flags);
2732
2733	return num_cqes - budget;
2734}
2735
2736int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
2737		       int num_cqes, struct bnxt_qplib_qp **lib_qp)
2738{
2739	struct cq_base *hw_cqe, **hw_cqe_ptr;
2740	u32 sw_cons, raw_cons;
2741	int budget, rc = 0;
2742
2743	raw_cons = cq->hwq.cons;
2744	budget = num_cqes;
2745
2746	while (budget) {
2747		sw_cons = HWQ_CMP(raw_cons, &cq->hwq);
2748		hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
2749		hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)];
2750
2751		/* Check for Valid bit */
2752		if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
2753			break;
2754
2755		/*
2756		 * The valid test of the entry must be done first before
2757		 * reading any further.
2758		 */
2759		dma_rmb();
2760		/* From the device's respective CQE format to qplib_wc */
2761		switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
2762		case CQ_BASE_CQE_TYPE_REQ:
2763			rc = bnxt_qplib_cq_process_req(cq,
2764						       (struct cq_req *)hw_cqe,
2765						       &cqe, &budget,
2766						       sw_cons, lib_qp);
2767			break;
2768		case CQ_BASE_CQE_TYPE_RES_RC:
2769			rc = bnxt_qplib_cq_process_res_rc(cq,
2770							  (struct cq_res_rc *)
2771							  hw_cqe, &cqe,
2772							  &budget);
2773			break;
2774		case CQ_BASE_CQE_TYPE_RES_UD:
2775			rc = bnxt_qplib_cq_process_res_ud
2776					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
2777					 &budget);
2778			break;
2779		case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
2780			rc = bnxt_qplib_cq_process_res_raweth_qp1
2781					(cq, (struct cq_res_raweth_qp1 *)
2782					 hw_cqe, &cqe, &budget);
2783			break;
2784		case CQ_BASE_CQE_TYPE_TERMINAL:
2785			rc = bnxt_qplib_cq_process_terminal
2786					(cq, (struct cq_terminal *)hw_cqe,
2787					 &cqe, &budget);
2788			break;
2789		case CQ_BASE_CQE_TYPE_CUT_OFF:
2790			bnxt_qplib_cq_process_cutoff
2791					(cq, (struct cq_cutoff *)hw_cqe);
2792			/* Done processing this CQ */
2793			goto exit;
2794		default:
2795			dev_err(&cq->hwq.pdev->dev,
2796				"process_cq unknown type 0x%lx\n",
2797				hw_cqe->cqe_type_toggle &
2798				CQ_BASE_CQE_TYPE_MASK);
2799			rc = -EINVAL;
2800			break;
2801		}
2802		if (rc < 0) {
2803			if (rc == -EAGAIN)
2804				break;
2805			/* Error while processing the CQE, just skip to the
2806			 * next one
2807			 */
2808			dev_err(&cq->hwq.pdev->dev,
2809				"process_cqe error rc = 0x%x\n", rc);
2810		}
2811		raw_cons++;
2812	}
2813	if (cq->hwq.cons != raw_cons) {
2814		cq->hwq.cons = raw_cons;
2815		bnxt_qplib_arm_cq(cq, DBC_DBC_TYPE_CQ);
2816	}
2817exit:
2818	return num_cqes - budget;
2819}
2820
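/* Illustrative sketch, not part of the upstream source: a consumer
 * typically drains the CQ by calling bnxt_qplib_poll_cq() with a bounded
 * budget and re-polling while each batch comes back full. The batch size
 * and the lack of lib_qp handling are assumptions.
 */
static int __maybe_unused example_drain_cq(struct bnxt_qplib_cq *cq)
{
	struct bnxt_qplib_cqe cqes[16];
	struct bnxt_qplib_qp *lib_qp = NULL;
	int polled;

	do {
		/* Returns the number of CQEs written into cqes[] */
		polled = bnxt_qplib_poll_cq(cq, cqes, ARRAY_SIZE(cqes),
					    &lib_qp);
	} while (polled == ARRAY_SIZE(cqes));

	return polled;
}
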
2821void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
2822{
2823	if (arm_type)
2824		bnxt_qplib_arm_cq(cq, arm_type);
2825	/* Use cq->arm_state to track whether the cq handler should be run */
2826	atomic_set(&cq->arm_state, 1);
2827}
2828
2829void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
2830{
2831	flush_workqueue(qp->scq->nq->cqn_wq);
2832	if (qp->scq != qp->rcq)
2833		flush_workqueue(qp->rcq->nq->cqn_wq);
2834}