   1/*
   2 * Broadcom NetXtreme-E RoCE driver.
   3 *
   4 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
   5 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * BSD license below:
  12 *
  13 * Redistribution and use in source and binary forms, with or without
  14 * modification, are permitted provided that the following conditions
  15 * are met:
  16 *
  17 * 1. Redistributions of source code must retain the above copyright
  18 *    notice, this list of conditions and the following disclaimer.
  19 * 2. Redistributions in binary form must reproduce the above copyright
  20 *    notice, this list of conditions and the following disclaimer in
  21 *    the documentation and/or other materials provided with the
  22 *    distribution.
  23 *
  24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
  25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  26 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
  28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  31 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  32 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
  33 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  34 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  35 *
  36 * Description: RDMA Controller HW interface
  37 */
  38
  39#define dev_fmt(fmt) "QPLIB: " fmt
  40
  41#include <linux/interrupt.h>
  42#include <linux/spinlock.h>
  43#include <linux/pci.h>
  44#include <linux/prefetch.h>
  45#include <linux/delay.h>
  46
  47#include "roce_hsi.h"
  48#include "qplib_res.h"
  49#include "qplib_rcfw.h"
  50#include "qplib_sp.h"
  51#include "qplib_fp.h"
  52#include "qplib_tlv.h"
  53
  54static void bnxt_qplib_service_creq(struct tasklet_struct *t);
  55
  56/**
  57 * bnxt_qplib_map_rc  -  map return type based on opcode
  58 * @opcode:  roce slow path opcode
  59 *
  60 * case #1
   61 * Firmware-initiated error recovery is a safe state machine and the
   62 * driver can consider all the underlying rdma resources to be freed.
  63 * In this state, it is safe to return success for opcodes related to
  64 * destroying rdma resources (like destroy qp, destroy cq etc.).
  65 *
  66 * case #2
   67 * If the driver detects a potential firmware stall, the state machine is
   68 * not safe and the driver cannot assume that the underlying rdma
   69 * resources have been freed.
  70 * In this state, it is not safe to return success for opcodes related to
  71 * destroying rdma resources (like destroy qp, destroy cq etc.).
  72 *
  73 * Scope of this helper function is only for case #1.
  74 *
  75 * Returns:
  76 * 0 to communicate success to caller.
  77 * Non zero error code to communicate failure to caller.
  78 */
  79static int bnxt_qplib_map_rc(u8 opcode)
  80{
  81	switch (opcode) {
  82	case CMDQ_BASE_OPCODE_DESTROY_QP:
  83	case CMDQ_BASE_OPCODE_DESTROY_SRQ:
  84	case CMDQ_BASE_OPCODE_DESTROY_CQ:
  85	case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
  86	case CMDQ_BASE_OPCODE_DEREGISTER_MR:
  87	case CMDQ_BASE_OPCODE_DELETE_GID:
  88	case CMDQ_BASE_OPCODE_DESTROY_QP1:
  89	case CMDQ_BASE_OPCODE_DESTROY_AH:
  90	case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
  91	case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
  92	case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
  93		return 0;
  94	default:
  95		return -ETIMEDOUT;
  96	}
  97}
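
/*
 * Illustrative sketch, not part of the driver flow: when ERR_DEVICE_DETACHED
 * is set, the waiters below use this helper so that destroy-type opcodes are
 * treated as successful while any other opcode still fails, e.g.
 *
 *	bnxt_qplib_map_rc(CMDQ_BASE_OPCODE_DESTROY_QP)  -> 0
 *	bnxt_qplib_map_rc(CMDQ_BASE_OPCODE_CREATE_QP)   -> -ETIMEDOUT
 */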
  98
  99/**
 100 * bnxt_re_is_fw_stalled   -	Check firmware health
 101 * @rcfw:     rcfw channel instance of rdev
 102 * @cookie:   cookie to track the command
 103 *
  104 * If firmware has not responded to any rcfw command within
  105 * rcfw->max_timeout, consider the firmware stalled.
 106 *
 107 * Returns:
 108 * 0 if firmware is responding
 109 * -ENODEV if firmware is not responding
 110 */
 111static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
 112				 u16 cookie)
 113{
 114	struct bnxt_qplib_cmdq_ctx *cmdq;
 115	struct bnxt_qplib_crsqe *crsqe;
 116
 117	crsqe = &rcfw->crsqe_tbl[cookie];
 118	cmdq = &rcfw->cmdq;
 119
 120	if (time_after(jiffies, cmdq->last_seen +
 121		      (rcfw->max_timeout * HZ))) {
 122		dev_warn_ratelimited(&rcfw->pdev->dev,
 123				     "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ",
 124				     __func__, cookie, crsqe->opcode,
 125				     jiffies_to_msecs(jiffies - cmdq->last_seen),
 126				     rcfw->max_timeout * 1000,
 127				     crsqe->is_in_used);
 128		return -ENODEV;
 129	}
 130
 131	return 0;
 132}
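
/*
 * Timing sketch: cmdq->last_seen is refreshed on every CREQ entry processed in
 * bnxt_qplib_service_creq(), so a stall is reported only when no completion at
 * all has arrived for rcfw->max_timeout seconds (the value comes from
 * res->cctx->hwrm_cmd_max_timeout), i.e.
 *
 *	time_after(jiffies, cmdq->last_seen + rcfw->max_timeout * HZ)
 */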
 133
 134/**
  135 * __wait_for_resp   -	Wait for response without holding the cpu context
 136 * @rcfw:    rcfw channel instance of rdev
 137 * @cookie:  cookie to track the command
 138 *
 139 * Wait for command completion in sleepable context.
 140 *
 141 * Returns:
 142 * 0 if command is completed by firmware.
  143 * Non-zero error code for all other cases.
 144 */
 145static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 146{
 147	struct bnxt_qplib_cmdq_ctx *cmdq;
 148	struct bnxt_qplib_crsqe *crsqe;
 149	int ret;
 150
 151	cmdq = &rcfw->cmdq;
 152	crsqe = &rcfw->crsqe_tbl[cookie];
 153
 154	do {
 155		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
 156			return bnxt_qplib_map_rc(crsqe->opcode);
 157		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
 158			return -ETIMEDOUT;
 159
 160		wait_event_timeout(cmdq->waitq,
 161				   !crsqe->is_in_used ||
 162				   test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
 163				   msecs_to_jiffies(rcfw->max_timeout * 1000));
 164
 165		if (!crsqe->is_in_used)
 166			return 0;
 167
 168		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
 169
 170		if (!crsqe->is_in_used)
 171			return 0;
 172
 173		ret = bnxt_re_is_fw_stalled(rcfw, cookie);
 174		if (ret)
 175			return ret;
 176
 177	} while (true);
  178}
 179
 180/**
 181 * __block_for_resp   -	hold the cpu context and wait for response
 182 * @rcfw:    rcfw channel instance of rdev
 183 * @cookie:  cookie to track the command
 184 *
  185 * This function will hold the cpu (non-sleepable context) and
  186 * wait for command completion. The maximum holding interval is 8 seconds.
 187 *
 188 * Returns:
  189 * -ETIMEDOUT if the command is not completed within the timeout interval.
 190 * 0 if command is completed by firmware.
 191 */
 192static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 193{
 194	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
 195	struct bnxt_qplib_crsqe *crsqe;
 196	unsigned long issue_time = 0;
 197
 198	issue_time = jiffies;
 199	crsqe = &rcfw->crsqe_tbl[cookie];
 200
 201	do {
 202		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
 203			return bnxt_qplib_map_rc(crsqe->opcode);
 204		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
 205			return -ETIMEDOUT;
 206
 207		udelay(1);
 208
 209		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
 210		if (!crsqe->is_in_used)
 211			return 0;
 212
 213	} while (time_before(jiffies, issue_time + (8 * HZ)));
 214
 215	return -ETIMEDOUT;
  216}
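
/*
 * Note: the 8 second cap documented above comes directly from the (8 * HZ)
 * bound on the loop; each iteration does a ~1us udelay() and then drains the
 * CREQ inline because the caller cannot sleep here.
 */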
 217
 218/*  __send_message_no_waiter -	get cookie and post the message.
 219 * @rcfw:   rcfw channel instance of rdev
 220 * @msg:    qplib message internal
 221 *
  222 * This function will just post the message and does not wait for completion.
  223 * The current design of this function is:
  224 * the user must hold the completion queue hwq->lock;
  225 * the user must have consumed the existing completion and freed the resources;
  226 * this function will not check the queue full condition;
  227 * this function will explicitly set is_waiter_alive=false.
  228 * The current use case is to send destroy_ah if create_ah completes
  229 * after the waiter of create_ah is lost. It can be extended to other
  230 * use cases as well.
 231 *
 232 * Returns: Nothing
 233 *
 234 */
 235static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
 236				     struct bnxt_qplib_cmdqmsg *msg)
 237{
 238	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
 239	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
 240	struct bnxt_qplib_crsqe *crsqe;
 241	struct bnxt_qplib_cmdqe *cmdqe;
 242	u32 sw_prod, cmdq_prod;
 243	u16 cookie;
 244	u32 bsize;
 245	u8 *preq;
 246
 247	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
 248	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
 249	crsqe = &rcfw->crsqe_tbl[cookie];
 250
 251	/* Set cmd_size in terms of 16B slots in req. */
 252	bsize = bnxt_qplib_set_cmd_slots(msg->req);
 253	/* GET_CMD_SIZE would return number of slots in either case of tlv
 254	 * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
 255	 */
 256	crsqe->is_internal_cmd = true;
 257	crsqe->is_waiter_alive = false;
 258	crsqe->is_in_used = true;
 259	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
 260
 261	preq = (u8 *)msg->req;
 262	do {
 263		/* Locate the next cmdq slot */
 264		sw_prod = HWQ_CMP(hwq->prod, hwq);
 265		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
 266		/* Copy a segment of the req cmd to the cmdq */
 267		memset(cmdqe, 0, sizeof(*cmdqe));
 268		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
 269		preq += min_t(u32, bsize, sizeof(*cmdqe));
 270		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
 271		hwq->prod++;
 272	} while (bsize > 0);
 273	cmdq->seq_num++;
 274
 275	cmdq_prod = hwq->prod;
 276	atomic_inc(&rcfw->timeout_send);
 277	/* ring CMDQ DB */
 278	wmb();
 279	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
 280	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
 281}
 282
 283static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 284			  struct bnxt_qplib_cmdqmsg *msg, u8 opcode)
 285{
 286	u32 bsize, free_slots, required_slots;
 287	struct bnxt_qplib_cmdq_ctx *cmdq;
 288	struct bnxt_qplib_crsqe *crsqe;
 289	struct bnxt_qplib_cmdqe *cmdqe;
 290	struct bnxt_qplib_hwq *hwq;
 291	u32 sw_prod, cmdq_prod;
 292	struct pci_dev *pdev;
 293	unsigned long flags;
 294	u16 cookie;
 295	u8 *preq;
 296
 297	cmdq = &rcfw->cmdq;
 298	hwq = &cmdq->hwq;
 299	pdev = rcfw->pdev;
 300
  301	/* Cmdq entries are in 16-byte units; each request can consume one or
  302	 * more cmdqe
  303	 */
 304	spin_lock_irqsave(&hwq->lock, flags);
 305	required_slots = bnxt_qplib_get_cmd_slots(msg->req);
 306	free_slots = HWQ_FREE_SLOTS(hwq);
 307	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
 308	crsqe = &rcfw->crsqe_tbl[cookie];
 309
 310	if (required_slots >= free_slots) {
 311		dev_info_ratelimited(&pdev->dev,
 312				     "CMDQ is full req/free %d/%d!",
 313				     required_slots, free_slots);
 314		spin_unlock_irqrestore(&hwq->lock, flags);
 315		return -EAGAIN;
 316	}
 317	if (msg->block)
 318		cookie |= RCFW_CMD_IS_BLOCKING;
 319	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
 320
 321	bsize = bnxt_qplib_set_cmd_slots(msg->req);
 322	crsqe->free_slots = free_slots;
 323	crsqe->resp = (struct creq_qp_event *)msg->resp;
 324	crsqe->resp->cookie = cpu_to_le16(cookie);
 325	crsqe->is_internal_cmd = false;
 326	crsqe->is_waiter_alive = true;
 327	crsqe->is_in_used = true;
 328	crsqe->opcode = opcode;
 329
 330	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
 331	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
 332		struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;
 333
 334		__set_cmdq_base_resp_addr(msg->req, msg->req_sz,
 335					  cpu_to_le64(sbuf->dma_addr));
 336		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
 337					  ALIGN(sbuf->size,
 338						BNXT_QPLIB_CMDQE_UNITS) /
 339						BNXT_QPLIB_CMDQE_UNITS);
 340	}
 341
 342	preq = (u8 *)msg->req;
 343	do {
 344		/* Locate the next cmdq slot */
 345		sw_prod = HWQ_CMP(hwq->prod, hwq);
 346		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
 347		/* Copy a segment of the req cmd to the cmdq */
 348		memset(cmdqe, 0, sizeof(*cmdqe));
 349		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
 350		preq += min_t(u32, bsize, sizeof(*cmdqe));
 351		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
 352		hwq->prod++;
 353	} while (bsize > 0);
 354	cmdq->seq_num++;
 355
 356	cmdq_prod = hwq->prod & 0xFFFF;
 357	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
 358		/* The very first doorbell write
 359		 * is required to set this flag
 360		 * which prompts the FW to reset
 361		 * its internal pointers
 362		 */
 363		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
 364		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
 365	}
 366	/* ring CMDQ DB */
 367	wmb();
 368	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
 369	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
 370	spin_unlock_irqrestore(&hwq->lock, flags);
  371	/* The caller will reap the response from the CREQ */
 372	return 0;
 373}
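
/*
 * Copy-loop sketch with illustrative numbers, assuming bsize holds the request
 * size in bytes as the loop above treats it: a 64-byte request is split into
 * four 16-byte cmdqe slots and advances hwq->prod by four:
 *
 *	bsize = 64;
 *	while (bsize > 0) {
 *		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
 *		preq  += min_t(u32, bsize, sizeof(*cmdqe));
 *		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
 *		hwq->prod++;
 *	}
 */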
 374
 375/**
 376 * __poll_for_resp   -	self poll completion for rcfw command
 377 * @rcfw:     rcfw channel instance of rdev
 378 * @cookie:   cookie to track the command
 379 *
  380 * It works the same as __wait_for_resp except that this function
  381 * self-polls at short intervals since the interrupt is disabled.
  382 * This function cannot be called from a non-sleepable context.
 383 *
 384 * Returns:
  385 * -ETIMEDOUT if the command is not completed within the timeout interval.
 386 * 0 if command is completed by firmware.
 387 */
 388static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 389{
 390	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
 391	struct bnxt_qplib_crsqe *crsqe;
 392	unsigned long issue_time;
 393	int ret;
 394
 395	issue_time = jiffies;
 396	crsqe = &rcfw->crsqe_tbl[cookie];
 397
 398	do {
 399		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
 400			return bnxt_qplib_map_rc(crsqe->opcode);
 401		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
 402			return -ETIMEDOUT;
 403
 404		usleep_range(1000, 1001);
 405
 406		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
 407		if (!crsqe->is_in_used)
 408			return 0;
 409		if (jiffies_to_msecs(jiffies - issue_time) >
 410		    (rcfw->max_timeout * 1000)) {
 411			ret = bnxt_re_is_fw_stalled(rcfw, cookie);
 412			if (ret)
 413				return ret;
 414		}
 415	} while (true);
  416}
 417
 418static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
 419				       struct bnxt_qplib_cmdqmsg *msg,
 420				       u8 opcode)
 421{
 422	struct bnxt_qplib_cmdq_ctx *cmdq;
 423
 424	cmdq = &rcfw->cmdq;
 425
 426	/* Prevent posting if f/w is not in a state to process */
 427	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
 428		return bnxt_qplib_map_rc(opcode);
 429	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
 430		return -ETIMEDOUT;
 431
 432	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
 433	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
 434		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
 435		return -EINVAL;
 436	}
 437
 438	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
 439	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
 440	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
 441	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
 442		dev_err(&rcfw->pdev->dev,
 443			"QPLIB: RCFW not initialized, reject opcode 0x%x",
 444			opcode);
 445		return -EOPNOTSUPP;
 446	}
 447
 448	return 0;
 449}
 450
  451/* This function just posts the command and does not wait for completion */
 452static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
 453				  struct creq_create_ah_resp *create_ah_resp)
 454{
 455	struct bnxt_qplib_cmdqmsg msg = {};
 456	struct cmdq_destroy_ah req = {};
 457
 458	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 459				 CMDQ_BASE_OPCODE_DESTROY_AH,
 460				 sizeof(req));
 461	req.ah_cid = create_ah_resp->xid;
 462	msg.req = (struct cmdq_base *)&req;
 463	msg.req_sz = sizeof(req);
 464	__send_message_no_waiter(rcfw, &msg);
 465	dev_info_ratelimited(&rcfw->pdev->dev,
 466			     "From %s: ah_cid = %d timeout_send %d\n",
 467			     __func__, req.ah_cid,
 468			     atomic_read(&rcfw->timeout_send));
 469}
 470
 471/**
 472 * __bnxt_qplib_rcfw_send_message   -	qplib interface to send
 473 * and complete rcfw command.
 474 * @rcfw:   rcfw channel instance of rdev
 475 * @msg:    qplib message internal
 476 *
  477 * This function does not account for the shadow queue depth. It will send
  478 * any command unconditionally as long as the send queue is not full.
 479 *
 480 * Returns:
 481 * 0 if command completed by firmware.
 482 * Non zero if the command is not completed by firmware.
 483 */
 484static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 485					  struct bnxt_qplib_cmdqmsg *msg)
 486{
 487	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
 488	struct bnxt_qplib_crsqe *crsqe;
 489	unsigned long flags;
 490	u16 cookie;
 491	int rc;
 492	u8 opcode;
 493
 494	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
 495
 496	rc = __send_message_basic_sanity(rcfw, msg, opcode);
 497	if (rc)
 498		return rc;
 499
 500	rc = __send_message(rcfw, msg, opcode);
 501	if (rc)
 502		return rc;
 503
 504	cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
 505				& RCFW_MAX_COOKIE_VALUE;
 506
 507	if (msg->block)
 508		rc = __block_for_resp(rcfw, cookie);
 509	else if (atomic_read(&rcfw->rcfw_intr_enabled))
 510		rc = __wait_for_resp(rcfw, cookie);
 511	else
 512		rc = __poll_for_resp(rcfw, cookie);
 513
 514	if (rc) {
 515		spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
 516		crsqe = &rcfw->crsqe_tbl[cookie];
 517		crsqe->is_waiter_alive = false;
 518		if (rc == -ENODEV)
 519			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
 520		spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
 521		return -ETIMEDOUT;
 522	}
 523
 524	if (evnt->status) {
 525		/* failed with status */
 526		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
 527			cookie, opcode, evnt->status);
 528		rc = -EFAULT;
 529	}
 530
 531	return rc;
 532}
 533
 534/**
 535 * bnxt_qplib_rcfw_send_message   -	qplib interface to send
 536 * and complete rcfw command.
 537 * @rcfw:   rcfw channel instance of rdev
 538 * @msg:    qplib message internal
 539 *
  540 * The driver interacts with firmware through the rcfw channel (slow path) in two ways:
  541 * a. Blocking rcfw command send. In this path, the driver cannot hold
  542 * the context for a long period since it holds the cpu until the
  543 * command completes.
  544 * b. Non-blocking rcfw command send. In this path, the driver can hold the
  545 * context for a longer period. There may be many pending commands waiting
  546 * for completion because of the non-blocking nature.
  547 *
  548 * The driver uses a shadow queue depth. The current queue depth of 8K
  549 * (given the rcfw message size, ~4K rcfw commands can actually be outstanding)
  550 * is not optimal for rcfw command processing in firmware.
  551 *
  552 * Restrict non-blocking rcfw commands to at most #RCFW_CMD_NON_BLOCKING_SHADOW_QD.
  553 * Allow all blocking commands as long as the queue is not full.
 554 *
 555 * Returns:
 556 * 0 if command completed by firmware.
 557 * Non zero if the command is not completed by firmware.
 558 */
 559int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
 560				 struct bnxt_qplib_cmdqmsg *msg)
 561{
 562	int ret;
 563
 564	if (!msg->block) {
 565		down(&rcfw->rcfw_inflight);
 566		ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
 567		up(&rcfw->rcfw_inflight);
 568	} else {
 569		ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
 570	}
 571
 572	return ret;
 573}
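
/*
 * Typical caller sequence (bnxt_qplib_deinit_rcfw() below is a concrete
 * in-tree example): prepare the request header, wrap request and response in a
 * bnxt_qplib_cmdqmsg, then hand it to bnxt_qplib_rcfw_send_message():
 *
 *	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req, opcode, sizeof(req));
 *	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 */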
 574
 575/* Completions */
 576static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
 577					 struct creq_func_event *func_event)
 578{
 579	int rc;
 580
 581	switch (func_event->event) {
 582	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
 583		break;
 584	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
 585		break;
 586	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
 587		break;
 588	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
 589		break;
 590	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
 591		break;
 592	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
 593		break;
 594	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
 595		break;
 596	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
 597		/* SRQ ctx error, call srq_handler??
 598		 * But there's no SRQ handle!
 599		 */
 600		break;
 601	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
 602		break;
 603	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
 604		break;
 605	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
 606		break;
 607	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
 608		break;
 609	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
 610		break;
 611	default:
 612		return -EINVAL;
 613	}
 614
 615	rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
 616	return rc;
 617}
 618
 619static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 620				       struct creq_qp_event *qp_event,
 621				       u32 *num_wait)
 622{
 623	struct creq_qp_error_notification *err_event;
 624	struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
 625	struct bnxt_qplib_crsqe *crsqe;
 626	u32 qp_id, tbl_indx, req_size;
 627	struct bnxt_qplib_qp *qp;
 628	u16 cookie, blocked = 0;
 629	bool is_waiter_alive;
 630	struct pci_dev *pdev;
 631	unsigned long flags;
 632	u32 wait_cmds = 0;
 633	int rc = 0;
 634
 635	pdev = rcfw->pdev;
 636	switch (qp_event->event) {
 637	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
 638		err_event = (struct creq_qp_error_notification *)qp_event;
 639		qp_id = le32_to_cpu(err_event->xid);
 640		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
 641		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
 642		dev_dbg(&pdev->dev, "Received QP error notification\n");
 643		dev_dbg(&pdev->dev,
 644			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
 645			qp_id, err_event->req_err_state_reason,
 646			err_event->res_err_state_reason);
 647		if (!qp)
 648			break;
 649		bnxt_qplib_mark_qp_error(qp);
 650		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
 651		break;
 652	default:
 653		/*
 654		 * Command Response
  655		 * cmdq->lock needs to be acquired to synchronize
 656		 * the command send and completion reaping. This function
 657		 * is always called with creq->lock held. Using
 658		 * the nested variant of spin_lock.
 659		 *
 660		 */
 661
 662		spin_lock_irqsave_nested(&hwq->lock, flags,
 663					 SINGLE_DEPTH_NESTING);
 664		cookie = le16_to_cpu(qp_event->cookie);
 665		blocked = cookie & RCFW_CMD_IS_BLOCKING;
 666		cookie &= RCFW_MAX_COOKIE_VALUE;
 667		crsqe = &rcfw->crsqe_tbl[cookie];
 668
 669		if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
 670				       &rcfw->cmdq.flags),
  671		    "QPLIB: Unresponsive rcfw channel detected!")) {
 672			dev_info(&pdev->dev,
  673				 "rcfw timed out: cookie = %#x, free_slots = %d",
 674				 cookie, crsqe->free_slots);
 675			spin_unlock_irqrestore(&hwq->lock, flags);
 676			return rc;
 677		}
 678
 679		if (crsqe->is_internal_cmd && !qp_event->status)
 680			atomic_dec(&rcfw->timeout_send);
 681
 682		if (crsqe->is_waiter_alive) {
 683			if (crsqe->resp) {
 684				memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
 685				/* Insert write memory barrier to ensure that
 686				 * response data is copied before clearing the
 687				 * flags
 688				 */
 689				smp_wmb();
 690			}
 691			if (!blocked)
 692				wait_cmds++;
 693		}
 694
 695		req_size = crsqe->req_size;
 696		is_waiter_alive = crsqe->is_waiter_alive;
 697
 698		crsqe->req_size = 0;
 699		if (!is_waiter_alive)
 700			crsqe->resp = NULL;
 701
 702		crsqe->is_in_used = false;
 703
 704		hwq->cons += req_size;
 705
  706		/* This handles the following scenario:
  707		 * Create AH completed successfully in firmware,
  708		 * but the completion took so long that the driver already lost
  709		 * the caller's create_ah context.
  710		 * We have already returned failure for the create_ah verb,
  711		 * so destroy the same address handle since it is
  712		 * no longer used by the stack. We don't care about the completion
  713		 * in __send_message_no_waiter.
  714		 * If destroy_ah fails in firmware, the AH resource leaks;
  715		 * this is relatively non-critical and an unlikely
  716		 * scenario, so the current design does not handle it.
 717		 */
 718		if (!is_waiter_alive && !qp_event->status &&
 719		    qp_event->event == CREQ_QP_EVENT_EVENT_CREATE_AH)
 720			__destroy_timedout_ah(rcfw,
 721					      (struct creq_create_ah_resp *)
 722					      qp_event);
 723		spin_unlock_irqrestore(&hwq->lock, flags);
 724	}
 725	*num_wait += wait_cmds;
 726	return rc;
 727}
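
/*
 * Cookie round trip, as encoded in __send_message() and decoded above: the
 * send side uses cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE and may OR in
 * RCFW_CMD_IS_BLOCKING; the completion side strips the blocking bit and masks
 * again before indexing crsqe_tbl:
 *
 *	cookie  = le16_to_cpu(qp_event->cookie);
 *	blocked = cookie & RCFW_CMD_IS_BLOCKING;
 *	cookie &= RCFW_MAX_COOKIE_VALUE;
 *	crsqe   = &rcfw->crsqe_tbl[cookie];
 */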
 728
 729/* SP - CREQ Completion handlers */
 730static void bnxt_qplib_service_creq(struct tasklet_struct *t)
 731{
 732	struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
 733	struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
 734	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 735	struct bnxt_qplib_hwq *hwq = &creq->hwq;
 736	struct creq_base *creqe;
 737	unsigned long flags;
 738	u32 num_wakeup = 0;
 739	u32 hw_polled = 0;
 740
 741	/* Service the CREQ until budget is over */
 742	spin_lock_irqsave(&hwq->lock, flags);
 743	while (budget > 0) {
 744		creqe = bnxt_qplib_get_qe(hwq, hwq->cons, NULL);
 745		if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
 746			break;
 747		/* The valid test of the entry must be done first before
 748		 * reading any further.
 749		 */
 750		dma_rmb();
 751		rcfw->cmdq.last_seen = jiffies;
 752
 753		type = creqe->type & CREQ_BASE_TYPE_MASK;
 754		switch (type) {
 755		case CREQ_BASE_TYPE_QP_EVENT:
 756			bnxt_qplib_process_qp_event
 757				(rcfw, (struct creq_qp_event *)creqe,
 758				 &num_wakeup);
 759			creq->stats.creq_qp_event_processed++;
 760			break;
 761		case CREQ_BASE_TYPE_FUNC_EVENT:
 762			if (!bnxt_qplib_process_func_event
 763			    (rcfw, (struct creq_func_event *)creqe))
 764				creq->stats.creq_func_event_processed++;
 765			else
 766				dev_warn(&rcfw->pdev->dev,
 767					 "aeqe:%#x Not handled\n", type);
 768			break;
 769		default:
 770			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
 771				dev_warn(&rcfw->pdev->dev,
 772					 "creqe with event 0x%x not handled\n",
 773					 type);
 774			break;
 775		}
 776		budget--;
 777		hw_polled++;
 778		bnxt_qplib_hwq_incr_cons(hwq->max_elements, &hwq->cons,
 779					 1, &creq->creq_db.dbinfo.flags);
 780	}
 781
 782	if (hw_polled)
 783		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
 784				      rcfw->res->cctx, true);
 785	spin_unlock_irqrestore(&hwq->lock, flags);
 786	if (num_wakeup)
 787		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
 788}
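
/*
 * Wake-up path note: a command completion handled above ends with
 * crsqe->is_in_used cleared under hwq->lock; once the poll budget loop
 * finishes, sleepers in __wait_for_resp() are released through
 * wake_up_nr(&rcfw->cmdq.waitq, num_wakeup).
 */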
 789
 790static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 791{
 792	struct bnxt_qplib_rcfw *rcfw = dev_instance;
 793	struct bnxt_qplib_creq_ctx *creq;
 794	struct bnxt_qplib_hwq *hwq;
 795	u32 sw_cons;
 796
 797	creq = &rcfw->creq;
 798	hwq = &creq->hwq;
 799	/* Prefetch the CREQ element */
 800	sw_cons = HWQ_CMP(hwq->cons, hwq);
 801	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));
 802
 803	tasklet_schedule(&creq->creq_tasklet);
 804
 805	return IRQ_HANDLED;
 806}
 807
 808/* RCFW */
 809int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
 810{
 811	struct creq_deinitialize_fw_resp resp = {};
 812	struct cmdq_deinitialize_fw req = {};
 813	struct bnxt_qplib_cmdqmsg msg = {};
 814	int rc;
 815
 816	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 817				 CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
 818				 sizeof(req));
 819	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
 820				sizeof(req), sizeof(resp), 0);
 821	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 822	if (rc)
 823		return rc;
 824
 825	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
 826	return 0;
 827}
 828
 829int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 830			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
 831{
 832	struct creq_initialize_fw_resp resp = {};
 833	struct cmdq_initialize_fw req = {};
 834	struct bnxt_qplib_cmdqmsg msg = {};
 835	u8 pgsz, lvl;
 836	int rc;
 837
 838	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 839				 CMDQ_BASE_OPCODE_INITIALIZE_FW,
 840				 sizeof(req));
 841	/* Supply (log-base-2-of-host-page-size - base-page-shift)
 842	 * to bono to adjust the doorbell page sizes.
 843	 */
 844	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
 845					   RCFW_DBR_BASE_PAGE_SHIFT);
 846	/*
  847	 * Gen P5 devices don't require this allocation
  848	 * as the L2 driver does the same for RoCE as well.
  849	 * Also, VFs need not set up the HW context area; the PF
  850	 * sets up this area for the VF. Skip the
  851	 * HW programming.
 852	 */
 853	if (is_virtfn)
 854		goto skip_ctx_setup;
 855	if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
 856		goto config_vf_res;
 857
 858	lvl = ctx->qpc_tbl.level;
 859	pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
 860	req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 861				   lvl;
 862	lvl = ctx->mrw_tbl.level;
 863	pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
 864	req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 865				   lvl;
 866	lvl = ctx->srqc_tbl.level;
 867	pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
 868	req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 869				   lvl;
 870	lvl = ctx->cq_tbl.level;
 871	pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
 872	req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 873				 lvl;
 874	lvl = ctx->tim_tbl.level;
 875	pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
 876	req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 877				   lvl;
 878	lvl = ctx->tqm_ctx.pde.level;
 879	pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
 880	req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
 881				   lvl;
 882	req.qpc_page_dir =
 883		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
 884	req.mrw_page_dir =
 885		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
 886	req.srq_page_dir =
 887		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
 888	req.cq_page_dir =
 889		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
 890	req.tim_page_dir =
 891		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
 892	req.tqm_page_dir =
 893		cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);
 894
 895	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
 896	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
 897	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
 898	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
 899
 900config_vf_res:
 901	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
 902	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
 903	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
 904	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
 905	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
 906
 907skip_ctx_setup:
 908	if (BNXT_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
 909		req.flags |= cpu_to_le16(CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED);
 910	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
 911	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
 912	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 913	if (rc)
 914		return rc;
 915	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
 916	return 0;
 917}
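
/*
 * Worked example for the doorbell page hint above, under the assumption that
 * RCFW_DBR_BASE_PAGE_SHIFT corresponds to a 4 KiB base page (shift of 12): on
 * a host with 64 KiB pages, log2_dbr_pg_size = PAGE_SHIFT - 12 = 16 - 12 = 4,
 * while on a 4 KiB-page host the field is 0.
 */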
 918
 919void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
 920{
 921	kfree(rcfw->qp_tbl);
 922	kfree(rcfw->crsqe_tbl);
 923	bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
 924	bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
 925	rcfw->pdev = NULL;
 926}
 927
 928int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 929				  struct bnxt_qplib_rcfw *rcfw,
 930				  struct bnxt_qplib_ctx *ctx,
 931				  int qp_tbl_sz)
 932{
 933	struct bnxt_qplib_hwq_attr hwq_attr = {};
 934	struct bnxt_qplib_sg_info sginfo = {};
 935	struct bnxt_qplib_cmdq_ctx *cmdq;
 936	struct bnxt_qplib_creq_ctx *creq;
 937
 938	rcfw->pdev = res->pdev;
 939	cmdq = &rcfw->cmdq;
 940	creq = &rcfw->creq;
 941	rcfw->res = res;
 942
 943	sginfo.pgsize = PAGE_SIZE;
 944	sginfo.pgshft = PAGE_SHIFT;
 945
 946	hwq_attr.sginfo = &sginfo;
 947	hwq_attr.res = rcfw->res;
 948	hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
 949	hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
 950	hwq_attr.type = bnxt_qplib_get_hwq_type(res);
 951
 952	if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
 953		dev_err(&rcfw->pdev->dev,
 954			"HW channel CREQ allocation failed\n");
 955		goto fail;
 956	}
 957
 958	rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT;
 959
 960	sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
 961	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
 962	hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
 963	hwq_attr.type = HWQ_TYPE_CTX;
 964	if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
 965		dev_err(&rcfw->pdev->dev,
 966			"HW channel CMDQ allocation failed\n");
 967		goto fail;
 968	}
 969
 970	rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
 971				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
 972	if (!rcfw->crsqe_tbl)
 973		goto fail;
 974
 975	/* Allocate one extra to hold the QP1 entries */
 976	rcfw->qp_tbl_size = qp_tbl_sz + 1;
 977	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
 978			       GFP_KERNEL);
 979	if (!rcfw->qp_tbl)
 980		goto fail;
 981
 982	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
 983
 984	return 0;
 985
 986fail:
 987	bnxt_qplib_free_rcfw_channel(rcfw);
 988	return -ENOMEM;
 989}
 990
 991void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
 992{
 993	struct bnxt_qplib_creq_ctx *creq;
 994
 995	creq = &rcfw->creq;
 996
 997	if (!creq->requested)
 998		return;
 999
1000	creq->requested = false;
1001	/* Mask h/w interrupts */
1002	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
1003	/* Sync with last running IRQ-handler */
1004	synchronize_irq(creq->msix_vec);
1005	free_irq(creq->msix_vec, rcfw);
1006	kfree(creq->irq_name);
1007	creq->irq_name = NULL;
1008	atomic_set(&rcfw->rcfw_intr_enabled, 0);
1009	if (kill)
1010		tasklet_kill(&creq->creq_tasklet);
1011	tasklet_disable(&creq->creq_tasklet);
1012}
1013
1014void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
1015{
1016	struct bnxt_qplib_creq_ctx *creq;
1017	struct bnxt_qplib_cmdq_ctx *cmdq;
1018
1019	creq = &rcfw->creq;
1020	cmdq = &rcfw->cmdq;
1021	/* Make sure the HW channel is stopped! */
1022	bnxt_qplib_rcfw_stop_irq(rcfw, true);
1023
1024	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
1025	iounmap(creq->creq_db.reg.bar_reg);
1026
1027	cmdq->cmdq_mbox.reg.bar_reg = NULL;
1028	creq->creq_db.reg.bar_reg = NULL;
1029	creq->aeq_handler = NULL;
1030	creq->msix_vec = 0;
1031}
1032
1033int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
1034			      bool need_init)
1035{
1036	struct bnxt_qplib_creq_ctx *creq;
1037	struct bnxt_qplib_res *res;
1038	int rc;
1039
1040	creq = &rcfw->creq;
1041	res = rcfw->res;
1042
1043	if (creq->requested)
1044		return -EFAULT;
1045
1046	creq->msix_vec = msix_vector;
1047	if (need_init)
1048		tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
1049	else
1050		tasklet_enable(&creq->creq_tasklet);
1051
1052	creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
1053				   pci_name(res->pdev));
1054	if (!creq->irq_name)
1055		return -ENOMEM;
1056	rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
1057			 creq->irq_name, rcfw);
1058	if (rc) {
1059		kfree(creq->irq_name);
1060		creq->irq_name = NULL;
1061		tasklet_disable(&creq->creq_tasklet);
1062		return rc;
1063	}
1064	creq->requested = true;
1065
1066	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
1067	atomic_inc(&rcfw->rcfw_intr_enabled);
1068
1069	return 0;
1070}
1071
1072static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)
1073{
1074	struct bnxt_qplib_cmdq_mbox *mbox;
1075	resource_size_t bar_reg;
1076	struct pci_dev *pdev;
1077
1078	pdev = rcfw->pdev;
1079	mbox = &rcfw->cmdq.cmdq_mbox;
1080
1081	mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
1082	mbox->reg.len = RCFW_COMM_SIZE;
1083	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
1084	if (!mbox->reg.bar_base) {
1085		dev_err(&pdev->dev,
1086			"QPLIB: CMDQ BAR region %d resc start is 0!\n",
1087			mbox->reg.bar_id);
1088		return -ENOMEM;
1089	}
1090
1091	bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
1092	mbox->reg.len = RCFW_COMM_SIZE;
1093	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
1094	if (!mbox->reg.bar_reg) {
1095		dev_err(&pdev->dev,
1096			"QPLIB: CMDQ BAR region %d mapping failed\n",
1097			mbox->reg.bar_id);
1098		return -ENOMEM;
1099	}
1100
1101	mbox->prod = (void  __iomem *)(mbox->reg.bar_reg +
1102			RCFW_PF_VF_COMM_PROD_OFFSET);
1103	mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
1104	return 0;
1105}
1106
1107static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
1108{
1109	struct bnxt_qplib_creq_db *creq_db;
1110	resource_size_t bar_reg;
1111	struct pci_dev *pdev;
1112
1113	pdev = rcfw->pdev;
1114	creq_db = &rcfw->creq.creq_db;
1115
1116	creq_db->dbinfo.flags = 0;
1117	creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
1118	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
 1119	if (!creq_db->reg.bar_base)
1120		dev_err(&pdev->dev,
1121			"QPLIB: CREQ BAR region %d resc start is 0!",
1122			creq_db->reg.bar_id);
1123
1124	bar_reg = creq_db->reg.bar_base + reg_offt;
1125	/* Unconditionally map 8 bytes to support 57500 series */
1126	creq_db->reg.len = 8;
1127	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
1128	if (!creq_db->reg.bar_reg) {
1129		dev_err(&pdev->dev,
1130			"QPLIB: CREQ BAR region %d mapping failed",
1131			creq_db->reg.bar_id);
1132		return -ENOMEM;
1133	}
1134	creq_db->dbinfo.db = creq_db->reg.bar_reg;
1135	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
1136	creq_db->dbinfo.xid = rcfw->creq.ring_id;
1137	return 0;
1138}
1139
1140static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
1141{
1142	struct bnxt_qplib_cmdq_ctx *cmdq;
1143	struct bnxt_qplib_creq_ctx *creq;
1144	struct bnxt_qplib_cmdq_mbox *mbox;
1145	struct cmdq_init init = {0};
1146
1147	cmdq = &rcfw->cmdq;
1148	creq = &rcfw->creq;
1149	mbox = &cmdq->cmdq_mbox;
1150
1151	init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
1152	init.cmdq_size_cmdq_lvl =
1153			cpu_to_le16(((rcfw->cmdq_depth <<
1154				      CMDQ_INIT_CMDQ_SIZE_SFT) &
1155				    CMDQ_INIT_CMDQ_SIZE_MASK) |
1156				    ((cmdq->hwq.level <<
1157				      CMDQ_INIT_CMDQ_LVL_SFT) &
1158				    CMDQ_INIT_CMDQ_LVL_MASK));
1159	init.creq_ring_id = cpu_to_le16(creq->ring_id);
1160	/* Write to the Bono mailbox register */
1161	__iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
1162}
1163
1164int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
1165				   int msix_vector,
1166				   int cp_bar_reg_off,
1167				   aeq_handler_t aeq_handler)
1168{
1169	struct bnxt_qplib_cmdq_ctx *cmdq;
1170	struct bnxt_qplib_creq_ctx *creq;
1171	int rc;
1172
1173	cmdq = &rcfw->cmdq;
1174	creq = &rcfw->creq;
1175
1176	/* Clear to defaults */
1177
1178	cmdq->seq_num = 0;
1179	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
1180	init_waitqueue_head(&cmdq->waitq);
1181
1182	creq->stats.creq_qp_event_processed = 0;
1183	creq->stats.creq_func_event_processed = 0;
1184	creq->aeq_handler = aeq_handler;
1185
1186	rc = bnxt_qplib_map_cmdq_mbox(rcfw);
1187	if (rc)
1188		return rc;
1189
1190	rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
1191	if (rc)
1192		return rc;
1193
1194	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
1195	if (rc) {
1196		dev_err(&rcfw->pdev->dev,
1197			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
1198		bnxt_qplib_disable_rcfw_channel(rcfw);
1199		return rc;
1200	}
1201
1202	sema_init(&rcfw->rcfw_inflight, RCFW_CMD_NON_BLOCKING_SHADOW_QD);
1203	bnxt_qplib_start_rcfw(rcfw);
1204
1205	return 0;
1206}