v6.8
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <asm/byteorder.h>
   9#include <linux/bitops.h>
  10#include <linux/delay.h>
  11#include <linux/dma-mapping.h>
  12#include <linux/errno.h>
  13#include <linux/io.h>
  14#include <linux/kernel.h>
  15#include <linux/list.h>
  16#include <linux/module.h>
  17#include <linux/mutex.h>
  18#include <linux/pci.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/string.h>
  22#include <linux/if_vlan.h>
  23#include "qed.h"
  24#include "qed_cxt.h"
  25#include "qed_dcbx.h"
  26#include "qed_hsi.h"
  27#include "qed_hw.h"
  28#include "qed_init_ops.h"
  29#include "qed_int.h"
  30#include "qed_ll2.h"
  31#include "qed_mcp.h"
  32#include "qed_reg_addr.h"
  33#include <linux/qed/qed_rdma_if.h>
  34#include "qed_rdma.h"
  35#include "qed_roce.h"
  36#include "qed_sp.h"
  37
  38static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
  39
  40static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
  41				__le16 echo, union event_ring_data *data,
  42				u8 fw_return_code)
  43{
  44	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
  45	union rdma_eqe_data *rdata = &data->rdma_data;
  46
  47	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
  48		u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
  49
  50		/* icid release in this async event can occur only if the icid
  51		 * was offloaded to the FW. In case it wasn't offloaded this is
  52		 * handled in qed_roce_sp_destroy_qp.
  53		 */
  54		qed_roce_free_real_icid(p_hwfn, icid);
  55	} else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
  56		   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
  57		u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
  58
  59		events.affiliated_event(events.context, fw_event_code,
  60					&srq_id);
  61	} else {
  62		events.affiliated_event(events.context, fw_event_code,
  63					(void *)&rdata->async_handle);
  64	}
  65
  66	return 0;
  67}
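/*
 * Dispatch summary for qed_roce_async_event() above: DESTROY_QP_DONE releases
 * the "real" icid via qed_roce_free_real_icid(), the SRQ_EMPTY/SRQ_LIMIT
 * events pass only the 16-bit SRQ id taken from async_handle.lo, and every
 * other event code forwards the full async_handle to the affiliated_event()
 * callback the upper layer registered in p_rdma_info->events.
 */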
  68
  69void qed_roce_stop(struct qed_hwfn *p_hwfn)
  70{
  71	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
  72	int wait_count = 0;
  73
  74	/* When destroying a RoCE QP, control is returned to the user after
  75	 * the synchronous part. The asynchronous part may take a little longer.
  76	 * We delay for a short while if an async destroy QP is still expected.
  77	 * Beyond the added delay we clear the bitmap anyway.
  78	 */
  79	while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
  80		/* If the HW device is during recovery, all resources are
  81		 * immediately reset without receiving a per-cid indication
  82		 * from HW. In this case we don't expect the cid bitmap to be
  83		 * cleared.
  84		 */
  85		if (p_hwfn->cdev->recov_in_prog)
  86			return;
  87
  88		msleep(100);
  89		if (wait_count++ > 20) {
  90			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
  91			break;
  92		}
  93	}
  94}
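/*
 * The wait loop above gives pending asynchronous destroy-QP flows roughly
 * two seconds (20+ iterations of msleep(100)) to clear their bits from
 * real_cid_map; during HW recovery it bails out immediately because no
 * per-cid completions will arrive.
 */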
  95
  96static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
  97			       __le32 *dst_gid)
  98{
  99	u32 i;
 100
 101	if (qp->roce_mode == ROCE_V2_IPV4) {
 102		/* The IPv4 addresses shall be aligned to the highest word.
 103		 * The lower words must be zero.
 104		 */
 105		memset(src_gid, 0, sizeof(union qed_gid));
 106		memset(dst_gid, 0, sizeof(union qed_gid));
 107		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
 108		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
 109	} else {
 110		/* GIDs and IPv6 addresses coincide in location and size */
 111		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
 112			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
 113			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
 114		}
 115	}
 116}
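/*
 * GID layout: for ROCE_V2_IPV4 the 32-bit address goes into the last dword
 * of the 16-byte GID and the lower dwords are zeroed, as the comment above
 * requires; for RoCE v1 and v2/IPv6 the GID is copied dword by dword with a
 * cpu_to_le32() conversion on each word.
 */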
 117
 118static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 119{
 120	switch (roce_mode) {
 121	case ROCE_V1:
 122		return PLAIN_ROCE;
 123	case ROCE_V2_IPV4:
 124		return RROCE_IPV4;
 125	case ROCE_V2_IPV6:
 126		return RROCE_IPV6;
 127	default:
 128		return MAX_ROCE_FLAVOR;
 129	}
 130}
 131
 132static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
 133{
 134	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 135	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 136	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
 137	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 138}
 139
 140int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 141{
 142	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 143	u32 responder_icid;
 144	u32 requester_icid;
 145	int rc;
 146
 147	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 148	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
 149				    &responder_icid);
 150	if (rc) {
 151		spin_unlock_bh(&p_rdma_info->lock);
 152		return rc;
 153	}
 154
 155	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
 156				    &requester_icid);
 157
 158	spin_unlock_bh(&p_rdma_info->lock);
 159	if (rc)
 160		goto err;
 161
 162	/* The two icids should be adjacent */
 163	if ((requester_icid - responder_icid) != 1) {
 164		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
 165		rc = -EINVAL;
 166		goto err;
 167	}
 168
 169	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
 170						      p_rdma_info->proto);
 171	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
 172						      p_rdma_info->proto);
 173
 174	/* If these icids require a new ILT line, allocate DMA-able context for
 175	 * an ILT page
 176	 */
 177	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
 178	if (rc)
 179		goto err;
 180
 181	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
 182	if (rc)
 183		goto err;
 184
 185	*cid = (u16)responder_icid;
 186	return rc;
 187
 188err:
 189	spin_lock_bh(&p_rdma_info->lock);
 190	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
 191	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
 192
 193	spin_unlock_bh(&p_rdma_info->lock);
 194	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 195		   "Allocate CID - failed, rc = %d\n", rc);
 196	return rc;
 197}
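/*
 * Only the responder icid is returned to the caller; the requester icid is
 * implicitly responder + 1, which is why the ramrods below use qp->icid for
 * the responder and qp->icid + 1 for the requester. A rough usage sketch,
 * with error handling elided:
 *
 *	u16 icid;
 *
 *	if (!qed_roce_alloc_cid(p_hwfn, &icid))
 *		qp->icid = icid;	(the requester side is icid + 1)
 */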
 198
 199static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
 200{
 201	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 202	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
 203	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 204}
 205
 206static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 207{
 208	u8 pri, tc = 0;
 209
 210	if (qp->vlan_id) {
 211		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 212		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
 213	}
 214
 215	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 216		   "qp icid %u tc: %u (vlan priority %s)\n",
 217		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
 218
 219	return tc;
 220}
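/*
 * TC selection: with a VLAN present, the 3-bit PCP is extracted from vlan_id
 * using VLAN_PRIO_MASK/VLAN_PRIO_SHIFT and mapped to a traffic class through
 * DCBX; without a VLAN the QP stays on TC 0.
 */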
 221
 222static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 223					struct qed_rdma_qp *qp)
 224{
 225	struct roce_create_qp_resp_ramrod_data *p_ramrod;
 226	u16 regular_latency_queue, low_latency_queue;
 227	struct qed_sp_init_data init_data;
 228	struct qed_spq_entry *p_ent;
 229	enum protocol_type proto;
 230	u32 flags = 0;
 231	int rc;
 232	u8 tc;
 233
 234	if (!qp->has_resp)
 235		return 0;
 236
 237	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 238
 239	/* Allocate DMA-able memory for IRQ */
 240	qp->irq_num_pages = 1;
 241	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 242				     RDMA_RING_PAGE_SIZE,
 243				     &qp->irq_phys_addr, GFP_KERNEL);
 244	if (!qp->irq) {
 245		rc = -ENOMEM;
 246		DP_NOTICE(p_hwfn,
 247			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
 248			  rc);
 249		return rc;
 250	}
 251
 252	/* Get SPQ entry */
 253	memset(&init_data, 0, sizeof(init_data));
 254	init_data.cid = qp->icid;
 255	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 256	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 257
 258	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
 259				 PROTOCOLID_ROCE, &init_data);
 260	if (rc)
 261		goto err;
 262
 263	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
 264		  qed_roce_mode_to_flavor(qp->roce_mode));
 265
 266	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
 267		  qp->incoming_rdma_read_en);
 268
 269	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
 270		  qp->incoming_rdma_write_en);
 271
 272	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
 273		  qp->incoming_atomic_en);
 274
 275	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
 276		  qp->e2e_flow_control_en);
 277
 278	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 279
 280	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
 281		  qp->fmr_and_reserved_lkey);
 282
 283	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
 284		  qp->min_rnr_nak_timer);
 285
 286	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
 287		  qed_rdma_is_xrc_qp(qp));
 288
 289	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
 290	p_ramrod->flags = cpu_to_le32(flags);
 291	p_ramrod->max_ird = qp->max_rd_atomic_resp;
 292	p_ramrod->traffic_class = qp->traffic_class_tos;
 293	p_ramrod->hop_limit = qp->hop_limit_ttl;
 294	p_ramrod->irq_num_pages = qp->irq_num_pages;
 295	p_ramrod->p_key = cpu_to_le16(qp->pkey);
 296	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 297	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
 298	p_ramrod->mtu = cpu_to_le16(qp->mtu);
 299	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
 300	p_ramrod->pd = cpu_to_le16(qp->pd);
 301	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
 302	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
 303	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
 304	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 305	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
 306	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
 307	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
 308	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
 309	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 310				       qp->rq_cq_id);
 311	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
 312
 313	tc = qed_roce_get_qp_tc(p_hwfn, qp);
 314	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
 315	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
 316	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 317		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
 318		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
 319		   low_latency_queue - CM_TX_PQ_BASE);
 320	p_ramrod->regular_latency_phy_queue =
 321	    cpu_to_le16(regular_latency_queue);
 322	p_ramrod->low_latency_phy_queue =
 323	    cpu_to_le16(low_latency_queue);
 324
 325	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 326
 327	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
 328	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 329
 330	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
 331	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
 332	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 333	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 334
 335	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
 336				     qp->stats_queue;
 337
 338	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 339	if (rc)
 340		goto err;
 341
 342	qp->resp_offloaded = true;
 343	qp->cq_prod = 0;
 344
 345	proto = p_hwfn->p_rdma_info->proto;
 346	qed_roce_set_real_cid(p_hwfn, qp->icid -
 347			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 348
 349	return rc;
 350
 351err:
 352	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
 353	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 354			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
 355			  qp->irq, qp->irq_phys_addr);
 356
 357	return rc;
 358}
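/*
 * Responder creation in brief: allocate one RDMA_RING_PAGE_SIZE page for the
 * IRQ ring, build a ROCE_RAMROD_CREATE_QP entry on cid qp->icid, accumulate
 * the enable bits in a host-order 'flags' word with SET_FIELD() - e.g.
 * SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq) -
 * convert it once with cpu_to_le32(), and post the ramrod in EBLOCK mode;
 * on success the responder's relative cid is marked in real_cid_map.
 */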
 359
 360static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 361					struct qed_rdma_qp *qp)
 362{
 363	struct roce_create_qp_req_ramrod_data *p_ramrod;
 364	u16 regular_latency_queue, low_latency_queue;
 365	struct qed_sp_init_data init_data;
 366	struct qed_spq_entry *p_ent;
 367	enum protocol_type proto;
 368	u16 flags = 0;
 369	int rc;
 370	u8 tc;
 371
 372	if (!qp->has_req)
 373		return 0;
 374
 375	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 376
 377	/* Allocate DMA-able memory for ORQ */
 378	qp->orq_num_pages = 1;
 379	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 380				     RDMA_RING_PAGE_SIZE,
 381				     &qp->orq_phys_addr, GFP_KERNEL);
 382	if (!qp->orq) {
 383		rc = -ENOMEM;
 384		DP_NOTICE(p_hwfn,
 385			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
 386			  rc);
 387		return rc;
 388	}
 389
 390	/* Get SPQ entry */
 391	memset(&init_data, 0, sizeof(init_data));
 392	init_data.cid = qp->icid + 1;
 393	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 394	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 395
 396	rc = qed_sp_init_request(p_hwfn, &p_ent,
 397				 ROCE_RAMROD_CREATE_QP,
 398				 PROTOCOLID_ROCE, &init_data);
 399	if (rc)
 400		goto err;
 401
 402	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
 403		  qed_roce_mode_to_flavor(qp->roce_mode));
 404
 405	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
 406		  qp->fmr_and_reserved_lkey);
 407
 408	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
 409		  qp->signal_all);
 410
 411	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
 412		  qp->retry_cnt);
 413
 414	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
 415		  qp->rnr_retry_cnt);
 416
 417	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
 418		  qed_rdma_is_xrc_qp(qp));
 419
 420	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
 421	p_ramrod->flags = cpu_to_le16(flags);
 422
 423	SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
 424		  qp->edpm_mode);
 425
 426	p_ramrod->max_ord = qp->max_rd_atomic_req;
 427	p_ramrod->traffic_class = qp->traffic_class_tos;
 428	p_ramrod->hop_limit = qp->hop_limit_ttl;
 429	p_ramrod->orq_num_pages = qp->orq_num_pages;
 430	p_ramrod->p_key = cpu_to_le16(qp->pkey);
 431	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 432	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
 433	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
 434	p_ramrod->mtu = cpu_to_le16(qp->mtu);
 435	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
 436	p_ramrod->pd = cpu_to_le16(qp->pd);
 437	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
 438	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
 439	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
 440	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 441	p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
 442	p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
 443	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
 444	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
 445	p_ramrod->cq_cid =
 446	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 447
 448	tc = qed_roce_get_qp_tc(p_hwfn, qp);
 449	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
 450	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
 451	DP_VERBOSE(p_hwfn, QED_MSG_SP,
 452		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
 453		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
 454		   low_latency_queue - CM_TX_PQ_BASE);
 455	p_ramrod->regular_latency_phy_queue =
 456	    cpu_to_le16(regular_latency_queue);
 457	p_ramrod->low_latency_phy_queue =
 458	    cpu_to_le16(low_latency_queue);
 459
 460	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 461
 462	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
 463	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 464
 465	p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
 466	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
 467	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
 468				     qp->stats_queue;
 469
 470	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 471	if (rc)
 472		goto err;
 473
 474	qp->req_offloaded = true;
 475	proto = p_hwfn->p_rdma_info->proto;
 476	qed_roce_set_real_cid(p_hwfn,
 477			      qp->icid + 1 -
 478			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 479
 480	return rc;
 481
 482err:
 483	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
 484	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 485			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
 486			  qp->orq, qp->orq_phys_addr);
 487	return rc;
 488}
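/*
 * The requester path mirrors the responder but runs on cid qp->icid + 1,
 * allocates an ORQ page instead of an IRQ page, and packs its enable bits
 * into a 16-bit flags word (cpu_to_le16()) plus the separate flags2
 * EDPM_MODE bit.
 */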
 489
 490static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
 491					struct qed_rdma_qp *qp,
 492					bool move_to_err, u32 modify_flags)
 493{
 494	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
 495	struct qed_sp_init_data init_data;
 496	struct qed_spq_entry *p_ent;
 497	u16 flags = 0;
 498	int rc;
 499
 500	if (!qp->has_resp)
 501		return 0;
 502
 503	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 504
 505	if (move_to_err && !qp->resp_offloaded)
 506		return 0;
 507
 508	/* Get SPQ entry */
 509	memset(&init_data, 0, sizeof(init_data));
 510	init_data.cid = qp->icid;
 511	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 512	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 513
 514	rc = qed_sp_init_request(p_hwfn, &p_ent,
 515				 ROCE_EVENT_MODIFY_QP,
 516				 PROTOCOLID_ROCE, &init_data);
 517	if (rc) {
 518		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
 519		return rc;
 520	}
 521
 522	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
 523		  !!move_to_err);
 524
 525	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
 526		  qp->incoming_rdma_read_en);
 527
 528	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
 529		  qp->incoming_rdma_write_en);
 530
 531	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
 532		  qp->incoming_atomic_en);
 533
 534	SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
 535		  qp->e2e_flow_control_en);
 536
 537	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
 538		  GET_FIELD(modify_flags,
 539			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
 540
 541	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
 542		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 543
 544	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
 545		  GET_FIELD(modify_flags,
 546			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 547
 548	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
 549		  GET_FIELD(modify_flags,
 550			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
 551
 552	SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
 553		  GET_FIELD(modify_flags,
 554			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
 555
 556	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
 557	p_ramrod->flags = cpu_to_le16(flags);
 558
 559	p_ramrod->fields = 0;
 560	SET_FIELD(p_ramrod->fields,
 561		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
 562		  qp->min_rnr_nak_timer);
 563
 564	p_ramrod->max_ird = qp->max_rd_atomic_resp;
 565	p_ramrod->traffic_class = qp->traffic_class_tos;
 566	p_ramrod->hop_limit = qp->hop_limit_ttl;
 567	p_ramrod->p_key = cpu_to_le16(qp->pkey);
 568	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 569	p_ramrod->mtu = cpu_to_le16(qp->mtu);
 570	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 571	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 572
 573	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
 574	return rc;
 575}
 576
 577static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 578					struct qed_rdma_qp *qp,
 579					bool move_to_sqd,
 580					bool move_to_err, u32 modify_flags)
 581{
 582	struct roce_modify_qp_req_ramrod_data *p_ramrod;
 583	struct qed_sp_init_data init_data;
 584	struct qed_spq_entry *p_ent;
 585	u16 flags = 0;
 586	int rc;
 587
 588	if (!qp->has_req)
 589		return 0;
 590
 591	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 592
 593	if (move_to_err && !(qp->req_offloaded))
 594		return 0;
 595
 596	/* Get SPQ entry */
 597	memset(&init_data, 0, sizeof(init_data));
 598	init_data.cid = qp->icid + 1;
 599	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 600	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 601
 602	rc = qed_sp_init_request(p_hwfn, &p_ent,
 603				 ROCE_EVENT_MODIFY_QP,
 604				 PROTOCOLID_ROCE, &init_data);
 605	if (rc) {
 606		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
 607		return rc;
 608	}
 609
 610	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
 611		  !!move_to_err);
 612
 613	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
 614		  !!move_to_sqd);
 615
 616	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
 617		  qp->sqd_async);
 618
 619	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
 620		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 621
 622	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
 623		  GET_FIELD(modify_flags,
 624			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 625
 626	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
 627		  GET_FIELD(modify_flags,
 628			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
 629
 630	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
 631		  GET_FIELD(modify_flags,
 632			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
 633
 634	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
 635		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
 636
 637	SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
 638		  GET_FIELD(modify_flags,
 639			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
 640
 641	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
 642	p_ramrod->flags = cpu_to_le16(flags);
 643
 644	p_ramrod->fields = 0;
 645	SET_FIELD(p_ramrod->fields,
 646		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
 647	SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
 648		  qp->rnr_retry_cnt);
 649
 650	p_ramrod->max_ord = qp->max_rd_atomic_req;
 651	p_ramrod->traffic_class = qp->traffic_class_tos;
 652	p_ramrod->hop_limit = qp->hop_limit_ttl;
 653	p_ramrod->p_key = cpu_to_le16(qp->pkey);
 654	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
 655	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
 656	p_ramrod->mtu = cpu_to_le16(qp->mtu);
 657	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
 658	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 659
 660	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
 661	return rc;
 662}
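/*
 * Both modify helpers translate the caller's modify_flags mask into
 * per-attribute "valid" bits with GET_FIELD()/SET_FIELD(), so the FW only
 * touches the fields that actually changed; the move-to-error and
 * move-to-SQD transitions are expressed as flag bits of the same ramrod
 * rather than as separate ramrod types.
 */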
 663
 664static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 665					    struct qed_rdma_qp *qp,
 666					    u32 *cq_prod)
 667{
 668	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
 669	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
 670	struct qed_sp_init_data init_data;
 671	struct qed_spq_entry *p_ent;
 672	dma_addr_t ramrod_res_phys;
 673	int rc;
 674
 675	if (!qp->has_resp) {
 676		*cq_prod = 0;
 677		return 0;
 678	}
 679
 680	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 681	*cq_prod = qp->cq_prod;
 682
 683	if (!qp->resp_offloaded) {
 684		/* If a responder was never offloaded, we need to free the cids
 685		 * allocated in create_qp as a FW async event will never arrive
 686		 */
 687		u32 cid;
 688
 689		cid = qp->icid -
 690		      qed_cxt_get_proto_cid_start(p_hwfn,
 691						  p_hwfn->p_rdma_info->proto);
 692		qed_roce_free_cid_pair(p_hwfn, (u16)cid);
 693
 694		return 0;
 695	}
 696
 697	/* Get SPQ entry */
 698	memset(&init_data, 0, sizeof(init_data));
 699	init_data.cid = qp->icid;
 700	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 701	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 702
 703	rc = qed_sp_init_request(p_hwfn, &p_ent,
 704				 ROCE_RAMROD_DESTROY_QP,
 705				 PROTOCOLID_ROCE, &init_data);
 706	if (rc)
 707		return rc;
 708
 709	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
 710
 711	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 712					  sizeof(*p_ramrod_res),
 713					  &ramrod_res_phys, GFP_KERNEL);
 714
 715	if (!p_ramrod_res) {
 716		rc = -ENOMEM;
 717		DP_NOTICE(p_hwfn,
 718			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
 719			  rc);
 720		qed_sp_destroy_request(p_hwfn, p_ent);
 721		return rc;
 722	}
 723
 724	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 725
 726	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 727	if (rc)
 728		goto err;
 729
 730	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
 731	qp->cq_prod = *cq_prod;
 732
 733	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
 734	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 735			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
 736			  qp->irq, qp->irq_phys_addr);
 737
 738	qp->resp_offloaded = false;
 739
 740	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
 741
 742err:
 743	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 744			  sizeof(struct roce_destroy_qp_resp_output_params),
 745			  p_ramrod_res, ramrod_res_phys);
 746
 747	return rc;
 748}
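/*
 * Responder teardown returns the final CQ producer through a small
 * DMA-coherent output buffer; the IRQ ring is freed only after the ramrod
 * completes (the FW may still reference it), and a responder that was never
 * offloaded simply releases the cid pair taken in qed_roce_alloc_cid().
 */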
 749
 750static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
 751					    struct qed_rdma_qp *qp)
 752{
 753	struct roce_destroy_qp_req_output_params *p_ramrod_res;
 754	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
 755	struct qed_sp_init_data init_data;
 756	struct qed_spq_entry *p_ent;
 757	dma_addr_t ramrod_res_phys;
 758	int rc = -ENOMEM;
 759
 760	if (!qp->has_req)
 761		return 0;
 762
 763	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 764
 765	if (!qp->req_offloaded)
 766		return 0;
 767
 768	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 769					  sizeof(*p_ramrod_res),
 770					  &ramrod_res_phys, GFP_KERNEL);
 771	if (!p_ramrod_res) {
 772		DP_NOTICE(p_hwfn,
 773			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
 774		return rc;
 775	}
 776
 777	/* Get SPQ entry */
 778	memset(&init_data, 0, sizeof(init_data));
 779	init_data.cid = qp->icid + 1;
 780	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 781	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 782
 783	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
 784				 PROTOCOLID_ROCE, &init_data);
 785	if (rc)
 786		goto err;
 787
 788	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
 789	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 790
 791	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 792	if (rc)
 793		goto err;
 794
 795	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
 796	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 797			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
 798			  qp->orq, qp->orq_phys_addr);
 799
 800	qp->req_offloaded = false;
 801
 802	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
 803
 804err:
 805	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
 806			  p_ramrod_res, ramrod_res_phys);
 807
 808	return rc;
 809}
 810
 811int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 812		      struct qed_rdma_qp *qp,
 813		      struct qed_rdma_query_qp_out_params *out_params)
 814{
 815	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
 816	struct roce_query_qp_req_output_params *p_req_ramrod_res;
 817	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
 818	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
 819	struct qed_sp_init_data init_data;
 820	dma_addr_t resp_ramrod_res_phys;
 821	dma_addr_t req_ramrod_res_phys;
 822	struct qed_spq_entry *p_ent;
 823	bool rq_err_state;
 824	bool sq_err_state;
 825	bool sq_draining;
 826	int rc = -ENOMEM;
 827
 828	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
 829		/* We can't send ramrod to the fw since this qp wasn't offloaded
 830		 * to the fw yet
 831		 */
 832		out_params->draining = false;
 833		out_params->rq_psn = qp->rq_psn;
 834		out_params->sq_psn = qp->sq_psn;
 835		out_params->state = qp->cur_state;
 836
 837		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP not offloaded - returning cached state\n");
 838		return 0;
 839	}
 840
 841	if (!(qp->resp_offloaded)) {
 842		DP_NOTICE(p_hwfn,
 843			  "The responder's qp should be offloaded before requester's\n");
 844		return -EINVAL;
 845	}
 846
 847	/* Send a query responder ramrod to FW to get RQ-PSN and state */
 848	p_resp_ramrod_res =
 849		dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 850				   sizeof(*p_resp_ramrod_res),
 851				   &resp_ramrod_res_phys, GFP_KERNEL);
 852	if (!p_resp_ramrod_res) {
 853		DP_NOTICE(p_hwfn,
 854			  "qed query qp failed: cannot allocate memory (ramrod)\n");
 855		return rc;
 856	}
 857
 858	/* Get SPQ entry */
 859	memset(&init_data, 0, sizeof(init_data));
 860	init_data.cid = qp->icid;
 861	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 862	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 863	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 864				 PROTOCOLID_ROCE, &init_data);
 865	if (rc)
 866		goto err_resp;
 867
 868	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
 869	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
 870
 871	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 872	if (rc)
 873		goto err_resp;
 874
 875	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
 876	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
 877				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
 878
 879	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
 880			  p_resp_ramrod_res, resp_ramrod_res_phys);
 881
 882	if (!(qp->req_offloaded)) {
 883		/* Don't send query qp for the requester */
 884		out_params->sq_psn = qp->sq_psn;
 885		out_params->draining = false;
 886
 887		if (rq_err_state)
 888			qp->cur_state = QED_ROCE_QP_STATE_ERR;
 889
 890		out_params->state = qp->cur_state;
 891
 892		return 0;
 893	}
 894
 895	/* Send a query requester ramrod to FW to get SQ-PSN and state */
 896	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 897					      sizeof(*p_req_ramrod_res),
 898					      &req_ramrod_res_phys,
 899					      GFP_KERNEL);
 900	if (!p_req_ramrod_res) {
 901		rc = -ENOMEM;
 902		DP_NOTICE(p_hwfn,
 903			  "qed query qp failed: cannot allocate memory (ramrod)\n");
 904		return rc;
 905	}
 906
 907	/* Get SPQ entry */
 908	init_data.cid = qp->icid + 1;
 909	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
 910				 PROTOCOLID_ROCE, &init_data);
 911	if (rc)
 912		goto err_req;
 913
 914	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
 915	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
 916
 917	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 918	if (rc)
 919		goto err_req;
 920
 921	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
 922	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
 923				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
 924	sq_draining =
 925		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
 926			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
 927
 928	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
 929			  p_req_ramrod_res, req_ramrod_res_phys);
 930
 931	out_params->draining = false;
 932
 933	if (rq_err_state || sq_err_state)
 934		qp->cur_state = QED_ROCE_QP_STATE_ERR;
 935	else if (sq_draining)
 936		out_params->draining = true;
 937	out_params->state = qp->cur_state;
 938
 939	return 0;
 940
 941err_req:
 942	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
 943			  p_req_ramrod_res, req_ramrod_res_phys);
 944	return rc;
 945err_resp:
 946	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
 947			  p_resp_ramrod_res, resp_ramrod_res_phys);
 948	return rc;
 949}
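/*
 * Query order matters: the responder is queried first (it must be offloaded
 * before the requester), the RQ/SQ PSNs and the error/draining bits come
 * back through DMA-coherent output buffers, and an error reported by either
 * side moves cur_state to QED_ROCE_QP_STATE_ERR.
 */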
 950
 951int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 952{
 953	u32 cq_prod;
 954	int rc;
 955
 956	/* Destroys the specified QP */
 957	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
 958	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
 959	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
 960		DP_NOTICE(p_hwfn,
 961			  "QP must be in error, reset or init state before destroying it\n");
 962		return -EINVAL;
 963	}
 964
 965	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 966		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
 967						      &cq_prod);
 968		if (rc)
 969			return rc;
 970
 971		/* Send destroy requester ramrod */
 972		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
 973		if (rc)
 974			return rc;
 975	}
 976
 977	return 0;
 978}
 979
 980int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 981		       struct qed_rdma_qp *qp,
 982		       enum qed_roce_qp_state prev_state,
 983		       struct qed_rdma_modify_qp_in_params *params)
 984{
 985	int rc = 0;
 986
 987	/* Perform additional operations according to the current state and the
 988	 * next state
 989	 */
 990	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
 991	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
 992	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
 993		/* Init->RTR or Reset->RTR */
 994		rc = qed_roce_sp_create_responder(p_hwfn, qp);
 995		return rc;
 996	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
 997		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
 998		/* RTR-> RTS */
 999		rc = qed_roce_sp_create_requester(p_hwfn, qp);
1000		if (rc)
1001			return rc;
1002
1003		/* Send modify responder ramrod */
1004		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1005						  params->modify_flags);
1006		return rc;
1007	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1008		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1009		/* RTS->RTS */
1010		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1011						  params->modify_flags);
1012		if (rc)
1013			return rc;
1014
1015		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1016						  params->modify_flags);
1017		return rc;
1018	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
1019		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1020		/* RTS->SQD */
1021		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
1022						  params->modify_flags);
1023		return rc;
1024	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1025		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
1026		/* SQD->SQD */
1027		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1028						  params->modify_flags);
1029		if (rc)
1030			return rc;
1031
1032		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1033						  params->modify_flags);
1034		return rc;
1035	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
1036		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
1037		/* SQD->RTS */
1038		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
1039						  params->modify_flags);
1040		if (rc)
1041			return rc;
1042
1043		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
1044						  params->modify_flags);
1045
1046		return rc;
1047	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
1048		/* ->ERR */
1049		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
1050						  params->modify_flags);
1051		if (rc)
1052			return rc;
1053
1054		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
1055						  params->modify_flags);
1056		return rc;
1057	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
1058		/* Any state -> RESET */
1059		u32 cq_prod;
1060
1061		/* Send destroy responder ramrod */
1062		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
1063						      qp,
1064						      &cq_prod);
1065
1066		if (rc)
1067			return rc;
1068
1069		qp->cq_prod = cq_prod;
1070
1071		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
1072	} else {
1073		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
1074	}
1075
1076	return rc;
1077}
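/*
 * Transition summary for the dispatcher above (prev state -> new state):
 *	INIT/RESET -> RTR	create responder
 *	RTR -> RTS		create requester, then modify responder
 *	RTS -> RTS, SQD -> SQD,
 *	SQD -> RTS		modify responder and requester
 *	RTS -> SQD		modify requester with move_to_sqd
 *	any -> ERR		modify responder and requester with move_to_err
 *	any -> RESET		destroy responder (saving cq_prod), then requester
 * Anything else falls through to the empty DP_VERBOSE() branch.
 */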
1078
1079static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
1080{
1081	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1082	u32 start_cid, cid, xcid;
1083
1084	/* An even icid belongs to a responder while an odd icid belongs to a
1085	 * requester. The 'cid' received as an input can be either. We calculate
1086	 * the "partner" icid and call it xcid. Only if both are free can the
1087	 * "cid" map be cleared.
1088	 */
1089	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
1090	cid = icid - start_cid;
1091	xcid = cid ^ 1;
1092
1093	spin_lock_bh(&p_rdma_info->lock);
1094
1095	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
1096	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
1097		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
1098		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
1099	}
1100
1101	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1102}
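/*
 * Pairing example for the xcid logic above: if the responder holds relative
 * cid 6, the requester holds cid 7; cid ^ 1 maps 6 <-> 7 in either direction,
 * and the cid_map pair is released only once real_cid_map shows that the
 * partner's asynchronous destroy has also completed.
 */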
1103
1104void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1105{
1106	u8 val;
1107
1108	/* If any QPs are already active, we want to disable DPM, since their
1109	 * context information predates the latest DCBx
1110	 * update. Otherwise enable it.
1111	 */
1112	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
1113	p_hwfn->dcbx_no_edpm = (u8)val;
1114
1115	qed_rdma_dpm_conf(p_hwfn, p_ptt);
1116}
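/*
 * dcbx_no_edpm is raised whenever qed_rdma_allocated_qps() reports live QPs,
 * since their contexts were built from pre-update DCBX data;
 * qed_rdma_dpm_conf() then applies the resulting EDPM enable/disable choice.
 */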
1117
1118int qed_roce_setup(struct qed_hwfn *p_hwfn)
1119{
1120	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
1121					 qed_roce_async_event);
1122}
1123
1124int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1125{
1126	u32 ll2_ethertype_en;
1127
1128	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
1129
1130	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
1131
1132	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
1133	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
1134	       (ll2_ethertype_en | 0x01));
1135
1136	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
1137		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
1138		return -EINVAL;
1139	}
1140
1141	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
1142	return 0;
1143}
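/*
 * HW init recap: zero PRS_REG_ROCE_DEST_QP_MAX_PF, record the RoCE parser
 * search register, OR bit 0 into PRS_REG_LIGHT_L2_ETHERTYPE_EN, and verify
 * that the first RoCE cid is even so responder/requester cid pairs stay
 * aligned.
 */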
v4.10.11
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2016  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#include <linux/types.h>
  33#include <asm/byteorder.h>
  34#include <linux/bitops.h>
  35#include <linux/delay.h>
  36#include <linux/dma-mapping.h>
  37#include <linux/errno.h>
  38#include <linux/etherdevice.h>
  39#include <linux/if_ether.h>
  40#include <linux/if_vlan.h>
  41#include <linux/io.h>
  42#include <linux/ip.h>
  43#include <linux/ipv6.h>
  44#include <linux/kernel.h>
  45#include <linux/list.h>
  46#include <linux/module.h>
  47#include <linux/mutex.h>
  48#include <linux/pci.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/string.h>
  52#include <linux/tcp.h>
  53#include <linux/bitops.h>
  54#include <linux/qed/qed_roce_if.h>
  55#include <linux/qed/qed_roce_if.h>
  56#include "qed.h"
  57#include "qed_cxt.h"
  58#include "qed_hsi.h"
  59#include "qed_hw.h"
  60#include "qed_init_ops.h"
  61#include "qed_int.h"
  62#include "qed_ll2.h"
  63#include "qed_mcp.h"
  64#include "qed_reg_addr.h"
  65#include "qed_sp.h"
  66#include "qed_roce.h"
  67#include "qed_ll2.h"
  68
  69void qed_async_roce_event(struct qed_hwfn *p_hwfn,
  70			  struct event_ring_entry *p_eqe)
  71{
  72	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
  73
  74	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
  75					     p_eqe->opcode, &p_eqe->data);
  76}
  77
  78static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
  79			       struct qed_bmap *bmap, u32 max_count)
  80{
  81	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
  82
  83	bmap->max_count = max_count;
  84
  85	bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
  86			       GFP_KERNEL);
  87	if (!bmap->bitmap) {
  88		DP_NOTICE(p_hwfn,
  89			  "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
  90		return -ENOMEM;
  91	}
  92
  93	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
  94		   bmap->bitmap);
  95	return 0;
  96}
  97
  98static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
  99				  struct qed_bmap *bmap, u32 *id_num)
 100{
 101	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
 102
 103	*id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
 104
 105	if (*id_num >= bmap->max_count) {
 106		DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
 107			  bmap->max_count);
 108		return -EINVAL;
 109	}
 110
 111	__set_bit(*id_num, bmap->bitmap);
 112
 113	return 0;
 114}
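/*
 * The bitmap allocator is a simple first-fit: find_first_zero_bit() picks the
 * lowest free id and the non-atomic __set_bit() claims it, which is safe
 * because callers take p_rdma_info->lock around the allocation.
 */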
 115
 116static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 117				struct qed_bmap *bmap, u32 id_num)
 118{
 119	bool b_acquired;
 120
 121	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
 122	if (id_num >= bmap->max_count)
 123		return;
 124
 125	b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
 126	if (!b_acquired) {
 127		DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
 128		return;
 129	}
 130}
 131
 132static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 133{
 134	/* The first sb id for RoCE comes after all the L2 sbs */
 135	return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 136}
 137
 138static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 139			  struct qed_ptt *p_ptt,
 140			  struct qed_rdma_start_in_params *params)
 141{
 142	struct qed_rdma_info *p_rdma_info;
 143	u32 num_cons, num_tasks;
 144	int rc = -ENOMEM;
 145
 146	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
 147
 148	/* Allocate a struct with current pf rdma info */
 149	p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
 150	if (!p_rdma_info) {
 151		DP_NOTICE(p_hwfn,
 152			  "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
 153			  rc);
 154		return rc;
 155	}
 156
 157	p_hwfn->p_rdma_info = p_rdma_info;
 158	p_rdma_info->proto = PROTOCOLID_ROCE;
 159
 160	num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
 161					       NULL);
 162
 163	p_rdma_info->num_qps = num_cons / 2;
 164
 165	num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
 166
 167	/* Each MR uses a single task */
 168	p_rdma_info->num_mrs = num_tasks;
 169
 170	/* Queue zone lines are shared between RoCE and L2 in such a way that
 171	 * they can be used by each without obstructing the other.
 172	 */
 173	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
 174
 175	/* Allocate a struct with device params and fill it */
 176	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
 177	if (!p_rdma_info->dev) {
 178		DP_NOTICE(p_hwfn,
 179			  "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
 180			  rc);
 181		goto free_rdma_info;
 182	}
 183
 184	/* Allocate a struct with port params and fill it */
 185	p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
 186	if (!p_rdma_info->port) {
 187		DP_NOTICE(p_hwfn,
 188			  "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
 189			  rc);
 190		goto free_rdma_dev;
 191	}
 192
 193	/* Allocate bit map for pd's */
 194	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
 195	if (rc) {
 196		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 197			   "Failed to allocate pd_map, rc = %d\n",
 198			   rc);
 199		goto free_rdma_port;
 200	}
 201
 202	/* Allocate DPI bitmap */
 203	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
 204				 p_hwfn->dpi_count);
 205	if (rc) {
 206		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 207			   "Failed to allocate DPI bitmap, rc = %d\n", rc);
 208		goto free_pd_map;
 209	}
 210
 211	/* Allocate bitmap for cq's. The maximum number of CQs is bounded to
 212	 * twice the number of QPs.
 213	 */
 214	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
 215				 p_rdma_info->num_qps * 2);
 216	if (rc) {
 217		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 218			   "Failed to allocate cq bitmap, rc = %d\n", rc);
 219		goto free_dpi_map;
 220	}
 221
 222	/* Allocate bitmap for toggle bit for cq icids
 223	 * We toggle the bit every time we create or resize cq for a given icid.
 224	 * The maximum number of CQs is bounded to  twice the number of QPs.
 225	 * The maximum number of CQs is bounded to twice the number of QPs.
 226	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
 227				 p_rdma_info->num_qps * 2);
 228	if (rc) {
 229		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 230			   "Failed to allocate toggle bits, rc = %d\n", rc);
 231		goto free_cq_map;
 232	}
 233
 234	/* Allocate bitmap for itids */
 235	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
 236				 p_rdma_info->num_mrs);
 237	if (rc) {
 238		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 239			   "Failed to allocate itids bitmaps, rc = %d\n", rc);
 240		goto free_toggle_map;
 241	}
 242
 243	/* Allocate bitmap for cids used for qps. */
 244	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
 245	if (rc) {
 246		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 247			   "Failed to allocate cid bitmap, rc = %d\n", rc);
 248		goto free_tid_map;
 249	}
 250
 251	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 252	return 0;
 253
 254free_tid_map:
 255	kfree(p_rdma_info->tid_map.bitmap);
 256free_toggle_map:
 257	kfree(p_rdma_info->toggle_bits.bitmap);
 258free_cq_map:
 259	kfree(p_rdma_info->cq_map.bitmap);
 260free_dpi_map:
 261	kfree(p_rdma_info->dpi_map.bitmap);
 262free_pd_map:
 263	kfree(p_rdma_info->pd_map.bitmap);
 264free_rdma_port:
 265	kfree(p_rdma_info->port);
 266free_rdma_dev:
 267	kfree(p_rdma_info->dev);
 268free_rdma_info:
 269	kfree(p_rdma_info);
 270
 271	return rc;
 272}
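/*
 * Sizing recap for the allocations above: num_qps is half the RoCE cid count
 * (two cids per QP), the CQ and toggle-bit maps hold 2 * num_qps entries,
 * there is one tid per MR, and each failure path unwinds the earlier bitmaps
 * in reverse order through the free_* labels.
 */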
 273
 274static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 275{
 276	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 277
 278	kfree(p_rdma_info->cid_map.bitmap);
 279	kfree(p_rdma_info->tid_map.bitmap);
 280	kfree(p_rdma_info->toggle_bits.bitmap);
 281	kfree(p_rdma_info->cq_map.bitmap);
 282	kfree(p_rdma_info->dpi_map.bitmap);
 283	kfree(p_rdma_info->pd_map.bitmap);
 284
 285	kfree(p_rdma_info->port);
 286	kfree(p_rdma_info->dev);
 287
 288	kfree(p_rdma_info);
 289}
 290
 291static void qed_rdma_free(struct qed_hwfn *p_hwfn)
 292{
 293	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
 294
 295	qed_rdma_resc_free(p_hwfn);
 296}
 297
 298static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
 299{
 300	guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
 301	guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
 302	guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
 303	guid[3] = 0xff;
 304	guid[4] = 0xfe;
 305	guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
 306	guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
 307	guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
 308}
 309
 310static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
 311				 struct qed_rdma_start_in_params *params)
 312{
 313	struct qed_rdma_events *events;
 314
 315	events = &p_hwfn->p_rdma_info->events;
 316
 317	events->unaffiliated_event = params->events->unaffiliated_event;
 318	events->affiliated_event = params->events->affiliated_event;
 319	events->context = params->events->context;
 320}
 321
 322static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
 323				  struct qed_rdma_start_in_params *params)
 324{
 325	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
 326	struct qed_dev *cdev = p_hwfn->cdev;
 327	u32 pci_status_control;
 328	u32 num_qps;
 329
 330	/* Vendor specific information */
 331	dev->vendor_id = cdev->vendor_id;
 332	dev->vendor_part_id = cdev->device_id;
 333	dev->hw_ver = 0;
 334	dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
 335		      (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
 336
 337	qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
 338	dev->node_guid = dev->sys_image_guid;
 339
 340	dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
 341			     RDMA_MAX_SGE_PER_RQ_WQE);
 342
 343	if (cdev->rdma_max_sge)
 344		dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
 345
 346	dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
 347
 348	dev->max_inline = (cdev->rdma_max_inline) ?
 349			  min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
 350			  dev->max_inline;
 351
 352	dev->max_wqe = QED_RDMA_MAX_WQE;
 353	dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
 354
 355	/* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
 356	 * it is up-aligned to 16 and then to ILT page size within qed cxt.
 357	 * This is OK in terms of ILT but we don't want to configure the FW
 358	 * above its abilities
 359	 */
 360	num_qps = ROCE_MAX_QPS;
 361	num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
 362	dev->max_qp = num_qps;
 363
 364	/* CQs uses the same icids that QPs use hence they are limited by the
 365	 * number of icids. There are two icids per QP.
 366	 */
 367	dev->max_cq = num_qps * 2;
 368
 369	/* The number of mrs is smaller by 1 since the first is reserved */
 370	dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
 371	dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
 372
 373	/* The maximum CQE capacity per CQ supported.
 374	 * max number of cqes will be in two layer pbl,
 375	 * 8 is the pointer size in bytes
 376	 * 32 is the size of cq element in bytes
 377	 */
 378	if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
 379		dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
 380	else
 381		dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
 382
 383	dev->max_mw = 0;
 384	dev->max_fmr = QED_RDMA_MAX_FMR;
 385	dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
 386	dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
 387	dev->max_pkey = QED_RDMA_MAX_P_KEY;
 388
 389	dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
 390					  (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
 391	dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
 392					 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
 393	dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
 394					   p_hwfn->p_rdma_info->num_qps;
 395	dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
 396	dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
 397	dev->max_pd = RDMA_MAX_PDS;
 398	dev->max_ah = p_hwfn->p_rdma_info->num_qps;
 399	dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
 400
 401	/* Set capabilities */
 402	dev->dev_caps = 0;
 403	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
 404	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
 405	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
 406	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
 407	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
 408	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
 409	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
 410	SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
 411
 412	/* Check atomic operations support in PCI configuration space. */
 413	pci_read_config_dword(cdev->pdev,
 414			      cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
 415			      &pci_status_control);
 416
 417	if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
 418		SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
 419}
 420
 421static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
 422{
 423	struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
 424	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
 425
 426	port->port_state = p_hwfn->mcp_info->link_output.link_up ?
 427			   QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
 428
 429	port->max_msg_size = min_t(u64,
 430				   (dev->max_mr_mw_fmr_size *
 431				    p_hwfn->cdev->rdma_max_sge),
 432				   BIT(31));
 433
 434	port->pkey_bad_counter = 0;
 435}
 436
 437static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 438{
 439	u32 ll2_ethertype_en;
 440
 441	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
 442	p_hwfn->b_rdma_enabled_in_prs = false;
 443
 444	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 445
 446	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
 447
 448	/* Writing to the PRS search register is delayed until the first cid
 449	 * is allocated; see qed_cxt_dynamic_ilt_alloc() for more details.
 450	 */
 451	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
 452	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
 453	       (ll2_ethertype_en | 0x01));
 454
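	/* Each QP consumes a pair of adjacent icids - responder first,
	 * requester right after it (see qed_roce_alloc_cid()) - so the
	 * protocol's cid range must start on an even boundary.
	 */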
 455	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
 456		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
 457		return -EINVAL;
 458	}
 459
 460	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
 461	return 0;
 462}
 463
 464static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
 465			     struct qed_rdma_start_in_params *params,
 466			     struct qed_ptt *p_ptt)
 467{
 468	struct rdma_init_func_ramrod_data *p_ramrod;
 469	struct qed_rdma_cnq_params *p_cnq_pbl_list;
 470	struct rdma_init_func_hdr *p_params_header;
 471	struct rdma_cnq_params *p_cnq_params;
 472	struct qed_sp_init_data init_data;
 473	struct qed_spq_entry *p_ent;
 474	u32 cnq_id, sb_id;
 475	int rc;
 476
 477	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
 478
 479	/* Save the number of cnqs for the function close ramrod */
 480	p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
 481
 482	/* Get SPQ entry */
 483	memset(&init_data, 0, sizeof(init_data));
 484	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 485	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 486
 487	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
 488				 p_hwfn->p_rdma_info->proto, &init_data);
 489	if (rc)
 490		return rc;
 491
 492	p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
 493
 494	p_params_header = &p_ramrod->params_header;
 495	p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
 496							   QED_RDMA_CNQ_RAM);
 497	p_params_header->num_cnqs = params->desired_cnq;
 498
 499	if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
 500		p_params_header->cq_ring_mode = 1;
 501	else
 502		p_params_header->cq_ring_mode = 0;
 503
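	/* Describe each CNQ to the FW: its status block, protocol index,
	 * PBL and the queue zone used for producer updates.
	 */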
 504	for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
 505		sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
 506		p_cnq_params = &p_ramrod->cnq_params[cnq_id];
 507		p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
 508		p_cnq_params->sb_num =
 509			cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
 510
 511		p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
 512		p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
 513
 514		DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
 515			       p_cnq_pbl_list->pbl_ptr);
 516
 517		/* we assume here that cnq_id and qz_offset are the same */
 518		p_cnq_params->queue_zone_num =
 519			cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
 520				    cnq_id);
 521	}
 522
 523	return qed_spq_post(p_hwfn, p_ent, NULL);
 524}
 525
 526static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
 527{
 528	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 529	int rc;
 530
 531	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
 532
 533	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 534	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 535				    &p_hwfn->p_rdma_info->tid_map, itid);
 536	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 537	if (rc)
 538		goto out;
 539
 540	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
 541out:
 542	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
 543	return rc;
 544}
 545
 546static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
 547{
 548	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
 549
 550	/* The first DPI is reserved for the Kernel */
 551	__set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
 552
 553	/* Tid 0 will be used as the key for the "reserved MR".
 554	 * The driver should allocate memory for it so it can be loaded, but no
 555	 * ramrod should be posted for it.
 556	 */
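	/* The first id allocated from the empty tid bitmap is expected to
	 * equal RDMA_RESERVED_LKEY (assumed to be 0); the check below
	 * enforces this assumption.
	 */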
 557	qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
 558	if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
 559		DP_NOTICE(p_hwfn,
 560			  "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
 561		return -EINVAL;
 562	}
 563
 564	return 0;
 565}
 566
 567static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 568			  struct qed_ptt *p_ptt,
 569			  struct qed_rdma_start_in_params *params)
 570{
 571	int rc;
 572
 573	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
 574
 575	spin_lock_init(&p_hwfn->p_rdma_info->lock);
 576
 577	qed_rdma_init_devinfo(p_hwfn, params);
 578	qed_rdma_init_port(p_hwfn);
 579	qed_rdma_init_events(p_hwfn, params);
 580
 581	rc = qed_rdma_reserve_lkey(p_hwfn);
 582	if (rc)
 583		return rc;
 584
 585	rc = qed_rdma_init_hw(p_hwfn, p_ptt);
 586	if (rc)
 587		return rc;
 588
 589	return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 590}
 591
 592static int qed_rdma_stop(void *rdma_cxt)
 593{
 594	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 595	struct rdma_close_func_ramrod_data *p_ramrod;
 596	struct qed_sp_init_data init_data;
 597	struct qed_spq_entry *p_ent;
 598	struct qed_ptt *p_ptt;
 599	u32 ll2_ethertype_en;
 600	int rc = -EBUSY;
 601
 602	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
 603
 604	p_ptt = qed_ptt_acquire(p_hwfn);
 605	if (!p_ptt) {
 606		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
 607		return rc;
 608	}
 609
 610	/* Disable RoCE search */
 611	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
 612	p_hwfn->b_rdma_enabled_in_prs = false;
 613
 614	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 615
 616	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
 617
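	/* Clear bit 0, undoing the light-L2 RoCE ethertype enable done in
	 * qed_rdma_init_hw().
	 */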
 618	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
 619	       (ll2_ethertype_en & 0xFFFE));
 620
 621	qed_ptt_release(p_hwfn, p_ptt);
 622
 623	/* Get SPQ entry */
 624	memset(&init_data, 0, sizeof(init_data));
 625	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 626	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 627
 628	/* Stop RoCE */
 629	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
 630				 p_hwfn->p_rdma_info->proto, &init_data);
 631	if (rc)
 632		goto out;
 633
 634	p_ramrod = &p_ent->ramrod.rdma_close_func;
 635
 636	p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
 637	p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
 638
 639	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 640
 641out:
 642	qed_rdma_free(p_hwfn);
 643
 644	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
 645	return rc;
 646}
 647
 648static int qed_rdma_add_user(void *rdma_cxt,
 649			     struct qed_rdma_add_user_out_params *out_params)
 650{
 651	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 652	u32 dpi_start_offset;
 653	u32 returned_id = 0;
 654	int rc;
 655
 656	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
 657
 658	/* Allocate DPI */
 659	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 660	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
 661				    &returned_id);
 662	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 663
 664	out_params->dpi = (u16)returned_id;
 665
 666	/* Calculate the corresponding DPI address */
 667	dpi_start_offset = p_hwfn->dpi_start_offset;
 668
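	/* dpi_addr is the CPU-mapped doorbell address for this DPI, while
	 * dpi_phys_addr is the matching bus address (e.g. for mapping the
	 * doorbell range to user space); both use the same offset into the
	 * doorbell BAR.
	 */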
 669	out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
 670				     dpi_start_offset +
 671				     ((out_params->dpi) * p_hwfn->dpi_size));
 672
 673	out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
 674				    dpi_start_offset +
 675				    ((out_params->dpi) * p_hwfn->dpi_size);
 676
 677	out_params->dpi_size = p_hwfn->dpi_size;
 678
 679	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
 680	return rc;
 681}
 682
 683static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
 684{
 685	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 686	struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
 687
 688	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
 689
 690	/* Link may have changed */
 691	p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
 692			     QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
 693
 694	p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
 695
 696	return p_port;
 697}
 698
 699static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
 700{
 701	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 702
 703	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
 704
 705	/* Return struct with device parameters */
 706	return p_hwfn->p_rdma_info->dev;
 707}
 708
 709static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
 710{
 711	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 712
 713	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
 714
 715	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 716	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
 717	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 718}
 719
 720static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 721{
 722	struct qed_hwfn *p_hwfn;
 723	u16 qz_num;
 724	u32 addr;
 725
 726	p_hwfn = (struct qed_hwfn *)rdma_cxt;
 727	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
 728	addr = GTT_BAR0_MAP_REG_USDM_RAM +
 729	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
 730
 731	REG_WR16(p_hwfn, addr, prod);
 732
 733	/* keep prod updates ordered */
 734	wmb();
 735}
 736
 737static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
 738				  struct qed_dev_rdma_info *info)
 739{
 740	memset(info, 0, sizeof(*info));
 741
 742	info->rdma_type = QED_RDMA_TYPE_ROCE;
 743
 744	qed_fill_dev_info(cdev, &info->common);
 745
 746	return 0;
 747}
 748
 749static int qed_rdma_get_sb_start(struct qed_dev *cdev)
 750{
 751	int feat_num;
 752
 753	if (cdev->num_hwfns > 1)
 754		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
 755	else
 756		feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
 757			   cdev->num_hwfns;
 758
 759	return feat_num;
 760}
 761
 762static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
 763{
 764	int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
 765	int n_msix = cdev->int_params.rdma_msix_cnt;
 766
 767	return min_t(int, n_cnq, n_msix);
 768}
 769
 770static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
 771{
 772	int limit = 0;
 773
 774	/* Mark the fastpath as free/used */
 775	cdev->int_params.fp_initialized = cnt ? true : false;
 776
 777	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
 778		DP_ERR(cdev,
 779		       "qed roce supports only MSI-X interrupts (detected %d).\n",
 780		       cdev->int_params.out.int_mode);
 781		return -EINVAL;
 782	} else if (cdev->int_params.fp_msix_cnt) {
 783		limit = cdev->int_params.rdma_msix_cnt;
 784	}
 785
 786	if (!limit)
 787		return -ENOMEM;
 788
 789	return min_t(int, cnt, limit);
 790}
 791
 792static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
 793{
 794	memset(info, 0, sizeof(*info));
 795
 796	if (!cdev->int_params.fp_initialized) {
 797		DP_INFO(cdev,
 798			"Protocol driver requested interrupt information, but its support is not yet configured\n");
 799		return -EINVAL;
 800	}
 801
 802	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
 803		int msix_base = cdev->int_params.rdma_msix_base;
 804
 805		info->msix_cnt = cdev->int_params.rdma_msix_cnt;
 806		info->msix = &cdev->int_params.msix_table[msix_base];
 807
 808		DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
 809			   info->msix_cnt, msix_base);
 810	}
 811
 812	return 0;
 813}
 814
 815static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
 816{
 817	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 818	u32 returned_id;
 819	int rc;
 820
 821	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
 822
 823	/* Allocates an unused protection domain */
 824	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 825	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 826				    &p_hwfn->p_rdma_info->pd_map, &returned_id);
 827	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 828
 829	*pd = (u16)returned_id;
 830
 831	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
 832	return rc;
 833}
 834
 835static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
 836{
 837	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 838
 839	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
 840
 841	/* Returns a previously allocated protection domain for reuse */
 842	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 843	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
 844	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 845}
 846
 847static enum qed_rdma_toggle_bit
 848qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
 849{
 850	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
 851	enum qed_rdma_toggle_bit toggle_bit;
 852	u32 bmap_id;
 853
 854	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
 855
 856	/* The function toggles the bit that is related to the given icid
 857	 * and returns the new value of the toggle bit.
 858	 */
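	/* The toggle bit is passed to the FW in the create/resize CQ ramrod;
	 * it is assumed to let the FW tell the new CQ instance apart from a
	 * previous user of the same icid.
	 */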
 859	bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
 860
 861	spin_lock_bh(&p_info->lock);
 862	toggle_bit = !test_and_change_bit(bmap_id,
 863					  p_info->toggle_bits.bitmap);
 864	spin_unlock_bh(&p_info->lock);
 865
 866	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT = %d\n",
 867		   toggle_bit);
 868
 869	return toggle_bit;
 870}
 871
 872static int qed_rdma_create_cq(void *rdma_cxt,
 873			      struct qed_rdma_create_cq_in_params *params,
 874			      u16 *icid)
 875{
 876	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 877	struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
 878	struct rdma_create_cq_ramrod_data *p_ramrod;
 879	enum qed_rdma_toggle_bit toggle_bit;
 880	struct qed_sp_init_data init_data;
 881	struct qed_spq_entry *p_ent;
 882	u32 returned_id, start_cid;
 883	int rc;
 884
 885	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
 886		   params->cq_handle_hi, params->cq_handle_lo);
 887
 888	/* Allocate icid */
 889	spin_lock_bh(&p_info->lock);
 890	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 891				    &p_info->cq_map, &returned_id);
 892	spin_unlock_bh(&p_info->lock);
 893
 894	if (rc) {
 895		DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
 896		return rc;
 897	}
 898
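	/* CQs share the protocol's icid space with QPs: the bitmap id is
	 * relative, so the protocol's first cid is added here (and
	 * subtracted again when the CQ is destroyed).
	 */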
 899	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
 900						p_info->proto);
 901	*icid = returned_id + start_cid;
 902
 903	/* Check if icid requires a page allocation */
 904	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
 905	if (rc)
 906		goto err;
 907
 908	/* Get SPQ entry */
 909	memset(&init_data, 0, sizeof(init_data));
 910	init_data.cid = *icid;
 911	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 912	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 913
 914	/* Send create CQ ramrod */
 915	rc = qed_sp_init_request(p_hwfn, &p_ent,
 916				 RDMA_RAMROD_CREATE_CQ,
 917				 p_info->proto, &init_data);
 918	if (rc)
 919		goto err;
 920
 921	p_ramrod = &p_ent->ramrod.rdma_create_cq;
 922
 923	p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
 924	p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
 925	p_ramrod->dpi = cpu_to_le16(params->dpi);
 926	p_ramrod->is_two_level_pbl = params->pbl_two_level;
 927	p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
 928	DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
 929	p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
 930	p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
 931			   params->cnq_id;
 932	p_ramrod->int_timeout = params->int_timeout;
 933
 934	/* toggle the bit for every resize or create cq for a given icid */
 935	toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
 936
 937	p_ramrod->toggle_bit = toggle_bit;
 938
 939	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 940	if (rc) {
 941		/* restore toggle bit */
 942		qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
 943		goto err;
 944	}
 945
 946	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
 947	return rc;
 948
 949err:
 950	/* release allocated icid */
 951	qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
 952	DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
 953
 954	return rc;
 955}
 956
 957static int
 958qed_rdma_destroy_cq(void *rdma_cxt,
 959		    struct qed_rdma_destroy_cq_in_params *in_params,
 960		    struct qed_rdma_destroy_cq_out_params *out_params)
 961{
 962	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
 963	struct rdma_destroy_cq_output_params *p_ramrod_res;
 964	struct rdma_destroy_cq_ramrod_data *p_ramrod;
 965	struct qed_sp_init_data init_data;
 966	struct qed_spq_entry *p_ent;
 967	dma_addr_t ramrod_res_phys;
 968	int rc = -ENOMEM;
 969
 970	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
 971
 972	p_ramrod_res =
 973	    (struct rdma_destroy_cq_output_params *)
 974	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 975			       sizeof(struct rdma_destroy_cq_output_params),
 976			       &ramrod_res_phys, GFP_KERNEL);
 977	if (!p_ramrod_res) {
 978		DP_NOTICE(p_hwfn,
 979			  "qed destroy cq failed: cannot allocate memory (ramrod)\n");
 980		return rc;
 981	}
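	/* The FW writes the destroy-CQ results (e.g. the number of pending
	 * CQ notifications) into this DMA buffer; it is read back once the
	 * ramrod completes.
	 */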
 982
 983	/* Get SPQ entry */
 984	memset(&init_data, 0, sizeof(init_data));
 985	init_data.cid = in_params->icid;
 986	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 987	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 988
 989	/* Send destroy CQ ramrod */
 990	rc = qed_sp_init_request(p_hwfn, &p_ent,
 991				 RDMA_RAMROD_DESTROY_CQ,
 992				 p_hwfn->p_rdma_info->proto, &init_data);
 993	if (rc)
 994		goto err;
 995
 996	p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
 997	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
 998
 999	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1000	if (rc)
1001		goto err;
1002
1003	out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1004
1005	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1006			  sizeof(struct rdma_destroy_cq_output_params),
1007			  p_ramrod_res, ramrod_res_phys);
1008
1009	/* Free icid */
1010	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1011
1012	qed_bmap_release_id(p_hwfn,
1013			    &p_hwfn->p_rdma_info->cq_map,
1014			    (in_params->icid -
1015			     qed_cxt_get_proto_cid_start(p_hwfn,
1016							 p_hwfn->p_rdma_info->proto)));
1018
1019	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1020
1021	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1022	return rc;
1023
1024err:	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1025			  sizeof(struct rdma_destroy_cq_output_params),
1026			  p_ramrod_res, ramrod_res_phys);
1027
1028	return rc;
1029}
1030
1031	static void qed_rdma_set_fw_mac(__le16 *p_fw_mac, u8 *p_qed_mac)
1032{
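	/* Pack the 6-byte MAC into three 16-bit words, two bytes per word
	 * with the earlier byte in the high bits, stored little-endian as
	 * the FW expects.
	 */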
1033	p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1034	p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1035	p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1036}
1037
1038static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1039			       __le32 *dst_gid)
1040{
1041	u32 i;
1042
1043	if (qp->roce_mode == ROCE_V2_IPV4) {
1044		/* The IPv4 addresses shall be aligned to the highest word.
1045		 * The lower words must be zero.
1046		 */
1047		memset(src_gid, 0, sizeof(union qed_gid));
1048		memset(dst_gid, 0, sizeof(union qed_gid));
1049		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1050		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1051	} else {
1052		/* GIDs and IPv6 addresses coincide in location and size */
1053		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1054			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1055			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1056		}
1057	}
1058}
1059
1060static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1061{
1062	enum roce_flavor flavor;
1063
1064	switch (roce_mode) {
1065	case ROCE_V1:
1066		flavor = PLAIN_ROCE;
1067		break;
1068	case ROCE_V2_IPV4:
1069		flavor = RROCE_IPV4;
1070		break;
1071	case ROCE_V2_IPV6:
1072		flavor = RROCE_IPV6;
1073		break;
1074	default:
1075		flavor = MAX_ROCE_FLAVOR;
1076		break;
1077	}
1078	return flavor;
1079}
1080
1081static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1082{
1083	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1084	u32 responder_icid;
1085	u32 requester_icid;
1086	int rc;
1087
1088	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1089	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1090				    &responder_icid);
1091	if (rc) {
1092		spin_unlock_bh(&p_rdma_info->lock);
1093		return rc;
1094	}
1095
1096	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1097				    &requester_icid);
1098
1099	spin_unlock_bh(&p_rdma_info->lock);
1100	if (rc)
1101		goto err;
1102
1103	/* The two icids should be adjacent */
1104	if ((requester_icid - responder_icid) != 1) {
1105		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
1106		rc = -EINVAL;
1107		goto err;
1108	}
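	/* Convention: the lower icid is used for the responder and the
	 * following one for the requester; callers derive the requester cid
	 * as *cid + 1.
	 */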
1109
1110	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1111						      p_rdma_info->proto);
1112	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1113						      p_rdma_info->proto);
1114
1115	/* If these icids require a new ILT line, allocate a DMA-able context
1116	 * for an ILT page.
1117	 */
1118	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1119	if (rc)
1120		goto err;
1121
1122	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1123	if (rc)
1124		goto err;
1125
1126	*cid = (u16)responder_icid;
1127	return rc;
1128
1129err:
1130	spin_lock_bh(&p_rdma_info->lock);
1131	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1132	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1133
1134	spin_unlock_bh(&p_rdma_info->lock);
1135	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1136		   "Allocate CID - failed, rc = %d\n", rc);
1137	return rc;
1138}
1139
1140static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1141					struct qed_rdma_qp *qp)
1142{
1143	struct roce_create_qp_resp_ramrod_data *p_ramrod;
1144	struct qed_sp_init_data init_data;
1145	union qed_qm_pq_params qm_params;
1146	enum roce_flavor roce_flavor;
1147	struct qed_spq_entry *p_ent;
1148	u16 physical_queue0 = 0;
1149	int rc;
1150
1151	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1152
1153	/* Allocate DMA-able memory for IRQ */
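	/* Note: the "IRQ" here is the responder's incoming RDMA read/atomic
	 * request queue (related to max_ird), not an interrupt resource.
	 */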
1154	qp->irq_num_pages = 1;
1155	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1156				     RDMA_RING_PAGE_SIZE,
1157				     &qp->irq_phys_addr, GFP_KERNEL);
1158	if (!qp->irq) {
1159		rc = -ENOMEM;
1160		DP_NOTICE(p_hwfn,
1161			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
1162			  rc);
1163		return rc;
1164	}
1165
1166	/* Get SPQ entry */
1167	memset(&init_data, 0, sizeof(init_data));
1168	init_data.cid = qp->icid;
1169	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1170	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1171
1172	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
1173				 PROTOCOLID_ROCE, &init_data);
1174	if (rc)
1175		goto err;
1176
1177	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
1178
1179	p_ramrod->flags = 0;
1180
1181	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1182	SET_FIELD(p_ramrod->flags,
1183		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1184
1185	SET_FIELD(p_ramrod->flags,
1186		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1187		  qp->incoming_rdma_read_en);
1188
1189	SET_FIELD(p_ramrod->flags,
1190		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1191		  qp->incoming_rdma_write_en);
1192
1193	SET_FIELD(p_ramrod->flags,
1194		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1195		  qp->incoming_atomic_en);
1196
1197	SET_FIELD(p_ramrod->flags,
1198		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1199		  qp->e2e_flow_control_en);
1200
1201	SET_FIELD(p_ramrod->flags,
1202		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
1203
1204	SET_FIELD(p_ramrod->flags,
1205		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
1206		  qp->fmr_and_reserved_lkey);
1207
1208	SET_FIELD(p_ramrod->flags,
1209		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1210		  qp->min_rnr_nak_timer);
1211
1212	p_ramrod->max_ird = qp->max_rd_atomic_resp;
1213	p_ramrod->traffic_class = qp->traffic_class_tos;
1214	p_ramrod->hop_limit = qp->hop_limit_ttl;
1215	p_ramrod->irq_num_pages = qp->irq_num_pages;
1216	p_ramrod->p_key = cpu_to_le16(qp->pkey);
1217	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1218	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1219	p_ramrod->mtu = cpu_to_le16(qp->mtu);
1220	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
1221	p_ramrod->pd = cpu_to_le16(qp->pd);
1222	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
1223	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
1224	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
1225	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1226	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1227	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1228	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1229	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1230	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1231	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1232				       qp->rq_cq_id);
1233
1234	memset(&qm_params, 0, sizeof(qm_params));
1235	qm_params.roce.qpid = qp->icid >> 1;
1236	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1237
1238	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1239	p_ramrod->dpi = cpu_to_le16(qp->dpi);
1240
1241	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1242	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1243
1244	p_ramrod->udp_src_port = qp->udp_src_port;
1245	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1246	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
1247	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
1248
1249	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1250				     qp->stats_queue;
1251
1252	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1253
1254	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
1255		   rc, physical_queue0);
1256
1257	if (rc)
1258		goto err;
1259
1260	qp->resp_offloaded = true;
1261
1262	return rc;
1263
1264err:
1265	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
1266	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1267			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1268			  qp->irq, qp->irq_phys_addr);
1269
1270	return rc;
1271}
1272
1273static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1274					struct qed_rdma_qp *qp)
1275{
1276	struct roce_create_qp_req_ramrod_data *p_ramrod;
1277	struct qed_sp_init_data init_data;
1278	union qed_qm_pq_params qm_params;
1279	enum roce_flavor roce_flavor;
1280	struct qed_spq_entry *p_ent;
1281	u16 physical_queue0 = 0;
1282	int rc;
1283
1284	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1285
1286	/* Allocate DMA-able memory for ORQ */
1287	qp->orq_num_pages = 1;
1288	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1289				     RDMA_RING_PAGE_SIZE,
1290				     &qp->orq_phys_addr, GFP_KERNEL);
1291	if (!qp->orq) {
1292		rc = -ENOMEM;
1293		DP_NOTICE(p_hwfn,
1294			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
1295			  rc);
1296		return rc;
1297	}
1298
1299	/* Get SPQ entry */
1300	memset(&init_data, 0, sizeof(init_data));
1301	init_data.cid = qp->icid + 1;
1302	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1303	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1304
1305	rc = qed_sp_init_request(p_hwfn, &p_ent,
1306				 ROCE_RAMROD_CREATE_QP,
1307				 PROTOCOLID_ROCE, &init_data);
1308	if (rc)
1309		goto err;
1310
1311	p_ramrod = &p_ent->ramrod.roce_create_qp_req;
1312
1313	p_ramrod->flags = 0;
1314
1315	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1316	SET_FIELD(p_ramrod->flags,
1317		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1318
1319	SET_FIELD(p_ramrod->flags,
1320		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
1321		  qp->fmr_and_reserved_lkey);
1322
1323	SET_FIELD(p_ramrod->flags,
1324		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
1325
1326	SET_FIELD(p_ramrod->flags,
1327		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1328
1329	SET_FIELD(p_ramrod->flags,
1330		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1331		  qp->rnr_retry_cnt);
1332
1333	p_ramrod->max_ord = qp->max_rd_atomic_req;
1334	p_ramrod->traffic_class = qp->traffic_class_tos;
1335	p_ramrod->hop_limit = qp->hop_limit_ttl;
1336	p_ramrod->orq_num_pages = qp->orq_num_pages;
1337	p_ramrod->p_key = cpu_to_le16(qp->pkey);
1338	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1339	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1340	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1341	p_ramrod->mtu = cpu_to_le16(qp->mtu);
1342	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
1343	p_ramrod->pd = cpu_to_le16(qp->pd);
1344	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
1345	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
1346	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
1347	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1348	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1349	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1350	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1351	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1352	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1353	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1354				       qp->sq_cq_id);
1355
1356	memset(&qm_params, 0, sizeof(qm_params));
1357	qm_params.roce.qpid = qp->icid >> 1;
1358	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1359
1360	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1361	p_ramrod->dpi = cpu_to_le16(qp->dpi);
1362
1363	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1364	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1365
1366	p_ramrod->udp_src_port = qp->udp_src_port;
1367	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1368	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1369				     qp->stats_queue;
1370
1371	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1372
1373	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1374
1375	if (rc)
1376		goto err;
1377
1378	qp->req_offloaded = true;
1379
1380	return rc;
1381
1382err:
1383	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
1384	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1385			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1386			  qp->orq, qp->orq_phys_addr);
1387	return rc;
1388}
1389
1390static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
1391					struct qed_rdma_qp *qp,
1392					bool move_to_err, u32 modify_flags)
1393{
1394	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
1395	struct qed_sp_init_data init_data;
1396	struct qed_spq_entry *p_ent;
1397	int rc;
1398
1399	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1400
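	/* Moving a QP to ERR is a no-op for a responder that was never
	 * offloaded to the FW.
	 */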
1401	if (move_to_err && !qp->resp_offloaded)
1402		return 0;
1403
1404	/* Get SPQ entry */
1405	memset(&init_data, 0, sizeof(init_data));
1406	init_data.cid = qp->icid;
1407	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1408	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1409
1410	rc = qed_sp_init_request(p_hwfn, &p_ent,
1411				 ROCE_EVENT_MODIFY_QP,
1412				 PROTOCOLID_ROCE, &init_data);
1413	if (rc) {
1414		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1415		return rc;
1416	}
1417
1418	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
1419
1420	p_ramrod->flags = 0;
1421
1422	SET_FIELD(p_ramrod->flags,
1423		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1424
1425	SET_FIELD(p_ramrod->flags,
1426		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1427		  qp->incoming_rdma_read_en);
1428
1429	SET_FIELD(p_ramrod->flags,
1430		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1431		  qp->incoming_rdma_write_en);
1432
1433	SET_FIELD(p_ramrod->flags,
1434		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1435		  qp->incoming_atomic_en);
1436
1437	SET_FIELD(p_ramrod->flags,
1438		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1439		  qp->e2e_flow_control_en);
1440
1441	SET_FIELD(p_ramrod->flags,
1442		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
1443		  GET_FIELD(modify_flags,
1444			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
1445
1446	SET_FIELD(p_ramrod->flags,
1447		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
1448		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1449
1450	SET_FIELD(p_ramrod->flags,
1451		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1452		  GET_FIELD(modify_flags,
1453			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1454
1455	SET_FIELD(p_ramrod->flags,
1456		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
1457		  GET_FIELD(modify_flags,
1458			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
1459
1460	SET_FIELD(p_ramrod->flags,
1461		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
1462		  GET_FIELD(modify_flags,
1463			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
1464
1465	p_ramrod->fields = 0;
1466	SET_FIELD(p_ramrod->fields,
1467		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1468		  qp->min_rnr_nak_timer);
1469
1470	p_ramrod->max_ird = qp->max_rd_atomic_resp;
1471	p_ramrod->traffic_class = qp->traffic_class_tos;
1472	p_ramrod->hop_limit = qp->hop_limit_ttl;
1473	p_ramrod->p_key = cpu_to_le16(qp->pkey);
1474	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1475	p_ramrod->mtu = cpu_to_le16(qp->mtu);
1476	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1477	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1478
1479	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
1480	return rc;
1481}
1482
1483static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1484					struct qed_rdma_qp *qp,
1485					bool move_to_sqd,
1486					bool move_to_err, u32 modify_flags)
1487{
1488	struct roce_modify_qp_req_ramrod_data *p_ramrod;
1489	struct qed_sp_init_data init_data;
1490	struct qed_spq_entry *p_ent;
1491	int rc;
1492
1493	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1494
1495	if (move_to_err && !(qp->req_offloaded))
1496		return 0;
1497
1498	/* Get SPQ entry */
1499	memset(&init_data, 0, sizeof(init_data));
1500	init_data.cid = qp->icid + 1;
1501	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1502	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1503
1504	rc = qed_sp_init_request(p_hwfn, &p_ent,
1505				 ROCE_EVENT_MODIFY_QP,
1506				 PROTOCOLID_ROCE, &init_data);
1507	if (rc) {
1508		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1509		return rc;
1510	}
1511
1512	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
1513
1514	p_ramrod->flags = 0;
1515
1516	SET_FIELD(p_ramrod->flags,
1517		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1518
1519	SET_FIELD(p_ramrod->flags,
1520		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
1521
1522	SET_FIELD(p_ramrod->flags,
1523		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
1524		  qp->sqd_async);
1525
1526	SET_FIELD(p_ramrod->flags,
1527		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
1528		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1529
1530	SET_FIELD(p_ramrod->flags,
1531		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1532		  GET_FIELD(modify_flags,
1533			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1534
1535	SET_FIELD(p_ramrod->flags,
1536		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
1537		  GET_FIELD(modify_flags,
1538			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
1539
1540	SET_FIELD(p_ramrod->flags,
1541		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
1542		  GET_FIELD(modify_flags,
1543			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
1544
1545	SET_FIELD(p_ramrod->flags,
1546		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
1547		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
1548
1549	SET_FIELD(p_ramrod->flags,
1550		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
1551		  GET_FIELD(modify_flags,
1552			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
1553
1554	p_ramrod->fields = 0;
1555	SET_FIELD(p_ramrod->fields,
1556		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1557
1558	SET_FIELD(p_ramrod->fields,
1559		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1560		  qp->rnr_retry_cnt);
1561
1562	p_ramrod->max_ord = qp->max_rd_atomic_req;
1563	p_ramrod->traffic_class = qp->traffic_class_tos;
1564	p_ramrod->hop_limit = qp->hop_limit_ttl;
1565	p_ramrod->p_key = cpu_to_le16(qp->pkey);
1566	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1567	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1568	p_ramrod->mtu = cpu_to_le16(qp->mtu);
1569	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1570	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1571
1572	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
1573	return rc;
1574}
1575
1576static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1577					    struct qed_rdma_qp *qp,
1578					    u32 *num_invalidated_mw)
1579{
1580	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1581	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1582	struct qed_sp_init_data init_data;
1583	struct qed_spq_entry *p_ent;
1584	dma_addr_t ramrod_res_phys;
1585	int rc;
1586
1587	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1588
1589	if (!qp->resp_offloaded)
1590		return 0;
1591
1592	/* Get SPQ entry */
1593	memset(&init_data, 0, sizeof(init_data));
1594	init_data.cid = qp->icid;
1595	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1596	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1597
1598	rc = qed_sp_init_request(p_hwfn, &p_ent,
1599				 ROCE_RAMROD_DESTROY_QP,
1600				 PROTOCOLID_ROCE, &init_data);
1601	if (rc)
1602		return rc;
1603
1604	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1605
1606	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1607	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1608			       &ramrod_res_phys, GFP_KERNEL);
1609
1610	if (!p_ramrod_res) {
1611		rc = -ENOMEM;
1612		DP_NOTICE(p_hwfn,
1613			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1614			  rc);
1615		return rc;
1616	}
1617
1618	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1619
1620	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1621	if (rc)
1622		goto err;
1623
1624	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1625
1626	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
1627	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1628			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1629			  qp->irq, qp->irq_phys_addr);
1630
1631	qp->resp_offloaded = false;
1632
1633	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1634
1635err:
1636	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1637			  sizeof(struct roce_destroy_qp_resp_output_params),
1638			  p_ramrod_res, ramrod_res_phys);
1639
1640	return rc;
1641}
1642
1643static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
1644					    struct qed_rdma_qp *qp,
1645					    u32 *num_bound_mw)
1646{
1647	struct roce_destroy_qp_req_output_params *p_ramrod_res;
1648	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
1649	struct qed_sp_init_data init_data;
1650	struct qed_spq_entry *p_ent;
1651	dma_addr_t ramrod_res_phys;
1652	int rc = -ENOMEM;
1653
1654	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1655
1656	if (!qp->req_offloaded)
1657		return 0;
1658
1659	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
1660		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1661					  sizeof(*p_ramrod_res),
1662					  &ramrod_res_phys, GFP_KERNEL);
1663	if (!p_ramrod_res) {
1664		DP_NOTICE(p_hwfn,
1665			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
1666		return rc;
1667	}
1668
1669	/* Get SPQ entry */
1670	memset(&init_data, 0, sizeof(init_data));
1671	init_data.cid = qp->icid + 1;
1672	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1673	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1674
1675	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
1676				 PROTOCOLID_ROCE, &init_data);
1677	if (rc)
1678		goto err;
1679
1680	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
1681	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1682
1683	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1684	if (rc)
1685		goto err;
1686
1687	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
1688
1689	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
1690	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1691			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1692			  qp->orq, qp->orq_phys_addr);
1693
1694	qp->req_offloaded = false;
1695
1696	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
1697
1698err:
1699	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1700			  p_ramrod_res, ramrod_res_phys);
1701
1702	return rc;
1703}
1704
1705static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1706			     struct qed_rdma_qp *qp,
1707			     struct qed_rdma_query_qp_out_params *out_params)
1708{
1709	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1710	struct roce_query_qp_req_output_params *p_req_ramrod_res;
1711	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1712	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1713	struct qed_sp_init_data init_data;
1714	dma_addr_t resp_ramrod_res_phys;
1715	dma_addr_t req_ramrod_res_phys;
1716	struct qed_spq_entry *p_ent;
1717	bool rq_err_state;
1718	bool sq_err_state;
1719	bool sq_draining;
1720	int rc = -ENOMEM;
1721
1722	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
1723		/* We can't send ramrod to the fw since this qp wasn't offloaded
1724		 * to the fw yet
1725		 */
1726		out_params->draining = false;
1727		out_params->rq_psn = qp->rq_psn;
1728		out_params->sq_psn = qp->sq_psn;
1729		out_params->state = qp->cur_state;
1730
1731		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP is not offloaded to the FW yet\n");
1732		return 0;
1733	}
1734
1735	if (!(qp->resp_offloaded)) {
1736		DP_NOTICE(p_hwfn,
1737			  "The responder's qp should be offloaded before the requester's\n");
1738		return -EINVAL;
1739	}
1740
1741	/* Send a query responder ramrod to FW to get RQ-PSN and state */
1742	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
1743	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1744			       sizeof(*p_resp_ramrod_res),
1745			       &resp_ramrod_res_phys, GFP_KERNEL);
1746	if (!p_resp_ramrod_res) {
1747		DP_NOTICE(p_hwfn,
1748			  "qed query qp failed: cannot allocate memory (ramrod)\n");
1749		return rc;
1750	}
1751
1752	/* Get SPQ entry */
1753	memset(&init_data, 0, sizeof(init_data));
1754	init_data.cid = qp->icid;
1755	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1756	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1757	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1758				 PROTOCOLID_ROCE, &init_data);
1759	if (rc)
1760		goto err_resp;
1761
1762	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1763	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1764
1765	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1766	if (rc)
1767		goto err_resp;
1768
1769	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1770	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1771				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1772
1773	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1774			  p_resp_ramrod_res, resp_ramrod_res_phys);
1775
1776	if (!(qp->req_offloaded)) {
1777		/* Don't send query qp for the requester */
1778		out_params->sq_psn = qp->sq_psn;
1779		out_params->draining = false;
1780
1781		if (rq_err_state)
1782			qp->cur_state = QED_ROCE_QP_STATE_ERR;
1783
1784		out_params->state = qp->cur_state;
1785
1786		return 0;
1787	}
1788
1789	/* Send a query requester ramrod to FW to get SQ-PSN and state */
1790	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
1791			   dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1792					      sizeof(*p_req_ramrod_res),
1793					      &req_ramrod_res_phys,
1794					      GFP_KERNEL);
1795	if (!p_req_ramrod_res) {
1796		rc = -ENOMEM;
1797		DP_NOTICE(p_hwfn,
1798			  "qed query qp failed: cannot allocate memory (ramrod)\n");
1799		return rc;
1800	}
1801
1802	/* Get SPQ entry */
1803	init_data.cid = qp->icid + 1;
1804	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1805				 PROTOCOLID_ROCE, &init_data);
1806	if (rc)
1807		goto err_req;
1808
1809	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1810	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1811
1812	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1813	if (rc)
1814		goto err_req;
1815
1816	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1817	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1818				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1819	sq_draining =
1820		GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1821			  ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1822
1823	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1824			  p_req_ramrod_res, req_ramrod_res_phys);
1825
1826	out_params->draining = false;
1827
1828	if (rq_err_state)
1829		qp->cur_state = QED_ROCE_QP_STATE_ERR;
1830	else if (sq_err_state)
1831		qp->cur_state = QED_ROCE_QP_STATE_SQE;
1832	else if (sq_draining)
1833		out_params->draining = true;
1834	out_params->state = qp->cur_state;
1835
1836	return 0;
1837
1838err_req:
1839	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1840			  p_req_ramrod_res, req_ramrod_res_phys);
1841	return rc;
1842err_resp:
1843	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1844			  p_resp_ramrod_res, resp_ramrod_res_phys);
1845	return rc;
1846}
1847
1848static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1849{
1850	u32 num_invalidated_mw = 0;
1851	u32 num_bound_mw = 0;
1852	u32 start_cid;
1853	int rc;
1854
1855	/* Destroys the specified QP */
1856	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
1857	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
1858	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
1859		DP_NOTICE(p_hwfn,
1860			  "QP must be in error, reset or init state before destroying it\n");
1861		return -EINVAL;
1862	}
1863
1864	rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
1865	if (rc)
1866		return rc;
1867
1868	/* Send destroy requester ramrod */
1869	rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
1870	if (rc)
1871		return rc;
1872
1873	if (num_invalidated_mw != num_bound_mw) {
1874		DP_NOTICE(p_hwfn,
1875			  "number of invalidated memory windows differs from the number of bound ones\n");
1876		return -EINVAL;
1877	}
1878
1879	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1880
1881	start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1882						p_hwfn->p_rdma_info->proto);
1883
1884	/* Release responder's icid */
1885	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1886			    qp->icid - start_cid);
1887
1888	/* Release requester's icid */
1889	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1890			    qp->icid + 1 - start_cid);
1891
1892	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1893
1894	return 0;
1895}
1896
1897static int qed_rdma_query_qp(void *rdma_cxt,
1898			     struct qed_rdma_qp *qp,
1899			     struct qed_rdma_query_qp_out_params *out_params)
1900{
1901	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1902	int rc;
1903
1904	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1905
1906	/* The following fields are filled in from qp and not FW as they can't
1907	 * be modified by FW
1908	 */
1909	out_params->mtu = qp->mtu;
1910	out_params->dest_qp = qp->dest_qp;
1911	out_params->incoming_atomic_en = qp->incoming_atomic_en;
1912	out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
1913	out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
1914	out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
1915	out_params->dgid = qp->dgid;
1916	out_params->flow_label = qp->flow_label;
1917	out_params->hop_limit_ttl = qp->hop_limit_ttl;
1918	out_params->traffic_class_tos = qp->traffic_class_tos;
1919	out_params->timeout = qp->ack_timeout;
1920	out_params->rnr_retry = qp->rnr_retry_cnt;
1921	out_params->retry_cnt = qp->retry_cnt;
1922	out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
1923	out_params->pkey_index = 0;
1924	out_params->max_rd_atomic = qp->max_rd_atomic_req;
1925	out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
1926	out_params->sqd_async = qp->sqd_async;
1927
1928	rc = qed_roce_query_qp(p_hwfn, qp, out_params);
1929
1930	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
1931	return rc;
1932}
1933
1934static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
1935{
1936	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1937	int rc = 0;
1938
1939	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1940
1941	rc = qed_roce_destroy_qp(p_hwfn, qp);
1942
1943	/* free qp params struct */
1944	kfree(qp);
1945
1946	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
1947	return rc;
1948}
1949
1950static struct qed_rdma_qp *
1951qed_rdma_create_qp(void *rdma_cxt,
1952		   struct qed_rdma_create_qp_in_params *in_params,
1953		   struct qed_rdma_create_qp_out_params *out_params)
1954{
1955	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1956	struct qed_rdma_qp *qp;
1957	u8 max_stats_queues;
1958	int rc;
1959
1960	if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
1961		DP_ERR(p_hwfn->cdev,
1962		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
1963		       rdma_cxt, in_params, out_params);
1964		return NULL;
1965	}
1966
1967	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1968		   "qed rdma create qp called with qp_handle = %08x%08x\n",
1969		   in_params->qp_handle_hi, in_params->qp_handle_lo);
1970
1971	/* Some sanity checks... */
1972	max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
1973	if (in_params->stats_queue >= max_stats_queues) {
1974		DP_ERR(p_hwfn->cdev,
1975		       "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
1976		       in_params->stats_queue, max_stats_queues);
1977		return NULL;
1978	}
1979
1980	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1981	if (!qp) {
1982		DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
1983		return NULL;
1984	}
1985
1986	rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
1987	qp->qpid = ((0xFF << 16) | qp->icid);
1988
1989	DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
1990
1991	if (rc) {
1992		kfree(qp);
1993		return NULL;
1994	}
1995
1996	qp->cur_state = QED_ROCE_QP_STATE_RESET;
1997	qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
1998	qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
1999	qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2000	qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2001	qp->use_srq = in_params->use_srq;
2002	qp->signal_all = in_params->signal_all;
2003	qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2004	qp->pd = in_params->pd;
2005	qp->dpi = in_params->dpi;
2006	qp->sq_cq_id = in_params->sq_cq_id;
2007	qp->sq_num_pages = in_params->sq_num_pages;
2008	qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2009	qp->rq_cq_id = in_params->rq_cq_id;
2010	qp->rq_num_pages = in_params->rq_num_pages;
2011	qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2012	qp->srq_id = in_params->srq_id;
2013	qp->req_offloaded = false;
2014	qp->resp_offloaded = false;
2015	qp->e2e_flow_control_en = qp->use_srq ? false : true;
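	/* Note: end-to-end flow control is assumed not to apply when the QP
	 * receives via an SRQ, hence it is disabled for SRQ users.
	 */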
2016	qp->stats_queue = in_params->stats_queue;
2017
2018	out_params->icid = qp->icid;
2019	out_params->qp_id = qp->qpid;
2020
2021	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2022	return qp;
2023}
2024
2025static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2026			      struct qed_rdma_qp *qp,
2027			      enum qed_roce_qp_state prev_state,
2028			      struct qed_rdma_modify_qp_in_params *params)
2029{
2030	u32 num_invalidated_mw = 0, num_bound_mw = 0;
2031	int rc = 0;
2032
2033	/* Perform additional operations according to the current state and the
2034	 * next state
2035	 */
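	/* Transitions handled below:
	 *   INIT/RESET -> RTR: offload the responder
	 *   RTR -> RTS: offload the requester, then modify the responder
	 *   RTS -> SQD: move the requester to SQD
	 *   RTS -> RTS, SQD -> SQD, SQD -> RTS: modify both sides
	 *   any -> ERR/SQE: move both sides to the error state
	 *   any -> RESET: tear down (de-offload) both sides
	 */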
2036	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
2037	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
2038	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
2039		/* Init->RTR or Reset->RTR */
2040		rc = qed_roce_sp_create_responder(p_hwfn, qp);
2041		return rc;
2042	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
2043		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2044		/* RTR-> RTS */
2045		rc = qed_roce_sp_create_requester(p_hwfn, qp);
2046		if (rc)
2047			return rc;
2048
2049		/* Send modify responder ramrod */
2050		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2051						  params->modify_flags);
2052		return rc;
2053	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2054		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2055		/* RTS->RTS */
2056		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2057						  params->modify_flags);
2058		if (rc)
2059			return rc;
2060
2061		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2062						  params->modify_flags);
2063		return rc;
2064	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2065		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2066		/* RTS->SQD */
2067		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
2068						  params->modify_flags);
2069		return rc;
2070	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2071		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2072		/* SQD->SQD */
2073		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2074						  params->modify_flags);
2075		if (rc)
2076			return rc;
2077
2078		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2079						  params->modify_flags);
2080		return rc;
2081	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2082		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2083		/* SQD->RTS */
2084		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2085						  params->modify_flags);
2086		if (rc)
2087			return rc;
2088
2089		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2090						  params->modify_flags);
2091
2092		return rc;
2093	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
2094		   qp->cur_state == QED_ROCE_QP_STATE_SQE) {
2095		/* ->ERR */
2096		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
2097						  params->modify_flags);
2098		if (rc)
2099			return rc;
2100
2101		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
2102						  params->modify_flags);
2103		return rc;
2104	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2105		/* Any state -> RESET */
2106
2107		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
2108						      &num_invalidated_mw);
2109		if (rc)
2110			return rc;
2111
2112		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2113						      &num_bound_mw);
2114
2115		if (num_invalidated_mw != num_bound_mw) {
2116			DP_NOTICE(p_hwfn,
2117				  "number of invalidated memory windows differs from the number of bound ones\n");
2118			return -EINVAL;
2119		}
2120	} else {
2121		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No ramrod required for this transition\n");
2122	}
2123
2124	return rc;
2125}
2126
2127static int qed_rdma_modify_qp(void *rdma_cxt,
2128			      struct qed_rdma_qp *qp,
2129			      struct qed_rdma_modify_qp_in_params *params)
2130{
2131	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2132	enum qed_roce_qp_state prev_state;
2133	int rc = 0;
2134
2135	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2136		   qp->icid, params->new_state);
2142
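	/* Cache the new attributes in the qp struct first; the required
	 * ramrods are issued by qed_roce_modify_qp() below, based on the
	 * previous and new states.
	 */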
2143	if (GET_FIELD(params->modify_flags,
2144		      QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2145		qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2146		qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2147		qp->incoming_atomic_en = params->incoming_atomic_en;
2148	}
2149
2150	/* Update QP structure with the updated values */
2151	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2152		qp->roce_mode = params->roce_mode;
2153	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2154		qp->pkey = params->pkey;
2155	if (GET_FIELD(params->modify_flags,
2156		      QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2157		qp->e2e_flow_control_en = params->e2e_flow_control_en;
2158	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2159		qp->dest_qp = params->dest_qp;
2160	if (GET_FIELD(params->modify_flags,
2161		      QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2162		/* Indicates that the following parameters have changed:
2163		 * Traffic class, flow label, hop limit, source GID,
2164		 * destination GID, loopback indicator
2165		 */
2166		qp->traffic_class_tos = params->traffic_class_tos;
2167		qp->flow_label = params->flow_label;
2168		qp->hop_limit_ttl = params->hop_limit_ttl;
2169
2170		qp->sgid = params->sgid;
2171		qp->dgid = params->dgid;
2172		qp->udp_src_port = 0;
2173		qp->vlan_id = params->vlan_id;
2174		qp->mtu = params->mtu;
2175		qp->lb_indication = params->lb_indication;
2176		memcpy((u8 *)&qp->remote_mac_addr[0],
2177		       (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2178		if (params->use_local_mac) {
2179			memcpy((u8 *)&qp->local_mac_addr[0],
2180			       (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2181		} else {
2182			memcpy((u8 *)&qp->local_mac_addr[0],
2183			       (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2184		}
2185	}
2186	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2187		qp->rq_psn = params->rq_psn;
2188	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2189		qp->sq_psn = params->sq_psn;
2190	if (GET_FIELD(params->modify_flags,
2191		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2192		qp->max_rd_atomic_req = params->max_rd_atomic_req;
2193	if (GET_FIELD(params->modify_flags,
2194		      QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2195		qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2196	if (GET_FIELD(params->modify_flags,
2197		      QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2198		qp->ack_timeout = params->ack_timeout;
2199	if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2200		qp->retry_cnt = params->retry_cnt;
2201	if (GET_FIELD(params->modify_flags,
2202		      QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2203		qp->rnr_retry_cnt = params->rnr_retry_cnt;
2204	if (GET_FIELD(params->modify_flags,
2205		      QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2206		qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2207
2208	qp->sqd_async = params->sqd_async;
2209
2210	prev_state = qp->cur_state;
2211	if (GET_FIELD(params->modify_flags,
2212		      QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2213		qp->cur_state = params->new_state;
2214		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2215			   qp->cur_state);
2216	}
2217
2218	rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2219
2220	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2221	return rc;
2222}
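
/* Illustrative caller sketch (hedged, not taken from a real caller): an
 * upper layer such as the qedr driver is assumed to build modify_flags
 * with SET_FIELD() using the same field names tested above, e.g.
 *
 *	struct qed_rdma_modify_qp_in_params in_params = {};
 *
 *	SET_FIELD(in_params.modify_flags,
 *		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
 *	in_params.new_state = QED_ROCE_QP_STATE_RTS;
 *	SET_FIELD(in_params.modify_flags,
 *		  QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
 *	in_params.sq_psn = sq_psn;
 *
 *	rc = qed_rdma_modify_qp(rdma_cxt, qp, &in_params);
 *
 * rdma_cxt, qp and sq_psn above are hypothetical caller-owned values.
 */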
2223
2224static int
2225qed_rdma_register_tid(void *rdma_cxt,
2226		      struct qed_rdma_register_tid_in_params *params)
2227{
2228	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2229	struct rdma_register_tid_ramrod_data *p_ramrod;
2230	struct qed_sp_init_data init_data;
2231	struct qed_spq_entry *p_ent;
2232	enum rdma_tid_type tid_type;
2233	u8 fw_return_code;
2234	int rc;
2235
2236	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
2237
2238	/* Get SPQ entry */
2239	memset(&init_data, 0, sizeof(init_data));
2240	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2241	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2242
2243	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
2244				 p_hwfn->p_rdma_info->proto, &init_data);
2245	if (rc) {
2246		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2247		return rc;
2248	}
2249
2250	if (p_hwfn->p_rdma_info->last_tid < params->itid)
2251		p_hwfn->p_rdma_info->last_tid = params->itid;
2252
2253	p_ramrod = &p_ent->ramrod.rdma_register_tid;
2254
2255	p_ramrod->flags = 0;
2256	SET_FIELD(p_ramrod->flags,
2257		  RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
2258		  params->pbl_two_level);
2259
2260	SET_FIELD(p_ramrod->flags,
2261		  RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
2262
2263	SET_FIELD(p_ramrod->flags,
2264		  RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
2265
2266	/* Don't initialize D/C field, as it may override other bits. */
2267	if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
2268		SET_FIELD(p_ramrod->flags,
2269			  RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
2270			  params->page_size_log - 12);
2271
2272	SET_FIELD(p_ramrod->flags,
2273		  RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
2274		  p_hwfn->p_rdma_info->last_tid);
2275
2276	SET_FIELD(p_ramrod->flags,
2277		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
2278		  params->remote_read);
2279
2280	SET_FIELD(p_ramrod->flags,
2281		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
2282		  params->remote_write);
2283
2284	SET_FIELD(p_ramrod->flags,
2285		  RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
2286		  params->remote_atomic);
2287
2288	SET_FIELD(p_ramrod->flags,
2289		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
2290		  params->local_write);
2291
2292	SET_FIELD(p_ramrod->flags,
2293		  RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
2294
2295	SET_FIELD(p_ramrod->flags,
2296		  RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
2297		  params->mw_bind);
2298
2299	SET_FIELD(p_ramrod->flags1,
2300		  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
2301		  params->pbl_page_size_log - 12);
2302
2303	SET_FIELD(p_ramrod->flags2,
2304		  RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
2305
2306	switch (params->tid_type) {
2307	case QED_RDMA_TID_REGISTERED_MR:
2308		tid_type = RDMA_TID_REGISTERED_MR;
2309		break;
2310	case QED_RDMA_TID_FMR:
2311		tid_type = RDMA_TID_FMR;
2312		break;
2313	case QED_RDMA_TID_MW_TYPE1:
2314		tid_type = RDMA_TID_MW_TYPE1;
2315		break;
2316	case QED_RDMA_TID_MW_TYPE2A:
2317		tid_type = RDMA_TID_MW_TYPE2A;
2318		break;
2319	default:
2320		rc = -EINVAL;
2321		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2322		return rc;
2323	}
2324	SET_FIELD(p_ramrod->flags1,
2325		  RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
2326
2327	p_ramrod->itid = cpu_to_le32(params->itid);
2328	p_ramrod->key = params->key;
2329	p_ramrod->pd = cpu_to_le16(params->pd);
2330	p_ramrod->length_hi = (u8)(params->length >> 32);
2331	p_ramrod->length_lo = DMA_LO_LE(params->length);
2332	if (params->zbva) {
2333		/* Lower 32 bits of the registered MR address.
2334		 * For a zero-based MR this holds the FBO (first byte offset).
2335		 */
2336		p_ramrod->va.hi = 0;
2337		p_ramrod->va.lo = cpu_to_le32(params->fbo);
2338	} else {
2339		DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
2340	}
2341	DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
2342
2343	/* DIF */
2344	if (params->dif_enabled) {
2345		SET_FIELD(p_ramrod->flags2,
2346			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
2347		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
2348			       params->dif_error_addr);
2349		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
2350	}
2351
2352	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
	if (rc)
		return rc;
2353
2354	if (fw_return_code != RDMA_RETURN_OK) {
2355		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2356		return -EINVAL;
2357	}
2358
2359	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
2360	return rc;
2361}
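
/* Illustrative caller sketch (hedged): registering a plain, VA-based MR
 * through this entry point.  The field names follow the params accessed
 * above; tid, key, pd_id, va, len and pbl_addr are hypothetical
 * caller-owned values:
 *
 *	struct qed_rdma_register_tid_in_params tparams = {};
 *
 *	tparams.itid = tid;
 *	tparams.key = key;
 *	tparams.pd = pd_id;
 *	tparams.tid_type = QED_RDMA_TID_REGISTERED_MR;
 *	tparams.vaddr = va;
 *	tparams.length = len;
 *	tparams.pbl_ptr = pbl_addr;
 *	tparams.pbl_page_size_log = 12;		(4 KB PBL pages)
 *	tparams.page_size_log = 12;		(4 KB MR pages)
 *	tparams.local_read = 1;
 *	tparams.local_write = 1;
 *
 *	rc = qed_rdma_register_tid(rdma_cxt, &tparams);
 */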
2362
2363static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2364{
2365	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2366	struct rdma_deregister_tid_ramrod_data *p_ramrod;
2367	struct qed_sp_init_data init_data;
2368	struct qed_spq_entry *p_ent;
2369	struct qed_ptt *p_ptt;
2370	u8 fw_return_code;
2371	int rc;
2372
2373	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
2374
2375	/* Get SPQ entry */
2376	memset(&init_data, 0, sizeof(init_data));
2377	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2378	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2379
2380	rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
2381				 p_hwfn->p_rdma_info->proto, &init_data);
2382	if (rc) {
2383		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2384		return rc;
2385	}
2386
2387	p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
2388	p_ramrod->itid = cpu_to_le32(itid);
2389
2390	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2391	if (rc) {
2392		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2393		return rc;
2394	}
2395
2396	if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2397		DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2398		return -EINVAL;
2399	} else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
2400		/* Bit indicating that the TID is still in use and a NIG drain
2401		 * is required before the ramrod can be sent again.
2402		 */
2403		p_ptt = qed_ptt_acquire(p_hwfn);
2404		if (!p_ptt) {
2405			rc = -EBUSY;
2406			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2407				   "Failed to acquire PTT\n");
2408			return rc;
2409		}
2410
2411		rc = qed_mcp_drain(p_hwfn, p_ptt);
2412		if (rc) {
2413			qed_ptt_release(p_hwfn, p_ptt);
2414			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2415				   "Drain failed\n");
2416			return rc;
2417		}
2418
2419		qed_ptt_release(p_hwfn, p_ptt);
2420
2421		/* Resend the ramrod */
2422		rc = qed_sp_init_request(p_hwfn, &p_ent,
2423					 RDMA_RAMROD_DEREGISTER_MR,
2424					 p_hwfn->p_rdma_info->proto,
2425					 &init_data);
2426		if (rc) {
2427			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2428				   "Failed to init sp-element\n");
2429			return rc;
2430		}
2431
2432		rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2433		if (rc) {
2434			DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2435				   "Ramrod failed\n");
2436			return rc;
2437		}
2438
2439		if (fw_return_code != RDMA_RETURN_OK) {
2440			DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
2441				  fw_return_code);
2442			return -EINVAL;
2443		}
2444	}
2445
2446	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
2447	return rc;
2448}
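
/* Illustrative teardown sketch (hedged): a TID is deregistered first and
 * only then returned to the TID bitmap through the rdma_free_tid op in
 * the table at the end of this file (its signature is assumed to mirror
 * qed_rdma_deregister_tid()):
 *
 *	rc = qed_rdma_deregister_tid(rdma_cxt, tid);
 *	if (!rc)
 *		qed_rdma_free_tid(rdma_cxt, tid);
 */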
2449
2450static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2451{
2452	return QED_LEADING_HWFN(cdev);
2453}
2454
2455static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2456{
2457	u32 val;
2458
2459	val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
2460
2461	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
2462	DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
2463		   "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
2464		   val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
2465}
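
/* EDPM gating: the write above reduces to
 *
 *	dpm_en = !(dcbx_no_edpm || db_bar_no_edpm);
 *
 * qed_rdma_dpm_bar() below forces db_bar_no_edpm and re-runs this
 * evaluation, which is how an undersized doorbell BAR switches EDPM off.
 */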
2466
2467void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2468{
2469	p_hwfn->db_bar_no_edpm = true;
2470
2471	qed_rdma_dpm_conf(p_hwfn, p_ptt);
2472}
2473
2474static int qed_rdma_start(void *rdma_cxt,
2475			  struct qed_rdma_start_in_params *params)
2476{
2477	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2478	struct qed_ptt *p_ptt;
2479	int rc = -EBUSY;
2480
2481	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2482		   "desired_cnq = %08x\n", params->desired_cnq);
2483
2484	p_ptt = qed_ptt_acquire(p_hwfn);
2485	if (!p_ptt)
2486		goto err;
2487
2488	rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2489	if (rc)
2490		goto err1;
2491
2492	rc = qed_rdma_setup(p_hwfn, p_ptt, params);
2493	if (rc)
2494		goto err2;
2495
2496	qed_ptt_release(p_hwfn, p_ptt);
2497
2498	return rc;
2499
2500err2:
2501	qed_rdma_free(p_hwfn);
2502err1:
2503	qed_ptt_release(p_hwfn, p_ptt);
2504err:
2505	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2506	return rc;
2507}
2508
2509static int qed_rdma_init(struct qed_dev *cdev,
2510			 struct qed_rdma_start_in_params *params)
2511{
2512	return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2513}
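
/* The error unwinding in qed_rdma_start() above mirrors the setup order:
 * a setup failure frees the allocated RDMA resources (err2), an
 * allocation failure only releases the PTT (err1), and every path
 * releases the PTT before returning.  qed_rdma_init() is the thin
 * cdev-level wrapper exposed through the ops table at the end of this
 * file.
 */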
2514
2515static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2516{
2517	struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2518
2519	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
2520
2521	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2522	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2523	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2524}
2525
2526void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2527				     u8 connection_handle,
2528				     void *cookie,
2529				     dma_addr_t first_frag_addr,
2530				     bool b_last_fragment, bool b_last_packet)
2531{
2532	struct qed_roce_ll2_packet *packet = cookie;
2533	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2534
2535	roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
2536}
2537
2538void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2539				    u8 connection_handle,
2540				    void *cookie,
2541				    dma_addr_t first_frag_addr,
2542				    bool b_last_fragment, bool b_last_packet)
2543{
2544	qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
2545					cookie, first_frag_addr,
2546					b_last_fragment, b_last_packet);
2547}
2548
2549void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
2550				     u8 connection_handle,
2551				     void *cookie,
2552				     dma_addr_t rx_buf_addr,
2553				     u16 data_length,
2554				     u8 data_length_error,
2555				     u16 parse_flags,
2556				     u16 vlan,
2557				     u32 src_mac_addr_hi,
2558				     u16 src_mac_addr_lo, bool b_last_packet)
2559{
2560	struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2561	struct qed_roce_ll2_rx_params params;
2562	struct qed_dev *cdev = p_hwfn->cdev;
2563	struct qed_roce_ll2_packet pkt;
2564
2565	DP_VERBOSE(cdev,
2566		   QED_MSG_LL2,
2567		   "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
2568		   (void *)(uintptr_t)rx_buf_addr,
2569		   data_length, data_length_error);
2570
2571	memset(&pkt, 0, sizeof(pkt));
2572	pkt.n_seg = 1;
2573	pkt.payload[0].baddr = rx_buf_addr;
2574	pkt.payload[0].len = data_length;
2575
2576	memset(&params, 0, sizeof(params));
2577	params.vlan_id = vlan;
2578	*((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
2579	*((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
2580
2581	if (data_length_error) {
2582		DP_ERR(cdev,
2583		       "roce ll2 rx complete: data length error %d, length=%d\n",
2584		       data_length_error, data_length);
2585		params.rc = -EINVAL;
2586	}
2587
2588	roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
2589}
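
/* Note on the source-MAC unpacking above: the peer MAC arrives as a
 * 32-bit high word and a 16-bit low word in network byte order, so
 * ntohl() places the first four bytes in params.smac[0..3] and ntohs()
 * the remaining two in params.smac[4..5].
 */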
2590
2591static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
2592				       u8 *old_mac_address,
2593				       u8 *new_mac_address)
2594{
2595	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2596	struct qed_ptt *p_ptt;
2597	int rc = 0;
2598
2599	if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
2600		DP_ERR(cdev,
2601		       "qed roce mac filter failed - roce_info/ll2 NULL\n");
2602		return -EINVAL;
2603	}
2604
2605	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2606	if (!p_ptt) {
2607		DP_ERR(cdev,
2608		       "qed roce ll2 mac filter set: failed to acquire PTT\n");
2609		return -EINVAL;
2610	}
2611
2612	mutex_lock(&hwfn->ll2->lock);
2613	if (old_mac_address)
2614		qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2615					  old_mac_address);
2616	if (new_mac_address)
2617		rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2618					    new_mac_address);
2619	mutex_unlock(&hwfn->ll2->lock);
2620
2621	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2622
2623	if (rc)
2624		DP_ERR(cdev,
2625		       "qed roce ll2 mac filter set: failed to add mac filter\n");
2626
2627	return rc;
2628}
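
/* Usage of the filter helper above, as exercised by the start/stop paths
 * below (either MAC pointer may be NULL):
 *
 *	qed_roce_ll2_set_mac_filter(cdev, NULL, new_mac);	add only
 *	qed_roce_ll2_set_mac_filter(cdev, old_mac, new_mac);	replace
 *	qed_roce_ll2_set_mac_filter(cdev, old_mac, NULL);	remove only
 *
 * The old filter is removed before the new one is added, both under the
 * LL2 mutex.
 */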
2629
2630static int qed_roce_ll2_start(struct qed_dev *cdev,
2631			      struct qed_roce_ll2_params *params)
2632{
2633	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2634	struct qed_roce_ll2_info *roce_ll2;
2635	struct qed_ll2_conn ll2_params;
2636	int rc;
2637
2638	if (!params) {
2639		DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
2640		return -EINVAL;
2641	}
2642	if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
2643		DP_ERR(cdev,
2644		       "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
2645		       params->cbs.tx_cb, params->cbs.rx_cb);
2646		return -EINVAL;
2647	}
2648	if (!is_valid_ether_addr(params->mac_address)) {
2649		DP_ERR(cdev,
2650		       "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
2651		       params->mac_address);
2652		return -EINVAL;
2653	}
2654
2655	/* Initialize */
2656	roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
2657	if (!roce_ll2) {
2658		DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
2659		return -ENOMEM;
2660	}
2661	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2662	roce_ll2->cbs = params->cbs;
2663	roce_ll2->cb_cookie = params->cb_cookie;
2664	mutex_init(&roce_ll2->lock);
2665
2666	memset(&ll2_params, 0, sizeof(ll2_params));
2667	ll2_params.conn_type = QED_LL2_TYPE_ROCE;
2668	ll2_params.mtu = params->mtu;
2669	ll2_params.rx_drop_ttl0_flg = true;
2670	ll2_params.rx_vlan_removal_en = false;
2671	ll2_params.tx_dest = CORE_TX_DEST_NW;
2672	ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
2673	ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
2674	ll2_params.gsi_enable = true;
2675
2676	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
2677					params->max_rx_buffers,
2678					params->max_tx_buffers,
2679					&roce_ll2->handle);
2680	if (rc) {
2681		DP_ERR(cdev,
2682		       "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
2683		       rc);
2684		goto err;
2685	}
2686
2687	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2688					  roce_ll2->handle);
2689	if (rc) {
2690		DP_ERR(cdev,
2691		       "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
2692		       rc);
2693		goto err1;
2694	}
2695
2696	hwfn->ll2 = roce_ll2;
2697
2698	rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
2699	if (rc) {
2700		hwfn->ll2 = NULL;
2701		goto err2;
2702	}
2703	ether_addr_copy(roce_ll2->mac_address, params->mac_address);
2704
2705	return 0;
2706
2707err2:
2708	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2709err1:
2710	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2711err:
2712	kfree(roce_ll2);
2713	return rc;
2714}
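
/* The teardown labels above undo qed_roce_ll2_start() in reverse order:
 * err2 terminates the established LL2 connection, err1 releases the
 * acquired connection and err frees the roce_ll2 context.  hwfn->ll2 is
 * only left set once the MAC filter has been installed successfully.
 */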
2715
2716static int qed_roce_ll2_stop(struct qed_dev *cdev)
2717{
2718	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2719	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2720	int rc;
2721
2722	if (!roce_ll2 || roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
2723		DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
2724		return -EINVAL;
2725	}
2726
2727	/* remove LL2 MAC address filter */
2728	rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
2729	eth_zero_addr(roce_ll2->mac_address);
2730
2731	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2732					  roce_ll2->handle);
2733	if (rc)
2734		DP_ERR(cdev,
2735		       "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
2736		       rc);
2737
2738	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2739
2740	roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2741
2742	kfree(roce_ll2);
2743
2744	return rc;
2745}
2746
2747static int qed_roce_ll2_tx(struct qed_dev *cdev,
2748			   struct qed_roce_ll2_packet *pkt,
2749			   struct qed_roce_ll2_tx_params *params)
2750{
2751	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2752	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2753	enum qed_ll2_roce_flavor_type qed_roce_flavor;
2754	u8 flags = 0;
2755	int rc;
2756	int i;
2757
2758	if (!pkt || !params) {
2759		DP_ERR(cdev,
2760		       "roce ll2 tx: failed tx because one of the following is NULL - cdev=%p, pkt=%p, params=%p\n",
2761		       cdev, pkt, params);
2762		return -EINVAL;
2763	}
2764
2765	qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
2766						      : QED_LL2_RROCE;
2767
2768	if (pkt->roce_mode == ROCE_V2_IPV4)
2769		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2770
2771	/* Tx header */
2772	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
2773				       1 + pkt->n_seg, 0, flags, 0,
2774				       QED_LL2_TX_DEST_NW,
2775				       qed_roce_flavor, pkt->header.baddr,
2776				       pkt->header.len, pkt, 1);
2777	if (rc) {
2778		DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
2779		return QED_ROCE_TX_HEAD_FAILURE;
2780	}
2781
2782	/* Tx payload */
2783	for (i = 0; i < pkt->n_seg; i++) {
2784		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2785						       roce_ll2->handle,
2786						       pkt->payload[i].baddr,
2787						       pkt->payload[i].len);
2788		if (rc) {
2789			/* If this fails there is not much we can do: a partial
2790			 * packet has already been posted and we cannot free the
2791			 * memory, so we must wait for the completion.
2792			 */
2793			DP_ERR(cdev,
2794			       "roce ll2 tx: payload failed (rc=%d)\n", rc);
2795			return QED_ROCE_TX_FRAG_FAILURE;
2796		}
2797	}
2798
2799	return 0;
2800}
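
/* Illustrative caller sketch (hedged): sending a GSI packet with a
 * separately mapped header and one payload fragment.  hdr_dma, hdr_len,
 * data_dma and data_len are hypothetical, already DMA-mapped values:
 *
 *	struct qed_roce_ll2_packet pkt = {};
 *	struct qed_roce_ll2_tx_params tx_params = {};
 *
 *	pkt.roce_mode = ROCE_V2_IPV4;
 *	pkt.header.baddr = hdr_dma;
 *	pkt.header.len = hdr_len;
 *	pkt.n_seg = 1;
 *	pkt.payload[0].baddr = data_dma;
 *	pkt.payload[0].len = data_len;
 *
 *	rc = qed_roce_ll2_tx(cdev, &pkt, &tx_params);
 */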
2801
2802static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
2803				       struct qed_roce_ll2_buffer *buf,
2804				       u64 cookie, u8 notify_fw)
2805{
2806	return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2807				      QED_LEADING_HWFN(cdev)->ll2->handle,
2808				      buf->baddr, buf->len,
2809				      (void *)(uintptr_t)cookie, notify_fw);
2810}
2811
2812static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2813{
2814	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2815	struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2816
2817	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2818				 roce_ll2->handle, stats);
2819}
2820
2821static const struct qed_rdma_ops qed_rdma_ops_pass = {
2822	.common = &qed_common_ops_pass,
2823	.fill_dev_info = &qed_fill_rdma_dev_info,
2824	.rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2825	.rdma_init = &qed_rdma_init,
2826	.rdma_add_user = &qed_rdma_add_user,
2827	.rdma_remove_user = &qed_rdma_remove_user,
2828	.rdma_stop = &qed_rdma_stop,
2829	.rdma_query_port = &qed_rdma_query_port,
2830	.rdma_query_device = &qed_rdma_query_device,
2831	.rdma_get_start_sb = &qed_rdma_get_sb_start,
2832	.rdma_get_rdma_int = &qed_rdma_get_int,
2833	.rdma_set_rdma_int = &qed_rdma_set_int,
2834	.rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2835	.rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
2836	.rdma_alloc_pd = &qed_rdma_alloc_pd,
2837	.rdma_dealloc_pd = &qed_rdma_free_pd,
2838	.rdma_create_cq = &qed_rdma_create_cq,
2839	.rdma_destroy_cq = &qed_rdma_destroy_cq,
2840	.rdma_create_qp = &qed_rdma_create_qp,
2841	.rdma_modify_qp = &qed_rdma_modify_qp,
2842	.rdma_query_qp = &qed_rdma_query_qp,
2843	.rdma_destroy_qp = &qed_rdma_destroy_qp,
2844	.rdma_alloc_tid = &qed_rdma_alloc_tid,
2845	.rdma_free_tid = &qed_rdma_free_tid,
2846	.rdma_register_tid = &qed_rdma_register_tid,
2847	.rdma_deregister_tid = &qed_rdma_deregister_tid,
2848	.roce_ll2_start = &qed_roce_ll2_start,
2849	.roce_ll2_stop = &qed_roce_ll2_stop,
2850	.roce_ll2_tx = &qed_roce_ll2_tx,
2851	.roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
2852	.roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2853	.roce_ll2_stats = &qed_roce_ll2_stats,
2854};
2855
2856const struct qed_rdma_ops *qed_get_rdma_ops(void)
2857{
2858	return &qed_rdma_ops_pass;
2859}
2860EXPORT_SYMBOL(qed_get_rdma_ops);
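
/* Illustrative consumer sketch (hedged): an RDMA protocol driver such as
 * qedr is assumed to bind to qed through this exported ops table:
 *
 *	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
 *	void *rdma_cxt = ops->rdma_get_rdma_ctx(cdev);
 *
 *	rc = ops->rdma_init(cdev, &start_params);
 *	...
 *	rc = ops->rdma_modify_qp(rdma_cxt, qp, &modify_params);
 *
 * start_params and modify_params stand for caller-owned
 * qed_rdma_start_in_params / qed_rdma_modify_qp_in_params structures.
 */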