   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/if_ether.h>
   8#include <linux/if_vlan.h>
   9#include <linux/ip.h>
  10#include <linux/ipv6.h>
  11#include <linux/spinlock.h>
  12#include <linux/tcp.h>
  13#include "qed_cxt.h"
  14#include "qed_hw.h"
  15#include "qed_ll2.h"
  16#include "qed_rdma.h"
  17#include "qed_reg_addr.h"
  18#include "qed_sp.h"
  19#include "qed_ooo.h"
  20
  21#define QED_IWARP_ORD_DEFAULT		32
  22#define QED_IWARP_IRD_DEFAULT		32
  23#define QED_IWARP_MAX_FW_MSS		4120
  24
  25#define QED_EP_SIG 0xecabcdef
  26
  27struct mpa_v2_hdr {
  28	__be16 ird;
  29	__be16 ord;
  30};
  31
  32#define MPA_V2_PEER2PEER_MODEL  0x8000
  33#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  34#define MPA_V2_READ_RTR         0x4000	/* on ord */
  35#define MPA_V2_WRITE_RTR        0x8000
  36#define MPA_V2_IRD_ORD_MASK     0x3FFF
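/* For reference (derived from the masks above): the low 14 bits of each MPA v2
 * field carry the ird/ord count and the upper bits carry RTR flags, e.g. an
 * ord of 0x8010 decodes as WRITE_RTR with ord = 16, and an ird of 0xc008 as
 * peer2peer + SEND_RTR with ird = 8; see qed_iwarp_mpa_received() below.
 */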
  37
  38#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  39
  40#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  41
  42#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
  43#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
  44#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
  45#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
  46
  47#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  48#define TIMESTAMP_HEADER_SIZE		(12)
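/* Likely rationale (assumption, not stated here): the TCP timestamp option is
 * 10 bytes and is typically padded to 12 bytes on the wire, which matches this
 * 12-byte allowance.
 */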
  49#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  50
  51#define QED_IWARP_TS_EN			BIT(0)
  52#define QED_IWARP_DA_EN			BIT(1)
  53#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  54#define QED_IWARP_PARAM_P2P		(1)
  55
  56#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  57#define QED_IWARP_DEF_CWND_FACTOR	(4)
  58#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  59#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  60#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  61
  62static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
  63				 __le16 echo, union event_ring_data *data,
  64				 u8 fw_return_code);
  65
  66/* Override devinfo with iWARP specific values */
  67void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  68{
  69	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  70
  71	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  72	dev->max_qp = min_t(u32,
  73			    IWARP_MAX_QPS,
  74			    p_hwfn->p_rdma_info->num_qps) -
  75		      QED_IWARP_PREALLOC_CNT;
  76
  77	dev->max_cq = dev->max_qp;
  78
  79	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
  80	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
  81}
  82
  83void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  84{
  85	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
  86	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
  87	p_hwfn->b_rdma_enabled_in_prs = true;
  88}
  89
  90/* We have two cid maps, one for tcp which should be used only from passive
  91 * syn processing and replacing a pre-allocated ep in the list. The second
  92 * for active tcp and for QPs.
  93 */
  94static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
  95{
  96	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
  97
  98	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
  99
 100	if (cid < QED_IWARP_PREALLOC_CNT)
 101		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 102				    cid);
 103	else
 104		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 105
 106	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 107}
 108
 109void
 110qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 111			 struct iwarp_init_func_ramrod_data *p_ramrod)
 112{
 113	p_ramrod->iwarp.ll2_ooo_q_index =
 114	    RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
 115	    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 116
 117	p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
 118	p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
 119	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 120
 121	return;
 122}
 123
 124static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 125{
 126	int rc;
 127
 128	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 129	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 130	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 131	if (rc) {
 132		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 133		return rc;
 134	}
 135	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 136
 137	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 138	if (rc)
 139		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 140
 141	return rc;
 142}
 143
 144static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 145{
 146	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 147
 148	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 149	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 150	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 151}
 152
 153/* This function allocates a cid for passive tcp (called from syn receive)
 154 * it is kept separate from the regular cid allocation because it
 155 * is assured that these cids already have ilt allocated. They are preallocated
 156 * to ensure that we won't need to allocate memory during syn processing
 157 */
 158static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 159{
 160	int rc;
 161
 162	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 163
 164	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 165				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 166
 167	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 168
 169	if (rc) {
 170		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 171			   "can't allocate iwarp tcp cid max-count=%d\n",
 172			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 173
 174		*cid = QED_IWARP_INVALID_TCP_CID;
 175		return rc;
 176	}
 177
 178	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 179					    p_hwfn->p_rdma_info->proto);
 180	return 0;
 181}
 182
 183int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 184			struct qed_rdma_qp *qp,
 185			struct qed_rdma_create_qp_out_params *out_params)
 186{
 187	struct iwarp_create_qp_ramrod_data *p_ramrod;
 188	struct qed_sp_init_data init_data;
 189	struct qed_spq_entry *p_ent;
 190	u16 physical_queue;
 191	u32 cid;
 192	int rc;
 193
 194	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 195					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 196					      &qp->shared_queue_phys_addr,
 197					      GFP_KERNEL);
 198	if (!qp->shared_queue)
 199		return -ENOMEM;
 200
 201	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 202	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 203	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 204	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 205	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 206	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 207	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 208	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 209
 210	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 211	if (rc)
 212		goto err1;
 213
 214	qp->icid = (u16)cid;
 215
 216	memset(&init_data, 0, sizeof(init_data));
 217	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 218	init_data.cid = qp->icid;
 219	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 220
 221	rc = qed_sp_init_request(p_hwfn, &p_ent,
 222				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 223				 PROTOCOLID_IWARP, &init_data);
 224	if (rc)
 225		goto err2;
 226
 227	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 228
 229	SET_FIELD(p_ramrod->flags,
 230		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 231		  qp->fmr_and_reserved_lkey);
 232
 233	SET_FIELD(p_ramrod->flags,
 234		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 235
 236	SET_FIELD(p_ramrod->flags,
 237		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 238		  qp->incoming_rdma_read_en);
 239
 240	SET_FIELD(p_ramrod->flags,
 241		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 242		  qp->incoming_rdma_write_en);
 243
 244	SET_FIELD(p_ramrod->flags,
 245		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 246		  qp->incoming_atomic_en);
 247
 248	SET_FIELD(p_ramrod->flags,
 249		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 250
 251	p_ramrod->pd = cpu_to_le16(qp->pd);
 252	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
 253	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
 254
 255	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 256	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 257	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
 258	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
 259
 260	p_ramrod->cq_cid_for_sq =
 261	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 262	p_ramrod->cq_cid_for_rq =
 263	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 264
 265	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 266
 267	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 268	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 269	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 270	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 271
 272	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 273	if (rc)
 274		goto err2;
 275
 276	return rc;
 277
 278err2:
 279	qed_iwarp_cid_cleaned(p_hwfn, cid);
 280err1:
 281	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 282			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 283			  qp->shared_queue, qp->shared_queue_phys_addr);
 284
 285	return rc;
 286}
 287
 288static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 289{
 290	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 291	struct qed_sp_init_data init_data;
 292	struct qed_spq_entry *p_ent;
 293	u16 flags, trans_to_state;
 294	int rc;
 295
 296	/* Get SPQ entry */
 297	memset(&init_data, 0, sizeof(init_data));
 298	init_data.cid = qp->icid;
 299	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 300	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 301
 302	rc = qed_sp_init_request(p_hwfn, &p_ent,
 303				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 304				 p_hwfn->p_rdma_info->proto, &init_data);
 305	if (rc)
 306		return rc;
 307
 308	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 309
 310	flags = le16_to_cpu(p_ramrod->flags);
 311	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
 312	p_ramrod->flags = cpu_to_le16(flags);
 313
 314	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 315		trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 316	else
 317		trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 318
 319	p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);
 320
 321	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 322
 323	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
 324
 325	return rc;
 326}
 327
 328enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 329{
 330	switch (state) {
 331	case QED_ROCE_QP_STATE_RESET:
 332	case QED_ROCE_QP_STATE_INIT:
 333	case QED_ROCE_QP_STATE_RTR:
 334		return QED_IWARP_QP_STATE_IDLE;
 335	case QED_ROCE_QP_STATE_RTS:
 336		return QED_IWARP_QP_STATE_RTS;
 337	case QED_ROCE_QP_STATE_SQD:
 338		return QED_IWARP_QP_STATE_CLOSING;
 339	case QED_ROCE_QP_STATE_ERR:
 340		return QED_IWARP_QP_STATE_ERROR;
 341	case QED_ROCE_QP_STATE_SQE:
 342		return QED_IWARP_QP_STATE_TERMINATE;
 343	default:
 344		return QED_IWARP_QP_STATE_ERROR;
 345	}
 346}
 347
 348static enum qed_roce_qp_state
 349qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 350{
 351	switch (state) {
 352	case QED_IWARP_QP_STATE_IDLE:
 353		return QED_ROCE_QP_STATE_INIT;
 354	case QED_IWARP_QP_STATE_RTS:
 355		return QED_ROCE_QP_STATE_RTS;
 356	case QED_IWARP_QP_STATE_TERMINATE:
 357		return QED_ROCE_QP_STATE_SQE;
 358	case QED_IWARP_QP_STATE_CLOSING:
 359		return QED_ROCE_QP_STATE_SQD;
 360	case QED_IWARP_QP_STATE_ERROR:
 361		return QED_ROCE_QP_STATE_ERR;
 362	default:
 363		return QED_ROCE_QP_STATE_ERR;
 364	}
 365}
 366
 367static const char * const iwarp_state_names[] = {
 368	"IDLE",
 369	"RTS",
 370	"TERMINATE",
 371	"CLOSING",
 372	"ERROR",
 373};
 374
 375int
 376qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 377		    struct qed_rdma_qp *qp,
 378		    enum qed_iwarp_qp_state new_state, bool internal)
 379{
 380	enum qed_iwarp_qp_state prev_iw_state;
 381	bool modify_fw = false;
 382	int rc = 0;
 383
 384	/* modify QP can be called from upper-layer or as a result of async
 385	 * RST/FIN... therefore need to protect
 386	 */
 387	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 388	prev_iw_state = qp->iwarp_state;
 389
 390	if (prev_iw_state == new_state) {
 391		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 392		return 0;
 393	}
 394
 395	switch (prev_iw_state) {
 396	case QED_IWARP_QP_STATE_IDLE:
 397		switch (new_state) {
 398		case QED_IWARP_QP_STATE_RTS:
 399			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 400			break;
 401		case QED_IWARP_QP_STATE_ERROR:
 402			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 403			if (!internal)
 404				modify_fw = true;
 405			break;
 406		default:
 407			break;
 408		}
 409		break;
 410	case QED_IWARP_QP_STATE_RTS:
 411		switch (new_state) {
 412		case QED_IWARP_QP_STATE_CLOSING:
 413			if (!internal)
 414				modify_fw = true;
 415
 416			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 417			break;
 418		case QED_IWARP_QP_STATE_ERROR:
 419			if (!internal)
 420				modify_fw = true;
 421			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 422			break;
 423		default:
 424			break;
 425		}
 426		break;
 427	case QED_IWARP_QP_STATE_ERROR:
 428		switch (new_state) {
 429		case QED_IWARP_QP_STATE_IDLE:
 430
 431			qp->iwarp_state = new_state;
 432			break;
 433		case QED_IWARP_QP_STATE_CLOSING:
 434			/* could happen due to race... do nothing.... */
 435			break;
 436		default:
 437			rc = -EINVAL;
 438		}
 439		break;
 440	case QED_IWARP_QP_STATE_TERMINATE:
 441	case QED_IWARP_QP_STATE_CLOSING:
 442		qp->iwarp_state = new_state;
 443		break;
 444	default:
 445		break;
 446	}
 447
 448	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 449		   qp->icid,
 450		   iwarp_state_names[prev_iw_state],
 451		   iwarp_state_names[qp->iwarp_state],
 452		   internal ? "internal" : "");
 453
 454	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 455
 456	if (modify_fw)
 457		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 458
 459	return rc;
 460}
 461
 462int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 463{
 464	struct qed_sp_init_data init_data;
 465	struct qed_spq_entry *p_ent;
 466	int rc;
 467
 468	/* Get SPQ entry */
 469	memset(&init_data, 0, sizeof(init_data));
 470	init_data.cid = qp->icid;
 471	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 472	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 473
 474	rc = qed_sp_init_request(p_hwfn, &p_ent,
 475				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 476				 p_hwfn->p_rdma_info->proto, &init_data);
 477	if (rc)
 478		return rc;
 479
 480	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 481
 482	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 483
 484	return rc;
 485}
 486
 487static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 488				 struct qed_iwarp_ep *ep,
 489				 bool remove_from_active_list)
 490{
 491	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 492			  sizeof(*ep->ep_buffer_virt),
 493			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 494
 495	if (remove_from_active_list) {
 496		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 497		list_del(&ep->list_entry);
 498		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 499	}
 500
 501	if (ep->qp)
 502		ep->qp->ep = NULL;
 503
 504	kfree(ep);
 505}
 506
 507int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 508{
 509	struct qed_iwarp_ep *ep = qp->ep;
 510	int wait_count = 0;
 511	int rc = 0;
 512
 513	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 514		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 515					 QED_IWARP_QP_STATE_ERROR, false);
 516		if (rc)
 517			return rc;
 518	}
 519
 520	/* Make sure ep is closed before returning and freeing memory. */
 521	if (ep) {
 522		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
 523		       wait_count++ < 200)
 524			msleep(100);
 525
 526		if (ep->state != QED_IWARP_EP_CLOSED)
 527			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 528				  ep->state);
 529
 530		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 531	}
 532
 533	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 534
 535	if (qp->shared_queue)
 536		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 537				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 538				  qp->shared_queue, qp->shared_queue_phys_addr);
 539
 540	return rc;
 541}
 542
 543static int
 544qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 545{
 546	struct qed_iwarp_ep *ep;
 547	int rc;
 548
 549	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 550	if (!ep)
 551		return -ENOMEM;
 552
 553	ep->state = QED_IWARP_EP_INIT;
 554
 555	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 556						sizeof(*ep->ep_buffer_virt),
 557						&ep->ep_buffer_phys,
 558						GFP_KERNEL);
 559	if (!ep->ep_buffer_virt) {
 560		rc = -ENOMEM;
 561		goto err;
 562	}
 563
 564	ep->sig = QED_EP_SIG;
 565
 566	*ep_out = ep;
 567
 568	return 0;
 569
 570err:
 571	kfree(ep);
 572	return rc;
 573}
 574
 575static void
 576qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 577			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 578{
 579	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 580		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 581		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 582		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 583		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 584		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 585		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 586
 587	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 588		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 589			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 590			   p_tcp_ramrod->tcp.local_ip,
 591			   p_tcp_ramrod->tcp.local_port,
 592			   p_tcp_ramrod->tcp.remote_ip,
 593			   p_tcp_ramrod->tcp.remote_port,
 594			   p_tcp_ramrod->tcp.vlan_id);
 595	} else {
 596		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 597			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 598			   p_tcp_ramrod->tcp.local_ip,
 599			   p_tcp_ramrod->tcp.local_port,
 600			   p_tcp_ramrod->tcp.remote_ip,
 601			   p_tcp_ramrod->tcp.remote_port,
 602			   p_tcp_ramrod->tcp.vlan_id);
 603	}
 604
 605	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 606		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 607		   p_tcp_ramrod->tcp.flow_label,
 608		   p_tcp_ramrod->tcp.ttl,
 609		   p_tcp_ramrod->tcp.tos_or_tc,
 610		   p_tcp_ramrod->tcp.mss,
 611		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 612		   p_tcp_ramrod->tcp.connect_mode,
 613		   p_tcp_ramrod->tcp.flags);
 614
 615	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 616		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 617		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 618		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 619}
 620
 621static int
 622qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 623{
 624	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 625	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 626	struct tcp_offload_params_opt2 *tcp;
 627	struct qed_sp_init_data init_data;
 628	struct qed_spq_entry *p_ent;
 629	dma_addr_t async_output_phys;
 630	dma_addr_t in_pdata_phys;
 631	u16 physical_q;
 632	u16 flags = 0;
 633	u8 tcp_flags;
 634	int rc;
 635	int i;
 636
 637	memset(&init_data, 0, sizeof(init_data));
 638	init_data.cid = ep->tcp_cid;
 639	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 640	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 641		init_data.comp_mode = QED_SPQ_MODE_CB;
 642	else
 643		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 644
 645	rc = qed_sp_init_request(p_hwfn, &p_ent,
 646				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 647				 PROTOCOLID_IWARP, &init_data);
 648	if (rc)
 649		return rc;
 650
 651	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 652
 653	in_pdata_phys = ep->ep_buffer_phys +
 654			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 655	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 656		       in_pdata_phys);
 657
 658	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 659	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 660
 661	async_output_phys = ep->ep_buffer_phys +
 662			    offsetof(struct qed_iwarp_ep_memory, async_output);
 663	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 664		       async_output_phys);
 665
 666	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 667	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 668
 669	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 670	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 671	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 672	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 673	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 674
 675	tcp = &p_tcp_ramrod->tcp;
 676	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 677			    &tcp->remote_mac_addr_mid,
 678			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 679	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 680			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 681
 682	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 683
 684	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 685
 686	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 687		  !!(tcp_flags & QED_IWARP_TS_EN));
 688
 689	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 690		  !!(tcp_flags & QED_IWARP_DA_EN));
 691
 692	tcp->flags = cpu_to_le16(flags);
 693	tcp->ip_version = ep->cm_info.ip_version;
 694
 695	for (i = 0; i < 4; i++) {
 696		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 697		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 698	}
 699
 700	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 701	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 702	tcp->mss = cpu_to_le16(ep->mss);
 703	tcp->flow_label = 0;
 704	tcp->ttl = 0x40;
 705	tcp->tos_or_tc = 0;
 706
 707	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
 708	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
 709	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 710	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
 711	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);
 712
 713	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 714	tcp->connect_mode = ep->connect_mode;
 715
 716	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 717		tcp->syn_ip_payload_length =
 718			cpu_to_le16(ep->syn_ip_payload_length);
 719		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 720		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 721	}
 722
 723	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 724
 725	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 726
 727	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 728		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 729
 730	return rc;
 731}
 732
 733static void
 734qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 735{
 736	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 737	struct qed_iwarp_cm_event_params params;
 738	struct mpa_v2_hdr *mpa_v2;
 739	union async_output *async_data;
 740	u16 mpa_ord, mpa_ird;
 741	u8 mpa_hdr_size = 0;
 742	u16 ulp_data_len;
 743	u8 mpa_rev;
 744
 745	async_data = &ep->ep_buffer_virt->async_output;
 746
 747	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 748	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 749		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 750		   async_data->mpa_request.ulp_data_len,
 751		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 752
 753	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 754		/* Read ord/ird values from private data buffer */
 755		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 756		mpa_hdr_size = sizeof(*mpa_v2);
 757
 758		mpa_ord = ntohs(mpa_v2->ord);
 759		mpa_ird = ntohs(mpa_v2->ird);
 760
 761		/* Temporarily store the incoming ord/ird request in cm_info;
 762		 * it is replaced with the negotiated value during accept
 763		 */
 764		ep->cm_info.ord = (u8)min_t(u16,
 765					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 766					    QED_IWARP_ORD_DEFAULT);
 767
 768		ep->cm_info.ird = (u8)min_t(u16,
 769					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 770					    QED_IWARP_IRD_DEFAULT);
 771
 772		/* Peer2Peer negotiation */
 773		ep->rtr_type = MPA_RTR_TYPE_NONE;
 774		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 775			if (mpa_ord & MPA_V2_WRITE_RTR)
 776				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 777
 778			if (mpa_ord & MPA_V2_READ_RTR)
 779				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 780
 781			if (mpa_ird & MPA_V2_SEND_RTR)
 782				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 783
 784			ep->rtr_type &= iwarp_info->rtr_type;
 785
 786			/* if we're left with no match send our capabilities */
 787			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 788				ep->rtr_type = iwarp_info->rtr_type;
 789		}
 790
 791		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 792	} else {
 793		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 794		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 795		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 796	}
 797
 798	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 799		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 800		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 801		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 802
 803	/* Strip mpa v2 hdr from private data before sending to upper layer */
 804	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 805
 806	ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
 807	ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;
 808
 809	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 810	params.cm_info = &ep->cm_info;
 811	params.ep_context = ep;
 812	params.status = 0;
 813
 814	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 815	ep->event_cb(ep->cb_context, &params);
 816}
 817
 818static int
 819qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 820{
 821	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 822	struct mpa_outgoing_params *common;
 823	struct qed_iwarp_info *iwarp_info;
 824	struct qed_sp_init_data init_data;
 825	dma_addr_t async_output_phys;
 826	struct qed_spq_entry *p_ent;
 827	dma_addr_t out_pdata_phys;
 828	dma_addr_t in_pdata_phys;
 829	struct qed_rdma_qp *qp;
 830	bool reject;
 831	u32 val;
 832	int rc;
 833
 834	if (!ep)
 835		return -EINVAL;
 836
 837	qp = ep->qp;
 838	reject = !qp;
 839
 840	memset(&init_data, 0, sizeof(init_data));
 841	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 842	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 843
 844	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 845		init_data.comp_mode = QED_SPQ_MODE_CB;
 846	else
 847		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 848
 849	rc = qed_sp_init_request(p_hwfn, &p_ent,
 850				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 851				 PROTOCOLID_IWARP, &init_data);
 852	if (rc)
 853		return rc;
 854
 855	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 856	common = &p_mpa_ramrod->common;
 857
 858	out_pdata_phys = ep->ep_buffer_phys +
 859			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 860	DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);
 861
 862	val = ep->cm_info.private_data_len;
 863	common->outgoing_ulp_buffer.len = cpu_to_le16(val);
 864	common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 865
 866	common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
 867	common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);
 868
 869	val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 870	p_mpa_ramrod->tcp_cid = cpu_to_le32(val);
 871
 872	in_pdata_phys = ep->ep_buffer_phys +
 873			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 874	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 875	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 876		       in_pdata_phys);
 877	p_mpa_ramrod->incoming_ulp_buffer.len =
 878	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 879	async_output_phys = ep->ep_buffer_phys +
 880			    offsetof(struct qed_iwarp_ep_memory, async_output);
 881	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 882		       async_output_phys);
 883	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 884	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 885
 886	if (!reject) {
 887		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 888			       qp->shared_queue_phys_addr);
 889		p_mpa_ramrod->stats_counter_id =
 890		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 891	} else {
 892		common->reject = 1;
 893	}
 894
 895	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 896	p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
 897	p_mpa_ramrod->mode = ep->mpa_rev;
 898	SET_FIELD(p_mpa_ramrod->rtr_pref,
 899		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 900
 901	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 902	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 903	if (!reject)
 904		ep->cid = qp->icid;	/* Now they're migrated. */
 905
 906	DP_VERBOSE(p_hwfn,
 907		   QED_MSG_RDMA,
 908		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 909		   reject ? 0xffff : qp->icid,
 910		   ep->tcp_cid,
 911		   rc,
 912		   ep->cm_info.ird,
 913		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 914	return rc;
 915}
 916
 917static void
 918qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 919{
 920	ep->state = QED_IWARP_EP_INIT;
 921	if (ep->qp)
 922		ep->qp->ep = NULL;
 923	ep->qp = NULL;
 924	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 925
 926	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
 927		/* We don't care about the return code, it's ok if tcp_cid
 928		 * remains invalid...in this case we'll defer allocation
 929		 */
 930		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 931	}
 932	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 933
 934	list_move_tail(&ep->list_entry,
 935		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 936
 937	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 938}
 939
 940static void
 941qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 942{
 943	struct mpa_v2_hdr *mpa_v2_params;
 944	union async_output *async_data;
 945	u16 mpa_ird, mpa_ord;
 946	u8 mpa_data_size = 0;
 947	u16 ulp_data_len;
 948
 949	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 950		mpa_v2_params =
 951			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 952		mpa_data_size = sizeof(*mpa_v2_params);
 953		mpa_ird = ntohs(mpa_v2_params->ird);
 954		mpa_ord = ntohs(mpa_v2_params->ord);
 955
 956		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 957		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 958	}
 959
 960	async_data = &ep->ep_buffer_virt->async_output;
 961	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 962
 963	ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
 964	ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
 965}
 966
 967static void
 968qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 969{
 970	struct qed_iwarp_cm_event_params params;
 971
 972	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 973		DP_NOTICE(p_hwfn,
 974			  "MPA reply event not expected on passive side!\n");
 975		return;
 976	}
 977
 978	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 979
 980	qed_iwarp_parse_private_data(p_hwfn, ep);
 981
 982	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 983		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 984		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 985
 986	params.cm_info = &ep->cm_info;
 987	params.ep_context = ep;
 988	params.status = 0;
 989
 990	ep->mpa_reply_processed = true;
 991
 992	ep->event_cb(ep->cb_context, &params);
 993}
 994
 995#define QED_IWARP_CONNECT_MODE_STRING(ep) \
 996	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
 997
 998/* Called as a result of the event:
 999 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1000 */
1001static void
1002qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1003		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1004{
1005	struct qed_iwarp_cm_event_params params;
1006
1007	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1008		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1009	else
1010		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1011
1012	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1013		qed_iwarp_parse_private_data(p_hwfn, ep);
1014
1015	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1016		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1017		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1018
1019	params.cm_info = &ep->cm_info;
1020
1021	params.ep_context = ep;
1022
1023	switch (fw_return_code) {
1024	case RDMA_RETURN_OK:
1025		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1026		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1027		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1028		ep->state = QED_IWARP_EP_ESTABLISHED;
1029		params.status = 0;
1030		break;
1031	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1032		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1033			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1034		params.status = -EBUSY;
1035		break;
1036	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1037		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1038			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1039		params.status = -ECONNREFUSED;
1040		break;
1041	case IWARP_CONN_ERROR_MPA_RST:
1042		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1043			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1044			  ep->tcp_cid);
1045		params.status = -ECONNRESET;
1046		break;
1047	case IWARP_CONN_ERROR_MPA_FIN:
1048		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1049			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1050		params.status = -ECONNREFUSED;
1051		break;
1052	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1053		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1054			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1055		params.status = -ECONNREFUSED;
1056		break;
1057	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1058		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1059			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1060		params.status = -ECONNREFUSED;
1061		break;
1062	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1063		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1064			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1065		params.status = -ECONNREFUSED;
1066		break;
1067	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1068		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1069			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1070		params.status = -ECONNREFUSED;
1071		break;
1072	case IWARP_CONN_ERROR_MPA_TERMINATE:
1073		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1074			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1075		params.status = -ECONNREFUSED;
1076		break;
1077	default:
1078		params.status = -ECONNRESET;
1079		break;
1080	}
1081
1082	if (fw_return_code != RDMA_RETURN_OK)
1083		/* paired with READ_ONCE in destroy_qp */
1084		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
1085
1086	ep->event_cb(ep->cb_context, &params);
1087
1088	/* on passive side, if there is no associated QP (REJECT) we need to
1089	 * return the ep to the pool (in the regular case we add an element
1090	 * in accept instead of this one).
1091	 * In both cases we need to remove it from the ep_list.
1092	 */
1093	if (fw_return_code != RDMA_RETURN_OK) {
1094		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1095		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1096		    (!ep->qp)) {	/* Rejected */
1097			qed_iwarp_return_ep(p_hwfn, ep);
1098		} else {
1099			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1100			list_del(&ep->list_entry);
1101			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1102		}
1103	}
1104}
1105
1106static void
1107qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1108			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1109{
1110	struct mpa_v2_hdr *mpa_v2_params;
1111	u16 mpa_ird, mpa_ord;
1112
1113	*mpa_data_size = 0;
1114	if (MPA_REV2(ep->mpa_rev)) {
1115		mpa_v2_params =
1116		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1117		*mpa_data_size = sizeof(*mpa_v2_params);
1118
1119		mpa_ird = (u16)ep->cm_info.ird;
1120		mpa_ord = (u16)ep->cm_info.ord;
1121
1122		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1123			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1124
1125			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1126				mpa_ird |= MPA_V2_SEND_RTR;
1127
1128			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1129				mpa_ord |= MPA_V2_WRITE_RTR;
1130
1131			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1132				mpa_ord |= MPA_V2_READ_RTR;
1133		}
1134
1135		mpa_v2_params->ird = htons(mpa_ird);
1136		mpa_v2_params->ord = htons(mpa_ord);
1137
1138		DP_VERBOSE(p_hwfn,
1139			   QED_MSG_RDMA,
1140			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1141			   mpa_v2_params->ird,
1142			   mpa_v2_params->ord,
1143			   *((u32 *)mpa_v2_params),
1144			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1145			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1146			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1147			   !!(mpa_ird & MPA_V2_SEND_RTR),
1148			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1149			   !!(mpa_ord & MPA_V2_READ_RTR));
1150	}
1151}
1152
1153int qed_iwarp_connect(void *rdma_cxt,
1154		      struct qed_iwarp_connect_in *iparams,
1155		      struct qed_iwarp_connect_out *oparams)
1156{
1157	struct qed_hwfn *p_hwfn = rdma_cxt;
1158	struct qed_iwarp_info *iwarp_info;
1159	struct qed_iwarp_ep *ep;
1160	u8 mpa_data_size = 0;
1161	u32 cid;
1162	int rc;
1163
1164	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1165	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1166		DP_NOTICE(p_hwfn,
1167			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1168			  iparams->qp->icid, iparams->cm_info.ord,
1169			  iparams->cm_info.ird);
1170
1171		return -EINVAL;
1172	}
1173
1174	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1175
1176	/* Allocate ep object */
1177	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1178	if (rc)
1179		return rc;
1180
1181	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1182	if (rc)
1183		goto err;
1184
1185	ep->tcp_cid = cid;
1186
1187	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1188	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1189	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1190
1191	ep->qp = iparams->qp;
1192	ep->qp->ep = ep;
1193	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1194	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1195	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1196
1197	ep->cm_info.ord = iparams->cm_info.ord;
1198	ep->cm_info.ird = iparams->cm_info.ird;
1199
1200	ep->rtr_type = iwarp_info->rtr_type;
1201	if (!iwarp_info->peer2peer)
1202		ep->rtr_type = MPA_RTR_TYPE_NONE;
1203
1204	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1205		ep->cm_info.ord = 1;
1206
1207	ep->mpa_rev = iwarp_info->mpa_rev;
1208
1209	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1210
1211	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1212	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1213				       mpa_data_size;
1214
1215	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1216	       iparams->cm_info.private_data,
1217	       iparams->cm_info.private_data_len);
1218
1219	ep->mss = iparams->mss;
1220	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1221
1222	ep->event_cb = iparams->event_cb;
1223	ep->cb_context = iparams->cb_context;
1224	ep->connect_mode = TCP_CONNECT_ACTIVE;
1225
1226	oparams->ep_context = ep;
1227
1228	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1229
1230	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1231		   iparams->qp->icid, ep->tcp_cid, rc);
1232
1233	if (rc) {
1234		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1235		goto err;
1236	}
1237
1238	return rc;
1239err:
1240	qed_iwarp_cid_cleaned(p_hwfn, cid);
1241
1242	return rc;
1243}
1244
1245static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1246{
1247	struct qed_iwarp_ep *ep = NULL;
1248	int rc;
1249
1250	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1251
1252	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1253		DP_ERR(p_hwfn, "Ep list is empty\n");
1254		goto out;
1255	}
1256
1257	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1258			      struct qed_iwarp_ep, list_entry);
1259
1260	/* in some cases we could have failed allocating a tcp cid when added
1261	 * from accept / failure... retry now; this is not the common case.
1262	 */
1263	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1264		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1265
1266		/* if we fail we could look for another entry with a valid
1267		 * tcp_cid, but since we don't expect to reach this anyway
1268		 * it's not worth the handling
1269		 */
1270		if (rc) {
1271			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1272			ep = NULL;
1273			goto out;
1274		}
1275	}
1276
1277	list_del(&ep->list_entry);
1278
1279out:
1280	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1281	return ep;
1282}
1283
1284#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1285#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1286
1287/* This function waits for all the bits of a bmap to be cleared, as long as
1288 * there is progress ( i.e. the number of bits left to be cleared decreases )
1289 * the function continues.
1290 */
1291static int
1292qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1293{
1294	int prev_weight = 0;
1295	int wait_count = 0;
1296	int weight = 0;
1297
1298	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1299	prev_weight = weight;
1300
1301	while (weight) {
1302		/* If the HW device is during recovery, all resources are
1303		 * immediately reset without receiving a per-cid indication
1304		 * from HW. In this case we don't expect the cid_map to be
1305		 * cleared.
1306		 */
1307		if (p_hwfn->cdev->recov_in_prog)
1308			return 0;
1309
1310		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1311
1312		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1313
1314		if (prev_weight == weight) {
1315			wait_count++;
1316		} else {
1317			prev_weight = weight;
1318			wait_count = 0;
1319		}
1320
1321		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1322			DP_NOTICE(p_hwfn,
1323				  "%s bitmap wait timed out (%d cids pending)\n",
1324				  bmap->name, weight);
1325			return -EBUSY;
1326		}
1327	}
1328	return 0;
1329}
1330
1331static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1332{
1333	int rc;
1334	int i;
1335
1336	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1337					    &p_hwfn->p_rdma_info->tcp_cid_map);
1338	if (rc)
1339		return rc;
1340
1341	/* Now free the tcp cids from the main cid map */
1342	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1343		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1344
1345	/* Now wait for all cids to be completed */
1346	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1347					      &p_hwfn->p_rdma_info->cid_map);
1348}
1349
1350static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1351{
1352	struct qed_iwarp_ep *ep;
1353
1354	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1355		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1356
1357		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1358				      struct qed_iwarp_ep, list_entry);
1359
1360		if (!ep) {
1361			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362			break;
1363		}
1364		list_del(&ep->list_entry);
1365
1366		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1367
1368		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1369			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1370
1371		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1372	}
1373}
1374
1375static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1376{
1377	struct qed_iwarp_ep *ep;
1378	int rc = 0;
1379	int count;
1380	u32 cid;
1381	int i;
1382
1383	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1384	for (i = 0; i < count; i++) {
1385		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1386		if (rc)
1387			return rc;
1388
1389		/* During initialization we allocate from the main pool,
1390		 * afterwards we allocate only from the tcp_cid.
1391		 */
1392		if (init) {
1393			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1394			if (rc)
1395				goto err;
1396			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1397		} else {
1398			/* We don't care about the return code, it's ok if
1399			 * tcp_cid remains invalid...in this case we'll
1400			 * defer allocation
1401			 */
1402			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1403		}
1404
1405		ep->tcp_cid = cid;
1406
1407		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1408		list_add_tail(&ep->list_entry,
1409			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1410		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1411	}
1412
1413	return rc;
1414
1415err:
1416	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1417
1418	return rc;
1419}
1420
1421int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1422{
1423	int rc;
1424
1425	/* Allocate bitmap for tcp cid. These are used by passive side
1426	 * to ensure it can allocate a tcp cid during dpc that was
1427	 * pre-acquired and doesn't require dynamic allocation of ilt
1428	 */
1429	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1430				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1431	if (rc) {
1432		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1433			   "Failed to allocate tcp cid, rc = %d\n", rc);
1434		return rc;
1435	}
1436
1437	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1438	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1439
1440	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1441	if (rc)
1442		return rc;
1443
1444	return qed_ooo_alloc(p_hwfn);
1445}
1446
1447void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1448{
1449	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1450
1451	qed_ooo_free(p_hwfn);
1452	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1453	kfree(iwarp_info->mpa_bufs);
1454	kfree(iwarp_info->partial_fpdus);
1455	kfree(iwarp_info->mpa_intermediate_buf);
1456}
1457
1458int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1459{
1460	struct qed_hwfn *p_hwfn = rdma_cxt;
1461	struct qed_iwarp_ep *ep;
1462	u8 mpa_data_size = 0;
1463	int rc;
1464
1465	ep = iparams->ep_context;
1466	if (!ep) {
1467		DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
1468		return -EINVAL;
1469	}
1470
1471	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1472		   iparams->qp->icid, ep->tcp_cid);
1473
1474	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1475	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1476		DP_VERBOSE(p_hwfn,
1477			   QED_MSG_RDMA,
1478			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1479			   iparams->qp->icid,
1480			   ep->tcp_cid, iparams->ord, iparams->ird);
1481		return -EINVAL;
1482	}
1483
1484	qed_iwarp_prealloc_ep(p_hwfn, false);
1485
1486	ep->cb_context = iparams->cb_context;
1487	ep->qp = iparams->qp;
1488	ep->qp->ep = ep;
1489
1490	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1491		/* Negotiate ord/ird: if the upper layer requested an ord larger than
1492		 * ird advertised by remote, we need to decrease our ord
1493		 */
1494		if (iparams->ord > ep->cm_info.ird)
1495			iparams->ord = ep->cm_info.ird;
1496
1497		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1498		    (iparams->ird == 0))
1499			iparams->ird = 1;
1500	}
1501
1502	/* Update cm_info ord/ird to be negotiated values */
1503	ep->cm_info.ord = iparams->ord;
1504	ep->cm_info.ird = iparams->ird;
1505
1506	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1507
1508	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1509	ep->cm_info.private_data_len = iparams->private_data_len +
1510				       mpa_data_size;
1511
1512	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1513	       iparams->private_data, iparams->private_data_len);
1514
1515	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1516	if (rc)
1517		qed_iwarp_modify_qp(p_hwfn,
1518				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1519
1520	return rc;
1521}
1522
1523int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1524{
1525	struct qed_hwfn *p_hwfn = rdma_cxt;
1526	struct qed_iwarp_ep *ep;
1527	u8 mpa_data_size = 0;
1528
1529	ep = iparams->ep_context;
1530	if (!ep) {
1531		DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
1532		return -EINVAL;
1533	}
1534
1535	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1536
1537	ep->cb_context = iparams->cb_context;
1538	ep->qp = NULL;
1539
1540	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1541
1542	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1543	ep->cm_info.private_data_len = iparams->private_data_len +
1544				       mpa_data_size;
1545
1546	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1547	       iparams->private_data, iparams->private_data_len);
1548
1549	return qed_iwarp_mpa_offload(p_hwfn, ep);
1550}
1551
1552static void
1553qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1554			struct qed_iwarp_cm_info *cm_info)
1555{
1556	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1557		   cm_info->ip_version);
1558
1559	if (cm_info->ip_version == QED_TCP_IPV4)
1560		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1561			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1562			   cm_info->remote_ip, cm_info->remote_port,
1563			   cm_info->local_ip, cm_info->local_port,
1564			   cm_info->vlan);
1565	else
1566		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1567			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1568			   cm_info->remote_ip, cm_info->remote_port,
1569			   cm_info->local_ip, cm_info->local_port,
1570			   cm_info->vlan);
1571
1572	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1573		   "private_data_len = %x ord = %d, ird = %d\n",
1574		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1575}
1576
1577static int
1578qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1579		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1580{
1581	int rc;
1582
1583	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1584				    (u16)buf->buff_size, buf, 1);
1585	if (rc) {
1586		DP_NOTICE(p_hwfn,
1587			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1588			  rc, handle);
1589		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1590				  buf->data, buf->data_phys_addr);
1591		kfree(buf);
1592	}
1593
1594	return rc;
1595}
1596
1597static bool
1598qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1599{
1600	struct qed_iwarp_ep *ep = NULL;
1601	bool found = false;
1602
1603	list_for_each_entry(ep,
1604			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1605			    list_entry) {
1606		if ((ep->cm_info.local_port == cm_info->local_port) &&
1607		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1608		    (ep->cm_info.vlan == cm_info->vlan) &&
1609		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1610			    sizeof(cm_info->local_ip)) &&
1611		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1612			    sizeof(cm_info->remote_ip))) {
1613			found = true;
1614			break;
1615		}
1616	}
1617
1618	if (found) {
1619		DP_NOTICE(p_hwfn,
1620			  "SYN received on active connection - dropping\n");
1621		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1622
1623		return true;
1624	}
1625
1626	return false;
1627}
1628
1629static struct qed_iwarp_listener *
1630qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1631		       struct qed_iwarp_cm_info *cm_info)
1632{
1633	struct qed_iwarp_listener *listener = NULL;
1634	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1635	bool found = false;
1636
1637	list_for_each_entry(listener,
1638			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1639			    list_entry) {
1640		if (listener->port == cm_info->local_port) {
1641			if (!memcmp(listener->ip_addr,
1642				    ip_zero, sizeof(ip_zero))) {
1643				found = true;
1644				break;
1645			}
1646
1647			if (!memcmp(listener->ip_addr,
1648				    cm_info->local_ip,
1649				    sizeof(cm_info->local_ip)) &&
1650			    (listener->vlan == cm_info->vlan)) {
1651				found = true;
1652				break;
1653			}
1654		}
1655	}
1656
1657	if (found) {
1658		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1659			   listener);
1660		return listener;
1661	}
1662
1663	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1664	return NULL;
1665}
1666
1667static int
1668qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1669		       struct qed_iwarp_cm_info *cm_info,
1670		       void *buf,
1671		       u8 *remote_mac_addr,
1672		       u8 *local_mac_addr,
1673		       int *payload_len, int *tcp_start_offset)
1674{
1675	struct vlan_ethhdr *vethh;
1676	bool vlan_valid = false;
1677	struct ipv6hdr *ip6h;
1678	struct ethhdr *ethh;
1679	struct tcphdr *tcph;
1680	struct iphdr *iph;
1681	int eth_hlen;
1682	int ip_hlen;
1683	int eth_type;
1684	int i;
1685
1686	ethh = buf;
1687	eth_type = ntohs(ethh->h_proto);
1688	if (eth_type == ETH_P_8021Q) {
1689		vlan_valid = true;
1690		vethh = (struct vlan_ethhdr *)ethh;
1691		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1692		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1693	}
1694
1695	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1696
1697	if (!ether_addr_equal(ethh->h_dest,
1698			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1699		DP_VERBOSE(p_hwfn,
1700			   QED_MSG_RDMA,
1701			   "Got unexpected mac %pM instead of %pM\n",
1702			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1703		return -EINVAL;
1704	}
1705
1706	ether_addr_copy(remote_mac_addr, ethh->h_source);
1707	ether_addr_copy(local_mac_addr, ethh->h_dest);
1708
1709	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1710		   eth_type, ethh->h_source);
1711
1712	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1713		   eth_hlen, ethh->h_dest);
1714
1715	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1716
1717	if (eth_type == ETH_P_IP) {
1718		if (iph->protocol != IPPROTO_TCP) {
1719			DP_NOTICE(p_hwfn,
1720				  "Unexpected ip protocol on ll2 %x\n",
1721				  iph->protocol);
1722			return -EINVAL;
1723		}
1724
1725		cm_info->local_ip[0] = ntohl(iph->daddr);
1726		cm_info->remote_ip[0] = ntohl(iph->saddr);
1727		cm_info->ip_version = QED_TCP_IPV4;
1728
1729		ip_hlen = (iph->ihl) * sizeof(u32);
1730		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1731	} else if (eth_type == ETH_P_IPV6) {
1732		ip6h = (struct ipv6hdr *)iph;
1733
1734		if (ip6h->nexthdr != IPPROTO_TCP) {
1735			DP_NOTICE(p_hwfn,
1736				  "Unexpected ip protocol on ll2 %x\n",
1737				  ip6h->nexthdr);
1738			return -EINVAL;
1739		}
1740
1741		for (i = 0; i < 4; i++) {
1742			cm_info->local_ip[i] =
1743			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1744			cm_info->remote_ip[i] =
1745			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1746		}
1747		cm_info->ip_version = QED_TCP_IPV6;
1748
1749		ip_hlen = sizeof(*ip6h);
1750		*payload_len = ntohs(ip6h->payload_len);
1751	} else {
1752		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1753		return -EINVAL;
1754	}
1755
1756	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1757
1758	if (!tcph->syn) {
1759		DP_NOTICE(p_hwfn,
1760			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1761			  iph->ihl, tcph->source, tcph->dest);
1762		return -EINVAL;
1763	}
1764
1765	cm_info->local_port = ntohs(tcph->dest);
1766	cm_info->remote_port = ntohs(tcph->source);
1767
1768	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1769
1770	*tcp_start_offset = eth_hlen + ip_hlen;
1771
1772	return 0;
1773}
1774
1775static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1776						      u16 cid)
1777{
1778	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1779	struct qed_iwarp_fpdu *partial_fpdu;
1780	u32 idx;
1781
1782	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1783	if (idx >= iwarp_info->max_num_partial_fpdus) {
1784		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1785		       iwarp_info->max_num_partial_fpdus);
1786		return NULL;
1787	}
1788
1789	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1790
1791	return partial_fpdu;
1792}
1793
1794enum qed_iwarp_mpa_pkt_type {
1795	QED_IWARP_MPA_PKT_PACKED,
1796	QED_IWARP_MPA_PKT_PARTIAL,
1797	QED_IWARP_MPA_PKT_UNALIGNED
1798};
1799
1800#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1801#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1802#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1803
1804/* Pad to multiple of 4 */
1805#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1806#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1807	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1808					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1809					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
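/*
 * Editor's note - worked example (illustrative values, not from the source):
 * an MPA frame with mpa_len = 29 has a 2-byte length field prepended,
 * 29 + 2 = 31 bytes, which pads to a multiple of 4 -> 32, and the 4-byte
 * CRC32 digest then gives QED_IWARP_FPDU_LEN_WITH_PAD(29) = 36 wire bytes.
 */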
1810
1811/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
1812#define QED_IWARP_MAX_BDS_PER_FPDU 3
1813
1814static const char * const pkt_type_str[] = {
1815	"QED_IWARP_MPA_PKT_PACKED",
1816	"QED_IWARP_MPA_PKT_PARTIAL",
1817	"QED_IWARP_MPA_PKT_UNALIGNED"
1818};
1819
1820static int
1821qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1822		      struct qed_iwarp_fpdu *fpdu,
1823		      struct qed_iwarp_ll2_buff *buf);
1824
1825static enum qed_iwarp_mpa_pkt_type
1826qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1827		       struct qed_iwarp_fpdu *fpdu,
1828		       u16 tcp_payload_len, u8 *mpa_data)
1829{
1830	enum qed_iwarp_mpa_pkt_type pkt_type;
1831	u16 mpa_len;
1832
1833	if (fpdu->incomplete_bytes) {
1834		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1835		goto out;
1836	}
1837
1838	/* special case of one byte remaining...
1839	 * lower byte will be read in the next packet
1840	 */
1841	if (tcp_payload_len == 1) {
1842		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1843		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1844		goto out;
1845	}
1846
1847	mpa_len = ntohs(*(__force __be16 *)mpa_data);
1848	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1849
1850	if (fpdu->fpdu_length <= tcp_payload_len)
1851		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1852	else
1853		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1854
1855out:
1856	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1857		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1858		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1859
1860	return pkt_type;
1861}
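/*
 * Editor's note - classification example (illustrative): with a TCP payload
 * of 100 bytes and a computed fpdu_length of 120, the FPDU does not fit in
 * the segment and the packet is classified PARTIAL; had fpdu_length been 100
 * or less it would be PACKED. A later segment that continues an FPDU with
 * incomplete_bytes still pending is classified UNALIGNED.
 */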
1862
1863static void
1864qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1865		    struct qed_iwarp_fpdu *fpdu,
1866		    struct unaligned_opaque_data *pkt_data,
1867		    u16 tcp_payload_size, u8 placement_offset)
1868{
1869	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1870
1871	fpdu->mpa_buf = buf;
1872	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874	fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
1875	fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
1876
1877	if (tcp_payload_size == 1)
1878		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879	else if (tcp_payload_size < fpdu->fpdu_length)
1880		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881	else
1882		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1883
1884	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885}
1886
1887static int
1888qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1889		 struct qed_iwarp_fpdu *fpdu,
1890		 struct unaligned_opaque_data *pkt_data,
1891		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1892{
1893	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1894	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1895	int rc;
1896
1897	/* need to copy the data from the partial packet stored in fpdu
1898	 * to the new buf; for this we also need to move the data currently
1899	 * placed on the buf. The assumption is that the buffer is big enough
1900	 * since fpdu_length <= mss. We use an intermediate buffer since
1901	 * we may need to copy the new data to an overlapping location.
1902	 */
1903	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1904		DP_ERR(p_hwfn,
1905		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1906		       buf->buff_size, fpdu->mpa_frag_len,
1907		       tcp_payload_size, fpdu->incomplete_bytes);
1908		return -EINVAL;
1909	}
1910
1911	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1912		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1913		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1914		   (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1915
1916	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1917	memcpy(tmp_buf + fpdu->mpa_frag_len,
1918	       (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1919
1920	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1921	if (rc)
1922		return rc;
1923
1924	/* If we managed to post the buffer, copy the data to the new buffer;
1925	 * otherwise this will occur in the next round...
1926	 */
1927	memcpy((u8 *)(buf->data), tmp_buf,
1928	       fpdu->mpa_frag_len + tcp_payload_size);
1929
1930	fpdu->mpa_buf = buf;
1931	/* fpdu->pkt_hdr remains as is */
1932	/* fpdu->mpa_frag is overridden with new buf */
1933	fpdu->mpa_frag = buf->data_phys_addr;
1934	fpdu->mpa_frag_virt = buf->data;
1935	fpdu->mpa_frag_len += tcp_payload_size;
1936
1937	fpdu->incomplete_bytes -= tcp_payload_size;
1938
1939	DP_VERBOSE(p_hwfn,
1940		   QED_MSG_RDMA,
1941		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1942		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1943		   fpdu->incomplete_bytes);
1944
1945	return 0;
1946}
1947
1948static void
1949qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1950			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1951{
1952	u16 mpa_len;
1953
1954	/* Update incomplete packets if needed */
1955	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1956		/* Missing lower byte is now available */
1957		mpa_len = fpdu->fpdu_length | *mpa_data;
1958		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1959		/* one byte of hdr */
1960		fpdu->mpa_frag_len = 1;
1961		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1962		DP_VERBOSE(p_hwfn,
1963			   QED_MSG_RDMA,
1964			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1965			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1966	}
1967}
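/*
 * Editor's note - worked example (illustrative): if the previous segment
 * ended with the single byte 0x01, qed_iwarp_mpa_classify() stored
 * fpdu_length = 0x0100; when the next segment starts with 0x2c, the code
 * above recovers mpa_len = 0x012c (300), so fpdu_length becomes
 * QED_IWARP_FPDU_LEN_WITH_PAD(300) = 308, mpa_frag_len = 1 and
 * incomplete_bytes = 307.
 */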
1968
1969#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1970	(GET_FIELD((_curr_pkt)->flags,	   \
1971		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1972
1973/* This function is used to recycle a buffer using the ll2 drop option. It
1974 * uses the mechanism to ensure that all buffers posted to tx before this one
1975 * were completed. The buffer sent here will be sent as a cookie in the tx
1976 * completion function and can then be reposted to rx chain when done. The flow
1977 * that requires this is the flow where an FPDU splits over more than 3 tcp
1978 * segments. In this case the driver needs to re-post an rx buffer instead of
1979 * the one received, but the driver can't simply repost the buffer it copied
1980 * from, as that buffer may originally have held a packed FPDU that is still
1981 * partially posted to FW. The driver needs to ensure FW is done with it.
1982 */
1983static int
1984qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1985		      struct qed_iwarp_fpdu *fpdu,
1986		      struct qed_iwarp_ll2_buff *buf)
1987{
1988	struct qed_ll2_tx_pkt_info tx_pkt;
1989	u8 ll2_handle;
1990	int rc;
1991
1992	memset(&tx_pkt, 0, sizeof(tx_pkt));
1993	tx_pkt.num_of_bds = 1;
1994	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1995	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1996	tx_pkt.first_frag = fpdu->pkt_hdr;
1997	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1998	buf->piggy_buf = NULL;
1999	tx_pkt.cookie = buf;
2000
2001	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2002
2003	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2004	if (rc)
2005		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2006			   "Can't drop packet rc=%d\n", rc);
2007
2008	DP_VERBOSE(p_hwfn,
2009		   QED_MSG_RDMA,
2010		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2011		   (unsigned long int)tx_pkt.first_frag,
2012		   tx_pkt.first_frag_len, buf, rc);
2013
2014	return rc;
2015}
2016
2017static int
2018qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2019{
2020	struct qed_ll2_tx_pkt_info tx_pkt;
2021	u8 ll2_handle;
2022	int rc;
2023
2024	memset(&tx_pkt, 0, sizeof(tx_pkt));
2025	tx_pkt.num_of_bds = 1;
2026	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2027	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2028
2029	tx_pkt.first_frag = fpdu->pkt_hdr;
2030	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2031	tx_pkt.enable_ip_cksum = true;
2032	tx_pkt.enable_l4_cksum = true;
2033	tx_pkt.calc_ip_len = true;
2034	/* vlan overload with enum iwarp_ll2_tx_queues */
2035	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2036
2037	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2038
2039	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2040	if (rc)
2041		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2042			   "Can't send right edge rc=%d\n", rc);
2043	DP_VERBOSE(p_hwfn,
2044		   QED_MSG_RDMA,
2045		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2046		   tx_pkt.num_of_bds,
2047		   (unsigned long int)tx_pkt.first_frag,
2048		   tx_pkt.first_frag_len, rc);
2049
2050	return rc;
2051}
2052
2053static int
2054qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2055		    struct qed_iwarp_fpdu *fpdu,
2056		    struct unaligned_opaque_data *curr_pkt,
2057		    struct qed_iwarp_ll2_buff *buf,
2058		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2059{
2060	struct qed_ll2_tx_pkt_info tx_pkt;
2061	u16 first_mpa_offset;
2062	u8 ll2_handle;
2063	int rc;
2064
2065	memset(&tx_pkt, 0, sizeof(tx_pkt));
2066
2067	/* An unaligned packet means it's split over two tcp segments. So the
2068	 * complete packet requires 3 bds, one for the header, one for the
2069	 * part of the fpdu of the first tcp segment, and the last fragment
2070	 * will point to the remainder of the fpdu. A packed pdu, requires only
2071	 * will point to the remainder of the fpdu. A packed pdu requires only
2072	 */
2073	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2074	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2075	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2076
2077	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2078	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2079	    tcp_payload_size <= fpdu->fpdu_length)
2080		tx_pkt.cookie = fpdu->mpa_buf;
2081
2082	tx_pkt.first_frag = fpdu->pkt_hdr;
2083	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2084	tx_pkt.enable_ip_cksum = true;
2085	tx_pkt.enable_l4_cksum = true;
2086	tx_pkt.calc_ip_len = true;
2087	/* vlan overload with enum iwarp_ll2_tx_queues */
2088	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2089
2090	/* special case of unaligned packet and not packed, need to send
2091	 * both buffers as cookie to release.
2092	 */
2093	if (tcp_payload_size == fpdu->incomplete_bytes)
2094		fpdu->mpa_buf->piggy_buf = buf;
2095
2096	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2097
2098	/* Set first fragment to header */
2099	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100	if (rc)
2101		goto out;
2102
2103	/* Set second fragment to first part of packet */
2104	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2105					       fpdu->mpa_frag,
2106					       fpdu->mpa_frag_len);
2107	if (rc)
2108		goto out;
2109
2110	if (!fpdu->incomplete_bytes)
2111		goto out;
2112
2113	first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2114
2115	/* Set third fragment to second part of the packet */
2116	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2117					       ll2_handle,
2118					       buf->data_phys_addr +
2119					       first_mpa_offset,
2120					       fpdu->incomplete_bytes);
2121out:
2122	DP_VERBOSE(p_hwfn,
2123		   QED_MSG_RDMA,
2124		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2125		   tx_pkt.num_of_bds,
2126		   tx_pkt.first_frag_len,
2127		   fpdu->mpa_frag_len,
2128		   fpdu->incomplete_bytes, rc);
2129
2130	return rc;
2131}
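/*
 * Editor's note - BD layout example (illustrative): for an UNALIGNED FPDU the
 * three fragments set up above are (1) the saved packet header at
 * fpdu->pkt_hdr, (2) the first part of the FPDU at fpdu->mpa_frag and (3) the
 * remaining incomplete_bytes taken from the newly arrived buffer at
 * first_mpa_offset; a PACKED FPDU only needs the first two.
 */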
2132
2133static void
2134qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2135		       struct unaligned_opaque_data *curr_pkt,
2136		       u32 opaque_data0, u32 opaque_data1)
2137{
2138	u64 opaque_data;
2139
2140	opaque_data = HILO_64(cpu_to_le32(opaque_data1),
2141			      cpu_to_le32(opaque_data0));
2142	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2143
2144	le16_add_cpu(&curr_pkt->first_mpa_offset,
2145		     curr_pkt->tcp_payload_offset);
2146}
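/*
 * Editor's note: the two 32-bit opaque words from the Rx completion are
 * combined into a u64 and reinterpreted as struct unaligned_opaque_data;
 * first_mpa_offset is then advanced by tcp_payload_offset so that it points
 * directly at the MPA data inside the buffer.
 */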
2147
2148/* This function is called when an unaligned or incomplete MPA packet arrives;
2149 * the driver needs to align the packet, perhaps using previous data, and send
2150 * it down to FW once it is aligned.
2151 */
2152static int
2153qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2154			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2155{
2156	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2157	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2158	enum qed_iwarp_mpa_pkt_type pkt_type;
2159	struct qed_iwarp_fpdu *fpdu;
2160	u16 cid, first_mpa_offset;
2161	int rc = -EINVAL;
2162	u8 *mpa_data;
2163
2164	cid = le32_to_cpu(curr_pkt->cid);
2165
2166	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2167	if (!fpdu) { /* something corrupt with cid, post rx back */
2168		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2169		       cid);
2170		goto err;
2171	}
2172
2173	do {
2174		first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2175		mpa_data = ((u8 *)(buf->data) + first_mpa_offset);
2176
2177		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2178						  mpa_buf->tcp_payload_len,
2179						  mpa_data);
2180
2181		switch (pkt_type) {
2182		case QED_IWARP_MPA_PKT_PARTIAL:
2183			qed_iwarp_init_fpdu(buf, fpdu,
2184					    curr_pkt,
2185					    mpa_buf->tcp_payload_len,
2186					    mpa_buf->placement_offset);
2187
2188			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2189				mpa_buf->tcp_payload_len = 0;
2190				break;
2191			}
2192
2193			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2194
2195			if (rc) {
2196				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2197					   "Can't send FPDU:reset rc=%d\n", rc);
2198				memset(fpdu, 0, sizeof(*fpdu));
2199				break;
2200			}
2201
2202			mpa_buf->tcp_payload_len = 0;
2203			break;
2204		case QED_IWARP_MPA_PKT_PACKED:
2205			qed_iwarp_init_fpdu(buf, fpdu,
2206					    curr_pkt,
2207					    mpa_buf->tcp_payload_len,
2208					    mpa_buf->placement_offset);
2209
2210			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2211						 mpa_buf->tcp_payload_len,
2212						 pkt_type);
2213			if (rc) {
2214				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2215					   "Can't send FPDU:reset rc=%d\n", rc);
2216				memset(fpdu, 0, sizeof(*fpdu));
2217				break;
2218			}
2219
2220			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2221			le16_add_cpu(&curr_pkt->first_mpa_offset,
2222				     fpdu->fpdu_length);
2223			break;
2224		case QED_IWARP_MPA_PKT_UNALIGNED:
2225			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2226			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2227				/* special handling of fpdu split over more
2228				 * than 2 segments
2229				 */
2230				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2231					rc = qed_iwarp_win_right_edge(p_hwfn,
2232								      fpdu);
2233					/* packet will be re-processed later */
2234					if (rc)
2235						return rc;
2236				}
2237
2238				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2239						      buf,
2240						      mpa_buf->tcp_payload_len);
2241				if (rc) /* packet will be re-processed later */
2242					return rc;
2243
2244				mpa_buf->tcp_payload_len = 0;
2245				break;
2246			}
2247
2248			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2249						 mpa_buf->tcp_payload_len,
2250						 pkt_type);
2251			if (rc) {
2252				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2253					   "Can't send FPDU:delay rc=%d\n", rc);
2254				/* don't reset fpdu -> we need it for next
2255				 * classify
2256				 */
2257				break;
2258			}
2259
2260			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2261			le16_add_cpu(&curr_pkt->first_mpa_offset,
2262				     fpdu->incomplete_bytes);
2263
2264			/* The framed PDU was sent - no more incomplete bytes */
2265			fpdu->incomplete_bytes = 0;
2266			break;
2267		}
2268	} while (mpa_buf->tcp_payload_len && !rc);
2269
2270	return rc;
2271
2272err:
2273	qed_iwarp_ll2_post_rx(p_hwfn,
2274			      buf,
2275			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2276	return rc;
2277}
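/*
 * Editor's note - example flow (illustrative): a segment carrying several
 * packed FPDUs back to back takes the PACKED branch repeatedly; each
 * iteration advances first_mpa_offset by fpdu_length and shrinks
 * tcp_payload_len accordingly, so the do/while loop above consumes the FPDUs
 * one at a time until the payload is exhausted or an error stops processing.
 */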
2278
2279static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2280{
2281	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2282	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2283	int rc;
2284
2285	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2286		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2287					   struct qed_iwarp_ll2_mpa_buf,
2288					   list_entry);
2289
2290		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2291
2292		/* busy means break and continue processing later, don't
2293		 * remove the buf from the pending list.
2294		 */
2295		if (rc == -EBUSY)
2296			break;
2297
2298		list_move_tail(&mpa_buf->list_entry,
2299			       &iwarp_info->mpa_buf_list);
2300
2301		if (rc) {	/* different error, don't continue */
2302			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2303			break;
2304		}
2305	}
2306}
2307
2308static void
2309qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2310{
2311	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2312	struct qed_iwarp_info *iwarp_info;
2313	struct qed_hwfn *p_hwfn = cxt;
2314	u16 first_mpa_offset;
2315
2316	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2317	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2318				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2319	if (!mpa_buf) {
2320		DP_ERR(p_hwfn, "No free mpa buf\n");
2321		goto err;
2322	}
2323
2324	list_del(&mpa_buf->list_entry);
2325	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2326			       data->opaque_data_0, data->opaque_data_1);
2327
2328	first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
2329
2330	DP_VERBOSE(p_hwfn,
2331		   QED_MSG_RDMA,
2332		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2333		   data->length.packet_length, first_mpa_offset,
2334		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2335		   mpa_buf->data.cid);
2336
2337	mpa_buf->ll2_buf = data->cookie;
2338	mpa_buf->tcp_payload_len = data->length.packet_length -
2339				   first_mpa_offset;
2340
2341	first_mpa_offset += data->u.placement_offset;
2342	mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
2343	mpa_buf->placement_offset = data->u.placement_offset;
2344
2345	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2346
2347	qed_iwarp_process_pending_pkts(p_hwfn);
2348	return;
2349err:
2350	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2351			      iwarp_info->ll2_mpa_handle);
2352}
2353
2354static void
2355qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2356{
2357	struct qed_iwarp_ll2_buff *buf = data->cookie;
2358	struct qed_iwarp_listener *listener;
2359	struct qed_ll2_tx_pkt_info tx_pkt;
2360	struct qed_iwarp_cm_info cm_info;
2361	struct qed_hwfn *p_hwfn = cxt;
2362	u8 remote_mac_addr[ETH_ALEN];
2363	u8 local_mac_addr[ETH_ALEN];
2364	struct qed_iwarp_ep *ep;
2365	int tcp_start_offset;
2366	u8 ll2_syn_handle;
2367	int payload_len;
2368	u32 hdr_size;
2369	int rc;
2370
2371	memset(&cm_info, 0, sizeof(cm_info));
2372	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2373
2374	/* Check if packet was received with errors... */
2375	if (data->err_flags) {
2376		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2377			  data->err_flags);
2378		goto err;
2379	}
2380
2381	if (GET_FIELD(data->parse_flags,
2382		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2383	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2384		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2385		goto err;
2386	}
2387
2388	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2389				    data->u.placement_offset, remote_mac_addr,
2390				    local_mac_addr, &payload_len,
2391				    &tcp_start_offset);
2392	if (rc)
2393		goto err;
2394
2395	/* Check if there is a listener for this 4-tuple+vlan */
2396	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2397	if (!listener) {
2398		DP_VERBOSE(p_hwfn,
2399			   QED_MSG_RDMA,
2400			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2401			   data->parse_flags, data->length.packet_length);
2402
2403		memset(&tx_pkt, 0, sizeof(tx_pkt));
2404		tx_pkt.num_of_bds = 1;
2405		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2406		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2407		tx_pkt.first_frag = buf->data_phys_addr +
2408				    data->u.placement_offset;
2409		tx_pkt.first_frag_len = data->length.packet_length;
2410		tx_pkt.cookie = buf;
2411
2412		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2413					       &tx_pkt, true);
2414
2415		if (rc) {
2416			DP_NOTICE(p_hwfn,
2417				  "Can't post SYN back to chip rc=%d\n", rc);
2418			goto err;
2419		}
2420		return;
2421	}
2422
2423	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2424	/* There may be an open ep on this connection if this is a syn
2425	 * retransmit... need to make sure there isn't...
2426	 */
2427	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2428		goto err;
2429
2430	ep = qed_iwarp_get_free_ep(p_hwfn);
2431	if (!ep)
2432		goto err;
2433
2434	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2435	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2436	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2437
2438	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2439	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2440
2441	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2442
2443	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
2444	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2445	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2446
2447	ep->event_cb = listener->event_cb;
2448	ep->cb_context = listener->cb_context;
2449	ep->connect_mode = TCP_CONNECT_PASSIVE;
2450
2451	ep->syn = buf;
2452	ep->syn_ip_payload_length = (u16)payload_len;
2453	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2454			   tcp_start_offset;
2455
2456	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2457	if (rc) {
2458		qed_iwarp_return_ep(p_hwfn, ep);
2459		goto err;
2460	}
2461
2462	return;
2463err:
2464	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2465}
2466
2467static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2468				     void *cookie, dma_addr_t rx_buf_addr,
2469				     bool b_last_packet)
2470{
2471	struct qed_iwarp_ll2_buff *buffer = cookie;
2472	struct qed_hwfn *p_hwfn = cxt;
2473
2474	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2475			  buffer->data, buffer->data_phys_addr);
2476	kfree(buffer);
2477}
2478
2479static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2480				      void *cookie, dma_addr_t first_frag_addr,
2481				      bool b_last_fragment, bool b_last_packet)
2482{
2483	struct qed_iwarp_ll2_buff *buffer = cookie;
2484	struct qed_iwarp_ll2_buff *piggy;
2485	struct qed_hwfn *p_hwfn = cxt;
2486
2487	if (!buffer)		/* can happen in packed mpa unaligned... */
2488		return;
2489
2490	/* this was originally an rx packet, post it back */
2491	piggy = buffer->piggy_buf;
2492	if (piggy) {
2493		buffer->piggy_buf = NULL;
2494		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2495	}
2496
2497	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2498
2499	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2500		qed_iwarp_process_pending_pkts(p_hwfn);
2501
2502	return;
2503}
2504
2505static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2506				     void *cookie, dma_addr_t first_frag_addr,
2507				     bool b_last_fragment, bool b_last_packet)
2508{
2509	struct qed_iwarp_ll2_buff *buffer = cookie;
2510	struct qed_hwfn *p_hwfn = cxt;
2511
2512	if (!buffer)
2513		return;
2514
2515	if (buffer->piggy_buf) {
2516		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2517				  buffer->piggy_buf->buff_size,
2518				  buffer->piggy_buf->data,
2519				  buffer->piggy_buf->data_phys_addr);
2520
2521		kfree(buffer->piggy_buf);
2522	}
2523
2524	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2525			  buffer->data, buffer->data_phys_addr);
2526
2527	kfree(buffer);
2528}
2529
2530/* The only slowpath for iwarp ll2 is unalign flush. When this completion
2531 * is received, the driver needs to reset the FPDU.
2532 */
2533static void
2534qed_iwarp_ll2_slowpath(void *cxt,
2535		       u8 connection_handle,
2536		       u32 opaque_data_0, u32 opaque_data_1)
2537{
2538	struct unaligned_opaque_data unalign_data;
2539	struct qed_hwfn *p_hwfn = cxt;
2540	struct qed_iwarp_fpdu *fpdu;
2541	u32 cid;
2542
2543	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2544			       opaque_data_0, opaque_data_1);
2545
2546	cid = le32_to_cpu(unalign_data.cid);
2547
2548	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
2549
2550	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2551	if (fpdu)
2552		memset(fpdu, 0, sizeof(*fpdu));
2553}
2554
2555static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2556{
2557	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2558	int rc = 0;
2559
2560	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2561		rc = qed_ll2_terminate_connection(p_hwfn,
2562						  iwarp_info->ll2_syn_handle);
2563		if (rc)
2564			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2565
2566		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2567		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2568	}
2569
2570	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2571		rc = qed_ll2_terminate_connection(p_hwfn,
2572						  iwarp_info->ll2_ooo_handle);
2573		if (rc)
2574			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2575
2576		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2577		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2578	}
2579
2580	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2581		rc = qed_ll2_terminate_connection(p_hwfn,
2582						  iwarp_info->ll2_mpa_handle);
2583		if (rc)
2584			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2585
2586		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2587		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2588	}
2589
2590	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2591				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2592
2593	return rc;
2594}
2595
2596static int
2597qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2598			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2599{
2600	struct qed_iwarp_ll2_buff *buffer;
2601	int rc = 0;
2602	int i;
2603
2604	for (i = 0; i < num_rx_bufs; i++) {
2605		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2606		if (!buffer) {
2607			rc = -ENOMEM;
2608			break;
2609		}
2610
2611		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2612						  buff_size,
2613						  &buffer->data_phys_addr,
2614						  GFP_KERNEL);
2615		if (!buffer->data) {
2616			kfree(buffer);
2617			rc = -ENOMEM;
2618			break;
2619		}
2620
2621		buffer->buff_size = buff_size;
2622		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2623		if (rc)
2624			/* buffers will be deallocated by qed_ll2 */
2625			break;
2626	}
2627	return rc;
2628}
2629
2630#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2631	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2632		ETH_CACHE_LINE_SIZE)
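/*
 * Editor's note - worked example (illustrative, assuming a 1500 byte MTU and
 * a 64 byte ETH_CACHE_LINE_SIZE): 1500 + 14 (ETH_HLEN) + 8 (2 * VLAN_HLEN) +
 * 2 + 64 = 1588, aligned up to the cache line size -> 1600 bytes per buffer.
 */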
2633
2634static int
2635qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2636		    struct qed_rdma_start_in_params *params,
2637		    u32 rcv_wnd_size)
2638{
2639	struct qed_iwarp_info *iwarp_info;
2640	struct qed_ll2_acquire_data data;
2641	struct qed_ll2_cbs cbs;
2642	u32 buff_size;
2643	u16 n_ooo_bufs;
2644	int rc = 0;
2645	int i;
2646
2647	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2648	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2649	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2650	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2651
2652	iwarp_info->max_mtu = params->max_mtu;
2653
2654	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2655
2656	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2657	if (rc)
2658		return rc;
2659
2660	/* Start SYN connection */
2661	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2662	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2663	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2664	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2665	cbs.slowpath_cb = NULL;
2666	cbs.cookie = p_hwfn;
2667
2668	memset(&data, 0, sizeof(data));
2669	data.input.conn_type = QED_LL2_TYPE_IWARP;
2670	/* SYN will use ctx based queues */
2671	data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
2672	data.input.mtu = params->max_mtu;
2673	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2674	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2675	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2676	data.input.tx_tc = PKT_LB_TC;
2677	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2678	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2679	data.cbs = &cbs;
2680
2681	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2682	if (rc) {
2683		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2684		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2685		return rc;
2686	}
2687
2688	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2689	if (rc) {
2690		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2691		goto err;
2692	}
2693
2694	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2695	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2696					 QED_IWARP_LL2_SYN_RX_SIZE,
2697					 buff_size,
2698					 iwarp_info->ll2_syn_handle);
2699	if (rc)
2700		goto err;
2701
2702	/* Start OOO connection */
2703	data.input.conn_type = QED_LL2_TYPE_OOO;
2704	/* OOO/unaligned will use legacy ll2 queues (ram based) */
2705	data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
2706	data.input.mtu = params->max_mtu;
2707
2708	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2709		     iwarp_info->max_mtu;
2710	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2711
2712	data.input.rx_num_desc = n_ooo_bufs;
2713	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2714
2715	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2716	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2717	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2718
2719	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2720	if (rc)
2721		goto err;
2722
2723	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2724	if (rc)
2725		goto err;
2726
2727	/* Start Unaligned MPA connection */
2728	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2729	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2730
2731	memset(&data, 0, sizeof(data));
2732	data.input.conn_type = QED_LL2_TYPE_IWARP;
2733	data.input.mtu = params->max_mtu;
2734	/* FW requires that once a packet arrives OOO, it must have at
2735	 * least 2 rx buffers available on the unaligned connection
2736	 * for handling the case that it is a partial fpdu.
2737	 */
2738	data.input.rx_num_desc = n_ooo_bufs * 2;
2739	data.input.tx_num_desc = data.input.rx_num_desc;
2740	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2741	data.input.tx_tc = PKT_LB_TC;
2742	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2743	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2744	data.input.secondary_queue = true;
2745	data.cbs = &cbs;
2746
2747	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2748	if (rc)
2749		goto err;
2750
2751	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2752	if (rc)
2753		goto err;
2754
2755	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2756					 data.input.rx_num_desc,
2757					 buff_size,
2758					 iwarp_info->ll2_mpa_handle);
2759	if (rc)
2760		goto err;
2761
2762	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2763					    sizeof(*iwarp_info->partial_fpdus),
2764					    GFP_KERNEL);
2765	if (!iwarp_info->partial_fpdus) {
2766		rc = -ENOMEM;
2767		goto err;
2768	}
2769
2770	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2771
2772	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2773	if (!iwarp_info->mpa_intermediate_buf) {
2774		rc = -ENOMEM;
2775		goto err;
2776	}
2777
2778	/* The mpa_bufs array serves for pending RX packets received on the
2779	 * mpa ll2 that don't have room on the tx ring and require later
2780	 * processing. We can't fail on allocation of such a struct, therefore
2781	 * we allocate enough to take care of all rx packets.
2782	 */
2783	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2784				       sizeof(*iwarp_info->mpa_bufs),
2785				       GFP_KERNEL);
2786	if (!iwarp_info->mpa_bufs) {
2787		rc = -ENOMEM;
2788		goto err;
2789	}
2790
2791	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2792	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2793	for (i = 0; i < data.input.rx_num_desc; i++)
2794		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2795			      &iwarp_info->mpa_buf_list);
2796	return rc;
2797err:
2798	qed_iwarp_ll2_stop(p_hwfn);
2799
2800	return rc;
2801}
2802
2803static struct {
2804	u32 two_ports;
2805	u32 four_ports;
2806} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
2807	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
2808	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
2809};
2810
2811int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2812		    struct qed_rdma_start_in_params *params)
2813{
2814	struct qed_dev *cdev = p_hwfn->cdev;
2815	struct qed_iwarp_info *iwarp_info;
2816	enum chip_ids chip_id;
2817	u32 rcv_wnd_size;
2818
2819	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2820
2821	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2822
2823	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2824	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2825		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2826		qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2827
2828	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
2829	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2830	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2831	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
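	/*
	 * Editor's note - worked example (illustrative): on a 2-port BB device
	 * rcv_wnd_size starts at 200K (204800); ilog2(204800) = 17 and
	 * ilog2(QED_IWARP_RCV_WND_SIZE_MIN) = 15, so rcv_wnd_scale = 2 and the
	 * advertised rcv_wnd_size becomes 204800 >> 2 = 51200.
	 */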
2832	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2833	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2834
2835	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2836
2837	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2838				MPA_RTR_TYPE_ZERO_WRITE |
2839				MPA_RTR_TYPE_ZERO_READ;
2840
2841	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2842	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2843	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2844
2845	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2846				  qed_iwarp_async_event);
2847	qed_ooo_setup(p_hwfn);
2848
2849	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2850}
2851
2852int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
2853{
2854	int rc;
2855
2856	qed_iwarp_free_prealloc_ep(p_hwfn);
2857	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2858	if (rc)
2859		return rc;
2860
2861	return qed_iwarp_ll2_stop(p_hwfn);
2862}
2863
2864static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2865				  struct qed_iwarp_ep *ep,
2866				  u8 fw_return_code)
2867{
2868	struct qed_iwarp_cm_event_params params;
2869
2870	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2871
2872	params.event = QED_IWARP_EVENT_CLOSE;
2873	params.ep_context = ep;
2874	params.cm_info = &ep->cm_info;
2875	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2876			 0 : -ECONNRESET;
2877
2878	/* paired with READ_ONCE in destroy_qp */
2879	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2880
2881	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2882	list_del(&ep->list_entry);
2883	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2884
2885	ep->event_cb(ep->cb_context, &params);
2886}
2887
2888static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2889					 struct qed_iwarp_ep *ep,
2890					 int fw_ret_code)
2891{
2892	struct qed_iwarp_cm_event_params params;
2893	bool event_cb = false;
2894
2895	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2896		   ep->cid, fw_ret_code);
2897
2898	switch (fw_ret_code) {
2899	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2900		params.status = 0;
2901		params.event = QED_IWARP_EVENT_DISCONNECT;
2902		event_cb = true;
2903		break;
2904	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2905		params.status = -ECONNRESET;
2906		params.event = QED_IWARP_EVENT_DISCONNECT;
2907		event_cb = true;
2908		break;
2909	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2910		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2911		event_cb = true;
2912		break;
2913	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2914		params.event = QED_IWARP_EVENT_IRQ_FULL;
2915		event_cb = true;
2916		break;
2917	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2918		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2919		event_cb = true;
2920		break;
2921	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2922		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2923		event_cb = true;
2924		break;
2925	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2926		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2927		event_cb = true;
2928		break;
2929	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2930		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2931		event_cb = true;
2932		break;
2933	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2934		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2935		event_cb = true;
2936		break;
2937	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2938		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2939		event_cb = true;
2940		break;
2941	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2942		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2943		event_cb = true;
2944		break;
2945	default:
2946		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2947			   "Unhandled exception received...fw_ret_code=%d\n",
2948			   fw_ret_code);
2949		break;
2950	}
2951
2952	if (event_cb) {
2953		params.ep_context = ep;
2954		params.cm_info = &ep->cm_info;
2955		ep->event_cb(ep->cb_context, &params);
2956	}
2957}
2958
2959static void
2960qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2961				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2962{
2963	struct qed_iwarp_cm_event_params params;
2964
2965	memset(&params, 0, sizeof(params));
2966	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2967	params.ep_context = ep;
2968	params.cm_info = &ep->cm_info;
2969	/* paired with READ_ONCE in destroy_qp */
2970	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2971
2972	switch (fw_return_code) {
2973	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2974		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2975			   "%s(0x%x) TCP connect got invalid packet\n",
2976			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2977		params.status = -ECONNRESET;
2978		break;
2979	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2980		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2981			   "%s(0x%x) TCP Connection Reset\n",
2982			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2983		params.status = -ECONNRESET;
2984		break;
2985	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2986		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2987			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2988		params.status = -EBUSY;
2989		break;
2990	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2991		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2992			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2993		params.status = -ECONNREFUSED;
2994		break;
2995	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2996		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2997			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2998		params.status = -ECONNRESET;
2999		break;
3000	default:
3001		DP_ERR(p_hwfn,
3002		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
3003		       QED_IWARP_CONNECT_MODE_STRING(ep),
3004		       ep->tcp_cid, fw_return_code);
3005		params.status = -ECONNRESET;
3006		break;
3007	}
3008
3009	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3010		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
3011		qed_iwarp_return_ep(p_hwfn, ep);
3012	} else {
3013		ep->event_cb(ep->cb_context, &params);
3014		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3015		list_del(&ep->list_entry);
3016		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3017	}
3018}
3019
3020static void
3021qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
3022			   struct qed_iwarp_ep *ep, u8 fw_return_code)
3023{
3024	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3025
3026	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3027		/* Done with the SYN packet, post back to ll2 rx */
3028		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3029
3030		ep->syn = NULL;
3031
3032		/* If connect failed - upper layer doesn't know about it */
3033		if (fw_return_code == RDMA_RETURN_OK)
3034			qed_iwarp_mpa_received(p_hwfn, ep);
3035		else
3036			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3037							   fw_return_code);
3038	} else {
3039		if (fw_return_code == RDMA_RETURN_OK)
3040			qed_iwarp_mpa_offload(p_hwfn, ep);
3041		else
3042			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3043							   fw_return_code);
3044	}
3045}
3046
3047static inline bool
3048qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3049{
3050	if (!ep || (ep->sig != QED_EP_SIG)) {
3051		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3052		return false;
3053	}
3054
3055	return true;
3056}
3057
3058static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
3059				 __le16 echo, union event_ring_data *data,
3060				 u8 fw_return_code)
3061{
3062	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
3063	struct regpair *fw_handle = &data->rdma_data.async_handle;
3064	struct qed_iwarp_ep *ep = NULL;
3065	u16 srq_offset;
3066	u16 srq_id;
3067	u16 cid;
3068
3069	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3070						       fw_handle->lo);
3071
3072	switch (fw_event_code) {
3073	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3074		/* Async completion after TCP 3-way handshake */
3075		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3076			return -EINVAL;
3077		DP_VERBOSE(p_hwfn,
3078			   QED_MSG_RDMA,
3079			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3080			   ep->tcp_cid, fw_return_code);
3081		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3082		break;
3083	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3084		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3085			return -EINVAL;
3086		DP_VERBOSE(p_hwfn,
3087			   QED_MSG_RDMA,
3088			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3089			   ep->cid, fw_return_code);
3090		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3091		break;
3092	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3093		/* Async completion for Close Connection ramrod */
3094		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3095			return -EINVAL;
3096		DP_VERBOSE(p_hwfn,
3097			   QED_MSG_RDMA,
3098			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3099			   ep->cid, fw_return_code);
3100		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3101		break;
3102	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3103		/* Async event for active side only */
3104		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3105			return -EINVAL;
3106		DP_VERBOSE(p_hwfn,
3107			   QED_MSG_RDMA,
3108			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3109			   ep->cid, fw_return_code);
3110		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3111		break;
3112	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3113		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3114			return -EINVAL;
3115		DP_VERBOSE(p_hwfn,
3116			   QED_MSG_RDMA,
3117			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3118			   ep->cid, fw_return_code);
3119		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3120		break;
3121	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3122		cid = (u16)le32_to_cpu(fw_handle->lo);
3123		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3124			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3125		qed_iwarp_cid_cleaned(p_hwfn, cid);
3126
3127		break;
3128	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
3129		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
3130		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3131		/* FW assigns value that is no greater than u16 */
3132		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3133		events.affiliated_event(events.context,
3134					QED_IWARP_EVENT_SRQ_EMPTY,
3135					&srq_id);
3136		break;
3137	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
3138		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
3139		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3140		/* FW assigns value that is no greater than u16 */
3141		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3142		events.affiliated_event(events.context,
3143					QED_IWARP_EVENT_SRQ_LIMIT,
3144					&srq_id);
3145		break;
3146	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3147		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3148
3149		p_hwfn->p_rdma_info->events.affiliated_event(
3150			p_hwfn->p_rdma_info->events.context,
3151			QED_IWARP_EVENT_CQ_OVERFLOW,
3152			(void *)fw_handle);
3153		break;
3154	default:
3155		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3156		       fw_event_code);
3157		return -EINVAL;
3158	}
3159	return 0;
3160}
3161
3162int
3163qed_iwarp_create_listen(void *rdma_cxt,
3164			struct qed_iwarp_listen_in *iparams,
3165			struct qed_iwarp_listen_out *oparams)
3166{
3167	struct qed_hwfn *p_hwfn = rdma_cxt;
3168	struct qed_iwarp_listener *listener;
3169
3170	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3171	if (!listener)
3172		return -ENOMEM;
3173
3174	listener->ip_version = iparams->ip_version;
3175	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3176	listener->port = iparams->port;
3177	listener->vlan = iparams->vlan;
3178
3179	listener->event_cb = iparams->event_cb;
3180	listener->cb_context = iparams->cb_context;
3181	listener->max_backlog = iparams->max_backlog;
3182	oparams->handle = listener;
3183
3184	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3185	list_add_tail(&listener->list_entry,
3186		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3187	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3188
3189	DP_VERBOSE(p_hwfn,
3190		   QED_MSG_RDMA,
3191		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3192		   listener->event_cb,
3193		   listener,
3194		   listener->ip_addr[0],
3195		   listener->ip_addr[1],
3196		   listener->ip_addr[2],
3197		   listener->ip_addr[3], listener->port, listener->vlan);
3198
3199	return 0;
3200}
3201
3202int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3203{
3204	struct qed_iwarp_listener *listener = handle;
3205	struct qed_hwfn *p_hwfn = rdma_cxt;
3206
3207	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3208
3209	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3210	list_del(&listener->list_entry);
3211	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3212
3213	kfree(listener);
3214
3215	return 0;
3216}
3217
3218int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3219{
3220	struct qed_hwfn *p_hwfn = rdma_cxt;
3221	struct qed_sp_init_data init_data;
3222	struct qed_spq_entry *p_ent;
3223	struct qed_iwarp_ep *ep;
3224	struct qed_rdma_qp *qp;
3225	int rc;
3226
3227	ep = iparams->ep_context;
3228	if (!ep) {
3229		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
3230		return -EINVAL;
3231	}
3232
3233	qp = ep->qp;
3234
3235	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3236		   qp->icid, ep->tcp_cid);
3237
3238	memset(&init_data, 0, sizeof(init_data));
3239	init_data.cid = qp->icid;
3240	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3241	init_data.comp_mode = QED_SPQ_MODE_CB;
3242
3243	rc = qed_sp_init_request(p_hwfn, &p_ent,
3244				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3245				 PROTOCOLID_IWARP, &init_data);
3246
3247	if (rc)
3248		return rc;
3249
3250	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3251
3252	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3253
3254	return rc;
3255}
3256
3257void
3258qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3259		   struct qed_rdma_query_qp_out_params *out_params)
3260{
3261	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3262}
v5.4
 
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
 
  32#include <linux/if_ether.h>
  33#include <linux/if_vlan.h>
  34#include <linux/ip.h>
  35#include <linux/ipv6.h>
  36#include <linux/spinlock.h>
  37#include <linux/tcp.h>
  38#include "qed_cxt.h"
  39#include "qed_hw.h"
  40#include "qed_ll2.h"
  41#include "qed_rdma.h"
  42#include "qed_reg_addr.h"
  43#include "qed_sp.h"
  44#include "qed_ooo.h"
  45
  46#define QED_IWARP_ORD_DEFAULT		32
  47#define QED_IWARP_IRD_DEFAULT		32
  48#define QED_IWARP_MAX_FW_MSS		4120
  49
  50#define QED_EP_SIG 0xecabcdef
  51
  52struct mpa_v2_hdr {
  53	__be16 ird;
  54	__be16 ord;
  55};
  56
  57#define MPA_V2_PEER2PEER_MODEL  0x8000
  58#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  59#define MPA_V2_READ_RTR         0x4000	/* on ord */
  60#define MPA_V2_WRITE_RTR        0x8000
  61#define MPA_V2_IRD_ORD_MASK     0x3FFF
  62
  63#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  64
  65#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  66
  67#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
  68#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
  69#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
  70#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
  71
  72#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  73#define TIMESTAMP_HEADER_SIZE		(12)
  74#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  75
  76#define QED_IWARP_TS_EN			BIT(0)
  77#define QED_IWARP_DA_EN			BIT(1)
  78#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  79#define QED_IWARP_PARAM_P2P		(1)
  80
  81#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  82#define QED_IWARP_DEF_CWND_FACTOR	(4)
  83#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  84#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  85#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  86
  87static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
  88				 u8 fw_event_code, u16 echo,
  89				 union event_ring_data *data,
  90				 u8 fw_return_code);
  91
  92/* Override devinfo with iWARP specific values */
  93void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  94{
  95	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  96
  97	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  98	dev->max_qp = min_t(u32,
  99			    IWARP_MAX_QPS,
 100			    p_hwfn->p_rdma_info->num_qps) -
 101		      QED_IWARP_PREALLOC_CNT;
 102
 103	dev->max_cq = dev->max_qp;
 104
 105	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
 106	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
 107}
 108
 109void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 110{
 111	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
 112	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 113	p_hwfn->b_rdma_enabled_in_prs = true;
 114}
 115
 116/* We have two cid maps, one for tcp which should be used only from passive
 117 * syn processing and replacing a pre-allocated ep in the list. The second
 118 * for active tcp and for QPs.
 119 */
 120static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 121{
 122	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 123
 124	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 125
 126	if (cid < QED_IWARP_PREALLOC_CNT)
 127		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 128				    cid);
 129	else
 130		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 131
 132	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 133}
 134
 135void
 136qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 137			 struct iwarp_init_func_ramrod_data *p_ramrod)
 138{
 139	p_ramrod->iwarp.ll2_ooo_q_index =
 140		RESC_START(p_hwfn, QED_LL2_QUEUE) +
 141		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 142
 
 
 143	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 144
 145	return;
 146}
 147
 148static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 149{
 150	int rc;
 151
 152	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 153	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 154	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 155	if (rc) {
 156		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 157		return rc;
 158	}
 159	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 160
 161	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 162	if (rc)
 163		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 164
 165	return rc;
 166}
 167
 168static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 169{
 170	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 171
 172	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 173	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 174	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 175}
 176
 177/* This function allocates a cid for passive tcp (called from syn receive)
 178 * the reason it's separate from the regular cid allocation is because it
 179 * is assured that these cids already have ilt allocated. They are preallocated
 180 * to ensure that we won't need to allocate memory during syn processing
 181 */
 182static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 183{
 184	int rc;
 185
 186	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 187
 188	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 189				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 190
 191	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 192
 193	if (rc) {
 194		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 195			   "can't allocate iwarp tcp cid max-count=%d\n",
 196			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 197
 198		*cid = QED_IWARP_INVALID_TCP_CID;
 199		return rc;
 200	}
 201
 202	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 203					    p_hwfn->p_rdma_info->proto);
 204	return 0;
 205}
 206
 207int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 208			struct qed_rdma_qp *qp,
 209			struct qed_rdma_create_qp_out_params *out_params)
 210{
 211	struct iwarp_create_qp_ramrod_data *p_ramrod;
 212	struct qed_sp_init_data init_data;
 213	struct qed_spq_entry *p_ent;
 214	u16 physical_queue;
 215	u32 cid;
 216	int rc;
 217
 218	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 219					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 220					      &qp->shared_queue_phys_addr,
 221					      GFP_KERNEL);
 222	if (!qp->shared_queue)
 223		return -ENOMEM;
 224
 225	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 226	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 227	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 228	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 229	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 230	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 231	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 232	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 233
 234	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 235	if (rc)
 236		goto err1;
 237
 238	qp->icid = (u16)cid;
 239
 240	memset(&init_data, 0, sizeof(init_data));
 241	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 242	init_data.cid = qp->icid;
 243	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 244
 245	rc = qed_sp_init_request(p_hwfn, &p_ent,
 246				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 247				 PROTOCOLID_IWARP, &init_data);
 248	if (rc)
 249		goto err2;
 250
 251	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 252
 253	SET_FIELD(p_ramrod->flags,
 254		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 255		  qp->fmr_and_reserved_lkey);
 256
 257	SET_FIELD(p_ramrod->flags,
 258		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 259
 260	SET_FIELD(p_ramrod->flags,
 261		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 262		  qp->incoming_rdma_read_en);
 263
 264	SET_FIELD(p_ramrod->flags,
 265		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 266		  qp->incoming_rdma_write_en);
 267
 268	SET_FIELD(p_ramrod->flags,
 269		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 270		  qp->incoming_atomic_en);
 271
 272	SET_FIELD(p_ramrod->flags,
 273		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 274
 275	p_ramrod->pd = qp->pd;
 276	p_ramrod->sq_num_pages = qp->sq_num_pages;
 277	p_ramrod->rq_num_pages = qp->rq_num_pages;
 278
 279	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 280	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 281	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 282	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 283
 284	p_ramrod->cq_cid_for_sq =
 285	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 286	p_ramrod->cq_cid_for_rq =
 287	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 288
 289	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 290
 291	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 292	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 293	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 294	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 295
 296	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 297	if (rc)
 298		goto err2;
 299
 300	return rc;
 301
 302err2:
 303	qed_iwarp_cid_cleaned(p_hwfn, cid);
 304err1:
 305	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 306			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 307			  qp->shared_queue, qp->shared_queue_phys_addr);
 308
 309	return rc;
 310}
 311
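/* Tell the FW about a QP state change by posting a MODIFY_QP ramrod. Only
 * transitions to CLOSING or ERROR are ever sent to the FW from here.
 */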
 312static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 313{
 314	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 315	struct qed_sp_init_data init_data;
 316	struct qed_spq_entry *p_ent;
 317	int rc;
 318
 319	/* Get SPQ entry */
 320	memset(&init_data, 0, sizeof(init_data));
 321	init_data.cid = qp->icid;
 322	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 323	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 324
 325	rc = qed_sp_init_request(p_hwfn, &p_ent,
 326				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 327				 p_hwfn->p_rdma_info->proto, &init_data);
 328	if (rc)
 329		return rc;
 330
 331	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 332	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
 333		  0x1);
 334	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 335		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 336	else
 337		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 338
 339	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 340
  341	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc=%d\n", qp->icid, rc);
 342
 343	return rc;
 344}
 345
 346enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 347{
 348	switch (state) {
 349	case QED_ROCE_QP_STATE_RESET:
 350	case QED_ROCE_QP_STATE_INIT:
 351	case QED_ROCE_QP_STATE_RTR:
 352		return QED_IWARP_QP_STATE_IDLE;
 353	case QED_ROCE_QP_STATE_RTS:
 354		return QED_IWARP_QP_STATE_RTS;
 355	case QED_ROCE_QP_STATE_SQD:
 356		return QED_IWARP_QP_STATE_CLOSING;
 357	case QED_ROCE_QP_STATE_ERR:
 358		return QED_IWARP_QP_STATE_ERROR;
 359	case QED_ROCE_QP_STATE_SQE:
 360		return QED_IWARP_QP_STATE_TERMINATE;
 361	default:
 362		return QED_IWARP_QP_STATE_ERROR;
 363	}
 364}
 365
 366static enum qed_roce_qp_state
 367qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 368{
 369	switch (state) {
 370	case QED_IWARP_QP_STATE_IDLE:
 371		return QED_ROCE_QP_STATE_INIT;
 372	case QED_IWARP_QP_STATE_RTS:
 373		return QED_ROCE_QP_STATE_RTS;
 374	case QED_IWARP_QP_STATE_TERMINATE:
 375		return QED_ROCE_QP_STATE_SQE;
 376	case QED_IWARP_QP_STATE_CLOSING:
 377		return QED_ROCE_QP_STATE_SQD;
 378	case QED_IWARP_QP_STATE_ERROR:
 379		return QED_ROCE_QP_STATE_ERR;
 380	default:
 381		return QED_ROCE_QP_STATE_ERR;
 382	}
 383}
 384
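/* Indexed directly by enum qed_iwarp_qp_state in the debug print of
 * qed_iwarp_modify_qp() below, so the order must match the enum.
 */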
 385static const char * const iwarp_state_names[] = {
 386	"IDLE",
 387	"RTS",
 388	"TERMINATE",
 389	"CLOSING",
 390	"ERROR",
 391};
 392
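/* Software state machine for the iWARP QP. Transitions handled below:
 * IDLE -> RTS, ERROR; RTS -> CLOSING, ERROR; ERROR -> IDLE (ERROR -> CLOSING
 * is silently ignored as a possible race); TERMINATE/CLOSING -> any new state.
 * The FW is only notified (via qed_iwarp_modify_fw) for externally-requested
 * transitions to CLOSING or ERROR.
 */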
 393int
 394qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 395		    struct qed_rdma_qp *qp,
 396		    enum qed_iwarp_qp_state new_state, bool internal)
 397{
 398	enum qed_iwarp_qp_state prev_iw_state;
 399	bool modify_fw = false;
 400	int rc = 0;
 401
  402	/* Modify QP can be called from the upper layer or as a result of an
  403	 * async RST/FIN... therefore we need to protect it.
  404	 */
 405	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 406	prev_iw_state = qp->iwarp_state;
 407
 408	if (prev_iw_state == new_state) {
 409		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 410		return 0;
 411	}
 412
 413	switch (prev_iw_state) {
 414	case QED_IWARP_QP_STATE_IDLE:
 415		switch (new_state) {
 416		case QED_IWARP_QP_STATE_RTS:
 417			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 418			break;
 419		case QED_IWARP_QP_STATE_ERROR:
 420			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 421			if (!internal)
 422				modify_fw = true;
 423			break;
 424		default:
 425			break;
 426		}
 427		break;
 428	case QED_IWARP_QP_STATE_RTS:
 429		switch (new_state) {
 430		case QED_IWARP_QP_STATE_CLOSING:
 431			if (!internal)
 432				modify_fw = true;
 433
 434			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 435			break;
 436		case QED_IWARP_QP_STATE_ERROR:
 437			if (!internal)
 438				modify_fw = true;
 439			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 440			break;
 441		default:
 442			break;
 443		}
 444		break;
 445	case QED_IWARP_QP_STATE_ERROR:
 446		switch (new_state) {
 447		case QED_IWARP_QP_STATE_IDLE:
 448
 449			qp->iwarp_state = new_state;
 450			break;
 451		case QED_IWARP_QP_STATE_CLOSING:
 452			/* could happen due to race... do nothing.... */
 453			break;
 454		default:
 455			rc = -EINVAL;
 456		}
 457		break;
 458	case QED_IWARP_QP_STATE_TERMINATE:
 459	case QED_IWARP_QP_STATE_CLOSING:
 460		qp->iwarp_state = new_state;
 461		break;
 462	default:
 463		break;
 464	}
 465
 466	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 467		   qp->icid,
 468		   iwarp_state_names[prev_iw_state],
 469		   iwarp_state_names[qp->iwarp_state],
 470		   internal ? "internal" : "");
 471
 472	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 473
 474	if (modify_fw)
 475		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 476
 477	return rc;
 478}
 479
 480int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 481{
 482	struct qed_sp_init_data init_data;
 483	struct qed_spq_entry *p_ent;
 484	int rc;
 485
 486	/* Get SPQ entry */
 487	memset(&init_data, 0, sizeof(init_data));
 488	init_data.cid = qp->icid;
 489	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 490	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 491
 492	rc = qed_sp_init_request(p_hwfn, &p_ent,
 493				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 494				 p_hwfn->p_rdma_info->proto, &init_data);
 495	if (rc)
 496		return rc;
 497
 498	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 499
 500	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 501
 502	return rc;
 503}
 504
 505static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 506				 struct qed_iwarp_ep *ep,
 507				 bool remove_from_active_list)
 508{
 509	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 510			  sizeof(*ep->ep_buffer_virt),
 511			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 512
 513	if (remove_from_active_list) {
 514		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 515		list_del(&ep->list_entry);
 516		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 517	}
 518
 519	if (ep->qp)
 520		ep->qp->ep = NULL;
 521
 522	kfree(ep);
 523}
 524
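/* Destroy an iWARP QP: move it to ERROR if needed, wait for the associated
 * ep (if any) to reach CLOSED, up to 200 * 100 ms, i.e. roughly 20 seconds,
 * then post the DESTROY_QP ramrod and free the shared queue page.
 */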
 525int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 526{
 527	struct qed_iwarp_ep *ep = qp->ep;
 528	int wait_count = 0;
 529	int rc = 0;
 530
 531	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 532		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 533					 QED_IWARP_QP_STATE_ERROR, false);
 534		if (rc)
 535			return rc;
 536	}
 537
 538	/* Make sure ep is closed before returning and freeing memory. */
 539	if (ep) {
 540		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
 541		       wait_count++ < 200)
 542			msleep(100);
 543
 544		if (ep->state != QED_IWARP_EP_CLOSED)
 545			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 546				  ep->state);
 547
 548		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 549	}
 550
 551	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 552
 553	if (qp->shared_queue)
 554		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 555				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 556				  qp->shared_queue, qp->shared_queue_phys_addr);
 557
 558	return rc;
 559}
 560
 561static int
 562qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 563{
 564	struct qed_iwarp_ep *ep;
 565	int rc;
 566
 567	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 568	if (!ep)
 569		return -ENOMEM;
 570
 571	ep->state = QED_IWARP_EP_INIT;
 572
 573	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 574						sizeof(*ep->ep_buffer_virt),
 575						&ep->ep_buffer_phys,
 576						GFP_KERNEL);
 577	if (!ep->ep_buffer_virt) {
 578		rc = -ENOMEM;
 579		goto err;
 580	}
 581
 582	ep->sig = QED_EP_SIG;
 583
 584	*ep_out = ep;
 585
 586	return 0;
 587
 588err:
 589	kfree(ep);
 590	return rc;
 591}
 592
 593static void
 594qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 595			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 596{
 597	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 598		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 599		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 600		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 601		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 602		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 603		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 604
 605	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 606		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 607			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 608			   p_tcp_ramrod->tcp.local_ip,
 609			   p_tcp_ramrod->tcp.local_port,
 610			   p_tcp_ramrod->tcp.remote_ip,
 611			   p_tcp_ramrod->tcp.remote_port,
 612			   p_tcp_ramrod->tcp.vlan_id);
 613	} else {
 614		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 615			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 616			   p_tcp_ramrod->tcp.local_ip,
 617			   p_tcp_ramrod->tcp.local_port,
 618			   p_tcp_ramrod->tcp.remote_ip,
 619			   p_tcp_ramrod->tcp.remote_port,
 620			   p_tcp_ramrod->tcp.vlan_id);
 621	}
 622
 623	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 624		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 625		   p_tcp_ramrod->tcp.flow_label,
 626		   p_tcp_ramrod->tcp.ttl,
 627		   p_tcp_ramrod->tcp.tos_or_tc,
 628		   p_tcp_ramrod->tcp.mss,
 629		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 630		   p_tcp_ramrod->tcp.connect_mode,
 631		   p_tcp_ramrod->tcp.flags);
 632
 633	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 634		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 635		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 636		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 637}
 638
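/* Build and post the TCP_OFFLOAD ramrod for an ep. Passive connections
 * complete via callback (QED_SPQ_MODE_CB) since they are driven from syn
 * processing, while active connections block on completion (EBLOCK). TCP
 * timers and keepalive use the QED_IWARP_DEF_* defaults.
 */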
 639static int
 640qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 641{
 642	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 643	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 644	struct tcp_offload_params_opt2 *tcp;
 645	struct qed_sp_init_data init_data;
 646	struct qed_spq_entry *p_ent;
 647	dma_addr_t async_output_phys;
 648	dma_addr_t in_pdata_phys;
 649	u16 physical_q;
 650	u8 tcp_flags;
 651	int rc;
 652	int i;
 653
 654	memset(&init_data, 0, sizeof(init_data));
 655	init_data.cid = ep->tcp_cid;
 656	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 657	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 658		init_data.comp_mode = QED_SPQ_MODE_CB;
 659	else
 660		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 661
 662	rc = qed_sp_init_request(p_hwfn, &p_ent,
 663				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 664				 PROTOCOLID_IWARP, &init_data);
 665	if (rc)
 666		return rc;
 667
 668	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 669
 670	in_pdata_phys = ep->ep_buffer_phys +
 671			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 672	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 673		       in_pdata_phys);
 674
 675	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 676	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 677
 678	async_output_phys = ep->ep_buffer_phys +
 679			    offsetof(struct qed_iwarp_ep_memory, async_output);
 680	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 681		       async_output_phys);
 682
 683	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 684	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 685
 686	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 687	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 688	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 689	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 690	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 691
 692	tcp = &p_tcp_ramrod->tcp;
 693	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 694			    &tcp->remote_mac_addr_mid,
 695			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 696	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 697			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 698
 699	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 700
 701	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 702	tcp->flags = 0;
 703	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 704		  !!(tcp_flags & QED_IWARP_TS_EN));
 705
 706	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 707		  !!(tcp_flags & QED_IWARP_DA_EN));
 708
 709	tcp->ip_version = ep->cm_info.ip_version;
 710
 711	for (i = 0; i < 4; i++) {
 712		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 713		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 714	}
 715
 716	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 717	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 718	tcp->mss = cpu_to_le16(ep->mss);
 719	tcp->flow_label = 0;
 720	tcp->ttl = 0x40;
 721	tcp->tos_or_tc = 0;
 722
 723	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
  724	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
 725	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 726	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
 727	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
 728
 729	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 730	tcp->connect_mode = ep->connect_mode;
 731
 732	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 733		tcp->syn_ip_payload_length =
 734			cpu_to_le16(ep->syn_ip_payload_length);
 735		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 736		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 737	}
 738
 739	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 740
 741	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 742
 743	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 744		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 745
 746	return rc;
 747}
 748
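/* Handle the MPA request received on the passive side: extract the peer's
 * ord/ird (clamped to QED_IWARP_ORD_DEFAULT/QED_IWARP_IRD_DEFAULT) and RTR
 * preferences from the MPA v2 header if enhanced negotiation is used, strip
 * that header from the private data and deliver a QED_IWARP_EVENT_MPA_REQUEST
 * event to the upper layer.
 */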
 749static void
 750qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 751{
 752	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 753	struct qed_iwarp_cm_event_params params;
 754	struct mpa_v2_hdr *mpa_v2;
 755	union async_output *async_data;
 756	u16 mpa_ord, mpa_ird;
 757	u8 mpa_hdr_size = 0;
 758	u8 mpa_rev;
 759
 760	async_data = &ep->ep_buffer_virt->async_output;
 761
 762	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 763	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 764		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 765		   async_data->mpa_request.ulp_data_len,
 766		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 767
 768	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 769		/* Read ord/ird values from private data buffer */
 770		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 771		mpa_hdr_size = sizeof(*mpa_v2);
 772
 773		mpa_ord = ntohs(mpa_v2->ord);
 774		mpa_ird = ntohs(mpa_v2->ird);
 775
  776		/* Temporarily store the requested incoming ord/ird in cm_info;
  777		 * they are replaced with the negotiated values during accept.
  778		 */
 779		ep->cm_info.ord = (u8)min_t(u16,
 780					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 781					    QED_IWARP_ORD_DEFAULT);
 782
 783		ep->cm_info.ird = (u8)min_t(u16,
 784					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 785					    QED_IWARP_IRD_DEFAULT);
 786
 787		/* Peer2Peer negotiation */
 788		ep->rtr_type = MPA_RTR_TYPE_NONE;
 789		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 790			if (mpa_ord & MPA_V2_WRITE_RTR)
 791				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 792
 793			if (mpa_ord & MPA_V2_READ_RTR)
 794				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 795
 796			if (mpa_ird & MPA_V2_SEND_RTR)
 797				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 798
 799			ep->rtr_type &= iwarp_info->rtr_type;
 800
  801			/* if we're left with no match, send our capabilities */
 802			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 803				ep->rtr_type = iwarp_info->rtr_type;
 804		}
 805
 806		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 807	} else {
 808		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 809		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 810		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 811	}
 812
 813	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 814		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 815		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 816		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 817
 818	/* Strip mpa v2 hdr from private data before sending to upper layer */
 819	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 820
 821	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
 822				       mpa_hdr_size;
 823
 824	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 825	params.cm_info = &ep->cm_info;
 826	params.ep_context = ep;
 827	params.status = 0;
 828
 829	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 830	ep->event_cb(ep->cb_context, &params);
 831}
 832
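/* Post the MPA_OFFLOAD ramrod for either an accept or a reject (a reject is
 * identified by the ep having no QP attached). On a reject only the tcp cid
 * is used and common.reject is set; on an accept the QP's shared queue and
 * stats queue are wired in and ep->cid migrates to the QP icid.
 */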
 833static int
 834qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 835{
 836	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 837	struct qed_iwarp_info *iwarp_info;
 838	struct qed_sp_init_data init_data;
 839	dma_addr_t async_output_phys;
 840	struct qed_spq_entry *p_ent;
 841	dma_addr_t out_pdata_phys;
 842	dma_addr_t in_pdata_phys;
 843	struct qed_rdma_qp *qp;
 844	bool reject;
 845	int rc;
 846
 847	if (!ep)
 848		return -EINVAL;
 849
 850	qp = ep->qp;
 851	reject = !qp;
 852
 853	memset(&init_data, 0, sizeof(init_data));
 854	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 855	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 856
 857	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 858		init_data.comp_mode = QED_SPQ_MODE_CB;
 859	else
 860		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 861
 862	rc = qed_sp_init_request(p_hwfn, &p_ent,
 863				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 864				 PROTOCOLID_IWARP, &init_data);
 865	if (rc)
 866		return rc;
 867
 868	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 869	out_pdata_phys = ep->ep_buffer_phys +
 870			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 871	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
 872		       out_pdata_phys);
 873	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
 874	    ep->cm_info.private_data_len;
 875	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 876
 877	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
 878	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
 879
 880	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 881
 882	in_pdata_phys = ep->ep_buffer_phys +
 883			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 884	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 885	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 886		       in_pdata_phys);
 887	p_mpa_ramrod->incoming_ulp_buffer.len =
 888	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 889	async_output_phys = ep->ep_buffer_phys +
 890			    offsetof(struct qed_iwarp_ep_memory, async_output);
 891	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 892		       async_output_phys);
 893	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 894	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 895
 896	if (!reject) {
 897		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 898			       qp->shared_queue_phys_addr);
 899		p_mpa_ramrod->stats_counter_id =
 900		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 901	} else {
 902		p_mpa_ramrod->common.reject = 1;
 903	}
 904
 905	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 906	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 907	p_mpa_ramrod->mode = ep->mpa_rev;
 908	SET_FIELD(p_mpa_ramrod->rtr_pref,
 909		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 910
 911	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 912	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 913	if (!reject)
 914		ep->cid = qp->icid;	/* Now they're migrated. */
 915
 916	DP_VERBOSE(p_hwfn,
 917		   QED_MSG_RDMA,
 918		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 919		   reject ? 0xffff : qp->icid,
 920		   ep->tcp_cid,
 921		   rc,
 922		   ep->cm_info.ird,
 923		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 924	return rc;
 925}
 926
 927static void
 928qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 929{
 930	ep->state = QED_IWARP_EP_INIT;
 931	if (ep->qp)
 932		ep->qp->ep = NULL;
 933	ep->qp = NULL;
 934	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 935
 936	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
  937		/* We don't care about the return code; it's ok if tcp_cid
  938		 * remains invalid... in this case we'll defer allocation.
  939		 */
 940		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 941	}
 942	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 943
 944	list_move_tail(&ep->list_entry,
 945		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 946
 947	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 948}
 949
 950static void
 951qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 952{
 953	struct mpa_v2_hdr *mpa_v2_params;
 954	union async_output *async_data;
 955	u16 mpa_ird, mpa_ord;
 956	u8 mpa_data_size = 0;
 957
 958	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 959		mpa_v2_params =
 960			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 961		mpa_data_size = sizeof(*mpa_v2_params);
 962		mpa_ird = ntohs(mpa_v2_params->ird);
 963		mpa_ord = ntohs(mpa_v2_params->ord);
 964
 965		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 966		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 967	}
 968	async_data = &ep->ep_buffer_virt->async_output;
 969
 970	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 971	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
 972				       mpa_data_size;
 973}
 974
 975static void
 976qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 977{
 978	struct qed_iwarp_cm_event_params params;
 979
 980	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 981		DP_NOTICE(p_hwfn,
 982			  "MPA reply event not expected on passive side!\n");
 983		return;
 984	}
 985
 986	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 987
 988	qed_iwarp_parse_private_data(p_hwfn, ep);
 989
 990	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 991		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 992		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 993
 994	params.cm_info = &ep->cm_info;
 995	params.ep_context = ep;
 996	params.status = 0;
 997
 998	ep->mpa_reply_processed = true;
 999
1000	ep->event_cb(ep->cb_context, &params);
1001}
1002
1003#define QED_IWARP_CONNECT_MODE_STRING(ep) \
1004	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
1005
1006/* Called as a result of the event:
1007 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1008 */
1009static void
1010qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1011		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1012{
1013	struct qed_iwarp_cm_event_params params;
1014
1015	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1016		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1017	else
1018		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1019
1020	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1021		qed_iwarp_parse_private_data(p_hwfn, ep);
1022
1023	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1024		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1025		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1026
1027	params.cm_info = &ep->cm_info;
1028
1029	params.ep_context = ep;
1030
1031	switch (fw_return_code) {
1032	case RDMA_RETURN_OK:
1033		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1034		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1035		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1036		ep->state = QED_IWARP_EP_ESTABLISHED;
1037		params.status = 0;
1038		break;
1039	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1040		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1041			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1042		params.status = -EBUSY;
1043		break;
1044	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1045		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1046			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1047		params.status = -ECONNREFUSED;
1048		break;
1049	case IWARP_CONN_ERROR_MPA_RST:
1050		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1051			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1052			  ep->tcp_cid);
1053		params.status = -ECONNRESET;
1054		break;
1055	case IWARP_CONN_ERROR_MPA_FIN:
1056		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1057			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1058		params.status = -ECONNREFUSED;
1059		break;
1060	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1061		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1062			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1063		params.status = -ECONNREFUSED;
1064		break;
1065	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1066		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1067			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1068		params.status = -ECONNREFUSED;
1069		break;
1070	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1071		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1072			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1073		params.status = -ECONNREFUSED;
1074		break;
1075	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1076		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1077			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1078		params.status = -ECONNREFUSED;
1079		break;
1080	case IWARP_CONN_ERROR_MPA_TERMINATE:
1081		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1082			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1083		params.status = -ECONNREFUSED;
1084		break;
1085	default:
1086		params.status = -ECONNRESET;
1087		break;
1088	}
1089
1090	if (fw_return_code != RDMA_RETURN_OK)
1091		/* paired with READ_ONCE in destroy_qp */
1092		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
1093
1094	ep->event_cb(ep->cb_context, &params);
1095
 1096	/* On the passive side, if there is no associated QP (REJECT) we need to
 1097	 * return the ep to the pool (in the regular case we add an element
 1098	 * in accept instead of this one).
 1099	 * In both cases we need to remove it from the ep_list.
1100	 */
1101	if (fw_return_code != RDMA_RETURN_OK) {
1102		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1103		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1104		    (!ep->qp)) {	/* Rejected */
1105			qed_iwarp_return_ep(p_hwfn, ep);
1106		} else {
1107			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1108			list_del(&ep->list_entry);
1109			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1110		}
1111	}
1112}
1113
1114static void
1115qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1116			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1117{
1118	struct mpa_v2_hdr *mpa_v2_params;
1119	u16 mpa_ird, mpa_ord;
1120
1121	*mpa_data_size = 0;
1122	if (MPA_REV2(ep->mpa_rev)) {
1123		mpa_v2_params =
1124		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1125		*mpa_data_size = sizeof(*mpa_v2_params);
1126
1127		mpa_ird = (u16)ep->cm_info.ird;
1128		mpa_ord = (u16)ep->cm_info.ord;
1129
1130		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1131			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1132
1133			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1134				mpa_ird |= MPA_V2_SEND_RTR;
1135
1136			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1137				mpa_ord |= MPA_V2_WRITE_RTR;
1138
1139			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1140				mpa_ord |= MPA_V2_READ_RTR;
1141		}
1142
1143		mpa_v2_params->ird = htons(mpa_ird);
1144		mpa_v2_params->ord = htons(mpa_ord);
1145
1146		DP_VERBOSE(p_hwfn,
1147			   QED_MSG_RDMA,
1148			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1149			   mpa_v2_params->ird,
1150			   mpa_v2_params->ord,
1151			   *((u32 *)mpa_v2_params),
1152			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1153			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1154			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1155			   !!(mpa_ird & MPA_V2_SEND_RTR),
1156			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1157			   !!(mpa_ord & MPA_V2_READ_RTR));
1158	}
1159}
1160
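/* Active-side connect: validate the requested ord/ird against the defaults,
 * allocate a cid and an ep, prepend the MPA v2 header to the private data if
 * enhanced negotiation is used, clamp the mss to QED_IWARP_MAX_FW_MSS and
 * kick off the TCP offload.
 */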
1161int qed_iwarp_connect(void *rdma_cxt,
1162		      struct qed_iwarp_connect_in *iparams,
1163		      struct qed_iwarp_connect_out *oparams)
1164{
1165	struct qed_hwfn *p_hwfn = rdma_cxt;
1166	struct qed_iwarp_info *iwarp_info;
1167	struct qed_iwarp_ep *ep;
1168	u8 mpa_data_size = 0;
1169	u32 cid;
1170	int rc;
1171
1172	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1173	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1174		DP_NOTICE(p_hwfn,
1175			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1176			  iparams->qp->icid, iparams->cm_info.ord,
1177			  iparams->cm_info.ird);
1178
1179		return -EINVAL;
1180	}
1181
1182	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1183
1184	/* Allocate ep object */
1185	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1186	if (rc)
1187		return rc;
1188
1189	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1190	if (rc)
1191		goto err;
1192
1193	ep->tcp_cid = cid;
1194
1195	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1196	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1197	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1198
1199	ep->qp = iparams->qp;
1200	ep->qp->ep = ep;
1201	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1202	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1203	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1204
1205	ep->cm_info.ord = iparams->cm_info.ord;
1206	ep->cm_info.ird = iparams->cm_info.ird;
1207
1208	ep->rtr_type = iwarp_info->rtr_type;
1209	if (!iwarp_info->peer2peer)
1210		ep->rtr_type = MPA_RTR_TYPE_NONE;
1211
1212	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1213		ep->cm_info.ord = 1;
1214
1215	ep->mpa_rev = iwarp_info->mpa_rev;
1216
1217	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1218
1219	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1220	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1221				       mpa_data_size;
1222
1223	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1224	       iparams->cm_info.private_data,
1225	       iparams->cm_info.private_data_len);
1226
1227	ep->mss = iparams->mss;
1228	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1229
1230	ep->event_cb = iparams->event_cb;
1231	ep->cb_context = iparams->cb_context;
1232	ep->connect_mode = TCP_CONNECT_ACTIVE;
1233
1234	oparams->ep_context = ep;
1235
1236	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1237
1238	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1239		   iparams->qp->icid, ep->tcp_cid, rc);
1240
1241	if (rc) {
1242		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1243		goto err;
1244	}
1245
1246	return rc;
1247err:
1248	qed_iwarp_cid_cleaned(p_hwfn, cid);
1249
1250	return rc;
1251}
1252
1253static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1254{
1255	struct qed_iwarp_ep *ep = NULL;
1256	int rc;
1257
1258	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1259
1260	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1261		DP_ERR(p_hwfn, "Ep list is empty\n");
1262		goto out;
1263	}
1264
1265	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1266			      struct qed_iwarp_ep, list_entry);
1267
 1268	/* In some cases we could have failed to allocate a tcp cid when the ep was
 1269	 * added from accept / failure... retry now; this is not the common case.
 1270	 */
1271	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1272		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1273
1274		/* if we fail we could look for another entry with a valid
1275		 * tcp_cid, but since we don't expect to reach this anyway
1276		 * it's not worth the handling
1277		 */
1278		if (rc) {
1279			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1280			ep = NULL;
1281			goto out;
1282		}
1283	}
1284
1285	list_del(&ep->list_entry);
1286
1287out:
1288	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1289	return ep;
1290}
1291
1292#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1293#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1294
 1295/* This function waits for all the bits of a bmap to be cleared; as long as
 1296 * there is progress (i.e. the number of bits left to be cleared decreases)
 1297 * the function keeps waiting.
1298 */
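/* With QED_IWARP_MAX_CID_CLEAN_TIME of 100 ms and QED_IWARP_MAX_NO_PROGRESS_CNT
 * of 5, this gives up after about 600 ms (six 100 ms sleeps) without any cid
 * being released.
 */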
1299static int
1300qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1301{
1302	int prev_weight = 0;
1303	int wait_count = 0;
1304	int weight = 0;
1305
1306	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1307	prev_weight = weight;
1308
1309	while (weight) {
1310		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1311
1312		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1313
1314		if (prev_weight == weight) {
1315			wait_count++;
1316		} else {
1317			prev_weight = weight;
1318			wait_count = 0;
1319		}
1320
1321		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1322			DP_NOTICE(p_hwfn,
1323				  "%s bitmap wait timed out (%d cids pending)\n",
1324				  bmap->name, weight);
1325			return -EBUSY;
1326		}
1327	}
1328	return 0;
1329}
1330
1331static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1332{
1333	int rc;
1334	int i;
1335
1336	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1337					    &p_hwfn->p_rdma_info->tcp_cid_map);
1338	if (rc)
1339		return rc;
1340
1341	/* Now free the tcp cids from the main cid map */
1342	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1343		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1344
1345	/* Now wait for all cids to be completed */
1346	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1347					      &p_hwfn->p_rdma_info->cid_map);
1348}
1349
1350static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1351{
1352	struct qed_iwarp_ep *ep;
1353
1354	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1355		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1356
1357		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1358				      struct qed_iwarp_ep, list_entry);
1359
1360		if (!ep) {
1361			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362			break;
1363		}
1364		list_del(&ep->list_entry);
1365
1366		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1367
1368		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1369			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1370
1371		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1372	}
1373}
1374
1375static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1376{
1377	struct qed_iwarp_ep *ep;
1378	int rc = 0;
1379	int count;
1380	u32 cid;
1381	int i;
1382
1383	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1384	for (i = 0; i < count; i++) {
1385		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1386		if (rc)
1387			return rc;
1388
 1389		/* During initialization we allocate from the main pool,
 1390		 * afterwards we allocate only from the tcp_cid map.
 1391		 */
1392		if (init) {
1393			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1394			if (rc)
1395				goto err;
1396			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1397		} else {
 1398			/* We don't care about the return code; it's ok if
 1399			 * tcp_cid remains invalid... in this case we'll
 1400			 * defer allocation.
 1401			 */
1402			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1403		}
1404
1405		ep->tcp_cid = cid;
1406
1407		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1408		list_add_tail(&ep->list_entry,
1409			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1410		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1411	}
1412
1413	return rc;
1414
1415err:
1416	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1417
1418	return rc;
1419}
1420
1421int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1422{
1423	int rc;
1424
 1425	/* Allocate bitmap for tcp cids. These are used by the passive side
 1426	 * to ensure it can allocate, during the dpc, a tcp cid that was
 1427	 * pre-acquired and doesn't require dynamic allocation of ilt.
 1428	 */
1429	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1430				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1431	if (rc) {
1432		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1433			   "Failed to allocate tcp cid, rc = %d\n", rc);
1434		return rc;
1435	}
1436
1437	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1438	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1439
1440	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1441	if (rc)
1442		return rc;
1443
1444	return qed_ooo_alloc(p_hwfn);
1445}
1446
1447void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1448{
1449	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1450
1451	qed_ooo_free(p_hwfn);
1452	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1453	kfree(iwarp_info->mpa_bufs);
1454	kfree(iwarp_info->partial_fpdus);
1455	kfree(iwarp_info->mpa_intermediate_buf);
1456}
1457
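/* Accept a connection request on the previously delivered ep. For enhanced
 * MPA negotiation the final ord is clamped to the ird advertised by the
 * remote, and ird is forced to at least 1 when a zero-read RTR is used. The
 * negotiated values and private data are then sent with the MPA offload; on
 * failure the QP is moved to ERROR.
 */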
1458int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1459{
1460	struct qed_hwfn *p_hwfn = rdma_cxt;
1461	struct qed_iwarp_ep *ep;
1462	u8 mpa_data_size = 0;
1463	int rc;
1464
1465	ep = iparams->ep_context;
1466	if (!ep) {
 1467		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
1468		return -EINVAL;
1469	}
1470
1471	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1472		   iparams->qp->icid, ep->tcp_cid);
1473
1474	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1475	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1476		DP_VERBOSE(p_hwfn,
1477			   QED_MSG_RDMA,
1478			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1479			   iparams->qp->icid,
 1480			   ep->tcp_cid, iparams->ord, iparams->ird);
1481		return -EINVAL;
1482	}
1483
1484	qed_iwarp_prealloc_ep(p_hwfn, false);
1485
1486	ep->cb_context = iparams->cb_context;
1487	ep->qp = iparams->qp;
1488	ep->qp->ep = ep;
1489
1490	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 1491		/* Negotiate ord/ird: if the upper layer requested an ord larger
 1492		 * than the ird advertised by the remote, we need to decrease our ord.
 1493		 */
1494		if (iparams->ord > ep->cm_info.ird)
1495			iparams->ord = ep->cm_info.ird;
1496
1497		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1498		    (iparams->ird == 0))
1499			iparams->ird = 1;
1500	}
1501
1502	/* Update cm_info ord/ird to be negotiated values */
1503	ep->cm_info.ord = iparams->ord;
1504	ep->cm_info.ird = iparams->ird;
1505
1506	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1507
1508	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1509	ep->cm_info.private_data_len = iparams->private_data_len +
1510				       mpa_data_size;
1511
1512	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1513	       iparams->private_data, iparams->private_data_len);
1514
1515	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1516	if (rc)
1517		qed_iwarp_modify_qp(p_hwfn,
1518				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1519
1520	return rc;
1521}
1522
1523int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1524{
1525	struct qed_hwfn *p_hwfn = rdma_cxt;
1526	struct qed_iwarp_ep *ep;
1527	u8 mpa_data_size = 0;
1528
1529	ep = iparams->ep_context;
1530	if (!ep) {
 1531		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
1532		return -EINVAL;
1533	}
1534
1535	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1536
1537	ep->cb_context = iparams->cb_context;
1538	ep->qp = NULL;
1539
1540	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1541
1542	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1543	ep->cm_info.private_data_len = iparams->private_data_len +
1544				       mpa_data_size;
1545
1546	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1547	       iparams->private_data, iparams->private_data_len);
1548
1549	return qed_iwarp_mpa_offload(p_hwfn, ep);
1550}
1551
1552static void
1553qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1554			struct qed_iwarp_cm_info *cm_info)
1555{
1556	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1557		   cm_info->ip_version);
1558
1559	if (cm_info->ip_version == QED_TCP_IPV4)
1560		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1561			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1562			   cm_info->remote_ip, cm_info->remote_port,
1563			   cm_info->local_ip, cm_info->local_port,
1564			   cm_info->vlan);
1565	else
1566		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1567			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1568			   cm_info->remote_ip, cm_info->remote_port,
1569			   cm_info->local_ip, cm_info->local_port,
1570			   cm_info->vlan);
1571
1572	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1573		   "private_data_len = %x ord = %d, ird = %d\n",
1574		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1575}
1576
1577static int
1578qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1579		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1580{
1581	int rc;
1582
1583	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1584				    (u16)buf->buff_size, buf, 1);
1585	if (rc) {
1586		DP_NOTICE(p_hwfn,
1587			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1588			  rc, handle);
1589		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1590				  buf->data, buf->data_phys_addr);
1591		kfree(buf);
1592	}
1593
1594	return rc;
1595}
1596
1597static bool
1598qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1599{
1600	struct qed_iwarp_ep *ep = NULL;
1601	bool found = false;
1602
1603	list_for_each_entry(ep,
1604			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1605			    list_entry) {
1606		if ((ep->cm_info.local_port == cm_info->local_port) &&
1607		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1608		    (ep->cm_info.vlan == cm_info->vlan) &&
1609		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1610			    sizeof(cm_info->local_ip)) &&
1611		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1612			    sizeof(cm_info->remote_ip))) {
1613			found = true;
1614			break;
1615		}
1616	}
1617
1618	if (found) {
1619		DP_NOTICE(p_hwfn,
1620			  "SYN received on active connection - dropping\n");
1621		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1622
1623		return true;
1624	}
1625
1626	return false;
1627}
1628
1629static struct qed_iwarp_listener *
1630qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1631		       struct qed_iwarp_cm_info *cm_info)
1632{
1633	struct qed_iwarp_listener *listener = NULL;
1634	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1635	bool found = false;
1636
1637	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1638
1639	list_for_each_entry(listener,
1640			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1641			    list_entry) {
1642		if (listener->port == cm_info->local_port) {
1643			if (!memcmp(listener->ip_addr,
1644				    ip_zero, sizeof(ip_zero))) {
1645				found = true;
1646				break;
1647			}
1648
1649			if (!memcmp(listener->ip_addr,
1650				    cm_info->local_ip,
1651				    sizeof(cm_info->local_ip)) &&
1652			    (listener->vlan == cm_info->vlan)) {
1653				found = true;
1654				break;
1655			}
1656		}
1657	}
1658
1659	if (found) {
1660		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1661			   listener);
1662		return listener;
1663	}
1664
1665	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1666	return NULL;
1667}
1668
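/* Parse a SYN packet received over the ll2 connection: verify the destination
 * mac matches ours, extract vlan, IPv4/IPv6 addresses and tcp ports into
 * cm_info, and return the tcp payload length and the offset at which the tcp
 * header starts.
 */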
1669static int
1670qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1671		       struct qed_iwarp_cm_info *cm_info,
1672		       void *buf,
1673		       u8 *remote_mac_addr,
1674		       u8 *local_mac_addr,
1675		       int *payload_len, int *tcp_start_offset)
1676{
1677	struct vlan_ethhdr *vethh;
1678	bool vlan_valid = false;
1679	struct ipv6hdr *ip6h;
1680	struct ethhdr *ethh;
1681	struct tcphdr *tcph;
1682	struct iphdr *iph;
1683	int eth_hlen;
1684	int ip_hlen;
1685	int eth_type;
1686	int i;
1687
1688	ethh = buf;
1689	eth_type = ntohs(ethh->h_proto);
1690	if (eth_type == ETH_P_8021Q) {
1691		vlan_valid = true;
1692		vethh = (struct vlan_ethhdr *)ethh;
1693		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1694		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1695	}
1696
1697	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1698
1699	if (!ether_addr_equal(ethh->h_dest,
1700			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1701		DP_VERBOSE(p_hwfn,
1702			   QED_MSG_RDMA,
1703			   "Got unexpected mac %pM instead of %pM\n",
1704			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1705		return -EINVAL;
1706	}
1707
1708	ether_addr_copy(remote_mac_addr, ethh->h_source);
1709	ether_addr_copy(local_mac_addr, ethh->h_dest);
1710
1711	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1712		   eth_type, ethh->h_source);
1713
1714	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1715		   eth_hlen, ethh->h_dest);
1716
1717	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1718
1719	if (eth_type == ETH_P_IP) {
1720		if (iph->protocol != IPPROTO_TCP) {
1721			DP_NOTICE(p_hwfn,
1722				  "Unexpected ip protocol on ll2 %x\n",
1723				  iph->protocol);
1724			return -EINVAL;
1725		}
1726
1727		cm_info->local_ip[0] = ntohl(iph->daddr);
1728		cm_info->remote_ip[0] = ntohl(iph->saddr);
1729		cm_info->ip_version = QED_TCP_IPV4;
1730
1731		ip_hlen = (iph->ihl) * sizeof(u32);
1732		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1733	} else if (eth_type == ETH_P_IPV6) {
1734		ip6h = (struct ipv6hdr *)iph;
1735
1736		if (ip6h->nexthdr != IPPROTO_TCP) {
1737			DP_NOTICE(p_hwfn,
1738				  "Unexpected ip protocol on ll2 %x\n",
 1739				  ip6h->nexthdr);
1740			return -EINVAL;
1741		}
1742
1743		for (i = 0; i < 4; i++) {
1744			cm_info->local_ip[i] =
1745			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1746			cm_info->remote_ip[i] =
1747			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1748		}
1749		cm_info->ip_version = QED_TCP_IPV6;
1750
1751		ip_hlen = sizeof(*ip6h);
1752		*payload_len = ntohs(ip6h->payload_len);
1753	} else {
1754		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1755		return -EINVAL;
1756	}
1757
1758	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1759
1760	if (!tcph->syn) {
1761		DP_NOTICE(p_hwfn,
1762			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1763			  iph->ihl, tcph->source, tcph->dest);
1764		return -EINVAL;
1765	}
1766
1767	cm_info->local_port = ntohs(tcph->dest);
1768	cm_info->remote_port = ntohs(tcph->source);
1769
1770	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1771
1772	*tcp_start_offset = eth_hlen + ip_hlen;
1773
1774	return 0;
1775}
1776
1777static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1778						      u16 cid)
1779{
1780	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1781	struct qed_iwarp_fpdu *partial_fpdu;
1782	u32 idx;
1783
1784	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1785	if (idx >= iwarp_info->max_num_partial_fpdus) {
1786		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1787		       iwarp_info->max_num_partial_fpdus);
1788		return NULL;
1789	}
1790
1791	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1792
1793	return partial_fpdu;
1794}
1795
1796enum qed_iwarp_mpa_pkt_type {
1797	QED_IWARP_MPA_PKT_PACKED,
1798	QED_IWARP_MPA_PKT_PARTIAL,
1799	QED_IWARP_MPA_PKT_UNALIGNED
1800};
1801
1802#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1803#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1804#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1805
1806/* Pad to multiple of 4 */
1807#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1808#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1809	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1810					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1811					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
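/* For example, an mpa_len of 13 gives 13 + 2 (length field) = 15, padded to
 * 16, plus 4 bytes of CRC32 digest = 20 bytes on the wire.
 */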
1812
 1813/* fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1814#define QED_IWARP_MAX_BDS_PER_FPDU 3
1815
1816static const char * const pkt_type_str[] = {
1817	"QED_IWARP_MPA_PKT_PACKED",
1818	"QED_IWARP_MPA_PKT_PARTIAL",
1819	"QED_IWARP_MPA_PKT_UNALIGNED"
1820};
1821
1822static int
1823qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1824		      struct qed_iwarp_fpdu *fpdu,
1825		      struct qed_iwarp_ll2_buff *buf);
1826
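/* Classify the tcp payload relative to FPDU boundaries: UNALIGNED if we are
 * still completing a previous FPDU, PARTIAL if this FPDU extends beyond the
 * current segment (including the special case of only one length byte
 * received), PACKED if a complete FPDU fits in the payload.
 */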
1827static enum qed_iwarp_mpa_pkt_type
1828qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1829		       struct qed_iwarp_fpdu *fpdu,
1830		       u16 tcp_payload_len, u8 *mpa_data)
1831{
1832	enum qed_iwarp_mpa_pkt_type pkt_type;
1833	u16 mpa_len;
1834
1835	if (fpdu->incomplete_bytes) {
1836		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1837		goto out;
1838	}
1839
 1840	/* special case of one byte remaining...
 1841	 * the lower byte will be read in the next packet
 1842	 */
1843	if (tcp_payload_len == 1) {
1844		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1845		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1846		goto out;
1847	}
1848
1849	mpa_len = ntohs(*((u16 *)(mpa_data)));
1850	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1851
1852	if (fpdu->fpdu_length <= tcp_payload_len)
1853		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1854	else
1855		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1856
1857out:
1858	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1859		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1860		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1861
1862	return pkt_type;
1863}
1864
1865static void
1866qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1867		    struct qed_iwarp_fpdu *fpdu,
1868		    struct unaligned_opaque_data *pkt_data,
1869		    u16 tcp_payload_size, u8 placement_offset)
1870{
1871	fpdu->mpa_buf = buf;
1872	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1875	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1876
1877	if (tcp_payload_size == 1)
1878		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879	else if (tcp_payload_size < fpdu->fpdu_length)
1880		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881	else
1882		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1883
1884	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885}
1886
1887static int
1888qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1889		 struct qed_iwarp_fpdu *fpdu,
1890		 struct unaligned_opaque_data *pkt_data,
1891		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1892{
1893	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1894	int rc;
1895
 1896	/* We need to copy the data from the partial packet stored in the fpdu
 1897	 * to the new buf; for this we also need to move the data currently
 1898	 * placed in the buf. The assumption is that the buffer is big enough,
 1899	 * since fpdu_length <= mss. We use an intermediate buffer because
 1900	 * we may need to copy the new data to an overlapping location.
 1901	 */
1902	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1903		DP_ERR(p_hwfn,
1904		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1905		       buf->buff_size, fpdu->mpa_frag_len,
1906		       tcp_payload_size, fpdu->incomplete_bytes);
1907		return -EINVAL;
1908	}
1909
1910	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1911		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1912		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1913		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1914		   tcp_payload_size);
1915
1916	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1917	memcpy(tmp_buf + fpdu->mpa_frag_len,
1918	       (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1919	       tcp_payload_size);
1920
1921	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1922	if (rc)
1923		return rc;
1924
 1925	/* If we managed to post the buffer, copy the data to the new buffer;
 1926	 * otherwise this will occur in the next round...
 1927	 */
1928	memcpy((u8 *)(buf->data), tmp_buf,
1929	       fpdu->mpa_frag_len + tcp_payload_size);
1930
1931	fpdu->mpa_buf = buf;
1932	/* fpdu->pkt_hdr remains as is */
1933	/* fpdu->mpa_frag is overridden with new buf */
1934	fpdu->mpa_frag = buf->data_phys_addr;
1935	fpdu->mpa_frag_virt = buf->data;
1936	fpdu->mpa_frag_len += tcp_payload_size;
1937
1938	fpdu->incomplete_bytes -= tcp_payload_size;
1939
1940	DP_VERBOSE(p_hwfn,
1941		   QED_MSG_RDMA,
1942		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1943		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1944		   fpdu->incomplete_bytes);
1945
1946	return 0;
1947}
1948
1949static void
1950qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1951			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1952{
1953	u16 mpa_len;
1954
1955	/* Update incomplete packets if needed */
1956	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1957		/* Missing lower byte is now available */
1958		mpa_len = fpdu->fpdu_length | *mpa_data;
1959		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1960		/* one byte of hdr */
1961		fpdu->mpa_frag_len = 1;
1962		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1963		DP_VERBOSE(p_hwfn,
1964			   QED_MSG_RDMA,
1965			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1966			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1967	}
1968}
1969
1970#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1971	(GET_FIELD((_curr_pkt)->flags,	   \
1972		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1973
 1974/* This function is used to recycle a buffer using the ll2 drop option. The
 1975 * drop mechanism ensures that all buffers posted to tx before this one were
 1976 * completed. The buffer sent here is passed as a cookie to the tx completion
 1977 * function and can then be reposted to the rx chain when done. The flow that
 1978 * requires this is the one where an FPDU splits over more than 3 tcp
 1979 * segments. In this case the driver needs to re-post an rx buffer instead of
 1980 * the one received, but the driver can't simply repost the buffer it copied
 1981 * from, as there is a case where the buffer was originally a packed FPDU and
 1982 * is partially posted to FW. The driver needs to ensure FW is done with it.
 1983 */
1984static int
1985qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1986		      struct qed_iwarp_fpdu *fpdu,
1987		      struct qed_iwarp_ll2_buff *buf)
1988{
1989	struct qed_ll2_tx_pkt_info tx_pkt;
1990	u8 ll2_handle;
1991	int rc;
1992
1993	memset(&tx_pkt, 0, sizeof(tx_pkt));
1994	tx_pkt.num_of_bds = 1;
1995	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1996	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1997	tx_pkt.first_frag = fpdu->pkt_hdr;
1998	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1999	buf->piggy_buf = NULL;
2000	tx_pkt.cookie = buf;
2001
2002	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2003
2004	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2005	if (rc)
2006		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2007			   "Can't drop packet rc=%d\n", rc);
2008
2009	DP_VERBOSE(p_hwfn,
2010		   QED_MSG_RDMA,
2011		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2012		   (unsigned long int)tx_pkt.first_frag,
2013		   tx_pkt.first_frag_len, buf, rc);
2014
2015	return rc;
2016}
2017
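/* Notify FW that the right edge of the TCP window was reached for this FPDU
 * by sending only the packet header on the aligned-right-trimmed tx queue.
 */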
2018static int
2019qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2020{
2021	struct qed_ll2_tx_pkt_info tx_pkt;
2022	u8 ll2_handle;
2023	int rc;
2024
2025	memset(&tx_pkt, 0, sizeof(tx_pkt));
2026	tx_pkt.num_of_bds = 1;
2027	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2028	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2029
2030	tx_pkt.first_frag = fpdu->pkt_hdr;
2031	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2032	tx_pkt.enable_ip_cksum = true;
2033	tx_pkt.enable_l4_cksum = true;
2034	tx_pkt.calc_ip_len = true;
2035	/* vlan overload with enum iwarp_ll2_tx_queues */
2036	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2037
2038	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2039
2040	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2041	if (rc)
2042		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2043			   "Can't send right edge rc=%d\n", rc);
2044	DP_VERBOSE(p_hwfn,
2045		   QED_MSG_RDMA,
2046		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2047		   tx_pkt.num_of_bds,
2048		   (unsigned long int)tx_pkt.first_frag,
2049		   tx_pkt.first_frag_len, rc);
2050
2051	return rc;
2052}
2053
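/* Send an aligned FPDU to FW over the mpa ll2 connection: the first BD holds
 * the rebuilt packet header, the second the data accumulated in the fpdu and,
 * for an unaligned packet, a third BD points to the remainder in the current
 * buffer.
 */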
2054static int
2055qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2056		    struct qed_iwarp_fpdu *fpdu,
2057		    struct unaligned_opaque_data *curr_pkt,
2058		    struct qed_iwarp_ll2_buff *buf,
2059		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2060{
2061	struct qed_ll2_tx_pkt_info tx_pkt;
2062	u8 ll2_handle;
2063	int rc;
2064
2065	memset(&tx_pkt, 0, sizeof(tx_pkt));
2066
2067	/* An unaligned packet means it is split over two tcp segments. So the
2068	 * complete packet requires 3 bds: one for the header, one for the
2069	 * part of the fpdu from the first tcp segment, and the last fragment
2070	 * pointing to the remainder of the fpdu. A packed pdu requires only
2071	 * two bds: one for the header and one for the data.
2072	 */
2073	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2074	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2075	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2076
2077	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2078	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2079	    tcp_payload_size <= fpdu->fpdu_length)
2080		tx_pkt.cookie = fpdu->mpa_buf;
2081
2082	tx_pkt.first_frag = fpdu->pkt_hdr;
2083	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2084	tx_pkt.enable_ip_cksum = true;
2085	tx_pkt.enable_l4_cksum = true;
2086	tx_pkt.calc_ip_len = true;
2087	/* vlan overload with enum iwarp_ll2_tx_queues */
2088	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2089
2090	/* Special case of an unaligned packet that is not packed: need to send
2091	 * both buffers as cookies so they get released.
2092	 */
2093	if (tcp_payload_size == fpdu->incomplete_bytes)
2094		fpdu->mpa_buf->piggy_buf = buf;
2095
2096	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2097
2098	/* Set first fragment to header */
2099	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100	if (rc)
2101		goto out;
2102
2103	/* Set second fragment to first part of packet */
2104	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2105					       fpdu->mpa_frag,
2106					       fpdu->mpa_frag_len);
2107	if (rc)
2108		goto out;
2109
2110	if (!fpdu->incomplete_bytes)
2111		goto out;
2112
2113	/* Set third fragment to second part of the packet */
2114	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2115					       ll2_handle,
2116					       buf->data_phys_addr +
2117					       curr_pkt->first_mpa_offset,
2118					       fpdu->incomplete_bytes);
2119out:
2120	DP_VERBOSE(p_hwfn,
2121		   QED_MSG_RDMA,
2122		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2123		   tx_pkt.num_of_bds,
2124		   tx_pkt.first_frag_len,
2125		   fpdu->mpa_frag_len,
2126		   fpdu->incomplete_bytes, rc);
2127
2128	return rc;
2129}
2130
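/* Unpack the opaque data received with an ll2 completion into
 * unaligned_opaque_data, fixing up first_mpa_offset and cid.
 */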
2131static void
2132qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2133		       struct unaligned_opaque_data *curr_pkt,
2134		       u32 opaque_data0, u32 opaque_data1)
2135{
2136	u64 opaque_data;
2137
2138	opaque_data = HILO_64(opaque_data1, opaque_data0);
2139	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2140
2141	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2142				     le16_to_cpu(curr_pkt->first_mpa_offset);
2143	curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2144}
2145
2146/* This function is called when an unaligned or incomplete MPA packet
2147 * arrives; the driver needs to align the packet, perhaps using previous
2148 * data, and send it down to FW once it is aligned.
2149 */
2150static int
2151qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2152			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2153{
2154	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2155	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2156	enum qed_iwarp_mpa_pkt_type pkt_type;
2157	struct qed_iwarp_fpdu *fpdu;
2158	int rc = -EINVAL;
2159	u8 *mpa_data;
2160
2161	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2162	if (!fpdu) { /* something corrupt with cid, post rx back */
2163		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2164		       curr_pkt->cid);
2165		goto err;
2166	}
2167
2168	do {
2169		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2170
2171		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2172						  mpa_buf->tcp_payload_len,
2173						  mpa_data);
2174
2175		switch (pkt_type) {
2176		case QED_IWARP_MPA_PKT_PARTIAL:
2177			qed_iwarp_init_fpdu(buf, fpdu,
2178					    curr_pkt,
2179					    mpa_buf->tcp_payload_len,
2180					    mpa_buf->placement_offset);
2181
2182			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2183				mpa_buf->tcp_payload_len = 0;
2184				break;
2185			}
2186
2187			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2188
2189			if (rc) {
2190				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2191					   "Can't send FPDU:reset rc=%d\n", rc);
2192				memset(fpdu, 0, sizeof(*fpdu));
2193				break;
2194			}
2195
2196			mpa_buf->tcp_payload_len = 0;
2197			break;
2198		case QED_IWARP_MPA_PKT_PACKED:
2199			qed_iwarp_init_fpdu(buf, fpdu,
2200					    curr_pkt,
2201					    mpa_buf->tcp_payload_len,
2202					    mpa_buf->placement_offset);
2203
2204			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2205						 mpa_buf->tcp_payload_len,
2206						 pkt_type);
2207			if (rc) {
2208				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2209					   "Can't send FPDU:reset rc=%d\n", rc);
2210				memset(fpdu, 0, sizeof(*fpdu));
2211				break;
2212			}
2213
2214			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2215			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2216			break;
2217		case QED_IWARP_MPA_PKT_UNALIGNED:
2218			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2219			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2220				/* special handling of fpdu split over more
2221				 * than 2 segments
2222				 */
2223				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2224					rc = qed_iwarp_win_right_edge(p_hwfn,
2225								      fpdu);
2226					/* packet will be re-processed later */
2227					if (rc)
2228						return rc;
2229				}
2230
2231				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2232						      buf,
2233						      mpa_buf->tcp_payload_len);
2234				if (rc) /* packet will be re-processed later */
2235					return rc;
2236
2237				mpa_buf->tcp_payload_len = 0;
2238				break;
2239			}
2240
2241			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2242						 mpa_buf->tcp_payload_len,
2243						 pkt_type);
2244			if (rc) {
2245				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2246					   "Can't send FPDU:delay rc=%d\n", rc);
2247				/* don't reset fpdu -> we need it for next
2248				 * classify
2249				 */
2250				break;
2251			}
2252
2253			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2254			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2255			/* The framed PDU was sent - no more incomplete bytes */
2256			fpdu->incomplete_bytes = 0;
2257			break;
2258		}
2259	} while (mpa_buf->tcp_payload_len && !rc);
2260
2261	return rc;
2262
2263err:
2264	qed_iwarp_ll2_post_rx(p_hwfn,
2265			      buf,
2266			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2267	return rc;
2268}
2269
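/* Process the MPA buffers queued on the pending list. -EBUSY stops
 * processing and leaves the buffer on the list to be retried later.
 */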
2270static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2271{
2272	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2273	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2274	int rc;
2275
2276	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2277		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2278					   struct qed_iwarp_ll2_mpa_buf,
2279					   list_entry);
2280
2281		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2282
2283		/* busy means break and continue processing later, don't
2284		 * remove the buf from the pending list.
2285		 */
2286		if (rc == -EBUSY)
2287			break;
2288
2289		list_move_tail(&mpa_buf->list_entry,
2290			       &iwarp_info->mpa_buf_list);
2291
2292		if (rc) {	/* different error, don't continue */
2293			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2294			break;
2295		}
2296	}
2297}
2298
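/* Rx completion on the unaligned MPA ll2 connection - take a free mpa_buf,
 * fill it from the completion data and queue it for alignment processing.
 */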
2299static void
2300qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2301{
2302	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2303	struct qed_iwarp_info *iwarp_info;
2304	struct qed_hwfn *p_hwfn = cxt;
2305
2306	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2307	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2308				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2309	if (!mpa_buf) {
2310		DP_ERR(p_hwfn, "No free mpa buf\n");
2311		goto err;
2312	}
2313
2314	list_del(&mpa_buf->list_entry);
2315	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2316			       data->opaque_data_0, data->opaque_data_1);
2317
2318	DP_VERBOSE(p_hwfn,
2319		   QED_MSG_RDMA,
2320		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2321		   data->length.packet_length, mpa_buf->data.first_mpa_offset,
2322		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2323		   mpa_buf->data.cid);
2324
2325	mpa_buf->ll2_buf = data->cookie;
2326	mpa_buf->tcp_payload_len = data->length.packet_length -
2327				   mpa_buf->data.first_mpa_offset;
2328	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2329	mpa_buf->placement_offset = data->u.placement_offset;
2330
2331	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2332
2333	qed_iwarp_process_pending_pkts(p_hwfn);
2334	return;
2335err:
2336	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2337			      iwarp_info->ll2_mpa_handle);
2338}
2339
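/* Rx completion on the SYN ll2 connection - parse the SYN, look for a
 * matching listener and, if one exists, offload the TCP connection to FW.
 */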
2340static void
2341qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2342{
2343	struct qed_iwarp_ll2_buff *buf = data->cookie;
2344	struct qed_iwarp_listener *listener;
2345	struct qed_ll2_tx_pkt_info tx_pkt;
2346	struct qed_iwarp_cm_info cm_info;
2347	struct qed_hwfn *p_hwfn = cxt;
2348	u8 remote_mac_addr[ETH_ALEN];
2349	u8 local_mac_addr[ETH_ALEN];
2350	struct qed_iwarp_ep *ep;
2351	int tcp_start_offset;
2352	u8 ll2_syn_handle;
2353	int payload_len;
2354	u32 hdr_size;
2355	int rc;
2356
2357	memset(&cm_info, 0, sizeof(cm_info));
2358	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2359
2360	/* Check if packet was received with errors... */
2361	if (data->err_flags) {
2362		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2363			  data->err_flags);
2364		goto err;
2365	}
2366
2367	if (GET_FIELD(data->parse_flags,
2368		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2369	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2370		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2371		goto err;
2372	}
2373
2374	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2375				    data->u.placement_offset, remote_mac_addr,
2376				    local_mac_addr, &payload_len,
2377				    &tcp_start_offset);
2378	if (rc)
2379		goto err;
2380
2381	/* Check if there is a listener for this 4-tuple+vlan */
2382	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2383	if (!listener) {
2384		DP_VERBOSE(p_hwfn,
2385			   QED_MSG_RDMA,
2386			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2387			   data->parse_flags, data->length.packet_length);
2388
2389		memset(&tx_pkt, 0, sizeof(tx_pkt));
2390		tx_pkt.num_of_bds = 1;
2391		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2392		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2393		tx_pkt.first_frag = buf->data_phys_addr +
2394				    data->u.placement_offset;
2395		tx_pkt.first_frag_len = data->length.packet_length;
2396		tx_pkt.cookie = buf;
2397
2398		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2399					       &tx_pkt, true);
2400
2401		if (rc) {
2402			DP_NOTICE(p_hwfn,
2403				  "Can't post SYN back to chip rc=%d\n", rc);
2404			goto err;
2405		}
2406		return;
2407	}
2408
2409	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2410	/* There may be an open ep on this connection if this is a syn
2411	 * retransmit... need to make sure there isn't...
2412	 */
2413	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2414		goto err;
2415
2416	ep = qed_iwarp_get_free_ep(p_hwfn);
2417	if (!ep)
2418		goto err;
2419
2420	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2421	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2422	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2423
2424	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2425	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2426
2427	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2428
2429	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
2430	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2431	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2432
2433	ep->event_cb = listener->event_cb;
2434	ep->cb_context = listener->cb_context;
2435	ep->connect_mode = TCP_CONNECT_PASSIVE;
2436
2437	ep->syn = buf;
2438	ep->syn_ip_payload_length = (u16)payload_len;
2439	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2440			   tcp_start_offset;
2441
2442	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2443	if (rc) {
2444		qed_iwarp_return_ep(p_hwfn, ep);
2445		goto err;
2446	}
2447
2448	return;
2449err:
2450	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2451}
2452
2453static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2454				     void *cookie, dma_addr_t rx_buf_addr,
2455				     bool b_last_packet)
2456{
2457	struct qed_iwarp_ll2_buff *buffer = cookie;
2458	struct qed_hwfn *p_hwfn = cxt;
2459
2460	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2461			  buffer->data, buffer->data_phys_addr);
2462	kfree(buffer);
2463}
2464
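/* Tx completion - the cookie is the rx buffer the data originated from;
 * repost it (and any piggybacked buffer) to the rx chain and resume
 * processing of pending mpa packets.
 */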
2465static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2466				      void *cookie, dma_addr_t first_frag_addr,
2467				      bool b_last_fragment, bool b_last_packet)
2468{
2469	struct qed_iwarp_ll2_buff *buffer = cookie;
2470	struct qed_iwarp_ll2_buff *piggy;
2471	struct qed_hwfn *p_hwfn = cxt;
2472
2473	if (!buffer)		/* can happen in packed mpa unaligned... */
2474		return;
2475
2476	/* this was originally an rx packet, post it back */
2477	piggy = buffer->piggy_buf;
2478	if (piggy) {
2479		buffer->piggy_buf = NULL;
2480		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2481	}
2482
2483	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2484
2485	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2486		qed_iwarp_process_pending_pkts(p_hwfn);
2487
2488	return;
2489}
2490
2491static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2492				     void *cookie, dma_addr_t first_frag_addr,
2493				     bool b_last_fragment, bool b_last_packet)
2494{
2495	struct qed_iwarp_ll2_buff *buffer = cookie;
2496	struct qed_hwfn *p_hwfn = cxt;
2497
2498	if (!buffer)
2499		return;
2500
2501	if (buffer->piggy_buf) {
2502		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2503				  buffer->piggy_buf->buff_size,
2504				  buffer->piggy_buf->data,
2505				  buffer->piggy_buf->data_phys_addr);
2506
2507		kfree(buffer->piggy_buf);
2508	}
2509
2510	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2511			  buffer->data, buffer->data_phys_addr);
2512
2513	kfree(buffer);
2514}
2515
2516/* The only slowpath for iwarp ll2 is the unaligned flush. When this
2517 * completion is received, the FPDU needs to be reset.
2518 */
2519static void
2520qed_iwarp_ll2_slowpath(void *cxt,
2521		       u8 connection_handle,
2522		       u32 opaque_data_0, u32 opaque_data_1)
2523{
2524	struct unaligned_opaque_data unalign_data;
2525	struct qed_hwfn *p_hwfn = cxt;
2526	struct qed_iwarp_fpdu *fpdu;
2527
2528	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2529			       opaque_data_0, opaque_data_1);
2530
2531	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2532		   unalign_data.cid);
2533
2534	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2535	if (fpdu)
2536		memset(fpdu, 0, sizeof(*fpdu));
2537}
2538
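/* Terminate and release the syn, ooo and mpa ll2 connections and remove the
 * iWARP MAC filter.
 */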
2539static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2540{
2541	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2542	int rc = 0;
2543
2544	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2545		rc = qed_ll2_terminate_connection(p_hwfn,
2546						  iwarp_info->ll2_syn_handle);
2547		if (rc)
2548			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2549
2550		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2551		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2552	}
2553
2554	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2555		rc = qed_ll2_terminate_connection(p_hwfn,
2556						  iwarp_info->ll2_ooo_handle);
2557		if (rc)
2558			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2559
2560		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2561		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2562	}
2563
2564	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2565		rc = qed_ll2_terminate_connection(p_hwfn,
2566						  iwarp_info->ll2_mpa_handle);
2567		if (rc)
2568			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2569
2570		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2571		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2572	}
2573
2574	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2575				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2576
2577	return rc;
2578}
2579
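/* Allocate num_rx_bufs DMA-coherent buffers and post them to the rx chain of
 * the given ll2 connection.
 */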
2580static int
2581qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2582			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2583{
2584	struct qed_iwarp_ll2_buff *buffer;
2585	int rc = 0;
2586	int i;
2587
2588	for (i = 0; i < num_rx_bufs; i++) {
2589		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2590		if (!buffer) {
2591			rc = -ENOMEM;
2592			break;
2593		}
2594
2595		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2596						  buff_size,
2597						  &buffer->data_phys_addr,
2598						  GFP_KERNEL);
2599		if (!buffer->data) {
2600			kfree(buffer);
2601			rc = -ENOMEM;
2602			break;
2603		}
2604
2605		buffer->buff_size = buff_size;
2606		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2607		if (rc)
2608			/* buffers will be deallocated by qed_ll2 */
2609			break;
2610	}
2611	return rc;
2612}
2613
2614#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2615	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2616		ETH_CACHE_LINE_SIZE)
2617
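/* Bring up the ll2 connections used by iWARP: SYN (passive connection
 * establishment), OOO (out-of-order TCP segments) and unaligned MPA, and
 * allocate the rx buffers and FPDU bookkeeping they require.
 */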
2618static int
2619qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2620		    struct qed_rdma_start_in_params *params,
2621		    u32 rcv_wnd_size)
2622{
2623	struct qed_iwarp_info *iwarp_info;
2624	struct qed_ll2_acquire_data data;
2625	struct qed_ll2_cbs cbs;
2626	u32 buff_size;
2627	u16 n_ooo_bufs;
2628	int rc = 0;
2629	int i;
2630
2631	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2632	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2633	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2634	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2635
2636	iwarp_info->max_mtu = params->max_mtu;
2637
2638	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2639
2640	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2641	if (rc)
2642		return rc;
2643
2644	/* Start SYN connection */
2645	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2646	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2647	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2648	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2649	cbs.slowpath_cb = NULL;
2650	cbs.cookie = p_hwfn;
2651
2652	memset(&data, 0, sizeof(data));
2653	data.input.conn_type = QED_LL2_TYPE_IWARP;
2654	data.input.mtu = params->max_mtu;
2655	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2656	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2657	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2658	data.input.tx_tc = PKT_LB_TC;
2659	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2660	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2661	data.cbs = &cbs;
2662
2663	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2664	if (rc) {
2665		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2666		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2667		return rc;
2668	}
2669
2670	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2671	if (rc) {
2672		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2673		goto err;
2674	}
2675
2676	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2677	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2678					 QED_IWARP_LL2_SYN_RX_SIZE,
2679					 buff_size,
2680					 iwarp_info->ll2_syn_handle);
2681	if (rc)
2682		goto err;
2683
2684	/* Start OOO connection */
2685	data.input.conn_type = QED_LL2_TYPE_OOO;
2686	data.input.mtu = params->max_mtu;
2687
2688	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2689		     iwarp_info->max_mtu;
2690	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2691
2692	data.input.rx_num_desc = n_ooo_bufs;
2693	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2694
2695	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2696	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2697	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2698
2699	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2700	if (rc)
2701		goto err;
2702
2703	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2704	if (rc)
2705		goto err;
2706
2707	/* Start Unaligned MPA connection */
2708	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2709	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2710
2711	memset(&data, 0, sizeof(data));
2712	data.input.conn_type = QED_LL2_TYPE_IWARP;
2713	data.input.mtu = params->max_mtu;
2714	/* FW requires at least 2 rx buffers to be available on the unaligned
2715	 * connection once a packet arrives out of order, to handle the case
2716	 * where it is a partial fpdu.
2717	 */
2718	data.input.rx_num_desc = n_ooo_bufs * 2;
2719	data.input.tx_num_desc = data.input.rx_num_desc;
2720	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2721	data.input.tx_tc = PKT_LB_TC;
2722	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2723	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2724	data.input.secondary_queue = true;
2725	data.cbs = &cbs;
2726
2727	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2728	if (rc)
2729		goto err;
2730
2731	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2732	if (rc)
2733		goto err;
2734
2735	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2736					 data.input.rx_num_desc,
2737					 buff_size,
2738					 iwarp_info->ll2_mpa_handle);
2739	if (rc)
2740		goto err;
2741
2742	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2743					    sizeof(*iwarp_info->partial_fpdus),
2744					    GFP_KERNEL);
2745	if (!iwarp_info->partial_fpdus) {
		rc = -ENOMEM;
2746		goto err;
	}
2747
2748	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2749
2750	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2751	if (!iwarp_info->mpa_intermediate_buf) {
		rc = -ENOMEM;
2752		goto err;
	}
2753
2754	/* The mpa_bufs array holds pending RX packets received on the
2755	 * mpa ll2 that have no room on the tx ring and require later
2756	 * processing. Allocation of such a struct must not fail, therefore
2757	 * we allocate enough to take care of all rx packets.
2758	 */
2759	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2760				       sizeof(*iwarp_info->mpa_bufs),
2761				       GFP_KERNEL);
2762	if (!iwarp_info->mpa_bufs) {
		rc = -ENOMEM;
2763		goto err;
	}
2764
2765	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2766	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2767	for (i = 0; i < data.input.rx_num_desc; i++)
2768		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2769			      &iwarp_info->mpa_buf_list);
2770	return rc;
2771err:
2772	qed_iwarp_ll2_stop(p_hwfn);
2773
2774	return rc;
2775}
2776
2777static struct {
2778	u32 two_ports;
2779	u32 four_ports;
2780} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
2781	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
2782	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
2783};
2784
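/* Initialize iWARP parameters (tcp flags, receive window, MPA negotiation),
 * register the async event callback and start the ll2 connections.
 */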
2785int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2786		    struct qed_rdma_start_in_params *params)
2787{
2788	struct qed_dev *cdev = p_hwfn->cdev;
2789	struct qed_iwarp_info *iwarp_info;
2790	enum chip_ids chip_id;
2791	u32 rcv_wnd_size;
2792
2793	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2794
2795	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2796
2797	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2798	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2799		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2800		qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2801
2802	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
2803	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2804	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2805	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
2806	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2807	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2808
2809	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2810
2811	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2812				MPA_RTR_TYPE_ZERO_WRITE |
2813				MPA_RTR_TYPE_ZERO_READ;
2814
2815	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2816	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2817	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2818
2819	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2820				  qed_iwarp_async_event);
2821	qed_ooo_setup(p_hwfn);
2822
2823	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2824}
2825
2826int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
2827{
2828	int rc;
2829
2830	qed_iwarp_free_prealloc_ep(p_hwfn);
2831	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2832	if (rc)
2833		return rc;
2834
2835	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
2836
2837	return qed_iwarp_ll2_stop(p_hwfn);
2838}
2839
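/* Close connection ramrod completed - move the QP to error, mark the ep
 * closed, remove it from the ep list and report a CLOSE event upward.
 */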
2840static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2841				  struct qed_iwarp_ep *ep,
2842				  u8 fw_return_code)
2843{
2844	struct qed_iwarp_cm_event_params params;
2845
2846	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2847
2848	params.event = QED_IWARP_EVENT_CLOSE;
2849	params.ep_context = ep;
2850	params.cm_info = &ep->cm_info;
2851	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2852			 0 : -ECONNRESET;
2853
2854	/* paired with READ_ONCE in destroy_qp */
2855	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2856
2857	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2858	list_del(&ep->list_entry);
2859	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2860
2861	ep->event_cb(ep->cb_context, &params);
2862}
2863
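/* Translate an FW exception code on a connection into the matching
 * QED_IWARP_EVENT_* and report it to the upper layer.
 */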
2864static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2865					 struct qed_iwarp_ep *ep,
2866					 int fw_ret_code)
2867{
2868	struct qed_iwarp_cm_event_params params;
2869	bool event_cb = false;
2870
2871	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2872		   ep->cid, fw_ret_code);
2873
2874	switch (fw_ret_code) {
2875	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2876		params.status = 0;
2877		params.event = QED_IWARP_EVENT_DISCONNECT;
2878		event_cb = true;
2879		break;
2880	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2881		params.status = -ECONNRESET;
2882		params.event = QED_IWARP_EVENT_DISCONNECT;
2883		event_cb = true;
2884		break;
2885	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2886		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2887		event_cb = true;
2888		break;
2889	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2890		params.event = QED_IWARP_EVENT_IRQ_FULL;
2891		event_cb = true;
2892		break;
2893	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2894		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2895		event_cb = true;
2896		break;
2897	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2898		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2899		event_cb = true;
2900		break;
2901	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2902		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2903		event_cb = true;
2904		break;
2905	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2906		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2907		event_cb = true;
2908		break;
2909	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2910		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2911		event_cb = true;
2912		break;
2913	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2914		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2915		event_cb = true;
2916		break;
2917	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2918		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2919		event_cb = true;
2920		break;
2921	default:
2922		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2923			   "Unhandled exception received...fw_ret_code=%d\n",
2924			   fw_ret_code);
2925		break;
2926	}
2927
2928	if (event_cb) {
2929		params.ep_context = ep;
2930		params.cm_info = &ep->cm_info;
2931		ep->event_cb(ep->cb_context, &params);
2932	}
2933}
2934
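/* TCP connection establishment failed - log the FW reason, mark the ep
 * closed and either recycle it (passive side) or report ACTIVE_COMPLETE
 * with an error status (active side).
 */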
2935static void
2936qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2937				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2938{
2939	struct qed_iwarp_cm_event_params params;
2940
2941	memset(&params, 0, sizeof(params));
2942	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2943	params.ep_context = ep;
2944	params.cm_info = &ep->cm_info;
2945	/* paired with READ_ONCE in destroy_qp */
2946	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2947
2948	switch (fw_return_code) {
2949	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2950		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2951			   "%s(0x%x) TCP connect got invalid packet\n",
2952			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2953		params.status = -ECONNRESET;
2954		break;
2955	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2956		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2957			   "%s(0x%x) TCP Connection Reset\n",
2958			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2959		params.status = -ECONNRESET;
2960		break;
2961	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2962		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2963			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2964		params.status = -EBUSY;
2965		break;
2966	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2967		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2968			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2969		params.status = -ECONNREFUSED;
2970		break;
2971	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2972		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2973			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2974		params.status = -ECONNRESET;
2975		break;
2976	default:
2977		DP_ERR(p_hwfn,
2978		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
2979		       QED_IWARP_CONNECT_MODE_STRING(ep),
2980		       ep->tcp_cid, fw_return_code);
2981		params.status = -ECONNRESET;
2982		break;
2983	}
2984
2985	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2986		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
2987		qed_iwarp_return_ep(p_hwfn, ep);
2988	} else {
2989		ep->event_cb(ep->cb_context, &params);
2990		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2991		list_del(&ep->list_entry);
2992		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2993	}
2994}
2995
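/* TCP 3-way handshake completed - on the passive side release the SYN buffer
 * and handle the received MPA request, on the active side start the MPA
 * offload. Failures are routed to qed_iwarp_tcp_connect_unsuccessful().
 */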
2996static void
2997qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
2998			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2999{
3000	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3001
3002	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3003		/* Done with the SYN packet, post back to ll2 rx */
3004		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3005
3006		ep->syn = NULL;
3007
3008		/* If connect failed - upper layer doesn't know about it */
3009		if (fw_return_code == RDMA_RETURN_OK)
3010			qed_iwarp_mpa_received(p_hwfn, ep);
3011		else
3012			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3013							   fw_return_code);
3014	} else {
3015		if (fw_return_code == RDMA_RETURN_OK)
3016			qed_iwarp_mpa_offload(p_hwfn, ep);
3017		else
3018			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3019							   fw_return_code);
3020	}
3021}
3022
3023static inline bool
3024qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3025{
3026	if (!ep || (ep->sig != QED_EP_SIG)) {
3027		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3028		return false;
3029	}
3030
3031	return true;
3032}
3033
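/* Dispatcher for PROTOCOLID_IWARP async events from the event ring: connect
 * completion, exceptions, MPA handshake progress, CID cleanup and SRQ/CQ
 * notifications.
 */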
3034static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
3035				 u8 fw_event_code, u16 echo,
3036				 union event_ring_data *data,
3037				 u8 fw_return_code)
3038{
3039	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
3040	struct regpair *fw_handle = &data->rdma_data.async_handle;
3041	struct qed_iwarp_ep *ep = NULL;
3042	u16 srq_offset;
3043	u16 srq_id;
3044	u16 cid;
3045
3046	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3047						       fw_handle->lo);
3048
3049	switch (fw_event_code) {
3050	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3051		/* Async completion after TCP 3-way handshake */
3052		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3053			return -EINVAL;
3054		DP_VERBOSE(p_hwfn,
3055			   QED_MSG_RDMA,
3056			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3057			   ep->tcp_cid, fw_return_code);
3058		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3059		break;
3060	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3061		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3062			return -EINVAL;
3063		DP_VERBOSE(p_hwfn,
3064			   QED_MSG_RDMA,
3065			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3066			   ep->cid, fw_return_code);
3067		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3068		break;
3069	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3070		/* Async completion for Close Connection ramrod */
3071		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3072			return -EINVAL;
3073		DP_VERBOSE(p_hwfn,
3074			   QED_MSG_RDMA,
3075			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3076			   ep->cid, fw_return_code);
3077		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3078		break;
3079	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3080		/* Async event for active side only */
3081		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3082			return -EINVAL;
3083		DP_VERBOSE(p_hwfn,
3084			   QED_MSG_RDMA,
3085			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3086			   ep->cid, fw_return_code);
3087		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3088		break;
3089	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3090		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3091			return -EINVAL;
3092		DP_VERBOSE(p_hwfn,
3093			   QED_MSG_RDMA,
3094			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3095			   ep->cid, fw_return_code);
3096		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3097		break;
3098	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3099		cid = (u16)le32_to_cpu(fw_handle->lo);
3100		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3101			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3102		qed_iwarp_cid_cleaned(p_hwfn, cid);
3103
3104		break;
3105	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
3106		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
3107		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3108		/* FW assigns a value that fits in a u16 */
3109		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3110		events.affiliated_event(events.context,
3111					QED_IWARP_EVENT_SRQ_EMPTY,
3112					&srq_id);
3113		break;
3114	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
3115		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
3116		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3117		/* FW assigns a value that fits in a u16 */
3118		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3119		events.affiliated_event(events.context,
3120					QED_IWARP_EVENT_SRQ_LIMIT,
3121					&srq_id);
3122		break;
3123	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3124		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3125
3126		p_hwfn->p_rdma_info->events.affiliated_event(
3127			p_hwfn->p_rdma_info->events.context,
3128			QED_IWARP_EVENT_CQ_OVERFLOW,
3129			(void *)fw_handle);
3130		break;
3131	default:
3132		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3133		       fw_event_code);
3134		return -EINVAL;
3135	}
3136	return 0;
3137}
3138
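/* Create a listener for the given ip/port/vlan and add it to the listen
 * list; incoming SYNs are matched against it in qed_iwarp_ll2_comp_syn_pkt().
 */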
3139int
3140qed_iwarp_create_listen(void *rdma_cxt,
3141			struct qed_iwarp_listen_in *iparams,
3142			struct qed_iwarp_listen_out *oparams)
3143{
3144	struct qed_hwfn *p_hwfn = rdma_cxt;
3145	struct qed_iwarp_listener *listener;
3146
3147	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3148	if (!listener)
3149		return -ENOMEM;
3150
3151	listener->ip_version = iparams->ip_version;
3152	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3153	listener->port = iparams->port;
3154	listener->vlan = iparams->vlan;
3155
3156	listener->event_cb = iparams->event_cb;
3157	listener->cb_context = iparams->cb_context;
3158	listener->max_backlog = iparams->max_backlog;
3159	oparams->handle = listener;
3160
3161	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3162	list_add_tail(&listener->list_entry,
3163		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3164	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3165
3166	DP_VERBOSE(p_hwfn,
3167		   QED_MSG_RDMA,
3168		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3169		   listener->event_cb,
3170		   listener,
3171		   listener->ip_addr[0],
3172		   listener->ip_addr[1],
3173		   listener->ip_addr[2],
3174		   listener->ip_addr[3], listener->port, listener->vlan);
3175
3176	return 0;
3177}
3178
3179int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3180{
3181	struct qed_iwarp_listener *listener = handle;
3182	struct qed_hwfn *p_hwfn = rdma_cxt;
3183
3184	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3185
3186	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3187	list_del(&listener->list_entry);
3188	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3189
3190	kfree(listener);
3191
3192	return 0;
3193}
3194
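/* Post the MPA_OFFLOAD_SEND_RTR ramrod on the QP associated with the given
 * ep context.
 */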
3195int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3196{
3197	struct qed_hwfn *p_hwfn = rdma_cxt;
3198	struct qed_sp_init_data init_data;
3199	struct qed_spq_entry *p_ent;
3200	struct qed_iwarp_ep *ep;
3201	struct qed_rdma_qp *qp;
3202	int rc;
3203
3204	ep = iparams->ep_context;
3205	if (!ep) {
3206		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
3207		return -EINVAL;
3208	}
3209
3210	qp = ep->qp;
3211
3212	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3213		   qp->icid, ep->tcp_cid);
3214
3215	memset(&init_data, 0, sizeof(init_data));
3216	init_data.cid = qp->icid;
3217	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3218	init_data.comp_mode = QED_SPQ_MODE_CB;
3219
3220	rc = qed_sp_init_request(p_hwfn, &p_ent,
3221				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3222				 PROTOCOLID_IWARP, &init_data);
3223
3224	if (rc)
3225		return rc;
3226
3227	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3228
3229	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3230
3231	return rc;
3232}
3233
3234void
3235qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3236		   struct qed_rdma_query_qp_out_params *out_params)
3237{
3238	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3239}