   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#include <linux/if_ether.h>
   8#include <linux/if_vlan.h>
   9#include <linux/ip.h>
  10#include <linux/ipv6.h>
  11#include <linux/spinlock.h>
  12#include <linux/tcp.h>
  13#include "qed_cxt.h"
  14#include "qed_hw.h"
  15#include "qed_ll2.h"
  16#include "qed_rdma.h"
  17#include "qed_reg_addr.h"
  18#include "qed_sp.h"
  19#include "qed_ooo.h"
  20
  21#define QED_IWARP_ORD_DEFAULT		32
  22#define QED_IWARP_IRD_DEFAULT		32
  23#define QED_IWARP_MAX_FW_MSS		4120
  24
  25#define QED_EP_SIG 0xecabcdef
  26
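/* MPA v2 enhanced-negotiation header, carried at the start of the MPA
 * private data. The low 14 bits of each field (MPA_V2_IRD_ORD_MASK) hold
 * the IRD/ORD values; the upper bits carry the peer-to-peer and RTR flags
 * defined below.
 */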
  27struct mpa_v2_hdr {
  28	__be16 ird;
  29	__be16 ord;
  30};
  31
  32#define MPA_V2_PEER2PEER_MODEL  0x8000
  33#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  34#define MPA_V2_READ_RTR         0x4000	/* on ord */
  35#define MPA_V2_WRITE_RTR        0x8000
  36#define MPA_V2_IRD_ORD_MASK     0x3FFF
  37
  38#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  39
  40#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  41
  42#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
  43#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
  44#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
  45#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
  46
  47#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  48#define TIMESTAMP_HEADER_SIZE		(12)
  49#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  50
  51#define QED_IWARP_TS_EN			BIT(0)
  52#define QED_IWARP_DA_EN			BIT(1)
  53#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  54#define QED_IWARP_PARAM_P2P		(1)
  55
  56#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  57#define QED_IWARP_DEF_CWND_FACTOR	(4)
  58#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  59#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  60#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  61
  62static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
  63				 __le16 echo, union event_ring_data *data,
  64				 u8 fw_return_code);
  65
  66/* Override devinfo with iWARP specific values */
  67void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  68{
  69	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  70
  71	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  72	dev->max_qp = min_t(u32,
  73			    IWARP_MAX_QPS,
  74			    p_hwfn->p_rdma_info->num_qps) -
  75		      QED_IWARP_PREALLOC_CNT;
  76
  77	dev->max_cq = dev->max_qp;
  78
  79	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
  80	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
  81}
  82
  83void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
  84{
  85	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
  86	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
  87	p_hwfn->b_rdma_enabled_in_prs = true;
  88}
  89
  90/* We have two cid maps: one for tcp, which should be used only from passive
  91 * syn processing and replacing a pre-allocated ep in the list, and a second
  92 * one for active tcp and for QPs.
  93 */
  94static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
  95{
  96	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
  97
  98	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
  99
 100	if (cid < QED_IWARP_PREALLOC_CNT)
 101		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 102				    cid);
 103	else
 104		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 105
 106	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 107}
 108
 109void
 110qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 111			 struct iwarp_init_func_ramrod_data *p_ramrod)
 112{
 113	p_ramrod->iwarp.ll2_ooo_q_index =
 114	    RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
 115	    p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 116
 117	p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
 118	p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
 119	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 120
 121	return;
 122}
 123
 124static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 125{
 126	int rc;
 127
 128	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 129	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 130	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 131	if (rc) {
 132		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 133		return rc;
 134	}
 135	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 136
 137	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 138	if (rc)
 139		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 140
 141	return rc;
 142}
 143
 144static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 145{
 146	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 147
 148	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 149	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 150	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 151}
 152
 153/* This function allocates a cid for passive tcp (called from syn receive).
 154 * It is separate from the regular cid allocation because these cids are
 155 * assured to already have ilt allocated. They are preallocated to ensure
 156 * that we won't need to allocate memory during syn processing.
 157 */
 158static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 159{
 160	int rc;
 161
 162	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 163
 164	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 165				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 166
 167	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 168
 169	if (rc) {
 170		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 171			   "can't allocate iwarp tcp cid max-count=%d\n",
 172			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 173
 174		*cid = QED_IWARP_INVALID_TCP_CID;
 175		return rc;
 176	}
 177
 178	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 179					    p_hwfn->p_rdma_info->proto);
 180	return 0;
 181}
 182
 183int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 184			struct qed_rdma_qp *qp,
 185			struct qed_rdma_create_qp_out_params *out_params)
 186{
 187	struct iwarp_create_qp_ramrod_data *p_ramrod;
 188	struct qed_sp_init_data init_data;
 189	struct qed_spq_entry *p_ent;
 190	u16 physical_queue;
 191	u32 cid;
 192	int rc;
 193
 194	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 195					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 196					      &qp->shared_queue_phys_addr,
 197					      GFP_KERNEL);
 198	if (!qp->shared_queue)
 199		return -ENOMEM;
 200
 201	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 202	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 203	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 204	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 205	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 206	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 207	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 208	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 209
 210	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 211	if (rc)
 212		goto err1;
 213
 214	qp->icid = (u16)cid;
 215
 216	memset(&init_data, 0, sizeof(init_data));
 217	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 218	init_data.cid = qp->icid;
 219	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 220
 221	rc = qed_sp_init_request(p_hwfn, &p_ent,
 222				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 223				 PROTOCOLID_IWARP, &init_data);
 224	if (rc)
 225		goto err2;
 226
 227	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 228
 229	SET_FIELD(p_ramrod->flags,
 230		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 231		  qp->fmr_and_reserved_lkey);
 232
 233	SET_FIELD(p_ramrod->flags,
 234		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 235
 236	SET_FIELD(p_ramrod->flags,
 237		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 238		  qp->incoming_rdma_read_en);
 239
 240	SET_FIELD(p_ramrod->flags,
 241		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 242		  qp->incoming_rdma_write_en);
 243
 244	SET_FIELD(p_ramrod->flags,
 245		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 246		  qp->incoming_atomic_en);
 247
 248	SET_FIELD(p_ramrod->flags,
 249		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 250
 251	p_ramrod->pd = cpu_to_le16(qp->pd);
 252	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
 253	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
 254
 255	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 256	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 257	p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
 258	p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
 259
 260	p_ramrod->cq_cid_for_sq =
 261	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 262	p_ramrod->cq_cid_for_rq =
 263	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 264
 265	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 266
 267	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 268	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 269	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 270	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 271
 272	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 273	if (rc)
 274		goto err2;
 275
 276	return rc;
 277
 278err2:
 279	qed_iwarp_cid_cleaned(p_hwfn, cid);
 280err1:
 281	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 282			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 283			  qp->shared_queue, qp->shared_queue_phys_addr);
 284
 285	return rc;
 286}
 287
 288static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 289{
 290	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 291	struct qed_sp_init_data init_data;
 292	struct qed_spq_entry *p_ent;
 293	u16 flags, trans_to_state;
 294	int rc;
 295
 296	/* Get SPQ entry */
 297	memset(&init_data, 0, sizeof(init_data));
 298	init_data.cid = qp->icid;
 299	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 300	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 301
 302	rc = qed_sp_init_request(p_hwfn, &p_ent,
 303				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 304				 p_hwfn->p_rdma_info->proto, &init_data);
 305	if (rc)
 306		return rc;
 307
 308	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 309
 310	flags = le16_to_cpu(p_ramrod->flags);
 311	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
 312	p_ramrod->flags = cpu_to_le16(flags);
 313
 314	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 315		trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 316	else
 317		trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 318
 319	p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);
 320
 321	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 322
 323	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
 324
 325	return rc;
 326}
 327
 328enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 329{
 330	switch (state) {
 331	case QED_ROCE_QP_STATE_RESET:
 332	case QED_ROCE_QP_STATE_INIT:
 333	case QED_ROCE_QP_STATE_RTR:
 334		return QED_IWARP_QP_STATE_IDLE;
 335	case QED_ROCE_QP_STATE_RTS:
 336		return QED_IWARP_QP_STATE_RTS;
 337	case QED_ROCE_QP_STATE_SQD:
 338		return QED_IWARP_QP_STATE_CLOSING;
 339	case QED_ROCE_QP_STATE_ERR:
 340		return QED_IWARP_QP_STATE_ERROR;
 341	case QED_ROCE_QP_STATE_SQE:
 342		return QED_IWARP_QP_STATE_TERMINATE;
 343	default:
 344		return QED_IWARP_QP_STATE_ERROR;
 345	}
 346}
 347
 348static enum qed_roce_qp_state
 349qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 350{
 351	switch (state) {
 352	case QED_IWARP_QP_STATE_IDLE:
 353		return QED_ROCE_QP_STATE_INIT;
 354	case QED_IWARP_QP_STATE_RTS:
 355		return QED_ROCE_QP_STATE_RTS;
 356	case QED_IWARP_QP_STATE_TERMINATE:
 357		return QED_ROCE_QP_STATE_SQE;
 358	case QED_IWARP_QP_STATE_CLOSING:
 359		return QED_ROCE_QP_STATE_SQD;
 360	case QED_IWARP_QP_STATE_ERROR:
 361		return QED_ROCE_QP_STATE_ERR;
 362	default:
 363		return QED_ROCE_QP_STATE_ERR;
 364	}
 365}
 366
 367static const char * const iwarp_state_names[] = {
 368	"IDLE",
 369	"RTS",
 370	"TERMINATE",
 371	"CLOSING",
 372	"ERROR",
 373};
 374
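/* Software state machine for the iWARP QP. The firmware is only told about
 * the transition (qed_iwarp_modify_fw) when the change is driven by the
 * upper layer (!internal) into CLOSING or ERROR; otherwise only the cached
 * iwarp_state is updated under qp_lock.
 */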
 375int
 376qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 377		    struct qed_rdma_qp *qp,
 378		    enum qed_iwarp_qp_state new_state, bool internal)
 379{
 380	enum qed_iwarp_qp_state prev_iw_state;
 381	bool modify_fw = false;
 382	int rc = 0;
 383
 384	/* Modify QP can be called from the upper layer or as a result of an
 385	 * async RST/FIN, therefore we need to protect it with a lock.
 386	 */
 387	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 388	prev_iw_state = qp->iwarp_state;
 389
 390	if (prev_iw_state == new_state) {
 391		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 392		return 0;
 393	}
 394
 395	switch (prev_iw_state) {
 396	case QED_IWARP_QP_STATE_IDLE:
 397		switch (new_state) {
 398		case QED_IWARP_QP_STATE_RTS:
 399			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 400			break;
 401		case QED_IWARP_QP_STATE_ERROR:
 402			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 403			if (!internal)
 404				modify_fw = true;
 405			break;
 406		default:
 407			break;
 408		}
 409		break;
 410	case QED_IWARP_QP_STATE_RTS:
 411		switch (new_state) {
 412		case QED_IWARP_QP_STATE_CLOSING:
 413			if (!internal)
 414				modify_fw = true;
 415
 416			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 417			break;
 418		case QED_IWARP_QP_STATE_ERROR:
 419			if (!internal)
 420				modify_fw = true;
 421			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 422			break;
 423		default:
 424			break;
 425		}
 426		break;
 427	case QED_IWARP_QP_STATE_ERROR:
 428		switch (new_state) {
 429		case QED_IWARP_QP_STATE_IDLE:
 430
 431			qp->iwarp_state = new_state;
 432			break;
 433		case QED_IWARP_QP_STATE_CLOSING:
 434			/* could happen due to race... do nothing.... */
 435			break;
 436		default:
 437			rc = -EINVAL;
 438		}
 439		break;
 440	case QED_IWARP_QP_STATE_TERMINATE:
 441	case QED_IWARP_QP_STATE_CLOSING:
 442		qp->iwarp_state = new_state;
 443		break;
 444	default:
 445		break;
 446	}
 447
 448	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 449		   qp->icid,
 450		   iwarp_state_names[prev_iw_state],
 451		   iwarp_state_names[qp->iwarp_state],
 452		   internal ? "internal" : "");
 453
 454	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 455
 456	if (modify_fw)
 457		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 458
 459	return rc;
 460}
 461
 462int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 463{
 464	struct qed_sp_init_data init_data;
 465	struct qed_spq_entry *p_ent;
 466	int rc;
 467
 468	/* Get SPQ entry */
 469	memset(&init_data, 0, sizeof(init_data));
 470	init_data.cid = qp->icid;
 471	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 472	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 473
 474	rc = qed_sp_init_request(p_hwfn, &p_ent,
 475				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 476				 p_hwfn->p_rdma_info->proto, &init_data);
 477	if (rc)
 478		return rc;
 479
 480	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 481
 482	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 483
 484	return rc;
 485}
 486
 487static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 488				 struct qed_iwarp_ep *ep,
 489				 bool remove_from_active_list)
 490{
 491	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 492			  sizeof(*ep->ep_buffer_virt),
 493			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 494
 495	if (remove_from_active_list) {
 496		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 497		list_del(&ep->list_entry);
 498		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 499	}
 500
 501	if (ep->qp)
 502		ep->qp->ep = NULL;
 503
 504	kfree(ep);
 505}
 506
 507int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 508{
 509	struct qed_iwarp_ep *ep = qp->ep;
 510	int wait_count = 0;
 511	int rc = 0;
 512
 513	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 514		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 515					 QED_IWARP_QP_STATE_ERROR, false);
 516		if (rc)
 517			return rc;
 518	}
 519
 520	/* Make sure ep is closed before returning and freeing memory. */
 521	if (ep) {
 522		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
 523		       wait_count++ < 200)
 524			msleep(100);
 525
 526		if (ep->state != QED_IWARP_EP_CLOSED)
 527			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 528				  ep->state);
 529
 530		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 531	}
 532
 533	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 534
 535	if (qp->shared_queue)
 536		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 537				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 538				  qp->shared_queue, qp->shared_queue_phys_addr);
 539
 540	return rc;
 541}
 542
 543static int
 544qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 545{
 546	struct qed_iwarp_ep *ep;
 547	int rc;
 548
 549	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 550	if (!ep)
 551		return -ENOMEM;
 552
 553	ep->state = QED_IWARP_EP_INIT;
 554
 555	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 556						sizeof(*ep->ep_buffer_virt),
 557						&ep->ep_buffer_phys,
 558						GFP_KERNEL);
 559	if (!ep->ep_buffer_virt) {
 560		rc = -ENOMEM;
 561		goto err;
 562	}
 563
 564	ep->sig = QED_EP_SIG;
 565
 566	*ep_out = ep;
 567
 568	return 0;
 569
 570err:
 571	kfree(ep);
 572	return rc;
 573}
 574
 575static void
 576qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 577			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 578{
 579	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 580		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 581		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 582		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 583		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 584		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 585		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 586
 587	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 588		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 589			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 590			   p_tcp_ramrod->tcp.local_ip,
 591			   p_tcp_ramrod->tcp.local_port,
 592			   p_tcp_ramrod->tcp.remote_ip,
 593			   p_tcp_ramrod->tcp.remote_port,
 594			   p_tcp_ramrod->tcp.vlan_id);
 595	} else {
 596		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 597			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 598			   p_tcp_ramrod->tcp.local_ip,
 599			   p_tcp_ramrod->tcp.local_port,
 600			   p_tcp_ramrod->tcp.remote_ip,
 601			   p_tcp_ramrod->tcp.remote_port,
 602			   p_tcp_ramrod->tcp.vlan_id);
 603	}
 604
 605	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 606		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 607		   p_tcp_ramrod->tcp.flow_label,
 608		   p_tcp_ramrod->tcp.ttl,
 609		   p_tcp_ramrod->tcp.tos_or_tc,
 610		   p_tcp_ramrod->tcp.mss,
 611		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 612		   p_tcp_ramrod->tcp.connect_mode,
 613		   p_tcp_ramrod->tcp.flags);
 614
 615	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 616		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 617		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 618		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 619}
 620
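/* Build and post the TCP_OFFLOAD ramrod for an endpoint: MAC addresses,
 * IP/port tuple, MSS, cwnd and keepalive defaults. For passive connections
 * the SYN payload address/length is passed as well and the ramrod completes
 * through a callback (QED_SPQ_MODE_CB) instead of blocking.
 */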
 621static int
 622qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 623{
 624	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 625	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 626	struct tcp_offload_params_opt2 *tcp;
 627	struct qed_sp_init_data init_data;
 628	struct qed_spq_entry *p_ent;
 629	dma_addr_t async_output_phys;
 630	dma_addr_t in_pdata_phys;
 631	u16 physical_q;
 632	u16 flags = 0;
 633	u8 tcp_flags;
 634	int rc;
 635	int i;
 636
 637	memset(&init_data, 0, sizeof(init_data));
 638	init_data.cid = ep->tcp_cid;
 639	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 640	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 641		init_data.comp_mode = QED_SPQ_MODE_CB;
 642	else
 643		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 644
 645	rc = qed_sp_init_request(p_hwfn, &p_ent,
 646				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 647				 PROTOCOLID_IWARP, &init_data);
 648	if (rc)
 649		return rc;
 650
 651	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 652
 653	in_pdata_phys = ep->ep_buffer_phys +
 654			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 655	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 656		       in_pdata_phys);
 657
 658	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 659	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 660
 661	async_output_phys = ep->ep_buffer_phys +
 662			    offsetof(struct qed_iwarp_ep_memory, async_output);
 663	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 664		       async_output_phys);
 665
 666	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 667	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 668
 669	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 670	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 671	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 672	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 673	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 674
 675	tcp = &p_tcp_ramrod->tcp;
 676	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 677			    &tcp->remote_mac_addr_mid,
 678			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 679	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 680			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 681
 682	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 683
 684	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 685
 686	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 687		  !!(tcp_flags & QED_IWARP_TS_EN));
 688
 689	SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 690		  !!(tcp_flags & QED_IWARP_DA_EN));
 691
 692	tcp->flags = cpu_to_le16(flags);
 693	tcp->ip_version = ep->cm_info.ip_version;
 694
 695	for (i = 0; i < 4; i++) {
 696		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 697		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 698	}
 699
 700	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 701	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 702	tcp->mss = cpu_to_le16(ep->mss);
 703	tcp->flow_label = 0;
 704	tcp->ttl = 0x40;
 705	tcp->tos_or_tc = 0;
 706
 707	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
 708	tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
 709	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 710	tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
 711	tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);
 712
 713	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 714	tcp->connect_mode = ep->connect_mode;
 715
 716	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 717		tcp->syn_ip_payload_length =
 718			cpu_to_le16(ep->syn_ip_payload_length);
 719		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 720		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 721	}
 722
 723	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 724
 725	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 726
 727	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 728		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 729
 730	return rc;
 731}
 732
 733static void
 734qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 735{
 736	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 737	struct qed_iwarp_cm_event_params params;
 738	struct mpa_v2_hdr *mpa_v2;
 739	union async_output *async_data;
 740	u16 mpa_ord, mpa_ird;
 741	u8 mpa_hdr_size = 0;
 742	u16 ulp_data_len;
 743	u8 mpa_rev;
 744
 745	async_data = &ep->ep_buffer_virt->async_output;
 746
 747	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 748	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 749		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 750		   async_data->mpa_request.ulp_data_len,
 751		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 752
 753	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 754		/* Read ord/ird values from private data buffer */
 755		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 756		mpa_hdr_size = sizeof(*mpa_v2);
 757
 758		mpa_ord = ntohs(mpa_v2->ord);
 759		mpa_ird = ntohs(mpa_v2->ird);
 760
 761		/* Temporarily store the incoming ord/ird requested in cm_info;
 762		 * they are replaced with the negotiated values during accept.
 763		 */
 764		ep->cm_info.ord = (u8)min_t(u16,
 765					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 766					    QED_IWARP_ORD_DEFAULT);
 767
 768		ep->cm_info.ird = (u8)min_t(u16,
 769					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 770					    QED_IWARP_IRD_DEFAULT);
 771
 772		/* Peer2Peer negotiation */
 773		ep->rtr_type = MPA_RTR_TYPE_NONE;
 774		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 775			if (mpa_ord & MPA_V2_WRITE_RTR)
 776				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 777
 778			if (mpa_ord & MPA_V2_READ_RTR)
 779				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 780
 781			if (mpa_ird & MPA_V2_SEND_RTR)
 782				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 783
 784			ep->rtr_type &= iwarp_info->rtr_type;
 785
 786			/* if we're left with no match send our capabilities */
 787			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 788				ep->rtr_type = iwarp_info->rtr_type;
 789		}
 790
 791		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 792	} else {
 793		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 794		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 795		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 796	}
 797
 798	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 799		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 800		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 801		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 802
 803	/* Strip mpa v2 hdr from private data before sending to upper layer */
 804	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 805
 806	ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
 807	ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;
 808
 809	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 810	params.cm_info = &ep->cm_info;
 811	params.ep_context = ep;
 812	params.status = 0;
 813
 814	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 815	ep->event_cb(ep->cb_context, &params);
 816}
 817
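/* Build and post the MPA_OFFLOAD ramrod. On the reject path (no QP is
 * associated with the ep) the tcp_cid is used as the ramrod cid and
 * common->reject is set; otherwise the QP's shared queue and stats queue
 * are programmed and ep->cid is migrated to the QP icid after posting.
 */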
 818static int
 819qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 820{
 821	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 822	struct mpa_outgoing_params *common;
 823	struct qed_iwarp_info *iwarp_info;
 824	struct qed_sp_init_data init_data;
 825	dma_addr_t async_output_phys;
 826	struct qed_spq_entry *p_ent;
 827	dma_addr_t out_pdata_phys;
 828	dma_addr_t in_pdata_phys;
 829	struct qed_rdma_qp *qp;
 830	bool reject;
 831	u32 val;
 832	int rc;
 833
 834	if (!ep)
 835		return -EINVAL;
 836
 837	qp = ep->qp;
 838	reject = !qp;
 839
 840	memset(&init_data, 0, sizeof(init_data));
 841	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 842	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 843
 844	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 845		init_data.comp_mode = QED_SPQ_MODE_CB;
 846	else
 847		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 848
 849	rc = qed_sp_init_request(p_hwfn, &p_ent,
 850				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 851				 PROTOCOLID_IWARP, &init_data);
 852	if (rc)
 853		return rc;
 854
 855	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 856	common = &p_mpa_ramrod->common;
 857
 858	out_pdata_phys = ep->ep_buffer_phys +
 859			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 860	DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);
 861
 862	val = ep->cm_info.private_data_len;
 863	common->outgoing_ulp_buffer.len = cpu_to_le16(val);
 864	common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 865
 866	common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
 867	common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);
 868
 869	val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 870	p_mpa_ramrod->tcp_cid = cpu_to_le32(val);
 871
 872	in_pdata_phys = ep->ep_buffer_phys +
 873			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 874	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 875	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 876		       in_pdata_phys);
 877	p_mpa_ramrod->incoming_ulp_buffer.len =
 878	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 879	async_output_phys = ep->ep_buffer_phys +
 880			    offsetof(struct qed_iwarp_ep_memory, async_output);
 881	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 882		       async_output_phys);
 883	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 884	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 885
 886	if (!reject) {
 887		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 888			       qp->shared_queue_phys_addr);
 889		p_mpa_ramrod->stats_counter_id =
 890		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 891	} else {
 892		common->reject = 1;
 893	}
 894
 895	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 896	p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
 897	p_mpa_ramrod->mode = ep->mpa_rev;
 898	SET_FIELD(p_mpa_ramrod->rtr_pref,
 899		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 900
 901	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 902	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 903	if (!reject)
 904		ep->cid = qp->icid;	/* Now they're migrated. */
 905
 906	DP_VERBOSE(p_hwfn,
 907		   QED_MSG_RDMA,
 908		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 909		   reject ? 0xffff : qp->icid,
 910		   ep->tcp_cid,
 911		   rc,
 912		   ep->cm_info.ird,
 913		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 914	return rc;
 915}
 916
 917static void
 918qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 919{
 920	ep->state = QED_IWARP_EP_INIT;
 921	if (ep->qp)
 922		ep->qp->ep = NULL;
 923	ep->qp = NULL;
 924	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 925
 926	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
 927		/* We don't care about the return code, it's ok if tcp_cid
 928		 * remains invalid...in this case we'll defer allocation
 929		 */
 930		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 931	}
 932	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 933
 934	list_move_tail(&ep->list_entry,
 935		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 936
 937	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 938}
 939
 940static void
 941qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 942{
 943	struct mpa_v2_hdr *mpa_v2_params;
 944	union async_output *async_data;
 945	u16 mpa_ird, mpa_ord;
 946	u8 mpa_data_size = 0;
 947	u16 ulp_data_len;
 948
 949	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 950		mpa_v2_params =
 951			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 952		mpa_data_size = sizeof(*mpa_v2_params);
 953		mpa_ird = ntohs(mpa_v2_params->ird);
 954		mpa_ord = ntohs(mpa_v2_params->ord);
 955
 956		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 957		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 958	}
 959
 960	async_data = &ep->ep_buffer_virt->async_output;
 961	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 962
 963	ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
 964	ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
 965}
 966
 967static void
 968qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 969{
 970	struct qed_iwarp_cm_event_params params;
 971
 972	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 973		DP_NOTICE(p_hwfn,
 974			  "MPA reply event not expected on passive side!\n");
 975		return;
 976	}
 977
 978	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 979
 980	qed_iwarp_parse_private_data(p_hwfn, ep);
 981
 982	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 983		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 984		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 985
 986	params.cm_info = &ep->cm_info;
 987	params.ep_context = ep;
 988	params.status = 0;
 989
 990	ep->mpa_reply_processed = true;
 991
 992	ep->event_cb(ep->cb_context, &params);
 993}
 994
 995#define QED_IWARP_CONNECT_MODE_STRING(ep) \
 996	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
 997
 998/* Called as a result of the event:
 999 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1000 */
1001static void
1002qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1003		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1004{
1005	struct qed_iwarp_cm_event_params params;
1006
1007	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1008		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1009	else
1010		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1011
1012	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1013		qed_iwarp_parse_private_data(p_hwfn, ep);
1014
1015	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1016		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1017		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1018
1019	params.cm_info = &ep->cm_info;
1020
1021	params.ep_context = ep;
1022
1023	switch (fw_return_code) {
1024	case RDMA_RETURN_OK:
1025		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1026		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1027		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1028		ep->state = QED_IWARP_EP_ESTABLISHED;
1029		params.status = 0;
1030		break;
1031	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1032		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1033			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1034		params.status = -EBUSY;
1035		break;
1036	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1037		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1038			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1039		params.status = -ECONNREFUSED;
1040		break;
1041	case IWARP_CONN_ERROR_MPA_RST:
1042		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1043			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1044			  ep->tcp_cid);
1045		params.status = -ECONNRESET;
1046		break;
1047	case IWARP_CONN_ERROR_MPA_FIN:
1048		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1049			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1050		params.status = -ECONNREFUSED;
1051		break;
1052	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1053		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1054			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1055		params.status = -ECONNREFUSED;
1056		break;
1057	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1058		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1059			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1060		params.status = -ECONNREFUSED;
1061		break;
1062	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1063		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1064			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1065		params.status = -ECONNREFUSED;
1066		break;
1067	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1068		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1069			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1070		params.status = -ECONNREFUSED;
1071		break;
1072	case IWARP_CONN_ERROR_MPA_TERMINATE:
1073		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1074			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1075		params.status = -ECONNREFUSED;
1076		break;
1077	default:
1078		params.status = -ECONNRESET;
1079		break;
1080	}
1081
1082	if (fw_return_code != RDMA_RETURN_OK)
1083		/* paired with READ_ONCE in destroy_qp */
1084		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
1085
1086	ep->event_cb(ep->cb_context, &params);
1087
1088	/* On the passive side, if there is no associated QP (REJECT) we need
1089	 * to return the ep to the pool (in the regular case we add an element
1090	 * in accept instead of this one).
1091	 * In both cases we need to remove it from the ep_list.
1092	 */
1093	if (fw_return_code != RDMA_RETURN_OK) {
1094		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1095		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1096		    (!ep->qp)) {	/* Rejected */
1097			qed_iwarp_return_ep(p_hwfn, ep);
1098		} else {
1099			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1100			list_del(&ep->list_entry);
1101			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1102		}
1103	}
1104}
1105
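/* Write the outgoing MPA v2 header (ird/ord plus peer-to-peer/RTR bits)
 * into out_pdata and report its size, so callers can copy the
 * caller-supplied private data right after it.
 */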
1106static void
1107qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1108			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1109{
1110	struct mpa_v2_hdr *mpa_v2_params;
1111	u16 mpa_ird, mpa_ord;
1112
1113	*mpa_data_size = 0;
1114	if (MPA_REV2(ep->mpa_rev)) {
1115		mpa_v2_params =
1116		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1117		*mpa_data_size = sizeof(*mpa_v2_params);
1118
1119		mpa_ird = (u16)ep->cm_info.ird;
1120		mpa_ord = (u16)ep->cm_info.ord;
1121
1122		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1123			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1124
1125			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1126				mpa_ird |= MPA_V2_SEND_RTR;
1127
1128			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1129				mpa_ord |= MPA_V2_WRITE_RTR;
1130
1131			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1132				mpa_ord |= MPA_V2_READ_RTR;
1133		}
1134
1135		mpa_v2_params->ird = htons(mpa_ird);
1136		mpa_v2_params->ord = htons(mpa_ord);
1137
1138		DP_VERBOSE(p_hwfn,
1139			   QED_MSG_RDMA,
1140			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1141			   mpa_v2_params->ird,
1142			   mpa_v2_params->ord,
1143			   *((u32 *)mpa_v2_params),
1144			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1145			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1146			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1147			   !!(mpa_ird & MPA_V2_SEND_RTR),
1148			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1149			   !!(mpa_ord & MPA_V2_READ_RTR));
1150	}
1151}
1152
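/* Active-side connect: allocate a cid and an ep, record the cm info,
 * prepare the outgoing MPA v2 private data, clamp the MSS to
 * QED_IWARP_MAX_FW_MSS and post the TCP offload ramrod for the new
 * connection.
 */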
1153int qed_iwarp_connect(void *rdma_cxt,
1154		      struct qed_iwarp_connect_in *iparams,
1155		      struct qed_iwarp_connect_out *oparams)
1156{
1157	struct qed_hwfn *p_hwfn = rdma_cxt;
1158	struct qed_iwarp_info *iwarp_info;
1159	struct qed_iwarp_ep *ep;
1160	u8 mpa_data_size = 0;
1161	u32 cid;
1162	int rc;
1163
1164	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1165	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1166		DP_NOTICE(p_hwfn,
1167			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1168			  iparams->qp->icid, iparams->cm_info.ord,
1169			  iparams->cm_info.ird);
1170
1171		return -EINVAL;
1172	}
1173
1174	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1175
1176	/* Allocate ep object */
1177	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1178	if (rc)
1179		return rc;
1180
1181	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1182	if (rc)
1183		goto err;
1184
1185	ep->tcp_cid = cid;
1186
1187	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1188	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1189	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1190
1191	ep->qp = iparams->qp;
1192	ep->qp->ep = ep;
1193	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1194	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1195	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1196
1197	ep->cm_info.ord = iparams->cm_info.ord;
1198	ep->cm_info.ird = iparams->cm_info.ird;
1199
1200	ep->rtr_type = iwarp_info->rtr_type;
1201	if (!iwarp_info->peer2peer)
1202		ep->rtr_type = MPA_RTR_TYPE_NONE;
1203
1204	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1205		ep->cm_info.ord = 1;
1206
1207	ep->mpa_rev = iwarp_info->mpa_rev;
1208
1209	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1210
1211	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1212	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1213				       mpa_data_size;
1214
1215	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1216	       iparams->cm_info.private_data,
1217	       iparams->cm_info.private_data_len);
1218
1219	ep->mss = iparams->mss;
1220	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1221
1222	ep->event_cb = iparams->event_cb;
1223	ep->cb_context = iparams->cb_context;
1224	ep->connect_mode = TCP_CONNECT_ACTIVE;
1225
1226	oparams->ep_context = ep;
1227
1228	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1229
1230	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1231		   iparams->qp->icid, ep->tcp_cid, rc);
1232
1233	if (rc) {
1234		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1235		goto err;
1236	}
1237
1238	return rc;
1239err:
1240	qed_iwarp_cid_cleaned(p_hwfn, cid);
1241
1242	return rc;
1243}
1244
1245static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1246{
1247	struct qed_iwarp_ep *ep = NULL;
1248	int rc;
1249
1250	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1251
1252	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1253		DP_ERR(p_hwfn, "Ep list is empty\n");
1254		goto out;
1255	}
1256
1257	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1258			      struct qed_iwarp_ep, list_entry);
1259
1260	/* In some cases we could have failed to allocate a tcp cid when the ep
1261	 * was added from accept / failure... retry now. This is not the common case.
1262	 */
1263	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1264		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1265
1266		/* if we fail we could look for another entry with a valid
1267		 * tcp_cid, but since we don't expect to reach this anyway
1268		 * it's not worth the handling
1269		 */
1270		if (rc) {
1271			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1272			ep = NULL;
1273			goto out;
1274		}
1275	}
1276
1277	list_del(&ep->list_entry);
1278
1279out:
1280	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1281	return ep;
1282}
1283
1284#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1285#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1286
1287/* This function waits for all the bits of a bmap to be cleared. As long as
1288 * there is progress (i.e. the number of bits left to be cleared decreases),
1289 * the function keeps waiting.
1290 */
1291static int
1292qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1293{
1294	int prev_weight = 0;
1295	int wait_count = 0;
1296	int weight = 0;
1297
1298	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1299	prev_weight = weight;
1300
1301	while (weight) {
1302		/* If the HW device is undergoing recovery, all resources are
1303		 * immediately reset without receiving a per-cid indication
1304		 * from HW. In this case we don't expect the cid_map to be
1305		 * cleared.
1306		 */
1307		if (p_hwfn->cdev->recov_in_prog)
1308			return 0;
1309
1310		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1311
1312		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1313
1314		if (prev_weight == weight) {
1315			wait_count++;
1316		} else {
1317			prev_weight = weight;
1318			wait_count = 0;
1319		}
1320
1321		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1322			DP_NOTICE(p_hwfn,
1323				  "%s bitmap wait timed out (%d cids pending)\n",
1324				  bmap->name, weight);
1325			return -EBUSY;
1326		}
1327	}
1328	return 0;
1329}
1330
1331static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1332{
1333	int rc;
1334	int i;
1335
1336	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1337					    &p_hwfn->p_rdma_info->tcp_cid_map);
1338	if (rc)
1339		return rc;
1340
1341	/* Now free the tcp cids from the main cid map */
1342	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1343		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1344
1345	/* Now wait for all cids to be completed */
1346	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1347					      &p_hwfn->p_rdma_info->cid_map);
1348}
1349
1350static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1351{
1352	struct qed_iwarp_ep *ep;
1353
1354	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1355		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1356
1357		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1358				      struct qed_iwarp_ep, list_entry);
1359
1360		if (!ep) {
1361			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362			break;
1363		}
1364		list_del(&ep->list_entry);
1365
1366		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1367
1368		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1369			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1370
1371		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1372	}
1373}
1374
1375static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1376{
1377	struct qed_iwarp_ep *ep;
1378	int rc = 0;
1379	int count;
1380	u32 cid;
1381	int i;
1382
1383	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1384	for (i = 0; i < count; i++) {
1385		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1386		if (rc)
1387			return rc;
1388
1389		/* During initialization we allocate from the main pool,
1390		 * afterwards we allocate only from the tcp_cid map.
1391		 */
1392		if (init) {
1393			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1394			if (rc)
1395				goto err;
1396			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1397		} else {
1398			/* We don't care about the return code, it's ok if
1399			 * tcp_cid remains invalid...in this case we'll
1400			 * defer allocation
1401			 */
1402			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1403		}
1404
1405		ep->tcp_cid = cid;
1406
1407		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1408		list_add_tail(&ep->list_entry,
1409			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1410		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1411	}
1412
1413	return rc;
1414
1415err:
1416	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1417
1418	return rc;
1419}
1420
1421int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1422{
1423	int rc;
1424
1425	/* Allocate bitmap for tcp cids. These are used by the passive side to
1426	 * ensure it can allocate, during dpc, a tcp cid that was pre-acquired
1427	 * and doesn't require dynamic allocation of ilt.
1428	 */
1429	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1430				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1431	if (rc) {
1432		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1433			   "Failed to allocate tcp cid, rc = %d\n", rc);
1434		return rc;
1435	}
1436
1437	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1438	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1439
1440	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1441	if (rc)
1442		return rc;
1443
1444	return qed_ooo_alloc(p_hwfn);
1445}
1446
1447void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1448{
1449	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1450
1451	qed_ooo_free(p_hwfn);
1452	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1453	kfree(iwarp_info->mpa_bufs);
1454	kfree(iwarp_info->partial_fpdus);
1455	kfree(iwarp_info->mpa_intermediate_buf);
1456}
1457
1458int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1459{
1460	struct qed_hwfn *p_hwfn = rdma_cxt;
1461	struct qed_iwarp_ep *ep;
1462	u8 mpa_data_size = 0;
1463	int rc;
1464
1465	ep = iparams->ep_context;
1466	if (!ep) {
1467		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
1468		return -EINVAL;
1469	}
1470
1471	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1472		   iparams->qp->icid, ep->tcp_cid);
1473
1474	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1475	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1476		DP_VERBOSE(p_hwfn,
1477			   QED_MSG_RDMA,
1478			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1479			   iparams->qp->icid,
1480			   ep->tcp_cid, iparams->ord, iparams->ird);
1481		return -EINVAL;
1482	}
1483
1484	qed_iwarp_prealloc_ep(p_hwfn, false);
1485
1486	ep->cb_context = iparams->cb_context;
1487	ep->qp = iparams->qp;
1488	ep->qp->ep = ep;
1489
1490	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1491		/* Negotiate ord/ird: if the upper layer requested an ord larger
1492		 * than the ird advertised by the remote, we need to decrease our ord.
1493		 */
1494		if (iparams->ord > ep->cm_info.ird)
1495			iparams->ord = ep->cm_info.ird;
1496
1497		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1498		    (iparams->ird == 0))
1499			iparams->ird = 1;
1500	}
1501
1502	/* Update cm_info ord/ird to be negotiated values */
1503	ep->cm_info.ord = iparams->ord;
1504	ep->cm_info.ird = iparams->ird;
1505
1506	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1507
1508	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1509	ep->cm_info.private_data_len = iparams->private_data_len +
1510				       mpa_data_size;
1511
1512	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1513	       iparams->private_data, iparams->private_data_len);
1514
1515	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1516	if (rc)
1517		qed_iwarp_modify_qp(p_hwfn,
1518				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1519
1520	return rc;
1521}
1522
1523int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1524{
1525	struct qed_hwfn *p_hwfn = rdma_cxt;
1526	struct qed_iwarp_ep *ep;
1527	u8 mpa_data_size = 0;
1528
1529	ep = iparams->ep_context;
1530	if (!ep) {
1531		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
1532		return -EINVAL;
1533	}
1534
1535	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1536
1537	ep->cb_context = iparams->cb_context;
1538	ep->qp = NULL;
1539
1540	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1541
1542	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1543	ep->cm_info.private_data_len = iparams->private_data_len +
1544				       mpa_data_size;
1545
1546	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1547	       iparams->private_data, iparams->private_data_len);
1548
1549	return qed_iwarp_mpa_offload(p_hwfn, ep);
1550}
1551
1552static void
1553qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1554			struct qed_iwarp_cm_info *cm_info)
1555{
1556	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1557		   cm_info->ip_version);
1558
1559	if (cm_info->ip_version == QED_TCP_IPV4)
1560		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1561			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1562			   cm_info->remote_ip, cm_info->remote_port,
1563			   cm_info->local_ip, cm_info->local_port,
1564			   cm_info->vlan);
1565	else
1566		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1567			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1568			   cm_info->remote_ip, cm_info->remote_port,
1569			   cm_info->local_ip, cm_info->local_port,
1570			   cm_info->vlan);
1571
1572	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1573		   "private_data_len = %x ord = %d, ird = %d\n",
1574		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1575}
1576
1577static int
1578qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1579		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1580{
1581	int rc;
1582
1583	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1584				    (u16)buf->buff_size, buf, 1);
1585	if (rc) {
1586		DP_NOTICE(p_hwfn,
1587			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1588			  rc, handle);
1589		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1590				  buf->data, buf->data_phys_addr);
1591		kfree(buf);
1592	}
1593
1594	return rc;
1595}
1596
1597static bool
1598qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1599{
1600	struct qed_iwarp_ep *ep = NULL;
1601	bool found = false;
1602
1603	list_for_each_entry(ep,
1604			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1605			    list_entry) {
1606		if ((ep->cm_info.local_port == cm_info->local_port) &&
1607		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1608		    (ep->cm_info.vlan == cm_info->vlan) &&
1609		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1610			    sizeof(cm_info->local_ip)) &&
1611		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1612			    sizeof(cm_info->remote_ip))) {
1613			found = true;
1614			break;
1615		}
1616	}
1617
1618	if (found) {
1619		DP_NOTICE(p_hwfn,
1620			  "SYN received on active connection - dropping\n");
1621		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1622
1623		return true;
1624	}
1625
1626	return false;
1627}
1628
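/* Find a listener matching an incoming SYN: the local port must match, and
 * either the listener was bound to the all-zero wildcard address or both
 * the local IP and the vlan match.
 */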
1629static struct qed_iwarp_listener *
1630qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1631		       struct qed_iwarp_cm_info *cm_info)
1632{
1633	struct qed_iwarp_listener *listener = NULL;
1634	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1635	bool found = false;
1636
1637	list_for_each_entry(listener,
1638			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1639			    list_entry) {
1640		if (listener->port == cm_info->local_port) {
1641			if (!memcmp(listener->ip_addr,
1642				    ip_zero, sizeof(ip_zero))) {
1643				found = true;
1644				break;
1645			}
1646
1647			if (!memcmp(listener->ip_addr,
1648				    cm_info->local_ip,
1649				    sizeof(cm_info->local_ip)) &&
1650			    (listener->vlan == cm_info->vlan)) {
1651				found = true;
1652				break;
1653			}
1654		}
1655	}
1656
1657	if (found) {
1658		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1659			   listener);
1660		return listener;
1661	}
1662
1663	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1664	return NULL;
1665}
1666
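/* Parse a SYN packet received on the ll2 connection: Ethernet header (with
 * an optional VLAN tag), IPv4 or IPv6 header and TCP header. Fills cm_info
 * with the address/port tuple and returns the IP payload length (TCP header
 * plus data) and the offset of the TCP header within the buffer.
 */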
1667static int
1668qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1669		       struct qed_iwarp_cm_info *cm_info,
1670		       void *buf,
1671		       u8 *remote_mac_addr,
1672		       u8 *local_mac_addr,
1673		       int *payload_len, int *tcp_start_offset)
1674{
1675	struct vlan_ethhdr *vethh;
1676	bool vlan_valid = false;
1677	struct ipv6hdr *ip6h;
1678	struct ethhdr *ethh;
1679	struct tcphdr *tcph;
1680	struct iphdr *iph;
1681	int eth_hlen;
1682	int ip_hlen;
1683	int eth_type;
1684	int i;
1685
1686	ethh = buf;
1687	eth_type = ntohs(ethh->h_proto);
1688	if (eth_type == ETH_P_8021Q) {
1689		vlan_valid = true;
1690		vethh = (struct vlan_ethhdr *)ethh;
1691		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1692		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1693	}
1694
1695	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1696
1697	if (!ether_addr_equal(ethh->h_dest,
1698			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1699		DP_VERBOSE(p_hwfn,
1700			   QED_MSG_RDMA,
1701			   "Got unexpected mac %pM instead of %pM\n",
1702			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1703		return -EINVAL;
1704	}
1705
1706	ether_addr_copy(remote_mac_addr, ethh->h_source);
1707	ether_addr_copy(local_mac_addr, ethh->h_dest);
1708
1709	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1710		   eth_type, ethh->h_source);
1711
1712	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1713		   eth_hlen, ethh->h_dest);
1714
1715	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1716
1717	if (eth_type == ETH_P_IP) {
1718		if (iph->protocol != IPPROTO_TCP) {
1719			DP_NOTICE(p_hwfn,
1720				  "Unexpected ip protocol on ll2 %x\n",
1721				  iph->protocol);
1722			return -EINVAL;
1723		}
1724
1725		cm_info->local_ip[0] = ntohl(iph->daddr);
1726		cm_info->remote_ip[0] = ntohl(iph->saddr);
1727		cm_info->ip_version = QED_TCP_IPV4;
1728
1729		ip_hlen = (iph->ihl) * sizeof(u32);
1730		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1731	} else if (eth_type == ETH_P_IPV6) {
1732		ip6h = (struct ipv6hdr *)iph;
1733
1734		if (ip6h->nexthdr != IPPROTO_TCP) {
1735			DP_NOTICE(p_hwfn,
1736				  "Unexpected ip protocol on ll2 %x\n",
1737				  ip6h->nexthdr);
1738			return -EINVAL;
1739		}
1740
1741		for (i = 0; i < 4; i++) {
1742			cm_info->local_ip[i] =
1743			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1744			cm_info->remote_ip[i] =
1745			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1746		}
1747		cm_info->ip_version = QED_TCP_IPV6;
1748
1749		ip_hlen = sizeof(*ip6h);
1750		*payload_len = ntohs(ip6h->payload_len);
1751	} else {
1752		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1753		return -EINVAL;
1754	}
1755
1756	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1757
1758	if (!tcph->syn) {
1759		DP_NOTICE(p_hwfn,
1760			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1761			  iph->ihl, tcph->source, tcph->dest);
1762		return -EINVAL;
1763	}
1764
1765	cm_info->local_port = ntohs(tcph->dest);
1766	cm_info->remote_port = ntohs(tcph->source);
1767
1768	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1769
1770	*tcp_start_offset = eth_hlen + ip_hlen;
1771
1772	return 0;
1773}
1774
1775static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1776						      u16 cid)
1777{
1778	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1779	struct qed_iwarp_fpdu *partial_fpdu;
1780	u32 idx;
1781
1782	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1783	if (idx >= iwarp_info->max_num_partial_fpdus) {
1784		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1785		       iwarp_info->max_num_partial_fpdus);
1786		return NULL;
1787	}
1788
1789	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1790
1791	return partial_fpdu;
1792}
1793
1794enum qed_iwarp_mpa_pkt_type {
1795	QED_IWARP_MPA_PKT_PACKED,
1796	QED_IWARP_MPA_PKT_PARTIAL,
1797	QED_IWARP_MPA_PKT_UNALIGNED
1798};
1799
1800#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1801#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1802#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1803
1804/* Pad to multiple of 4 */
1805#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1806#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1807	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1808					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1809					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
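/* Editor's worked example (not from the original source): an MPA length
 * field of 21 gives ALIGN(21 + 2, 4) + 4 = 24 + 4 = 28 bytes on the wire,
 * i.e. the 2-byte length field, the 21-byte ULPDU, 1 pad byte and the
 * 4-byte CRC32 digest.
 */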
1810
1811/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
1812#define QED_IWARP_MAX_BDS_PER_FPDU 3
1813
1814static const char * const pkt_type_str[] = {
1815	"QED_IWARP_MPA_PKT_PACKED",
1816	"QED_IWARP_MPA_PKT_PARTIAL",
1817	"QED_IWARP_MPA_PKT_UNALIGNED"
1818};
1819
1820static int
1821qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1822		      struct qed_iwarp_fpdu *fpdu,
1823		      struct qed_iwarp_ll2_buff *buf);
1824
1825static enum qed_iwarp_mpa_pkt_type
1826qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1827		       struct qed_iwarp_fpdu *fpdu,
1828		       u16 tcp_payload_len, u8 *mpa_data)
1829{
1830	enum qed_iwarp_mpa_pkt_type pkt_type;
1831	u16 mpa_len;
1832
1833	if (fpdu->incomplete_bytes) {
1834		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1835		goto out;
1836	}
1837
1838	/* special case of one byte remaining...
1839	 * lower byte will be read in the next packet
1840	 */
1841	if (tcp_payload_len == 1) {
1842		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1843		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1844		goto out;
1845	}
1846
1847	mpa_len = ntohs(*(__force __be16 *)mpa_data);
1848	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1849
1850	if (fpdu->fpdu_length <= tcp_payload_len)
1851		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1852	else
1853		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1854
1855out:
1856	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1857		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1858		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1859
1860	return pkt_type;
1861}
1862
1863static void
1864qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1865		    struct qed_iwarp_fpdu *fpdu,
1866		    struct unaligned_opaque_data *pkt_data,
1867		    u16 tcp_payload_size, u8 placement_offset)
1868{
1869	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1870
1871	fpdu->mpa_buf = buf;
1872	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874	fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
1875	fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
1876
1877	if (tcp_payload_size == 1)
1878		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879	else if (tcp_payload_size < fpdu->fpdu_length)
1880		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881	else
1882		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1883
1884	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885}
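/* Illustrative values for qed_iwarp_init_fpdu() above (editor's example,
 * assuming fpdu_length = 100):
 *  - tcp_payload_size >= 100: incomplete_bytes = 0, mpa_frag_len = 100,
 *    the fpdu is complete;
 *  - tcp_payload_size = 60: incomplete_bytes = 40, mpa_frag_len = 60,
 *    the remaining 40 bytes are expected in the next tcp segment;
 *  - tcp_payload_size = 1: only the upper byte of the length field has
 *    arrived, so incomplete_bytes is set to the QED_IWARP_INVALID_FPDU_LENGTH
 *    marker until qed_iwarp_update_fpdu_length() sees the lower byte.
 */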
1886
1887static int
1888qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1889		 struct qed_iwarp_fpdu *fpdu,
1890		 struct unaligned_opaque_data *pkt_data,
1891		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1892{
1893	u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
1894	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1895	int rc;
1896
1897	/* We need to copy the data from the partial packet stored in the fpdu
1898	 * to the new buf; for this we also need to move the data currently
1899	 * placed in the buf. The assumption is that the buffer is big enough,
1900	 * since fpdu_length <= mss. We use an intermediate buffer because
1901	 * we may need to copy the new data to an overlapping location.
1902	 */
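	/* e.g. (editor's illustration): with first_mpa_offset = 64 and
	 * mpa_frag_len = 80, copying the 80 stored bytes straight to
	 * buf->data[0] would overwrite part of the new payload still
	 * sitting at buf->data[64]; staging both pieces in
	 * mpa_intermediate_buf first avoids the overlap.
	 */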
1903	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1904		DP_ERR(p_hwfn,
1905		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1906		       buf->buff_size, fpdu->mpa_frag_len,
1907		       tcp_payload_size, fpdu->incomplete_bytes);
1908		return -EINVAL;
1909	}
1910
1911	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1912		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1913		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1914		   (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1915
1916	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1917	memcpy(tmp_buf + fpdu->mpa_frag_len,
1918	       (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
1919
1920	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1921	if (rc)
1922		return rc;
1923
1924	/* If we managed to post the buffer copy the data to the new buffer
1925	 * o/w this will occur in the next round...
1926	 */
1927	memcpy((u8 *)(buf->data), tmp_buf,
1928	       fpdu->mpa_frag_len + tcp_payload_size);
1929
1930	fpdu->mpa_buf = buf;
1931	/* fpdu->pkt_hdr remains as is */
1932	/* fpdu->mpa_frag is overridden with new buf */
1933	fpdu->mpa_frag = buf->data_phys_addr;
1934	fpdu->mpa_frag_virt = buf->data;
1935	fpdu->mpa_frag_len += tcp_payload_size;
1936
1937	fpdu->incomplete_bytes -= tcp_payload_size;
1938
1939	DP_VERBOSE(p_hwfn,
1940		   QED_MSG_RDMA,
1941		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1942		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1943		   fpdu->incomplete_bytes);
1944
1945	return 0;
1946}
1947
1948static void
1949qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1950			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1951{
1952	u16 mpa_len;
1953
1954	/* Update incomplete packets if needed */
1955	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1956		/* Missing lower byte is now available */
1957		mpa_len = fpdu->fpdu_length | *mpa_data;
1958		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1959		/* one byte of hdr */
1960		fpdu->mpa_frag_len = 1;
1961		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1962		DP_VERBOSE(p_hwfn,
1963			   QED_MSG_RDMA,
1964			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1965			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1966	}
1967}
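/* Worked example for the split length field (editor's illustration): if the
 * MPA length field is 0x0118 and only its upper byte arrives in a 1-byte
 * segment, qed_iwarp_mpa_classify() stores fpdu_length = 0x01 << 8 = 0x0100.
 * When the next segment delivers the lower byte 0x18, the function above
 * rebuilds mpa_len = 0x0100 | 0x18 = 0x0118 and recomputes
 * fpdu_length = ALIGN(0x0118 + 2, 4) + 4 = 0x120, with one header byte
 * already consumed (mpa_frag_len = 1).
 */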
1968
1969#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1970	(GET_FIELD((_curr_pkt)->flags,	   \
1971		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1972
1973/* This function recycles a buffer using the ll2 drop option. It relies on
1974 * that mechanism to ensure that all buffers posted to tx before this one
1975 * have completed. The buffer sent here is returned as the cookie in the tx
1976 * completion callback and can then be reposted to the rx chain. The flow
1977 * that requires this is an FPDU that splits over more than 3 tcp
1978 * segments. In this case the driver needs to re-post an rx buffer instead of
1979 * the one received, but it can't simply repost the buffer it copied from,
1980 * since that buffer may originally have held a packed FPDU that is still
1981 * partially posted to FW. The driver must ensure FW is done with it first.
1982 */
1983static int
1984qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1985		      struct qed_iwarp_fpdu *fpdu,
1986		      struct qed_iwarp_ll2_buff *buf)
1987{
1988	struct qed_ll2_tx_pkt_info tx_pkt;
1989	u8 ll2_handle;
1990	int rc;
1991
1992	memset(&tx_pkt, 0, sizeof(tx_pkt));
1993	tx_pkt.num_of_bds = 1;
1994	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1995	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1996	tx_pkt.first_frag = fpdu->pkt_hdr;
1997	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1998	buf->piggy_buf = NULL;
1999	tx_pkt.cookie = buf;
2000
2001	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2002
2003	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2004	if (rc)
2005		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2006			   "Can't drop packet rc=%d\n", rc);
2007
2008	DP_VERBOSE(p_hwfn,
2009		   QED_MSG_RDMA,
2010		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2011		   (unsigned long int)tx_pkt.first_frag,
2012		   tx_pkt.first_frag_len, buf, rc);
2013
2014	return rc;
2015}
2016
2017static int
2018qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2019{
2020	struct qed_ll2_tx_pkt_info tx_pkt;
2021	u8 ll2_handle;
2022	int rc;
2023
2024	memset(&tx_pkt, 0, sizeof(tx_pkt));
2025	tx_pkt.num_of_bds = 1;
2026	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2027	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2028
2029	tx_pkt.first_frag = fpdu->pkt_hdr;
2030	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2031	tx_pkt.enable_ip_cksum = true;
2032	tx_pkt.enable_l4_cksum = true;
2033	tx_pkt.calc_ip_len = true;
2034	/* vlan overload with enum iwarp_ll2_tx_queues */
2035	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2036
2037	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2038
2039	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2040	if (rc)
2041		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2042			   "Can't send right edge rc=%d\n", rc);
2043	DP_VERBOSE(p_hwfn,
2044		   QED_MSG_RDMA,
2045		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2046		   tx_pkt.num_of_bds,
2047		   (unsigned long int)tx_pkt.first_frag,
2048		   tx_pkt.first_frag_len, rc);
2049
2050	return rc;
2051}
2052
2053static int
2054qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2055		    struct qed_iwarp_fpdu *fpdu,
2056		    struct unaligned_opaque_data *curr_pkt,
2057		    struct qed_iwarp_ll2_buff *buf,
2058		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2059{
2060	struct qed_ll2_tx_pkt_info tx_pkt;
2061	u16 first_mpa_offset;
2062	u8 ll2_handle;
2063	int rc;
2064
2065	memset(&tx_pkt, 0, sizeof(tx_pkt));
2066
2067	/* An unaligned packet means it's split over two tcp segments. So the
2068	 * complete packet requires 3 bds, one for the header, one for the
2069	 * part of the fpdu of the first tcp segment, and the last fragment
2070	 * will point to the remainder of the fpdu. A packed pdu requires only
2071	 * two bds, one for the header and one for the data.
2072	 */
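	/* For instance (editor's illustrative numbers): an fpdu of 128 bytes
	 * whose first 100 bytes arrived in the previous tcp segment is sent
	 * with BD0 = packet header, BD1 = the 100 bytes already held in
	 * mpa_frag and BD2 = the remaining 28 bytes (incomplete_bytes) taken
	 * from the current buffer.
	 */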
2073	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2074	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2075	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2076
2077	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2078	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2079	    tcp_payload_size <= fpdu->fpdu_length)
2080		tx_pkt.cookie = fpdu->mpa_buf;
2081
2082	tx_pkt.first_frag = fpdu->pkt_hdr;
2083	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2084	tx_pkt.enable_ip_cksum = true;
2085	tx_pkt.enable_l4_cksum = true;
2086	tx_pkt.calc_ip_len = true;
2087	/* vlan overload with enum iwarp_ll2_tx_queues */
2088	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2089
2090	/* special case of unaligned packet and not packed, need to send
2091	 * both buffers as cookie to release.
2092	 */
2093	if (tcp_payload_size == fpdu->incomplete_bytes)
2094		fpdu->mpa_buf->piggy_buf = buf;
2095
2096	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2097
2098	/* Set first fragment to header */
2099	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100	if (rc)
2101		goto out;
2102
2103	/* Set second fragment to first part of packet */
2104	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2105					       fpdu->mpa_frag,
2106					       fpdu->mpa_frag_len);
2107	if (rc)
2108		goto out;
2109
2110	if (!fpdu->incomplete_bytes)
2111		goto out;
2112
2113	first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2114
2115	/* Set third fragment to second part of the packet */
2116	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2117					       ll2_handle,
2118					       buf->data_phys_addr +
2119					       first_mpa_offset,
2120					       fpdu->incomplete_bytes);
2121out:
2122	DP_VERBOSE(p_hwfn,
2123		   QED_MSG_RDMA,
2124		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2125		   tx_pkt.num_of_bds,
2126		   tx_pkt.first_frag_len,
2127		   fpdu->mpa_frag_len,
2128		   fpdu->incomplete_bytes, rc);
2129
2130	return rc;
2131}
2132
2133static void
2134qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2135		       struct unaligned_opaque_data *curr_pkt,
2136		       u32 opaque_data0, u32 opaque_data1)
2137{
2138	u64 opaque_data;
2139
2140	opaque_data = HILO_64(cpu_to_le32(opaque_data1),
2141			      cpu_to_le32(opaque_data0));
2142	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2143
2144	le16_add_cpu(&curr_pkt->first_mpa_offset,
2145		     curr_pkt->tcp_payload_offset);
2146}
2147
2148/* This function is called when an unaligned or incomplete MPA packet arrives.
2149 * The driver needs to align the packet, perhaps using previously received
2150 * data, and send it down to FW once it is aligned.
2151 */
2152static int
2153qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2154			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2155{
2156	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2157	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2158	enum qed_iwarp_mpa_pkt_type pkt_type;
2159	struct qed_iwarp_fpdu *fpdu;
2160	u16 cid, first_mpa_offset;
2161	int rc = -EINVAL;
2162	u8 *mpa_data;
2163
2164	cid = le32_to_cpu(curr_pkt->cid);
2165
2166	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2167	if (!fpdu) { /* something corrupt with cid, post rx back */
2168		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2169		       cid);
2170		goto err;
2171	}
2172
2173	do {
2174		first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
2175		mpa_data = ((u8 *)(buf->data) + first_mpa_offset);
2176
2177		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2178						  mpa_buf->tcp_payload_len,
2179						  mpa_data);
2180
2181		switch (pkt_type) {
2182		case QED_IWARP_MPA_PKT_PARTIAL:
2183			qed_iwarp_init_fpdu(buf, fpdu,
2184					    curr_pkt,
2185					    mpa_buf->tcp_payload_len,
2186					    mpa_buf->placement_offset);
2187
2188			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2189				mpa_buf->tcp_payload_len = 0;
2190				break;
2191			}
2192
2193			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2194
2195			if (rc) {
2196				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2197					   "Can't send FPDU:reset rc=%d\n", rc);
2198				memset(fpdu, 0, sizeof(*fpdu));
2199				break;
2200			}
2201
2202			mpa_buf->tcp_payload_len = 0;
2203			break;
2204		case QED_IWARP_MPA_PKT_PACKED:
2205			qed_iwarp_init_fpdu(buf, fpdu,
2206					    curr_pkt,
2207					    mpa_buf->tcp_payload_len,
2208					    mpa_buf->placement_offset);
2209
2210			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2211						 mpa_buf->tcp_payload_len,
2212						 pkt_type);
2213			if (rc) {
2214				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2215					   "Can't send FPDU:reset rc=%d\n", rc);
2216				memset(fpdu, 0, sizeof(*fpdu));
2217				break;
2218			}
2219
2220			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2221			le16_add_cpu(&curr_pkt->first_mpa_offset,
2222				     fpdu->fpdu_length);
2223			break;
2224		case QED_IWARP_MPA_PKT_UNALIGNED:
2225			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2226			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2227				/* special handling of fpdu split over more
2228				 * than 2 segments
2229				 */
2230				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2231					rc = qed_iwarp_win_right_edge(p_hwfn,
2232								      fpdu);
2233					/* packet will be re-processed later */
2234					if (rc)
2235						return rc;
2236				}
2237
2238				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2239						      buf,
2240						      mpa_buf->tcp_payload_len);
2241				if (rc) /* packet will be re-processed later */
2242					return rc;
2243
2244				mpa_buf->tcp_payload_len = 0;
2245				break;
2246			}
2247
2248			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2249						 mpa_buf->tcp_payload_len,
2250						 pkt_type);
2251			if (rc) {
2252				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2253					   "Can't send FPDU:delay rc=%d\n", rc);
2254				/* don't reset fpdu -> we need it for next
2255				 * classify
2256				 */
2257				break;
2258			}
2259
2260			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2261			le16_add_cpu(&curr_pkt->first_mpa_offset,
2262				     fpdu->incomplete_bytes);
2263
2264			/* The framed PDU was sent - no more incomplete bytes */
2265			fpdu->incomplete_bytes = 0;
2266			break;
2267		}
2268	} while (mpa_buf->tcp_payload_len && !rc);
2269
2270	return rc;
2271
2272err:
2273	qed_iwarp_ll2_post_rx(p_hwfn,
2274			      buf,
2275			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2276	return rc;
2277}
2278
2279static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2280{
2281	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2282	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2283	int rc;
2284
2285	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2286		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2287					   struct qed_iwarp_ll2_mpa_buf,
2288					   list_entry);
2289
2290		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2291
2292		/* busy means break and continue processing later, don't
2293		 * remove the buf from the pending list.
2294		 */
2295		if (rc == -EBUSY)
2296			break;
2297
2298		list_move_tail(&mpa_buf->list_entry,
2299			       &iwarp_info->mpa_buf_list);
2300
2301		if (rc) {	/* different error, don't continue */
2302			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2303			break;
2304		}
2305	}
2306}
2307
2308static void
2309qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2310{
2311	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2312	struct qed_iwarp_info *iwarp_info;
2313	struct qed_hwfn *p_hwfn = cxt;
2314	u16 first_mpa_offset;
2315
2316	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2317	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2318				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2319	if (!mpa_buf) {
2320		DP_ERR(p_hwfn, "No free mpa buf\n");
2321		goto err;
2322	}
2323
2324	list_del(&mpa_buf->list_entry);
2325	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2326			       data->opaque_data_0, data->opaque_data_1);
2327
2328	first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
2329
2330	DP_VERBOSE(p_hwfn,
2331		   QED_MSG_RDMA,
2332		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2333		   data->length.packet_length, first_mpa_offset,
2334		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2335		   mpa_buf->data.cid);
2336
2337	mpa_buf->ll2_buf = data->cookie;
2338	mpa_buf->tcp_payload_len = data->length.packet_length -
2339				   first_mpa_offset;
2340
2341	first_mpa_offset += data->u.placement_offset;
2342	mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
2343	mpa_buf->placement_offset = data->u.placement_offset;
2344
2345	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2346
2347	qed_iwarp_process_pending_pkts(p_hwfn);
2348	return;
2349err:
2350	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2351			      iwarp_info->ll2_mpa_handle);
2352}
2353
2354static void
2355qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2356{
2357	struct qed_iwarp_ll2_buff *buf = data->cookie;
2358	struct qed_iwarp_listener *listener;
2359	struct qed_ll2_tx_pkt_info tx_pkt;
2360	struct qed_iwarp_cm_info cm_info;
2361	struct qed_hwfn *p_hwfn = cxt;
2362	u8 remote_mac_addr[ETH_ALEN];
2363	u8 local_mac_addr[ETH_ALEN];
2364	struct qed_iwarp_ep *ep;
2365	int tcp_start_offset;
2366	u8 ll2_syn_handle;
2367	int payload_len;
2368	u32 hdr_size;
2369	int rc;
2370
2371	memset(&cm_info, 0, sizeof(cm_info));
2372	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2373
2374	/* Check if packet was received with errors... */
2375	if (data->err_flags) {
2376		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2377			  data->err_flags);
2378		goto err;
2379	}
2380
2381	if (GET_FIELD(data->parse_flags,
2382		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2383	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2384		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2385		goto err;
2386	}
2387
2388	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2389				    data->u.placement_offset, remote_mac_addr,
2390				    local_mac_addr, &payload_len,
2391				    &tcp_start_offset);
2392	if (rc)
2393		goto err;
2394
2395	/* Check if there is a listener for this 4-tuple+vlan */
2396	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2397	if (!listener) {
2398		DP_VERBOSE(p_hwfn,
2399			   QED_MSG_RDMA,
2400			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2401			   data->parse_flags, data->length.packet_length);
2402
2403		memset(&tx_pkt, 0, sizeof(tx_pkt));
2404		tx_pkt.num_of_bds = 1;
2405		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2406		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2407		tx_pkt.first_frag = buf->data_phys_addr +
2408				    data->u.placement_offset;
2409		tx_pkt.first_frag_len = data->length.packet_length;
2410		tx_pkt.cookie = buf;
2411
2412		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2413					       &tx_pkt, true);
2414
2415		if (rc) {
2416			DP_NOTICE(p_hwfn,
2417				  "Can't post SYN back to chip rc=%d\n", rc);
2418			goto err;
2419		}
2420		return;
2421	}
2422
2423	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2424	/* There may be an open ep on this connection if this is a syn
2425	 * retransmit... need to make sure there isn't...
2426	 */
2427	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2428		goto err;
2429
2430	ep = qed_iwarp_get_free_ep(p_hwfn);
2431	if (!ep)
2432		goto err;
2433
2434	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2435	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2436	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2437
2438	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2439	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2440
2441	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2442
2443	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
2444	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2445	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2446
2447	ep->event_cb = listener->event_cb;
2448	ep->cb_context = listener->cb_context;
2449	ep->connect_mode = TCP_CONNECT_PASSIVE;
2450
2451	ep->syn = buf;
2452	ep->syn_ip_payload_length = (u16)payload_len;
2453	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2454			   tcp_start_offset;
2455
2456	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2457	if (rc) {
2458		qed_iwarp_return_ep(p_hwfn, ep);
2459		goto err;
2460	}
2461
2462	return;
2463err:
2464	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2465}
2466
2467static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2468				     void *cookie, dma_addr_t rx_buf_addr,
2469				     bool b_last_packet)
2470{
2471	struct qed_iwarp_ll2_buff *buffer = cookie;
2472	struct qed_hwfn *p_hwfn = cxt;
2473
2474	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2475			  buffer->data, buffer->data_phys_addr);
2476	kfree(buffer);
2477}
2478
2479static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2480				      void *cookie, dma_addr_t first_frag_addr,
2481				      bool b_last_fragment, bool b_last_packet)
2482{
2483	struct qed_iwarp_ll2_buff *buffer = cookie;
2484	struct qed_iwarp_ll2_buff *piggy;
2485	struct qed_hwfn *p_hwfn = cxt;
2486
2487	if (!buffer)		/* can happen in packed mpa unaligned... */
2488		return;
2489
2490	/* this was originally an rx packet, post it back */
2491	piggy = buffer->piggy_buf;
2492	if (piggy) {
2493		buffer->piggy_buf = NULL;
2494		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2495	}
2496
2497	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2498
2499	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2500		qed_iwarp_process_pending_pkts(p_hwfn);
2501
2502	return;
2503}
2504
2505static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2506				     void *cookie, dma_addr_t first_frag_addr,
2507				     bool b_last_fragment, bool b_last_packet)
2508{
2509	struct qed_iwarp_ll2_buff *buffer = cookie;
2510	struct qed_hwfn *p_hwfn = cxt;
2511
2512	if (!buffer)
2513		return;
2514
2515	if (buffer->piggy_buf) {
2516		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2517				  buffer->piggy_buf->buff_size,
2518				  buffer->piggy_buf->data,
2519				  buffer->piggy_buf->data_phys_addr);
2520
2521		kfree(buffer->piggy_buf);
2522	}
2523
2524	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2525			  buffer->data, buffer->data_phys_addr);
2526
2527	kfree(buffer);
2528}
2529
2530/* The only slowpath for iwarp ll2 is the unaligned flush. When this completion
2531 * is received, the FPDU needs to be reset.
2532 */
2533static void
2534qed_iwarp_ll2_slowpath(void *cxt,
2535		       u8 connection_handle,
2536		       u32 opaque_data_0, u32 opaque_data_1)
2537{
2538	struct unaligned_opaque_data unalign_data;
2539	struct qed_hwfn *p_hwfn = cxt;
2540	struct qed_iwarp_fpdu *fpdu;
2541	u32 cid;
2542
2543	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2544			       opaque_data_0, opaque_data_1);
2545
2546	cid = le32_to_cpu(unalign_data.cid);
2547
2548	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
2549
2550	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
2551	if (fpdu)
2552		memset(fpdu, 0, sizeof(*fpdu));
2553}
2554
2555static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2556{
2557	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2558	int rc = 0;
2559
2560	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2561		rc = qed_ll2_terminate_connection(p_hwfn,
2562						  iwarp_info->ll2_syn_handle);
2563		if (rc)
2564			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2565
2566		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2567		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2568	}
2569
2570	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2571		rc = qed_ll2_terminate_connection(p_hwfn,
2572						  iwarp_info->ll2_ooo_handle);
2573		if (rc)
2574			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2575
2576		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2577		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2578	}
2579
2580	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2581		rc = qed_ll2_terminate_connection(p_hwfn,
2582						  iwarp_info->ll2_mpa_handle);
2583		if (rc)
2584			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2585
2586		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2587		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2588	}
2589
2590	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2591				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2592
2593	return rc;
2594}
2595
2596static int
2597qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2598			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2599{
2600	struct qed_iwarp_ll2_buff *buffer;
2601	int rc = 0;
2602	int i;
2603
2604	for (i = 0; i < num_rx_bufs; i++) {
2605		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2606		if (!buffer) {
2607			rc = -ENOMEM;
2608			break;
2609		}
2610
2611		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2612						  buff_size,
2613						  &buffer->data_phys_addr,
2614						  GFP_KERNEL);
2615		if (!buffer->data) {
2616			kfree(buffer);
2617			rc = -ENOMEM;
2618			break;
2619		}
2620
2621		buffer->buff_size = buff_size;
2622		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2623		if (rc)
2624			/* buffers will be deallocated by qed_ll2 */
2625			break;
2626	}
2627	return rc;
2628}
2629
2630#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2631	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2632		ETH_CACHE_LINE_SIZE)
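/* e.g. (editor's example, assuming ETH_CACHE_LINE_SIZE is 64): for an mtu
 * of 1500 this gives ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64) =
 * 1600 bytes, leaving room for the Ethernet header, two VLAN tags and
 * cache-line alignment of the placement offset.
 */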
2633
2634static int
2635qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2636		    struct qed_rdma_start_in_params *params,
2637		    u32 rcv_wnd_size)
2638{
2639	struct qed_iwarp_info *iwarp_info;
2640	struct qed_ll2_acquire_data data;
2641	struct qed_ll2_cbs cbs;
2642	u32 buff_size;
2643	u16 n_ooo_bufs;
2644	int rc = 0;
2645	int i;
2646
2647	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2648	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2649	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2650	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2651
2652	iwarp_info->max_mtu = params->max_mtu;
2653
2654	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2655
2656	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2657	if (rc)
2658		return rc;
2659
2660	/* Start SYN connection */
2661	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2662	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2663	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2664	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2665	cbs.slowpath_cb = NULL;
2666	cbs.cookie = p_hwfn;
2667
2668	memset(&data, 0, sizeof(data));
2669	data.input.conn_type = QED_LL2_TYPE_IWARP;
2670	/* SYN will use ctx based queues */
2671	data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
2672	data.input.mtu = params->max_mtu;
2673	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2674	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2675	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2676	data.input.tx_tc = PKT_LB_TC;
2677	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2678	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2679	data.cbs = &cbs;
2680
2681	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2682	if (rc) {
2683		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2684		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2685		return rc;
2686	}
2687
2688	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2689	if (rc) {
2690		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2691		goto err;
2692	}
2693
2694	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2695	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2696					 QED_IWARP_LL2_SYN_RX_SIZE,
2697					 buff_size,
2698					 iwarp_info->ll2_syn_handle);
2699	if (rc)
2700		goto err;
2701
2702	/* Start OOO connection */
2703	data.input.conn_type = QED_LL2_TYPE_OOO;
2704	/* OOO/unaligned will use legacy ll2 queues (ram based) */
2705	data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
2706	data.input.mtu = params->max_mtu;
2707
2708	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2709		     iwarp_info->max_mtu;
2710	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2711
2712	data.input.rx_num_desc = n_ooo_bufs;
2713	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2714
2715	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2716	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2717	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2718
2719	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2720	if (rc)
2721		goto err;
2722
2723	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2724	if (rc)
2725		goto err;
2726
2727	/* Start Unaligned MPA connection */
2728	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2729	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2730
2731	memset(&data, 0, sizeof(data));
2732	data.input.conn_type = QED_LL2_TYPE_IWARP;
2733	data.input.mtu = params->max_mtu;
2734	/* FW requires that, once a packet arrives OOO, at least 2 rx
2735	 * buffers are available on the unaligned connection to handle
2736	 * the case that it is a partial fpdu.
2737	 */
2738	data.input.rx_num_desc = n_ooo_bufs * 2;
2739	data.input.tx_num_desc = data.input.rx_num_desc;
2740	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2741	data.input.tx_tc = PKT_LB_TC;
2742	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2743	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2744	data.input.secondary_queue = true;
2745	data.cbs = &cbs;
2746
2747	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2748	if (rc)
2749		goto err;
2750
2751	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2752	if (rc)
2753		goto err;
2754
2755	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2756					 data.input.rx_num_desc,
2757					 buff_size,
2758					 iwarp_info->ll2_mpa_handle);
2759	if (rc)
2760		goto err;
2761
2762	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2763					    sizeof(*iwarp_info->partial_fpdus),
2764					    GFP_KERNEL);
2765	if (!iwarp_info->partial_fpdus) {
2766		rc = -ENOMEM;
2767		goto err;
2768	}
2769
2770	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2771
2772	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2773	if (!iwarp_info->mpa_intermediate_buf) {
2774		rc = -ENOMEM;
2775		goto err;
2776	}
2777
2778	/* The mpa_bufs array holds pending RX packets received on the
2779	 * mpa ll2 that don't have room on the tx ring and require later
2780	 * processing. We can't afford to fail allocating such a struct, so
2781	 * we allocate enough of them to cover all rx packets.
2782	 */
2783	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2784				       sizeof(*iwarp_info->mpa_bufs),
2785				       GFP_KERNEL);
2786	if (!iwarp_info->mpa_bufs) {
2787		rc = -ENOMEM;
2788		goto err;
2789	}
2790
2791	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2792	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2793	for (i = 0; i < data.input.rx_num_desc; i++)
2794		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2795			      &iwarp_info->mpa_buf_list);
2796	return rc;
2797err:
2798	qed_iwarp_ll2_stop(p_hwfn);
2799
2800	return rc;
2801}
2802
2803static struct {
2804	u32 two_ports;
2805	u32 four_ports;
2806} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
2807	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
2808	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
2809};
2810
2811int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2812		    struct qed_rdma_start_in_params *params)
2813{
2814	struct qed_dev *cdev = p_hwfn->cdev;
2815	struct qed_iwarp_info *iwarp_info;
2816	enum chip_ids chip_id;
2817	u32 rcv_wnd_size;
2818
2819	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2820
2821	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2822
2823	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2824	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2825		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2826		qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2827
2828	/* a scale of 0 corresponds to a window of QED_IWARP_RCV_WND_SIZE_MIN */
2829	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2830	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2831	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
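	/* Worked example (editor's illustration): on a 2-port BB device
	 * rcv_wnd_size starts at 200K, so rcv_wnd_scale =
	 * ilog2(204800) - ilog2(0xffff) = 17 - 15 = 2 and the stored window
	 * becomes 204800 >> 2 = 51200, which fits the 16-bit TCP window field.
	 */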
2832	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2833	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2834
2835	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2836
2837	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2838				MPA_RTR_TYPE_ZERO_WRITE |
2839				MPA_RTR_TYPE_ZERO_READ;
2840
2841	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2842	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2843	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2844
2845	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2846				  qed_iwarp_async_event);
2847	qed_ooo_setup(p_hwfn);
2848
2849	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2850}
2851
2852int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
2853{
2854	int rc;
2855
2856	qed_iwarp_free_prealloc_ep(p_hwfn);
2857	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2858	if (rc)
2859		return rc;
2860
2861	return qed_iwarp_ll2_stop(p_hwfn);
2862}
2863
2864static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2865				  struct qed_iwarp_ep *ep,
2866				  u8 fw_return_code)
2867{
2868	struct qed_iwarp_cm_event_params params;
2869
2870	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2871
2872	params.event = QED_IWARP_EVENT_CLOSE;
2873	params.ep_context = ep;
2874	params.cm_info = &ep->cm_info;
2875	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2876			 0 : -ECONNRESET;
2877
2878	/* paired with READ_ONCE in destroy_qp */
2879	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2880
2881	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2882	list_del(&ep->list_entry);
2883	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2884
2885	ep->event_cb(ep->cb_context, &params);
2886}
2887
2888static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2889					 struct qed_iwarp_ep *ep,
2890					 int fw_ret_code)
2891{
2892	struct qed_iwarp_cm_event_params params;
2893	bool event_cb = false;
2894
2895	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2896		   ep->cid, fw_ret_code);
2897
2898	switch (fw_ret_code) {
2899	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2900		params.status = 0;
2901		params.event = QED_IWARP_EVENT_DISCONNECT;
2902		event_cb = true;
2903		break;
2904	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2905		params.status = -ECONNRESET;
2906		params.event = QED_IWARP_EVENT_DISCONNECT;
2907		event_cb = true;
2908		break;
2909	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2910		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2911		event_cb = true;
2912		break;
2913	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2914		params.event = QED_IWARP_EVENT_IRQ_FULL;
2915		event_cb = true;
2916		break;
2917	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2918		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2919		event_cb = true;
2920		break;
2921	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2922		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2923		event_cb = true;
2924		break;
2925	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2926		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2927		event_cb = true;
2928		break;
2929	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2930		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2931		event_cb = true;
2932		break;
2933	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2934		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2935		event_cb = true;
2936		break;
2937	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2938		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2939		event_cb = true;
2940		break;
2941	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2942		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2943		event_cb = true;
2944		break;
2945	default:
2946		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2947			   "Unhandled exception received...fw_ret_code=%d\n",
2948			   fw_ret_code);
2949		break;
2950	}
2951
2952	if (event_cb) {
2953		params.ep_context = ep;
2954		params.cm_info = &ep->cm_info;
2955		ep->event_cb(ep->cb_context, &params);
2956	}
2957}
2958
2959static void
2960qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2961				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2962{
2963	struct qed_iwarp_cm_event_params params;
2964
2965	memset(&params, 0, sizeof(params));
2966	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2967	params.ep_context = ep;
2968	params.cm_info = &ep->cm_info;
2969	/* paired with READ_ONCE in destroy_qp */
2970	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2971
2972	switch (fw_return_code) {
2973	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2974		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2975			   "%s(0x%x) TCP connect got invalid packet\n",
2976			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2977		params.status = -ECONNRESET;
2978		break;
2979	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2980		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2981			   "%s(0x%x) TCP Connection Reset\n",
2982			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2983		params.status = -ECONNRESET;
2984		break;
2985	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2986		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2987			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2988		params.status = -EBUSY;
2989		break;
2990	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2991		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2992			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2993		params.status = -ECONNREFUSED;
2994		break;
2995	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2996		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2997			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2998		params.status = -ECONNRESET;
2999		break;
3000	default:
3001		DP_ERR(p_hwfn,
3002		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
3003		       QED_IWARP_CONNECT_MODE_STRING(ep),
3004		       ep->tcp_cid, fw_return_code);
3005		params.status = -ECONNRESET;
3006		break;
3007	}
3008
3009	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3010		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
3011		qed_iwarp_return_ep(p_hwfn, ep);
3012	} else {
3013		ep->event_cb(ep->cb_context, &params);
3014		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3015		list_del(&ep->list_entry);
3016		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3017	}
3018}
3019
3020static void
3021qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
3022			   struct qed_iwarp_ep *ep, u8 fw_return_code)
3023{
3024	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3025
3026	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3027		/* Done with the SYN packet, post back to ll2 rx */
3028		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3029
3030		ep->syn = NULL;
3031
3032		/* If connect failed - upper layer doesn't know about it */
3033		if (fw_return_code == RDMA_RETURN_OK)
3034			qed_iwarp_mpa_received(p_hwfn, ep);
3035		else
3036			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3037							   fw_return_code);
3038	} else {
3039		if (fw_return_code == RDMA_RETURN_OK)
3040			qed_iwarp_mpa_offload(p_hwfn, ep);
3041		else
3042			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3043							   fw_return_code);
3044	}
3045}
3046
3047static inline bool
3048qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3049{
3050	if (!ep || (ep->sig != QED_EP_SIG)) {
3051		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3052		return false;
3053	}
3054
3055	return true;
3056}
3057
3058static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
3059				 __le16 echo, union event_ring_data *data,
3060				 u8 fw_return_code)
3061{
3062	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
3063	struct regpair *fw_handle = &data->rdma_data.async_handle;
3064	struct qed_iwarp_ep *ep = NULL;
3065	u16 srq_offset;
3066	u16 srq_id;
3067	u16 cid;
3068
3069	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3070						       fw_handle->lo);
3071
3072	switch (fw_event_code) {
3073	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3074		/* Async completion after TCP 3-way handshake */
3075		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3076			return -EINVAL;
3077		DP_VERBOSE(p_hwfn,
3078			   QED_MSG_RDMA,
3079			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3080			   ep->tcp_cid, fw_return_code);
3081		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3082		break;
3083	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3084		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3085			return -EINVAL;
3086		DP_VERBOSE(p_hwfn,
3087			   QED_MSG_RDMA,
3088			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3089			   ep->cid, fw_return_code);
3090		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3091		break;
3092	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3093		/* Async completion for Close Connection ramrod */
3094		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3095			return -EINVAL;
3096		DP_VERBOSE(p_hwfn,
3097			   QED_MSG_RDMA,
3098			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3099			   ep->cid, fw_return_code);
3100		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3101		break;
3102	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3103		/* Async event for active side only */
3104		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3105			return -EINVAL;
3106		DP_VERBOSE(p_hwfn,
3107			   QED_MSG_RDMA,
3108			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3109			   ep->cid, fw_return_code);
3110		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3111		break;
3112	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3113		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3114			return -EINVAL;
3115		DP_VERBOSE(p_hwfn,
3116			   QED_MSG_RDMA,
3117			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3118			   ep->cid, fw_return_code);
3119		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3120		break;
3121	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3122		cid = (u16)le32_to_cpu(fw_handle->lo);
3123		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3124			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3125		qed_iwarp_cid_cleaned(p_hwfn, cid);
3126
3127		break;
3128	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
3129		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
3130		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3131		/* FW assigns a value that fits in a u16 */
3132		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3133		events.affiliated_event(events.context,
3134					QED_IWARP_EVENT_SRQ_EMPTY,
3135					&srq_id);
3136		break;
3137	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
3138		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
3139		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3140		/* FW assigns a value that fits in a u16 */
3141		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3142		events.affiliated_event(events.context,
3143					QED_IWARP_EVENT_SRQ_LIMIT,
3144					&srq_id);
3145		break;
3146	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3147		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3148
3149		p_hwfn->p_rdma_info->events.affiliated_event(
3150			p_hwfn->p_rdma_info->events.context,
3151			QED_IWARP_EVENT_CQ_OVERFLOW,
3152			(void *)fw_handle);
3153		break;
3154	default:
3155		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3156		       fw_event_code);
3157		return -EINVAL;
3158	}
3159	return 0;
3160}
3161
3162int
3163qed_iwarp_create_listen(void *rdma_cxt,
3164			struct qed_iwarp_listen_in *iparams,
3165			struct qed_iwarp_listen_out *oparams)
3166{
3167	struct qed_hwfn *p_hwfn = rdma_cxt;
3168	struct qed_iwarp_listener *listener;
3169
3170	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3171	if (!listener)
3172		return -ENOMEM;
3173
3174	listener->ip_version = iparams->ip_version;
3175	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3176	listener->port = iparams->port;
3177	listener->vlan = iparams->vlan;
3178
3179	listener->event_cb = iparams->event_cb;
3180	listener->cb_context = iparams->cb_context;
3181	listener->max_backlog = iparams->max_backlog;
3182	oparams->handle = listener;
3183
3184	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3185	list_add_tail(&listener->list_entry,
3186		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3187	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3188
3189	DP_VERBOSE(p_hwfn,
3190		   QED_MSG_RDMA,
3191		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3192		   listener->event_cb,
3193		   listener,
3194		   listener->ip_addr[0],
3195		   listener->ip_addr[1],
3196		   listener->ip_addr[2],
3197		   listener->ip_addr[3], listener->port, listener->vlan);
3198
3199	return 0;
3200}
3201
3202int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3203{
3204	struct qed_iwarp_listener *listener = handle;
3205	struct qed_hwfn *p_hwfn = rdma_cxt;
3206
3207	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3208
3209	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3210	list_del(&listener->list_entry);
3211	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3212
3213	kfree(listener);
3214
3215	return 0;
3216}
3217
3218int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3219{
3220	struct qed_hwfn *p_hwfn = rdma_cxt;
3221	struct qed_sp_init_data init_data;
3222	struct qed_spq_entry *p_ent;
3223	struct qed_iwarp_ep *ep;
3224	struct qed_rdma_qp *qp;
3225	int rc;
3226
3227	ep = iparams->ep_context;
3228	if (!ep) {
3229		DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n");
3230		return -EINVAL;
3231	}
3232
3233	qp = ep->qp;
3234
3235	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3236		   qp->icid, ep->tcp_cid);
3237
3238	memset(&init_data, 0, sizeof(init_data));
3239	init_data.cid = qp->icid;
3240	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3241	init_data.comp_mode = QED_SPQ_MODE_CB;
3242
3243	rc = qed_sp_init_request(p_hwfn, &p_ent,
3244				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3245				 PROTOCOLID_IWARP, &init_data);
3246
3247	if (rc)
3248		return rc;
3249
3250	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3251
3252	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3253
3254	return rc;
3255}
3256
3257void
3258qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3259		   struct qed_rdma_query_qp_out_params *out_params)
3260{
3261	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3262}
v4.17
 
   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
 
  32#include <linux/if_ether.h>
  33#include <linux/if_vlan.h>
  34#include <linux/ip.h>
  35#include <linux/ipv6.h>
  36#include <linux/spinlock.h>
  37#include <linux/tcp.h>
  38#include "qed_cxt.h"
  39#include "qed_hw.h"
  40#include "qed_ll2.h"
  41#include "qed_rdma.h"
  42#include "qed_reg_addr.h"
  43#include "qed_sp.h"
  44#include "qed_ooo.h"
  45
  46#define QED_IWARP_ORD_DEFAULT		32
  47#define QED_IWARP_IRD_DEFAULT		32
  48#define QED_IWARP_MAX_FW_MSS		4120
  49
  50#define QED_EP_SIG 0xecabcdef
  51
  52struct mpa_v2_hdr {
  53	__be16 ird;
  54	__be16 ord;
  55};
  56
  57#define MPA_V2_PEER2PEER_MODEL  0x8000
  58#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  59#define MPA_V2_READ_RTR         0x4000	/* on ord */
  60#define MPA_V2_WRITE_RTR        0x8000
  61#define MPA_V2_IRD_ORD_MASK     0x3FFF
  62
  63#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  64
  65#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  66#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
 
 
 
 
 
  67#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  68#define TIMESTAMP_HEADER_SIZE		(12)
  69#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  70
  71#define QED_IWARP_TS_EN			BIT(0)
  72#define QED_IWARP_DA_EN			BIT(1)
  73#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  74#define QED_IWARP_PARAM_P2P		(1)
  75
  76#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  77#define QED_IWARP_DEF_CWND_FACTOR	(4)
  78#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  79#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  80#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  81
  82static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
  83				 u8 fw_event_code, u16 echo,
  84				 union event_ring_data *data,
  85				 u8 fw_return_code);
  86
  87/* Override devinfo with iWARP specific values */
  88void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  89{
  90	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  91
  92	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  93	dev->max_qp = min_t(u32,
  94			    IWARP_MAX_QPS,
  95			    p_hwfn->p_rdma_info->num_qps) -
  96		      QED_IWARP_PREALLOC_CNT;
  97
  98	dev->max_cq = dev->max_qp;
  99
 100	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
 101	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
 102}
 103
 104void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 105{
 106	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
 107	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 108	p_hwfn->b_rdma_enabled_in_prs = true;
 109}
 110
 111/* We have two cid maps, one for tcp which should be used only from passive
 112 * syn processing and replacing a pre-allocated ep in the list. The second
 113 * for active tcp and for QPs.
 114 */
 115static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 116{
 117	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 118
 119	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 120
 121	if (cid < QED_IWARP_PREALLOC_CNT)
 122		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 123				    cid);
 124	else
 125		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 126
 127	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 128}
 129
 130void
 131qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 132			 struct iwarp_init_func_ramrod_data *p_ramrod)
 133{
 134	p_ramrod->iwarp.ll2_ooo_q_index =
 135		RESC_START(p_hwfn, QED_LL2_QUEUE) +
 136		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 137
 
 
 138	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 139
 140	return;
 141}
 142
 143static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 144{
 145	int rc;
 146
 147	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 148	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 149	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 150	if (rc) {
 151		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 152		return rc;
 153	}
 154	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 155
 156	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 157	if (rc)
 158		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 159
 160	return rc;
 161}
 162
 163static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 164{
 165	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 166
 167	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 168	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 169	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 170}
 171
 172/* This function allocates a cid for passive tcp (called from syn receive)
 173 * the reason it's separate from the regular cid allocation is because it
 174 * is assured that these cids already have ilt allocated. They are preallocated
 175 * to ensure that we won't need to allocate memory during syn processing
 176 */
 177static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 178{
 179	int rc;
 180
 181	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 182
 183	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 184				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 185
 186	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 187
 188	if (rc) {
 189		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 190			   "can't allocate iwarp tcp cid max-count=%d\n",
 191			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 192
 193		*cid = QED_IWARP_INVALID_TCP_CID;
 194		return rc;
 195	}
 196
 197	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 198					    p_hwfn->p_rdma_info->proto);
 199	return 0;
 200}
 201
 202int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 203			struct qed_rdma_qp *qp,
 204			struct qed_rdma_create_qp_out_params *out_params)
 205{
 206	struct iwarp_create_qp_ramrod_data *p_ramrod;
 207	struct qed_sp_init_data init_data;
 208	struct qed_spq_entry *p_ent;
 209	u16 physical_queue;
 210	u32 cid;
 211	int rc;
 212
 213	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 214					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 215					      &qp->shared_queue_phys_addr,
 216					      GFP_KERNEL);
 217	if (!qp->shared_queue)
 218		return -ENOMEM;
 219
 220	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 221	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 222	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 223	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 224	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 225	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 226	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 227	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 228
 229	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 230	if (rc)
 231		goto err1;
 232
 233	qp->icid = (u16)cid;
 234
 235	memset(&init_data, 0, sizeof(init_data));
 236	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 237	init_data.cid = qp->icid;
 238	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 239
 240	rc = qed_sp_init_request(p_hwfn, &p_ent,
 241				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 242				 PROTOCOLID_IWARP, &init_data);
 243	if (rc)
 244		goto err2;
 245
 246	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 247
 248	SET_FIELD(p_ramrod->flags,
 249		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 250		  qp->fmr_and_reserved_lkey);
 251
 252	SET_FIELD(p_ramrod->flags,
 253		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 254
 255	SET_FIELD(p_ramrod->flags,
 256		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 257		  qp->incoming_rdma_read_en);
 258
 259	SET_FIELD(p_ramrod->flags,
 260		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 261		  qp->incoming_rdma_write_en);
 262
 263	SET_FIELD(p_ramrod->flags,
 264		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 265		  qp->incoming_atomic_en);
 266
 267	SET_FIELD(p_ramrod->flags,
 268		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 269
 270	p_ramrod->pd = qp->pd;
 271	p_ramrod->sq_num_pages = qp->sq_num_pages;
 272	p_ramrod->rq_num_pages = qp->rq_num_pages;
 273
 274	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 275	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 276
 277	p_ramrod->cq_cid_for_sq =
 278	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 279	p_ramrod->cq_cid_for_rq =
 280	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 281
 282	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 283
 284	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 285	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 286	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 287	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 288
 289	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 290	if (rc)
 291		goto err2;
 292
 293	return rc;
 294
 295err2:
 296	qed_iwarp_cid_cleaned(p_hwfn, cid);
 297err1:
 298	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 299			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 300			  qp->shared_queue, qp->shared_queue_phys_addr);
 301
 302	return rc;
 303}
 304
 305static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 306{
 307	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 308	struct qed_sp_init_data init_data;
 309	struct qed_spq_entry *p_ent;
 310	int rc;
 311
 312	/* Get SPQ entry */
 313	memset(&init_data, 0, sizeof(init_data));
 314	init_data.cid = qp->icid;
 315	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 316	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 317
 318	rc = qed_sp_init_request(p_hwfn, &p_ent,
 319				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 320				 p_hwfn->p_rdma_info->proto, &init_data);
 321	if (rc)
 322		return rc;
 323
 324	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 325	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
 326		  0x1);
 327	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 328		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 329	else
 330		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 331
 332	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 333
 334	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
 335
 336	return rc;
 337}
 338
 339enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 340{
 341	switch (state) {
 342	case QED_ROCE_QP_STATE_RESET:
 343	case QED_ROCE_QP_STATE_INIT:
 344	case QED_ROCE_QP_STATE_RTR:
 345		return QED_IWARP_QP_STATE_IDLE;
 346	case QED_ROCE_QP_STATE_RTS:
 347		return QED_IWARP_QP_STATE_RTS;
 348	case QED_ROCE_QP_STATE_SQD:
 349		return QED_IWARP_QP_STATE_CLOSING;
 350	case QED_ROCE_QP_STATE_ERR:
 351		return QED_IWARP_QP_STATE_ERROR;
 352	case QED_ROCE_QP_STATE_SQE:
 353		return QED_IWARP_QP_STATE_TERMINATE;
 354	default:
 355		return QED_IWARP_QP_STATE_ERROR;
 356	}
 357}
 358
 359static enum qed_roce_qp_state
 360qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 361{
 362	switch (state) {
 363	case QED_IWARP_QP_STATE_IDLE:
 364		return QED_ROCE_QP_STATE_INIT;
 365	case QED_IWARP_QP_STATE_RTS:
 366		return QED_ROCE_QP_STATE_RTS;
 367	case QED_IWARP_QP_STATE_TERMINATE:
 368		return QED_ROCE_QP_STATE_SQE;
 369	case QED_IWARP_QP_STATE_CLOSING:
 370		return QED_ROCE_QP_STATE_SQD;
 371	case QED_IWARP_QP_STATE_ERROR:
 372		return QED_ROCE_QP_STATE_ERR;
 373	default:
 374		return QED_ROCE_QP_STATE_ERR;
 375	}
 376}
 377
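/* (Descriptive note, not in the original source: this table is indexed by
 * enum qed_iwarp_qp_state, as used in qed_iwarp_modify_qp() below, and its
 * entries are assumed to match the ordering of that enum.)
 */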
 378const char *iwarp_state_names[] = {
 379	"IDLE",
 380	"RTS",
 381	"TERMINATE",
 382	"CLOSING",
 383	"ERROR",
 384};
 385
 386int
 387qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 388		    struct qed_rdma_qp *qp,
 389		    enum qed_iwarp_qp_state new_state, bool internal)
 390{
 391	enum qed_iwarp_qp_state prev_iw_state;
 392	bool modify_fw = false;
 393	int rc = 0;
 394
 395	/* modify QP can be called from upper-layer or as a result of async
 396	 * RST/FIN... therefore need to protect
 397	 */
 398	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 399	prev_iw_state = qp->iwarp_state;
 400
 401	if (prev_iw_state == new_state) {
 402		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 403		return 0;
 404	}
 405
 406	switch (prev_iw_state) {
 407	case QED_IWARP_QP_STATE_IDLE:
 408		switch (new_state) {
 409		case QED_IWARP_QP_STATE_RTS:
 410			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 411			break;
 412		case QED_IWARP_QP_STATE_ERROR:
 413			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 414			if (!internal)
 415				modify_fw = true;
 416			break;
 417		default:
 418			break;
 419		}
 420		break;
 421	case QED_IWARP_QP_STATE_RTS:
 422		switch (new_state) {
 423		case QED_IWARP_QP_STATE_CLOSING:
 424			if (!internal)
 425				modify_fw = true;
 426
 427			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 428			break;
 429		case QED_IWARP_QP_STATE_ERROR:
 430			if (!internal)
 431				modify_fw = true;
 432			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 433			break;
 434		default:
 435			break;
 436		}
 437		break;
 438	case QED_IWARP_QP_STATE_ERROR:
 439		switch (new_state) {
 440		case QED_IWARP_QP_STATE_IDLE:
 441
 442			qp->iwarp_state = new_state;
 443			break;
 444		case QED_IWARP_QP_STATE_CLOSING:
 445			/* could happen due to a race... do nothing */
 446			break;
 447		default:
 448			rc = -EINVAL;
 449		}
 450		break;
 451	case QED_IWARP_QP_STATE_TERMINATE:
 452	case QED_IWARP_QP_STATE_CLOSING:
 453		qp->iwarp_state = new_state;
 454		break;
 455	default:
 456		break;
 457	}
 458
 459	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 460		   qp->icid,
 461		   iwarp_state_names[prev_iw_state],
 462		   iwarp_state_names[qp->iwarp_state],
 463		   internal ? "internal" : "");
 464
 465	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 466
 467	if (modify_fw)
 468		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 469
 470	return rc;
 471}
 472
 473int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 474{
 475	struct qed_sp_init_data init_data;
 476	struct qed_spq_entry *p_ent;
 477	int rc;
 478
 479	/* Get SPQ entry */
 480	memset(&init_data, 0, sizeof(init_data));
 481	init_data.cid = qp->icid;
 482	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 483	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 484
 485	rc = qed_sp_init_request(p_hwfn, &p_ent,
 486				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 487				 p_hwfn->p_rdma_info->proto, &init_data);
 488	if (rc)
 489		return rc;
 490
 491	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 492
 493	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 494
 495	return rc;
 496}
 497
 498static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 499				 struct qed_iwarp_ep *ep,
 500				 bool remove_from_active_list)
 501{
 502	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 503			  sizeof(*ep->ep_buffer_virt),
 504			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 505
 506	if (remove_from_active_list) {
 507		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 508		list_del(&ep->list_entry);
 509		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 510	}
 511
 512	if (ep->qp)
 513		ep->qp->ep = NULL;
 514
 515	kfree(ep);
 516}
 517
 518int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 519{
 520	struct qed_iwarp_ep *ep = qp->ep;
 521	int wait_count = 0;
 522	int rc = 0;
 523
 524	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 525		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 526					 QED_IWARP_QP_STATE_ERROR, false);
 527		if (rc)
 528			return rc;
 529	}
 530
 531	/* Make sure ep is closed before returning and freeing memory. */
 532	if (ep) {
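		/* (Descriptive note, not in the original source: the loop
		 * below polls for up to 200 * 100 ms = ~20 seconds for the
		 * ep to reach the CLOSED state.)
		 */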
 533		while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
 534			msleep(100);
 535
 536		if (ep->state != QED_IWARP_EP_CLOSED)
 537			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 538				  ep->state);
 539
 540		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 541	}
 542
 543	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 544
 545	if (qp->shared_queue)
 546		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 547				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 548				  qp->shared_queue, qp->shared_queue_phys_addr);
 549
 550	return rc;
 551}
 552
 553static int
 554qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 555{
 556	struct qed_iwarp_ep *ep;
 557	int rc;
 558
 559	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 560	if (!ep)
 561		return -ENOMEM;
 562
 563	ep->state = QED_IWARP_EP_INIT;
 564
 565	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 566						sizeof(*ep->ep_buffer_virt),
 567						&ep->ep_buffer_phys,
 568						GFP_KERNEL);
 569	if (!ep->ep_buffer_virt) {
 570		rc = -ENOMEM;
 571		goto err;
 572	}
 573
 574	ep->sig = QED_EP_SIG;
 575
 576	*ep_out = ep;
 577
 578	return 0;
 579
 580err:
 581	kfree(ep);
 582	return rc;
 583}
 584
 585static void
 586qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 587			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 588{
 589	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 590		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 591		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 592		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 593		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 594		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 595		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 596
 597	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 598		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 599			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 600			   p_tcp_ramrod->tcp.local_ip,
 601			   p_tcp_ramrod->tcp.local_port,
 602			   p_tcp_ramrod->tcp.remote_ip,
 603			   p_tcp_ramrod->tcp.remote_port,
 604			   p_tcp_ramrod->tcp.vlan_id);
 605	} else {
 606		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 607			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 608			   p_tcp_ramrod->tcp.local_ip,
 609			   p_tcp_ramrod->tcp.local_port,
 610			   p_tcp_ramrod->tcp.remote_ip,
 611			   p_tcp_ramrod->tcp.remote_port,
 612			   p_tcp_ramrod->tcp.vlan_id);
 613	}
 614
 615	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 616		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 617		   p_tcp_ramrod->tcp.flow_label,
 618		   p_tcp_ramrod->tcp.ttl,
 619		   p_tcp_ramrod->tcp.tos_or_tc,
 620		   p_tcp_ramrod->tcp.mss,
 621		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 622		   p_tcp_ramrod->tcp.connect_mode,
 623		   p_tcp_ramrod->tcp.flags);
 624
 625	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 626		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 627		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 628		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 629}
 630
 631static int
 632qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 633{
 634	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 635	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 636	struct tcp_offload_params_opt2 *tcp;
 637	struct qed_sp_init_data init_data;
 638	struct qed_spq_entry *p_ent;
 639	dma_addr_t async_output_phys;
 640	dma_addr_t in_pdata_phys;
 641	u16 physical_q;
 642	u8 tcp_flags;
 643	int rc;
 644	int i;
 645
 646	memset(&init_data, 0, sizeof(init_data));
 647	init_data.cid = ep->tcp_cid;
 648	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 649	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 650		init_data.comp_mode = QED_SPQ_MODE_CB;
 651	else
 652		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 653
 654	rc = qed_sp_init_request(p_hwfn, &p_ent,
 655				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 656				 PROTOCOLID_IWARP, &init_data);
 657	if (rc)
 658		return rc;
 659
 660	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 661
 662	in_pdata_phys = ep->ep_buffer_phys +
 663			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 664	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 665		       in_pdata_phys);
 666
 667	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 668	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 669
 670	async_output_phys = ep->ep_buffer_phys +
 671			    offsetof(struct qed_iwarp_ep_memory, async_output);
 672	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 673		       async_output_phys);
 674
 675	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 676	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 677
 678	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 679	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 680	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 681	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 682	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 683
 684	tcp = &p_tcp_ramrod->tcp;
 685	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 686			    &tcp->remote_mac_addr_mid,
 687			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 688	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 689			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 690
 691	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 692
 693	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 694	tcp->flags = 0;
 695	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 696		  !!(tcp_flags & QED_IWARP_TS_EN));
 697
 698	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 699		  !!(tcp_flags & QED_IWARP_DA_EN));
 700
 701	tcp->ip_version = ep->cm_info.ip_version;
 702
 703	for (i = 0; i < 4; i++) {
 704		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 705		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 706	}
 707
 708	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 709	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 710	tcp->mss = cpu_to_le16(ep->mss);
 711	tcp->flow_label = 0;
 712	tcp->ttl = 0x40;
 713	tcp->tos_or_tc = 0;
 714
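	/* (Descriptive note, not in the original source: the defaults below
	 * presumably give the offloaded connection an initial congestion
	 * window of QED_IWARP_DEF_CWND_FACTOR (4) segments, plus the
	 * retransmit/keepalive defaults defined at the top of this file.)
	 */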
 715	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
 716	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR *  tcp->mss;
 717	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 718	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
 719	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
 720
 721	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 722	tcp->connect_mode = ep->connect_mode;
 723
 724	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 725		tcp->syn_ip_payload_length =
 726			cpu_to_le16(ep->syn_ip_payload_length);
 727		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 728		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 729	}
 730
 731	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 732
 733	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 734
 735	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 736		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 737
 738	return rc;
 739}
 740
 741static void
 742qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 743{
 744	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 745	struct qed_iwarp_cm_event_params params;
 746	struct mpa_v2_hdr *mpa_v2;
 747	union async_output *async_data;
 748	u16 mpa_ord, mpa_ird;
 749	u8 mpa_hdr_size = 0;
 750	u8 mpa_rev;
 751
 752	async_data = &ep->ep_buffer_virt->async_output;
 753
 754	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 755	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 756		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 757		   async_data->mpa_request.ulp_data_len,
 758		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 759
 760	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 761		/* Read ord/ird values from private data buffer */
 762		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 763		mpa_hdr_size = sizeof(*mpa_v2);
 764
 765		mpa_ord = ntohs(mpa_v2->ord);
 766		mpa_ird = ntohs(mpa_v2->ird);
 767
 768		/* Temporarily store the requested incoming ord/ird in cm_info;
 769		 * they are replaced with the negotiated values during accept.
 770		 */
 771		ep->cm_info.ord = (u8)min_t(u16,
 772					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 773					    QED_IWARP_ORD_DEFAULT);
 774
 775		ep->cm_info.ird = (u8)min_t(u16,
 776					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 777					    QED_IWARP_IRD_DEFAULT);
 778
 779		/* Peer2Peer negotiation */
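		/* (Illustrative example, not in the original source: if the
		 * peer advertises only MPA_V2_SEND_RTR while our
		 * iwarp_info->rtr_type is ZERO_WRITE | ZERO_READ, the
		 * intersection below is empty and we fall back to
		 * advertising our own rtr_type in the reply.)
		 */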
 780		ep->rtr_type = MPA_RTR_TYPE_NONE;
 781		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 782			if (mpa_ord & MPA_V2_WRITE_RTR)
 783				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 784
 785			if (mpa_ord & MPA_V2_READ_RTR)
 786				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 787
 788			if (mpa_ird & MPA_V2_SEND_RTR)
 789				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 790
 791			ep->rtr_type &= iwarp_info->rtr_type;
 792
 793			/* if we're left with no match send our capabilities */
 794			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 795				ep->rtr_type = iwarp_info->rtr_type;
 796		}
 797
 798		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 799	} else {
 800		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 801		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 802		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 803	}
 804
 805	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 806		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 807		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 808		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 809
 810	/* Strip mpa v2 hdr from private data before sending to upper layer */
 811	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 812
 813	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
 814				       mpa_hdr_size;
 815
 816	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 817	params.cm_info = &ep->cm_info;
 818	params.ep_context = ep;
 819	params.status = 0;
 820
 821	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 822	ep->event_cb(ep->cb_context, &params);
 823}
 824
 825static int
 826qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 827{
 828	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 829	struct qed_iwarp_info *iwarp_info;
 830	struct qed_sp_init_data init_data;
 831	dma_addr_t async_output_phys;
 832	struct qed_spq_entry *p_ent;
 833	dma_addr_t out_pdata_phys;
 834	dma_addr_t in_pdata_phys;
 835	struct qed_rdma_qp *qp;
 836	bool reject;
 837	int rc;
 838
 839	if (!ep)
 840		return -EINVAL;
 841
 842	qp = ep->qp;
 843	reject = !qp;
 844
 845	memset(&init_data, 0, sizeof(init_data));
 846	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 847	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 848
 849	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 850		init_data.comp_mode = QED_SPQ_MODE_CB;
 851	else
 852		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 853
 854	rc = qed_sp_init_request(p_hwfn, &p_ent,
 855				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 856				 PROTOCOLID_IWARP, &init_data);
 857	if (rc)
 858		return rc;
 859
 860	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 861	out_pdata_phys = ep->ep_buffer_phys +
 862			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 863	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
 864		       out_pdata_phys);
 865	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
 866	    ep->cm_info.private_data_len;
 867	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 868
 869	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
 870	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
 871
 872	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 873
 874	in_pdata_phys = ep->ep_buffer_phys +
 875			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 876	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 877	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 878		       in_pdata_phys);
 879	p_mpa_ramrod->incoming_ulp_buffer.len =
 880	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 881	async_output_phys = ep->ep_buffer_phys +
 882			    offsetof(struct qed_iwarp_ep_memory, async_output);
 883	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 884		       async_output_phys);
 885	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 886	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 887
 888	if (!reject) {
 889		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 890			       qp->shared_queue_phys_addr);
 891		p_mpa_ramrod->stats_counter_id =
 892		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 893	} else {
 894		p_mpa_ramrod->common.reject = 1;
 895	}
 896
 897	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 898	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 899	p_mpa_ramrod->mode = ep->mpa_rev;
 900	SET_FIELD(p_mpa_ramrod->rtr_pref,
 901		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 902
 903	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 904	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 905	if (!reject)
 906		ep->cid = qp->icid;	/* Now they're migrated. */
 907
 908	DP_VERBOSE(p_hwfn,
 909		   QED_MSG_RDMA,
 910		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 911		   reject ? 0xffff : qp->icid,
 912		   ep->tcp_cid,
 913		   rc,
 914		   ep->cm_info.ird,
 915		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 916	return rc;
 917}
 918
 919static void
 920qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 921{
 922	ep->state = QED_IWARP_EP_INIT;
 923	if (ep->qp)
 924		ep->qp->ep = NULL;
 925	ep->qp = NULL;
 926	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 927
 928	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
 929		/* We don't care about the return code; it's ok if tcp_cid
 930		 * remains invalid... in this case we'll defer the allocation.
 931		 */
 932		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 933	}
 934	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 935
 936	list_del(&ep->list_entry);
 937	list_add_tail(&ep->list_entry,
 938		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 939
 940	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 941}
 942
 943void
 944qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 945{
 946	struct mpa_v2_hdr *mpa_v2_params;
 947	union async_output *async_data;
 948	u16 mpa_ird, mpa_ord;
 949	u8 mpa_data_size = 0;
 950
 951	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 952		mpa_v2_params =
 953			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 954		mpa_data_size = sizeof(*mpa_v2_params);
 955		mpa_ird = ntohs(mpa_v2_params->ird);
 956		mpa_ord = ntohs(mpa_v2_params->ord);
 957
 958		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 959		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 960	}
 961	async_data = &ep->ep_buffer_virt->async_output;
 962
 963	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 964	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
 965				       mpa_data_size;
 966}
 967
 968void
 969qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 970{
 971	struct qed_iwarp_cm_event_params params;
 972
 973	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 974		DP_NOTICE(p_hwfn,
 975			  "MPA reply event not expected on passive side!\n");
 976		return;
 977	}
 978
 979	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 980
 981	qed_iwarp_parse_private_data(p_hwfn, ep);
 982
 983	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 984		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 985		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 986
 987	params.cm_info = &ep->cm_info;
 988	params.ep_context = ep;
 989	params.status = 0;
 990
 991	ep->mpa_reply_processed = true;
 992
 993	ep->event_cb(ep->cb_context, &params);
 994}
 995
 996#define QED_IWARP_CONNECT_MODE_STRING(ep) \
 997	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
 998
 999/* Called as a result of the event:
1000 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1001 */
1002static void
1003qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1004		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1005{
1006	struct qed_iwarp_cm_event_params params;
1007
1008	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1009		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1010	else
1011		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1012
1013	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1014		qed_iwarp_parse_private_data(p_hwfn, ep);
1015
1016	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1017		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1018		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1019
1020	params.cm_info = &ep->cm_info;
1021
1022	params.ep_context = ep;
1023
1024	ep->state = QED_IWARP_EP_CLOSED;
1025
1026	switch (fw_return_code) {
1027	case RDMA_RETURN_OK:
1028		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1029		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1030		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1031		ep->state = QED_IWARP_EP_ESTABLISHED;
1032		params.status = 0;
1033		break;
1034	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1035		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1036			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1037		params.status = -EBUSY;
1038		break;
1039	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1040		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1041			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1042		params.status = -ECONNREFUSED;
1043		break;
1044	case IWARP_CONN_ERROR_MPA_RST:
1045		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1046			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1047			  ep->tcp_cid);
1048		params.status = -ECONNRESET;
1049		break;
1050	case IWARP_CONN_ERROR_MPA_FIN:
1051		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1052			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1053		params.status = -ECONNREFUSED;
1054		break;
1055	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1056		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1057			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1058		params.status = -ECONNREFUSED;
1059		break;
1060	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1061		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1062			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1063		params.status = -ECONNREFUSED;
1064		break;
1065	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1066		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1067			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1068		params.status = -ECONNREFUSED;
1069		break;
1070	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1071		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1072			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1073		params.status = -ECONNREFUSED;
1074		break;
1075	case IWARP_CONN_ERROR_MPA_TERMINATE:
1076		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1077			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1078		params.status = -ECONNREFUSED;
1079		break;
1080	default:
1081		params.status = -ECONNRESET;
1082		break;
1083	}
1084
1085	ep->event_cb(ep->cb_context, &params);
1086
1087	/* On the passive side, if there is no associated QP (REJECT) we need
1088	 * to return the ep to the pool (in the regular case a replacement
1089	 * element is added back to the pool from accept instead of this one).
1090	 * In both cases we need to remove it from the ep_list.
1091	 */
1092	if (fw_return_code != RDMA_RETURN_OK) {
1093		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1094		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1095		    (!ep->qp)) {	/* Rejected */
1096			qed_iwarp_return_ep(p_hwfn, ep);
1097		} else {
1098			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1099			list_del(&ep->list_entry);
1100			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1101		}
1102	}
1103}
1104
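/* (Illustrative note, not in the original source: with MPA enhanced
 * negotiation the first two u16 words of the outgoing private data carry
 * ird/ord plus the RTR flags. For example, assuming ird = 8, ord = 8 and
 * rtr_type = MPA_RTR_TYPE_ZERO_WRITE | MPA_RTR_TYPE_ZERO_READ, the function
 * below emits ird = 0x8008 (MPA_V2_PEER2PEER_MODEL | 8) and
 * ord = 0xC008 (MPA_V2_WRITE_RTR | MPA_V2_READ_RTR | 8), both in network
 * byte order.)
 */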
1105static void
1106qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1107			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1108{
1109	struct mpa_v2_hdr *mpa_v2_params;
1110	u16 mpa_ird, mpa_ord;
1111
1112	*mpa_data_size = 0;
1113	if (MPA_REV2(ep->mpa_rev)) {
1114		mpa_v2_params =
1115		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1116		*mpa_data_size = sizeof(*mpa_v2_params);
1117
1118		mpa_ird = (u16)ep->cm_info.ird;
1119		mpa_ord = (u16)ep->cm_info.ord;
1120
1121		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1122			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1123
1124			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1125				mpa_ird |= MPA_V2_SEND_RTR;
1126
1127			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1128				mpa_ord |= MPA_V2_WRITE_RTR;
1129
1130			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1131				mpa_ord |= MPA_V2_READ_RTR;
1132		}
1133
1134		mpa_v2_params->ird = htons(mpa_ird);
1135		mpa_v2_params->ord = htons(mpa_ord);
1136
1137		DP_VERBOSE(p_hwfn,
1138			   QED_MSG_RDMA,
1139			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1140			   mpa_v2_params->ird,
1141			   mpa_v2_params->ord,
1142			   *((u32 *)mpa_v2_params),
1143			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1144			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1145			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1146			   !!(mpa_ird & MPA_V2_SEND_RTR),
1147			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1148			   !!(mpa_ord & MPA_V2_READ_RTR));
1149	}
1150}
1151
1152int qed_iwarp_connect(void *rdma_cxt,
1153		      struct qed_iwarp_connect_in *iparams,
1154		      struct qed_iwarp_connect_out *oparams)
1155{
1156	struct qed_hwfn *p_hwfn = rdma_cxt;
1157	struct qed_iwarp_info *iwarp_info;
1158	struct qed_iwarp_ep *ep;
1159	u8 mpa_data_size = 0;
1160	u8 ts_hdr_size = 0;
1161	u32 cid;
1162	int rc;
1163
1164	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1165	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1166		DP_NOTICE(p_hwfn,
1167			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1168			  iparams->qp->icid, iparams->cm_info.ord,
1169			  iparams->cm_info.ird);
1170
1171		return -EINVAL;
1172	}
1173
1174	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1175
1176	/* Allocate ep object */
1177	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1178	if (rc)
1179		return rc;
1180
1181	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1182	if (rc)
1183		goto err;
1184
1185	ep->tcp_cid = cid;
1186
1187	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1188	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1189	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1190
1191	ep->qp = iparams->qp;
1192	ep->qp->ep = ep;
1193	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1194	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1195	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1196
1197	ep->cm_info.ord = iparams->cm_info.ord;
1198	ep->cm_info.ird = iparams->cm_info.ird;
1199
1200	ep->rtr_type = iwarp_info->rtr_type;
1201	if (!iwarp_info->peer2peer)
1202		ep->rtr_type = MPA_RTR_TYPE_NONE;
1203
1204	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1205		ep->cm_info.ord = 1;
1206
1207	ep->mpa_rev = iwarp_info->mpa_rev;
1208
1209	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1210
1211	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1212	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1213				       mpa_data_size;
1214
1215	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1216	       iparams->cm_info.private_data,
1217	       iparams->cm_info.private_data_len);
1218
1219	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
1220		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
1221
1222	ep->mss = iparams->mss - ts_hdr_size;
1223	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
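	/* (Illustrative example, not in the original source: with TCP
	 * timestamps enabled and iparams->mss = 1460, ep->mss becomes
	 * 1460 - TIMESTAMP_HEADER_SIZE = 1448, which is still below the
	 * QED_IWARP_MAX_FW_MSS (4120) cap.)
	 */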
1224
1225	ep->event_cb = iparams->event_cb;
1226	ep->cb_context = iparams->cb_context;
1227	ep->connect_mode = TCP_CONNECT_ACTIVE;
1228
1229	oparams->ep_context = ep;
1230
1231	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1232
1233	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1234		   iparams->qp->icid, ep->tcp_cid, rc);
1235
1236	if (rc) {
1237		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1238		goto err;
1239	}
1240
1241	return rc;
1242err:
1243	qed_iwarp_cid_cleaned(p_hwfn, cid);
1244
1245	return rc;
1246}
1247
1248static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1249{
1250	struct qed_iwarp_ep *ep = NULL;
1251	int rc;
1252
1253	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1254
1255	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1256		DP_ERR(p_hwfn, "Ep list is empty\n");
1257		goto out;
1258	}
1259
1260	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1261			      struct qed_iwarp_ep, list_entry);
1262
1263	/* In some cases we could have failed to allocate a tcp cid when the ep
1264	 * was added from accept / a failure path; retry now. Not the common case.
1265	 */
1266	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1267		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1268
1269		/* if we fail we could look for another entry with a valid
1270		 * tcp_cid, but since we don't expect to reach this anyway
1271		 * it's not worth the handling
1272		 */
1273		if (rc) {
1274			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1275			ep = NULL;
1276			goto out;
1277		}
1278	}
1279
1280	list_del(&ep->list_entry);
1281
1282out:
1283	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1284	return ep;
1285}
1286
1287#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1288#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1289
1290/* This function waits for all the bits of a bmap to be cleared; as long as
1291 * there is progress (i.e. the number of bits left to be cleared decreases)
1292 * it keeps waiting.
1293 */
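/* (Illustrative note, not in the original source: the polls below are
 * QED_IWARP_MAX_CID_CLEAN_TIME (100) ms apart, and the wait gives up only
 * after more than QED_IWARP_MAX_NO_PROGRESS_CNT consecutive polls in which
 * no bit was cleared.)
 */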
1294static int
1295qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1296{
1297	int prev_weight = 0;
1298	int wait_count = 0;
1299	int weight = 0;
1300
1301	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1302	prev_weight = weight;
1303
1304	while (weight) {
1305		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1306
1307		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1308
1309		if (prev_weight == weight) {
1310			wait_count++;
1311		} else {
1312			prev_weight = weight;
1313			wait_count = 0;
1314		}
1315
1316		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1317			DP_NOTICE(p_hwfn,
1318				  "%s bitmap wait timed out (%d cids pending)\n",
1319				  bmap->name, weight);
1320			return -EBUSY;
1321		}
1322	}
1323	return 0;
1324}
1325
1326static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1327{
1328	int rc;
1329	int i;
1330
1331	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1332					    &p_hwfn->p_rdma_info->tcp_cid_map);
1333	if (rc)
1334		return rc;
1335
1336	/* Now free the tcp cids from the main cid map */
1337	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1338		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1339
1340	/* Now wait for all cids to be completed */
1341	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1342					      &p_hwfn->p_rdma_info->cid_map);
1343}
1344
1345static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1346{
1347	struct qed_iwarp_ep *ep;
1348
1349	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1350		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1351
1352		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1353				      struct qed_iwarp_ep, list_entry);
1354
1355		if (!ep) {
1356			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1357			break;
1358		}
1359		list_del(&ep->list_entry);
1360
1361		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362
1363		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1364			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1365
1366		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1367	}
1368}
1369
1370static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1371{
1372	struct qed_iwarp_ep *ep;
1373	int rc = 0;
1374	int count;
1375	u32 cid;
1376	int i;
1377
1378	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1379	for (i = 0; i < count; i++) {
1380		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1381		if (rc)
1382			return rc;
1383
1384		/* During initialization we allocate from the main pool;
1385		 * afterwards we allocate only from the tcp_cid map.
1386		 */
1387		if (init) {
1388			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1389			if (rc)
1390				goto err;
1391			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1392		} else {
1393			/* We don't care about the return code; it's ok if
1394			 * tcp_cid remains invalid... in this case we'll
1395			 * defer the allocation.
1396			 */
1397			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1398		}
1399
1400		ep->tcp_cid = cid;
1401
1402		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1403		list_add_tail(&ep->list_entry,
1404			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1405		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1406	}
1407
1408	return rc;
1409
1410err:
1411	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1412
1413	return rc;
1414}
1415
1416int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1417{
1418	int rc;
1419
1420	/* Allocate bitmap for tcp cids. These are used by the passive side
1421	 * to ensure it can allocate a pre-acquired tcp cid during dpc,
1422	 * without requiring dynamic allocation of ilt.
1423	 */
1424	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1425				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1426	if (rc) {
1427		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1428			   "Failed to allocate tcp cid, rc = %d\n", rc);
1429		return rc;
1430	}
1431
1432	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1433	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1434
1435	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1436	if (rc)
1437		return rc;
1438
1439	return qed_ooo_alloc(p_hwfn);
1440}
1441
1442void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1443{
1444	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1445
1446	qed_ooo_free(p_hwfn);
1447	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1448	kfree(iwarp_info->mpa_bufs);
1449	kfree(iwarp_info->partial_fpdus);
1450	kfree(iwarp_info->mpa_intermediate_buf);
1451}
1452
1453int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1454{
1455	struct qed_hwfn *p_hwfn = rdma_cxt;
1456	struct qed_iwarp_ep *ep;
1457	u8 mpa_data_size = 0;
1458	int rc;
1459
1460	ep = iparams->ep_context;
1461	if (!ep) {
1462		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
1463		return -EINVAL;
1464	}
1465
1466	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1467		   iparams->qp->icid, ep->tcp_cid);
1468
1469	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1470	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1471		DP_VERBOSE(p_hwfn,
1472			   QED_MSG_RDMA,
1473			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1474			   iparams->qp->icid,
1475			   ep->tcp_cid, iparams->ord, iparams->ird);
1476		return -EINVAL;
1477	}
1478
1479	qed_iwarp_prealloc_ep(p_hwfn, false);
1480
1481	ep->cb_context = iparams->cb_context;
1482	ep->qp = iparams->qp;
1483	ep->qp->ep = ep;
1484
1485	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1486		/* Negotiate ord/ird: if the upper layer requested an ord larger
1487		 * than the ird advertised by the remote, we must decrease our ord.
1488		 */
1489		if (iparams->ord > ep->cm_info.ird)
1490			iparams->ord = ep->cm_info.ird;
1491
1492		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1493		    (iparams->ird == 0))
1494			iparams->ird = 1;
1495	}
1496
1497	/* Update cm_info ord/ird to be negotiated values */
1498	ep->cm_info.ord = iparams->ord;
1499	ep->cm_info.ird = iparams->ird;
1500
1501	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1502
1503	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1504	ep->cm_info.private_data_len = iparams->private_data_len +
1505				       mpa_data_size;
1506
1507	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1508	       iparams->private_data, iparams->private_data_len);
1509
1510	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1511	if (rc)
1512		qed_iwarp_modify_qp(p_hwfn,
1513				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1514
1515	return rc;
1516}
1517
1518int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1519{
1520	struct qed_hwfn *p_hwfn = rdma_cxt;
1521	struct qed_iwarp_ep *ep;
1522	u8 mpa_data_size = 0;
1523
1524	ep = iparams->ep_context;
1525	if (!ep) {
1526		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
1527		return -EINVAL;
1528	}
1529
1530	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1531
1532	ep->cb_context = iparams->cb_context;
1533	ep->qp = NULL;
1534
1535	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1536
1537	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1538	ep->cm_info.private_data_len = iparams->private_data_len +
1539				       mpa_data_size;
1540
1541	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1542	       iparams->private_data, iparams->private_data_len);
1543
1544	return qed_iwarp_mpa_offload(p_hwfn, ep);
1545}
1546
1547static void
1548qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1549			struct qed_iwarp_cm_info *cm_info)
1550{
1551	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1552		   cm_info->ip_version);
1553
1554	if (cm_info->ip_version == QED_TCP_IPV4)
1555		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1556			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1557			   cm_info->remote_ip, cm_info->remote_port,
1558			   cm_info->local_ip, cm_info->local_port,
1559			   cm_info->vlan);
1560	else
1561		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1562			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1563			   cm_info->remote_ip, cm_info->remote_port,
1564			   cm_info->local_ip, cm_info->local_port,
1565			   cm_info->vlan);
1566
1567	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1568		   "private_data_len = %x ord = %d, ird = %d\n",
1569		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1570}
1571
1572static int
1573qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1574		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1575{
1576	int rc;
1577
1578	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1579				    (u16)buf->buff_size, buf, 1);
1580	if (rc) {
1581		DP_NOTICE(p_hwfn,
1582			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1583			  rc, handle);
1584		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1585				  buf->data, buf->data_phys_addr);
1586		kfree(buf);
1587	}
1588
1589	return rc;
1590}
1591
1592static bool
1593qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1594{
1595	struct qed_iwarp_ep *ep = NULL;
1596	bool found = false;
1597
1598	list_for_each_entry(ep,
1599			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1600			    list_entry) {
1601		if ((ep->cm_info.local_port == cm_info->local_port) &&
1602		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1603		    (ep->cm_info.vlan == cm_info->vlan) &&
1604		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1605			    sizeof(cm_info->local_ip)) &&
1606		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1607			    sizeof(cm_info->remote_ip))) {
1608			found = true;
1609			break;
1610		}
1611	}
1612
1613	if (found) {
1614		DP_NOTICE(p_hwfn,
1615			  "SYN received on active connection - dropping\n");
1616		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1617
1618		return true;
1619	}
1620
1621	return false;
1622}
1623
1624static struct qed_iwarp_listener *
1625qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1626		       struct qed_iwarp_cm_info *cm_info)
1627{
1628	struct qed_iwarp_listener *listener = NULL;
1629	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1630	bool found = false;
1631
1632	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1633
1634	list_for_each_entry(listener,
1635			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1636			    list_entry) {
1637		if (listener->port == cm_info->local_port) {
1638			if (!memcmp(listener->ip_addr,
1639				    ip_zero, sizeof(ip_zero))) {
1640				found = true;
1641				break;
1642			}
1643
1644			if (!memcmp(listener->ip_addr,
1645				    cm_info->local_ip,
1646				    sizeof(cm_info->local_ip)) &&
1647			    (listener->vlan == cm_info->vlan)) {
1648				found = true;
1649				break;
1650			}
1651		}
1652	}
1653
1654	if (found) {
1655		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1656			   listener);
1657		return listener;
1658	}
1659
1660	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1661	return NULL;
1662}
1663
1664static int
1665qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1666		       struct qed_iwarp_cm_info *cm_info,
1667		       void *buf,
1668		       u8 *remote_mac_addr,
1669		       u8 *local_mac_addr,
1670		       int *payload_len, int *tcp_start_offset)
1671{
1672	struct vlan_ethhdr *vethh;
1673	bool vlan_valid = false;
1674	struct ipv6hdr *ip6h;
1675	struct ethhdr *ethh;
1676	struct tcphdr *tcph;
1677	struct iphdr *iph;
1678	int eth_hlen;
1679	int ip_hlen;
1680	int eth_type;
1681	int i;
1682
1683	ethh = buf;
1684	eth_type = ntohs(ethh->h_proto);
1685	if (eth_type == ETH_P_8021Q) {
1686		vlan_valid = true;
1687		vethh = (struct vlan_ethhdr *)ethh;
1688		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1689		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1690	}
1691
1692	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1693
1694	ether_addr_copy(remote_mac_addr, ethh->h_source);
1695	ether_addr_copy(local_mac_addr, ethh->h_dest);
1696
1697	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1698		   eth_type, ethh->h_source);
1699
1700	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1701		   eth_hlen, ethh->h_dest);
1702
1703	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1704
1705	if (eth_type == ETH_P_IP) {
1706		if (iph->protocol != IPPROTO_TCP) {
1707			DP_NOTICE(p_hwfn,
1708				  "Unexpected ip protocol on ll2 %x\n",
1709				  iph->protocol);
1710			return -EINVAL;
1711		}
1712
1713		cm_info->local_ip[0] = ntohl(iph->daddr);
1714		cm_info->remote_ip[0] = ntohl(iph->saddr);
1715		cm_info->ip_version = TCP_IPV4;
1716
1717		ip_hlen = (iph->ihl) * sizeof(u32);
1718		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1719	} else if (eth_type == ETH_P_IPV6) {
1720		ip6h = (struct ipv6hdr *)iph;
1721
1722		if (ip6h->nexthdr != IPPROTO_TCP) {
1723			DP_NOTICE(p_hwfn,
1724				  "Unexpected ip protocol on ll2 %x\n",
1725				  ip6h->nexthdr);
1726			return -EINVAL;
1727		}
1728
1729		for (i = 0; i < 4; i++) {
1730			cm_info->local_ip[i] =
1731			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1732			cm_info->remote_ip[i] =
1733			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1734		}
1735		cm_info->ip_version = TCP_IPV6;
1736
1737		ip_hlen = sizeof(*ip6h);
1738		*payload_len = ntohs(ip6h->payload_len);
1739	} else {
1740		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1741		return -EINVAL;
1742	}
1743
1744	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1745
1746	if (!tcph->syn) {
1747		DP_NOTICE(p_hwfn,
1748			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1749			  iph->ihl, tcph->source, tcph->dest);
1750		return -EINVAL;
1751	}
1752
1753	cm_info->local_port = ntohs(tcph->dest);
1754	cm_info->remote_port = ntohs(tcph->source);
1755
1756	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1757
1758	*tcp_start_offset = eth_hlen + ip_hlen;
1759
1760	return 0;
1761}
1762
1763static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1764						      u16 cid)
1765{
1766	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1767	struct qed_iwarp_fpdu *partial_fpdu;
1768	u32 idx;
1769
1770	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1771	if (idx >= iwarp_info->max_num_partial_fpdus) {
1772		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1773		       iwarp_info->max_num_partial_fpdus);
1774		return NULL;
1775	}
1776
1777	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1778
1779	return partial_fpdu;
1780}
1781
1782enum qed_iwarp_mpa_pkt_type {
1783	QED_IWARP_MPA_PKT_PACKED,
1784	QED_IWARP_MPA_PKT_PARTIAL,
1785	QED_IWARP_MPA_PKT_UNALIGNED
1786};
1787
1788#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1789#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1790#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1791
1792/* Pad to multiple of 4 */
1793#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1794#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1795	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1796					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1797					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
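/* (Illustrative example, not in the original source: for an MPA length
 * field of 29 ULPDU bytes, QED_IWARP_FPDU_LEN_WITH_PAD(29) =
 * ALIGN(29 + 2, 4) + 4 = 32 + 4 = 36 bytes on the wire, i.e. the 2-byte
 * length field plus payload padded to a multiple of 4, plus the CRC32.)
 */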
1798
1799/* an fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1800#define QED_IWARP_MAX_BDS_PER_FPDU 3
1801
1802static const char * const pkt_type_str[] = {
1803	"QED_IWARP_MPA_PKT_PACKED",
1804	"QED_IWARP_MPA_PKT_PARTIAL",
1805	"QED_IWARP_MPA_PKT_UNALIGNED"
1806};
1807
1808static int
1809qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1810		      struct qed_iwarp_fpdu *fpdu,
1811		      struct qed_iwarp_ll2_buff *buf);
1812
1813static enum qed_iwarp_mpa_pkt_type
1814qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1815		       struct qed_iwarp_fpdu *fpdu,
1816		       u16 tcp_payload_len, u8 *mpa_data)
1817{
1818	enum qed_iwarp_mpa_pkt_type pkt_type;
1819	u16 mpa_len;
1820
1821	if (fpdu->incomplete_bytes) {
1822		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1823		goto out;
1824	}
1825
1826	/* Special case of one byte remaining: only the upper byte of the MPA
1827	 * length is available; the lower byte will be read from the next packet.
1828	 */
1829	if (tcp_payload_len == 1) {
1830		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1831		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1832		goto out;
1833	}
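	/* (Illustrative example, not in the original source: if the MPA
	 * length field 0x011c is split across segments, this packet supplies
	 * the upper byte, so fpdu_length temporarily holds 0x0100; the lower
	 * byte (0x1c) is OR'ed in by qed_iwarp_update_fpdu_length() when the
	 * next packet arrives.)
	 */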
1834
1835	mpa_len = ntohs(*((u16 *)(mpa_data)));
1836	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1837
1838	if (fpdu->fpdu_length <= tcp_payload_len)
1839		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1840	else
1841		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1842
1843out:
1844	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1845		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1846		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1847
1848	return pkt_type;
1849}
1850
1851static void
1852qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1853		    struct qed_iwarp_fpdu *fpdu,
1854		    struct unaligned_opaque_data *pkt_data,
1855		    u16 tcp_payload_size, u8 placement_offset)
1856{
1857	fpdu->mpa_buf = buf;
1858	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1859	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1860	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1861	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1862
1863	if (tcp_payload_size == 1)
1864		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1865	else if (tcp_payload_size < fpdu->fpdu_length)
1866		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1867	else
1868		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1869
1870	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1871}
1872
1873static int
1874qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1875		 struct qed_iwarp_fpdu *fpdu,
1876		 struct unaligned_opaque_data *pkt_data,
1877		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1878{
1879	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1880	int rc;
1881
1882	/* We need to copy the data from the partial packet stored in fpdu
1883	 * to the new buf; for this we also need to move the data currently
1884	 * placed in the buf. The assumption is that the buffer is big enough
1885	 * (since fpdu_length <= mss). We use an intermediate buffer since
1886	 * we may need to copy the new data to an overlapping location.
1887	 */
1888	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1889		DP_ERR(p_hwfn,
1890		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1891		       buf->buff_size, fpdu->mpa_frag_len,
1892		       tcp_payload_size, fpdu->incomplete_bytes);
1893		return -EINVAL;
1894	}
1895
1896	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1897		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1898		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1899		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1900		   tcp_payload_size);
1901
1902	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1903	memcpy(tmp_buf + fpdu->mpa_frag_len,
1904	       (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1905	       tcp_payload_size);
1906
1907	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1908	if (rc)
1909		return rc;
1910
1911	/* If we managed to post the buffer, copy the data to the new buffer;
1912	 * otherwise this will occur in the next round...
1913	 */
1914	memcpy((u8 *)(buf->data), tmp_buf,
1915	       fpdu->mpa_frag_len + tcp_payload_size);
1916
1917	fpdu->mpa_buf = buf;
1918	/* fpdu->pkt_hdr remains as is */
1919	/* fpdu->mpa_frag is overridden with new buf */
1920	fpdu->mpa_frag = buf->data_phys_addr;
1921	fpdu->mpa_frag_virt = buf->data;
1922	fpdu->mpa_frag_len += tcp_payload_size;
1923
1924	fpdu->incomplete_bytes -= tcp_payload_size;
1925
1926	DP_VERBOSE(p_hwfn,
1927		   QED_MSG_RDMA,
1928		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1929		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1930		   fpdu->incomplete_bytes);
1931
1932	return 0;
1933}
1934
1935static void
1936qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1937			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1938{
1939	u16 mpa_len;
1940
1941	/* Update incomplete packets if needed */
1942	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1943		/* Missing lower byte is now available */
1944		mpa_len = fpdu->fpdu_length | *mpa_data;
1945		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1946		/* one byte of hdr */
1947		fpdu->mpa_frag_len = 1;
1948		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1949		DP_VERBOSE(p_hwfn,
1950			   QED_MSG_RDMA,
1951			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1952			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1953	}
1954}
1955
1956#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1957	(GET_FIELD((_curr_pkt)->flags,	   \
1958		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1959
1960/* This function is used to recycle a buffer using the ll2 drop option. It
1961 * relies on the tx completion mechanism to ensure that all buffers posted
1962 * to tx before this one were completed. The buffer sent here is passed as
1963 * a cookie to the tx completion function and can then be reposted to the
1964 * rx chain when done. The flow that requires this is the one where an FPDU
1965 * splits over more than 3 tcp segments; in this case the driver needs to
1966 * re-post an rx buffer instead of the one received, but it can't simply
1967 * repost the buffer it copied from, since that buffer may originally have
1968 * been a packed FPDU still partially posted to FW; FW must be done with it.
1969 */
1970static int
1971qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1972		      struct qed_iwarp_fpdu *fpdu,
1973		      struct qed_iwarp_ll2_buff *buf)
1974{
1975	struct qed_ll2_tx_pkt_info tx_pkt;
1976	u8 ll2_handle;
1977	int rc;
1978
1979	memset(&tx_pkt, 0, sizeof(tx_pkt));
1980	tx_pkt.num_of_bds = 1;
1981	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1982	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1983	tx_pkt.first_frag = fpdu->pkt_hdr;
1984	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1985	buf->piggy_buf = NULL;
1986	tx_pkt.cookie = buf;
1987
1988	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
1989
1990	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
1991	if (rc)
1992		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1993			   "Can't drop packet rc=%d\n", rc);
1994
1995	DP_VERBOSE(p_hwfn,
1996		   QED_MSG_RDMA,
1997		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
1998		   (unsigned long int)tx_pkt.first_frag,
1999		   tx_pkt.first_frag_len, buf, rc);
2000
2001	return rc;
2002}
2003
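/* Send only the stored packet header to FW over the loopback tx queue
 * (on the "aligned right trimmed" queue). This is used when an rx completion
 * indicates that the TCP window right edge was reached but the driver cannot
 * yet hand FW a complete FPDU, so the window-edge indication is not lost.
 */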
2004static int
2005qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2006{
2007	struct qed_ll2_tx_pkt_info tx_pkt;
2008	u8 ll2_handle;
2009	int rc;
2010
2011	memset(&tx_pkt, 0, sizeof(tx_pkt));
2012	tx_pkt.num_of_bds = 1;
2013	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2014	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2015
2016	tx_pkt.first_frag = fpdu->pkt_hdr;
2017	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2018	tx_pkt.enable_ip_cksum = true;
2019	tx_pkt.enable_l4_cksum = true;
2020	tx_pkt.calc_ip_len = true;
2021	/* vlan overload with enum iwarp_ll2_tx_queues */
2022	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2023
2024	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2025
2026	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2027	if (rc)
2028		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2029			   "Can't send right edge rc=%d\n", rc);
2030	DP_VERBOSE(p_hwfn,
2031		   QED_MSG_RDMA,
2032		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2033		   tx_pkt.num_of_bds,
2034		   (unsigned long int)tx_pkt.first_frag,
2035		   tx_pkt.first_frag_len, rc);
2036
2037	return rc;
2038}
2039
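/* Transmit an aligned FPDU to FW over the loopback tx queue. The packet is
 * built from up to three bds: the stored packet header, the fragment already
 * described by the fpdu (mpa_frag/mpa_frag_len) and, for an unaligned FPDU,
 * the remainder taken from the current rx buffer. Buffers that may be
 * released once FW is done with them are attached as the tx cookie.
 */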
2040static int
2041qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2042		    struct qed_iwarp_fpdu *fpdu,
2043		    struct unaligned_opaque_data *curr_pkt,
2044		    struct qed_iwarp_ll2_buff *buf,
2045		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2046{
2047	struct qed_ll2_tx_pkt_info tx_pkt;
2048	u8 ll2_handle;
2049	int rc;
2050
2051	memset(&tx_pkt, 0, sizeof(tx_pkt));
2052
2053	/* An unaligned packet means it's split over two tcp segments. So the
2054	 * complete packet requires 3 bds, one for the header, one for the
2055	 * part of the fpdu from the first tcp segment, and the last fragment
2056	 * will point to the remainder of the fpdu. A packed pdu requires only
2057	 * two bds, one for the header and one for the data.
2058	 */
2059	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2060	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2061	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2062
2063	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2064	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2065	    tcp_payload_size <= fpdu->fpdu_length)
2066		tx_pkt.cookie = fpdu->mpa_buf;
2067
2068	tx_pkt.first_frag = fpdu->pkt_hdr;
2069	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2070	tx_pkt.enable_ip_cksum = true;
2071	tx_pkt.enable_l4_cksum = true;
2072	tx_pkt.calc_ip_len = true;
2073	/* vlan overload with enum iwarp_ll2_tx_queues */
2074	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2075
2076	/* Special case of an unaligned packet that is not packed: both
2077	 * buffers need to be passed as the cookie so both get released.
2078	 */
2079	if (tcp_payload_size == fpdu->incomplete_bytes)
2080		fpdu->mpa_buf->piggy_buf = buf;
2081
2082	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2083
2084	/* Set first fragment to header */
2085	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2086	if (rc)
2087		goto out;
2088
2089	/* Set second fragment to first part of packet */
2090	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2091					       fpdu->mpa_frag,
2092					       fpdu->mpa_frag_len);
2093	if (rc)
2094		goto out;
2095
2096	if (!fpdu->incomplete_bytes)
2097		goto out;
2098
2099	/* Set third fragment to second part of the packet */
2100	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2101					       ll2_handle,
2102					       buf->data_phys_addr +
2103					       curr_pkt->first_mpa_offset,
2104					       fpdu->incomplete_bytes);
2105out:
2106	DP_VERBOSE(p_hwfn,
2107		   QED_MSG_RDMA,
2108		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2109		   tx_pkt.num_of_bds,
2110		   tx_pkt.first_frag_len,
2111		   fpdu->mpa_frag_len,
2112		   fpdu->incomplete_bytes, rc);
2113
2114	return rc;
2115}
2116
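/* Unpack the opaque data reported by the ll2 rx completion into the
 * unaligned_opaque_data layout, convert cid and first_mpa_offset to CPU
 * byte order and add the tcp payload offset to first_mpa_offset.
 */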
2117static void
2118qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2119		       struct unaligned_opaque_data *curr_pkt,
2120		       u32 opaque_data0, u32 opaque_data1)
2121{
2122	u64 opaque_data;
2123
2124	opaque_data = HILO_64(opaque_data1, opaque_data0);
2125	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2126
2127	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2128				     le16_to_cpu(curr_pkt->first_mpa_offset);
2129	curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2130}
2131
2132/* This function is called when an unaligned or incomplete MPA packet arrives;
2133 * the driver needs to align the packet, perhaps using previously received
2134 * data, and send it down to FW once it is aligned.
2135 */
2136static int
2137qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2138			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2139{
2140	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2141	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2142	enum qed_iwarp_mpa_pkt_type pkt_type;
2143	struct qed_iwarp_fpdu *fpdu;
2144	int rc = -EINVAL;
2145	u8 *mpa_data;
2146
2147	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2148	if (!fpdu) { /* something corrupt with cid, post rx back */
2149		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2150		       curr_pkt->cid);
2151		goto err;
2152	}
2153
2154	do {
2155		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2156
2157		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2158						  mpa_buf->tcp_payload_len,
2159						  mpa_data);
2160
2161		switch (pkt_type) {
2162		case QED_IWARP_MPA_PKT_PARTIAL:
2163			qed_iwarp_init_fpdu(buf, fpdu,
2164					    curr_pkt,
2165					    mpa_buf->tcp_payload_len,
2166					    mpa_buf->placement_offset);
2167
2168			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2169				mpa_buf->tcp_payload_len = 0;
2170				break;
2171			}
2172
2173			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2174
2175			if (rc) {
2176				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2177					   "Can't send FPDU:reset rc=%d\n", rc);
2178				memset(fpdu, 0, sizeof(*fpdu));
2179				break;
2180			}
2181
2182			mpa_buf->tcp_payload_len = 0;
2183			break;
2184		case QED_IWARP_MPA_PKT_PACKED:
2185			qed_iwarp_init_fpdu(buf, fpdu,
2186					    curr_pkt,
2187					    mpa_buf->tcp_payload_len,
2188					    mpa_buf->placement_offset);
2189
2190			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2191						 mpa_buf->tcp_payload_len,
2192						 pkt_type);
2193			if (rc) {
2194				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2195					   "Can't send FPDU:reset rc=%d\n", rc);
2196				memset(fpdu, 0, sizeof(*fpdu));
2197				break;
2198			}
2199
2200			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2201			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2202			break;
2203		case QED_IWARP_MPA_PKT_UNALIGNED:
2204			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2205			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2206				/* special handling of fpdu split over more
2207				 * than 2 segments
2208				 */
2209				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2210					rc = qed_iwarp_win_right_edge(p_hwfn,
2211								      fpdu);
2212					/* packet will be re-processed later */
2213					if (rc)
2214						return rc;
2215				}
2216
2217				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2218						      buf,
2219						      mpa_buf->tcp_payload_len);
2220				if (rc) /* packet will be re-processed later */
2221					return rc;
2222
2223				mpa_buf->tcp_payload_len = 0;
2224				break;
2225			}
2226
2227			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2228						 mpa_buf->tcp_payload_len,
2229						 pkt_type);
2230			if (rc) {
2231				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2232					   "Can't send FPDU:delay rc=%d\n", rc);
2233				/* don't reset fpdu -> we need it for next
2234				 * classify
2235				 */
2236				break;
2237			}
2238
2239			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2240			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2241			/* The framed PDU was sent - no more incomplete bytes */
2242			fpdu->incomplete_bytes = 0;
2243			break;
2244		}
2245	} while (mpa_buf->tcp_payload_len && !rc);
2246
2247	return rc;
2248
2249err:
2250	qed_iwarp_ll2_post_rx(p_hwfn,
2251			      buf,
2252			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2253	return rc;
2254}
2255
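/* Drain the list of mpa rx buffers that are pending alignment processing.
 * A buffer that returns -EBUSY stays at the head of the pending list and
 * processing stops so it can be retried later (e.g. on the next tx
 * completion). Any other error moves the buffer back to the free list and
 * aborts further processing.
 */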
2256static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2257{
2258	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2259	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2260	int rc;
2261
2262	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2263		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2264					   struct qed_iwarp_ll2_mpa_buf,
2265					   list_entry);
2266
2267		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2268
2269		/* busy means break and continue processing later, don't
2270		 * remove the buf from the pending list.
2271		 */
2272		if (rc == -EBUSY)
2273			break;
2274
2275		list_del(&mpa_buf->list_entry);
2276		list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
2277
2278		if (rc) {	/* different error, don't continue */
2279			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2280			break;
2281		}
2282	}
2283}
2284
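/* Rx completion handler for the unaligned mpa ll2 connection. The completion
 * is recorded on an mpa_buf descriptor taken from the pre-allocated free
 * list, together with the decoded opaque data, queued on the pending list
 * and then processed by qed_iwarp_process_pending_pkts().
 */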
2285static void
2286qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2287{
2288	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2289	struct qed_iwarp_info *iwarp_info;
2290	struct qed_hwfn *p_hwfn = cxt;
2291
2292	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2293	mpa_buf = list_first_entry_or_null(&iwarp_info->mpa_buf_list,
2294					   struct qed_iwarp_ll2_mpa_buf, list_entry);
2295	if (!mpa_buf) {
2296		DP_ERR(p_hwfn, "No free mpa buf\n");
2297		goto err;
2298	}
2299
2300	list_del(&mpa_buf->list_entry);
2301	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2302			       data->opaque_data_0, data->opaque_data_1);
2303
2304	DP_VERBOSE(p_hwfn,
2305		   QED_MSG_RDMA,
2306		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2307		   data->length.packet_length, mpa_buf->data.first_mpa_offset,
2308		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2309		   mpa_buf->data.cid);
2310
2311	mpa_buf->ll2_buf = data->cookie;
2312	mpa_buf->tcp_payload_len = data->length.packet_length -
2313				   mpa_buf->data.first_mpa_offset;
2314	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2315	mpa_buf->placement_offset = data->u.placement_offset;
2316
2317	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2318
2319	qed_iwarp_process_pending_pkts(p_hwfn);
2320	return;
2321err:
2322	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2323			      iwarp_info->ll2_mpa_handle);
2324}
2325
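/* Rx completion handler for the SYN ll2 connection. The SYN is parsed into a
 * 4-tuple + vlan and matched against the listener list; if a listener is
 * found a free ep is taken and a tcp offload ramrod is posted. SYNs that
 * don't match any listener are sent back towards the chip on the loopback tx
 * queue, and on any error the rx buffer is simply re-posted.
 */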
2326static void
2327qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2328{
2329	struct qed_iwarp_ll2_buff *buf = data->cookie;
2330	struct qed_iwarp_listener *listener;
2331	struct qed_ll2_tx_pkt_info tx_pkt;
2332	struct qed_iwarp_cm_info cm_info;
2333	struct qed_hwfn *p_hwfn = cxt;
2334	u8 remote_mac_addr[ETH_ALEN];
2335	u8 local_mac_addr[ETH_ALEN];
2336	struct qed_iwarp_ep *ep;
2337	int tcp_start_offset;
2338	u8 ts_hdr_size = 0;
2339	u8 ll2_syn_handle;
2340	int payload_len;
2341	u32 hdr_size;
2342	int rc;
2343
2344	memset(&cm_info, 0, sizeof(cm_info));
2345	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2346
2347	/* Check if packet was received with errors... */
2348	if (data->err_flags) {
2349		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2350			  data->err_flags);
2351		goto err;
2352	}
2353
2354	if (GET_FIELD(data->parse_flags,
2355		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2356	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2357		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2358		goto err;
2359	}
2360
2361	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2362				    data->u.placement_offset, remote_mac_addr,
2363				    local_mac_addr, &payload_len,
2364				    &tcp_start_offset);
2365	if (rc)
2366		goto err;
2367
2368	/* Check if there is a listener for this 4-tuple+vlan */
2369	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2370	if (!listener) {
2371		DP_VERBOSE(p_hwfn,
2372			   QED_MSG_RDMA,
2373			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2374			   data->parse_flags, data->length.packet_length);
2375
2376		memset(&tx_pkt, 0, sizeof(tx_pkt));
2377		tx_pkt.num_of_bds = 1;
2378		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2379		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2380		tx_pkt.first_frag = buf->data_phys_addr +
2381				    data->u.placement_offset;
2382		tx_pkt.first_frag_len = data->length.packet_length;
2383		tx_pkt.cookie = buf;
2384
2385		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2386					       &tx_pkt, true);
2387
2388		if (rc) {
2389			DP_NOTICE(p_hwfn,
2390				  "Can't post SYN back to chip rc=%d\n", rc);
2391			goto err;
2392		}
2393		return;
2394	}
2395
2396	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2397	/* There may be an open ep on this connection if this is a syn
2398	 * retransmit... need to make sure there isn't...
2399	 */
2400	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2401		goto err;
2402
2403	ep = qed_iwarp_get_free_ep(p_hwfn);
2404	if (!ep)
2405		goto err;
2406
2407	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2408	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2409	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2410
2411	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2412	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2413
2414	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2415
2416	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
2417		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
2418
2419	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
2420		   ts_hdr_size;
2421	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2422	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
2423
2424	ep->event_cb = listener->event_cb;
2425	ep->cb_context = listener->cb_context;
2426	ep->connect_mode = TCP_CONNECT_PASSIVE;
2427
2428	ep->syn = buf;
2429	ep->syn_ip_payload_length = (u16)payload_len;
2430	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2431			   tcp_start_offset;
2432
2433	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2434	if (rc) {
2435		qed_iwarp_return_ep(p_hwfn, ep);
2436		goto err;
2437	}
2438
2439	return;
2440err:
2441	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2442}
2443
2444static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2445				     void *cookie, dma_addr_t rx_buf_addr,
2446				     bool b_last_packet)
2447{
2448	struct qed_iwarp_ll2_buff *buffer = cookie;
2449	struct qed_hwfn *p_hwfn = cxt;
2450
2451	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2452			  buffer->data, buffer->data_phys_addr);
2453	kfree(buffer);
2454}
2455
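/* Tx completion handler. The cookie attached to a tx packet is an
 * rx-originated buffer (possibly with a piggy-backed second buffer); both
 * are posted back to the rx chain here. For the mpa connection this is also
 * the point where pending rx packets that were waiting for tx ring space get
 * re-processed.
 */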
2456static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2457				      void *cookie, dma_addr_t first_frag_addr,
2458				      bool b_last_fragment, bool b_last_packet)
2459{
2460	struct qed_iwarp_ll2_buff *buffer = cookie;
2461	struct qed_iwarp_ll2_buff *piggy;
2462	struct qed_hwfn *p_hwfn = cxt;
2463
2464	if (!buffer)		/* can happen in packed mpa unaligned... */
2465		return;
2466
2467	/* this was originally an rx packet, post it back */
2468	piggy = buffer->piggy_buf;
2469	if (piggy) {
2470		buffer->piggy_buf = NULL;
2471		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2472	}
2473
2474	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2475
2476	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2477		qed_iwarp_process_pending_pkts(p_hwfn);
2478
2479	return;
2480}
2481
2482static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2483				     void *cookie, dma_addr_t first_frag_addr,
2484				     bool b_last_fragment, bool b_last_packet)
2485{
2486	struct qed_iwarp_ll2_buff *buffer = cookie;
2487	struct qed_hwfn *p_hwfn = cxt;
2488
2489	if (!buffer)
2490		return;
2491
2492	if (buffer->piggy_buf) {
2493		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2494				  buffer->piggy_buf->buff_size,
2495				  buffer->piggy_buf->data,
2496				  buffer->piggy_buf->data_phys_addr);
2497
2498		kfree(buffer->piggy_buf);
2499	}
2500
2501	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2502			  buffer->data, buffer->data_phys_addr);
2503
2504	kfree(buffer);
2505}
2506
2507/* The only slowpath for iwarp ll2 is the unaligned flush. When this
2508 * completion is received, the FPDU needs to be reset.
2509 */
2510void
2511qed_iwarp_ll2_slowpath(void *cxt,
2512		       u8 connection_handle,
2513		       u32 opaque_data_0, u32 opaque_data_1)
2514{
2515	struct unaligned_opaque_data unalign_data;
2516	struct qed_hwfn *p_hwfn = cxt;
2517	struct qed_iwarp_fpdu *fpdu;
2518
2519	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2520			       opaque_data_0, opaque_data_1);
2521
2522	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2523		   unalign_data.cid);
2524
2525	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2526	if (fpdu)
2527		memset(fpdu, 0, sizeof(*fpdu));
2528}
2529
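/* Tear down the iwarp ll2 connections (syn, ooo and unaligned mpa) and
 * remove the mac filter added at start. Handles are reset to
 * QED_IWARP_HANDLE_INVAL, so this is safe to call on a partially started
 * configuration (it is also used as the error path of qed_iwarp_ll2_start).
 */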
2530static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2531{
2532	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2533	int rc = 0;
2534
2535	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2536		rc = qed_ll2_terminate_connection(p_hwfn,
2537						  iwarp_info->ll2_syn_handle);
2538		if (rc)
2539			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2540
2541		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2542		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2543	}
2544
2545	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2546		rc = qed_ll2_terminate_connection(p_hwfn,
2547						  iwarp_info->ll2_ooo_handle);
2548		if (rc)
2549			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2550
2551		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2552		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2553	}
2554
2555	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2556		rc = qed_ll2_terminate_connection(p_hwfn,
2557						  iwarp_info->ll2_mpa_handle);
2558		if (rc)
2559			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2560
2561		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2562		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2563	}
2564
2565	qed_llh_remove_mac_filter(p_hwfn,
2566				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
2567	return rc;
2568}
2569
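/* Allocate num_rx_bufs dma-coherent buffers of buff_size bytes each and post
 * them to the rx chain of the given ll2 connection. If posting fails the
 * loop stops; buffers already handed to ll2 are freed during ll2 teardown.
 */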
2570static int
2571qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2572			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2573{
2574	struct qed_iwarp_ll2_buff *buffer;
2575	int rc = 0;
2576	int i;
2577
2578	for (i = 0; i < num_rx_bufs; i++) {
2579		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2580		if (!buffer) {
2581			rc = -ENOMEM;
2582			break;
2583		}
2584
2585		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2586						  buff_size,
2587						  &buffer->data_phys_addr,
2588						  GFP_KERNEL);
2589		if (!buffer->data) {
2590			kfree(buffer);
2591			rc = -ENOMEM;
2592			break;
2593		}
2594
2595		buffer->buff_size = buff_size;
2596		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2597		if (rc)
2598			/* buffers will be deallocated by qed_ll2 */
2599			break;
2600	}
2601	return rc;
2602}
2603
2604#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2605	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2606		ETH_CACHE_LINE_SIZE)
2607
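/* Bring up the three ll2 connections used by iwarp: a SYN connection for
 * passive connection establishment, an OOO connection for out-of-order tcp
 * segments and an unaligned MPA connection used to re-align FPDUs. This also
 * allocates the rx buffers and the bookkeeping structures (partial_fpdus,
 * mpa_bufs and the intermediate copy buffer) needed for MPA alignment.
 */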
2608static int
2609qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2610		    struct qed_rdma_start_in_params *params,
2611		    struct qed_ptt *p_ptt)
2612{
2613	struct qed_iwarp_info *iwarp_info;
2614	struct qed_ll2_acquire_data data;
2615	struct qed_ll2_cbs cbs;
2616	u32 mpa_buff_size;
2617	u16 n_ooo_bufs;
2618	int rc = 0;
2619	int i;
2620
2621	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2622	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2623	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2624	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2625
2626	iwarp_info->max_mtu = params->max_mtu;
2627
2628	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2629
2630	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2631	if (rc)
2632		return rc;
2633
2634	/* Start SYN connection */
2635	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2636	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2637	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2638	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2639	cbs.cookie = p_hwfn;
2640
2641	memset(&data, 0, sizeof(data));
2642	data.input.conn_type = QED_LL2_TYPE_IWARP;
2643	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
2644	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2645	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2646	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2647	data.input.tx_tc = PKT_LB_TC;
2648	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2649	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2650	data.cbs = &cbs;
2651
2652	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2653	if (rc) {
2654		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2655		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2656		return rc;
2657	}
2658
2659	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2660	if (rc) {
2661		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2662		goto err;
2663	}
2664
2665	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2666					 QED_IWARP_LL2_SYN_RX_SIZE,
2667					 QED_IWARP_MAX_SYN_PKT_SIZE,
2668					 iwarp_info->ll2_syn_handle);
2669	if (rc)
2670		goto err;
2671
2672	/* Start OOO connection */
2673	data.input.conn_type = QED_LL2_TYPE_OOO;
2674	data.input.mtu = params->max_mtu;
2675
2676	n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
2677		     iwarp_info->max_mtu;
2678	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2679
2680	data.input.rx_num_desc = n_ooo_bufs;
2681	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2682
2683	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2684	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2685	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2686
2687	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2688	if (rc)
2689		goto err;
2690
2691	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2692	if (rc)
2693		goto err;
2694
2695	/* Start Unaligned MPA connection */
2696	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2697	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2698
2699	memset(&data, 0, sizeof(data));
2700	data.input.conn_type = QED_LL2_TYPE_IWARP;
2701	data.input.mtu = params->max_mtu;
2702	/* FW requires that once a packet arrives OOO, there are at
2703	 * least 2 rx buffers available on the unaligned connection
2704	 * to handle the case where it is a partial fpdu.
2705	 */
2706	data.input.rx_num_desc = n_ooo_bufs * 2;
2707	data.input.tx_num_desc = data.input.rx_num_desc;
2708	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2709	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2710	data.input.secondary_queue = true;
2711	data.cbs = &cbs;
2712
2713	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2714	if (rc)
2715		goto err;
2716
2717	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2718	if (rc)
2719		goto err;
2720
2721	mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2722	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2723					 data.input.rx_num_desc,
2724					 mpa_buff_size,
2725					 iwarp_info->ll2_mpa_handle);
2726	if (rc)
2727		goto err;
2728
2729	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2730					    sizeof(*iwarp_info->partial_fpdus),
2731					    GFP_KERNEL);
2732	if (!iwarp_info->partial_fpdus) {
		rc = -ENOMEM;
2733		goto err;
	}
2734
2735	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2736
2737	iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
2738	if (!iwarp_info->mpa_intermediate_buf) {
		rc = -ENOMEM;
2739		goto err;
	}
2740
2741	/* The mpa_bufs array is used for pending RX packets received on the
2742	 * mpa ll2 that don't have room on the tx ring and require later
2743	 * processing. We can't fail to allocate such a struct at runtime,
2744	 * therefore we allocate enough to take care of all rx packets.
2745	 */
2746	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2747				       sizeof(*iwarp_info->mpa_bufs),
2748				       GFP_KERNEL);
2749	if (!iwarp_info->mpa_bufs) {
		rc = -ENOMEM;
2750		goto err;
	}
2751
2752	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2753	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2754	for (i = 0; i < data.input.rx_num_desc; i++)
2755		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2756			      &iwarp_info->mpa_buf_list);
2757	return rc;
2758err:
2759	qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2760
2761	return rc;
2762}
2763
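/* One-time iwarp initialization called from the rdma start flow: set the
 * default tcp and MPA negotiation parameters, register the iwarp async event
 * callback and start the iwarp ll2 connections.
 */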
2764int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2765		    struct qed_rdma_start_in_params *params)
2766{
2767	struct qed_iwarp_info *iwarp_info;
2768	u32 rcv_wnd_size;
2769
2770	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2771
2772	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2773	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
2774
2775	/* rcv_wnd_scale 0 corresponds to a window of QED_IWARP_RCV_WND_SIZE_MIN */
2776	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2777	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2778	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
2779	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2780	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2781
2782	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2783
2784	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2785				MPA_RTR_TYPE_ZERO_WRITE |
2786				MPA_RTR_TYPE_ZERO_READ;
2787
2788	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2789	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2790	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2791
2792	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2793				  qed_iwarp_async_event);
2794	qed_ooo_setup(p_hwfn);
2795
2796	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
2797}
2798
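/* Stop iwarp: release pre-allocated eps, wait for all iwarp cids to be
 * cleaned, unregister the async callback and tear down the ll2 connections.
 */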
2799int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2800{
2801	int rc;
2802
2803	qed_iwarp_free_prealloc_ep(p_hwfn);
2804	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2805	if (rc)
2806		return rc;
2807
2808	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
2809
2810	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2811}
2812
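/* Handle the QP_IN_ERROR async completion: move the QP to ERROR, report a
 * CLOSE event (0 on a good close, -ECONNRESET otherwise) and unlink the ep
 * from the ep list.
 */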
2813void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2814			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2815{
2816	struct qed_iwarp_cm_event_params params;
2817
2818	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2819
2820	params.event = QED_IWARP_EVENT_CLOSE;
2821	params.ep_context = ep;
2822	params.cm_info = &ep->cm_info;
2823	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2824			 0 : -ECONNRESET;
2825
2826	ep->state = QED_IWARP_EP_CLOSED;
2827	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2828	list_del(&ep->list_entry);
2829	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2830
2831	ep->event_cb(ep->cb_context, &params);
2832}
2833
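/* Translate an EXCEPTION_DETECTED async completion from FW into the matching
 * qed_iwarp event and report it to the upper layer through the ep callback.
 * Unknown return codes are only logged.
 */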
2834void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2835				  struct qed_iwarp_ep *ep, int fw_ret_code)
2836{
2837	struct qed_iwarp_cm_event_params params;
2838	bool event_cb = false;
2839
2840	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2841		   ep->cid, fw_ret_code);
2842
2843	switch (fw_ret_code) {
2844	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2845		params.status = 0;
2846		params.event = QED_IWARP_EVENT_DISCONNECT;
2847		event_cb = true;
2848		break;
2849	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2850		params.status = -ECONNRESET;
2851		params.event = QED_IWARP_EVENT_DISCONNECT;
2852		event_cb = true;
2853		break;
2854	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2855		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2856		event_cb = true;
2857		break;
2858	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2859		params.event = QED_IWARP_EVENT_IRQ_FULL;
2860		event_cb = true;
2861		break;
2862	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2863		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2864		event_cb = true;
2865		break;
2866	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2867		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2868		event_cb = true;
2869		break;
2870	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2871		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2872		event_cb = true;
2873		break;
2874	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2875		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2876		event_cb = true;
2877		break;
2878	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2879		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2880		event_cb = true;
2881		break;
2882	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2883		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2884		event_cb = true;
2885		break;
2886	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2887		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2888		event_cb = true;
2889		break;
2890	default:
2891		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2892			   "Unhandled exception received...fw_ret_code=%d\n",
2893			   fw_ret_code);
2894		break;
2895	}
2896
2897	if (event_cb) {
2898		params.ep_context = ep;
2899		params.cm_info = &ep->cm_info;
2900		ep->event_cb(ep->cb_context, &params);
2901	}
2902}
2903
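/* Handle a failed TCP 3-way handshake. For passive connections the ep is
 * returned to the free pool (the upper layer never heard about it); for
 * active connections an ACTIVE_COMPLETE event with a failure status is
 * reported and the ep is removed from the ep list.
 */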
2904static void
2905qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2906				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2907{
2908	struct qed_iwarp_cm_event_params params;
2909
2910	memset(&params, 0, sizeof(params));
2911	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2912	params.ep_context = ep;
2913	params.cm_info = &ep->cm_info;
2914	ep->state = QED_IWARP_EP_CLOSED;
2915
2916	switch (fw_return_code) {
2917	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2918		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2919			   "%s(0x%x) TCP connect got invalid packet\n",
2920			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2921		params.status = -ECONNRESET;
2922		break;
2923	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2924		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2925			   "%s(0x%x) TCP Connection Reset\n",
2926			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2927		params.status = -ECONNRESET;
2928		break;
2929	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2930		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2931			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2932		params.status = -EBUSY;
2933		break;
2934	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2935		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2936			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2937		params.status = -ECONNREFUSED;
2938		break;
2939	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2940		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2941			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2942		params.status = -ECONNRESET;
2943		break;
2944	default:
2945		DP_ERR(p_hwfn,
2946		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
2947		       QED_IWARP_CONNECT_MODE_STRING(ep),
2948		       ep->tcp_cid, fw_return_code);
2949		params.status = -ECONNRESET;
2950		break;
2951	}
2952
2953	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2954		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
2955		qed_iwarp_return_ep(p_hwfn, ep);
2956	} else {
2957		ep->event_cb(ep->cb_context, &params);
2958		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2959		list_del(&ep->list_entry);
2960		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2961	}
2962}
2963
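/* Completion of the TCP 3-way handshake. On the passive side the stored SYN
 * buffer is returned to the rx chain and, on success, MPA request processing
 * starts; on the active side a successful handshake triggers the MPA offload
 * ramrod. Failures are handled by qed_iwarp_tcp_connect_unsuccessful().
 */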
2964void
2965qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
2966			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2967{
2968	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2969
2970	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2971		/* Done with the SYN packet, post back to ll2 rx */
2972		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
2973
2974		ep->syn = NULL;
2975
2976		/* If connect failed - upper layer doesn't know about it */
2977		if (fw_return_code == RDMA_RETURN_OK)
2978			qed_iwarp_mpa_received(p_hwfn, ep);
2979		else
2980			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2981							   fw_return_code);
2982	} else {
2983		if (fw_return_code == RDMA_RETURN_OK)
2984			qed_iwarp_mpa_offload(p_hwfn, ep);
2985		else
2986			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2987							   fw_return_code);
2988	}
2989}
2990
2991static inline bool
2992qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
2993{
2994	if (!ep || (ep->sig != QED_EP_SIG)) {
2995		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
2996		return false;
2997	}
2998
2999	return true;
3000}
3001
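/* Async event handler registered for PROTOCOLID_IWARP. For connection
 * related events the FW handle carries the ep pointer, which is validated
 * before use; CID_CLEANED carries the cid in the low dword and CQ_OVERFLOW
 * is forwarded to the rdma layer as an affiliated event.
 */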
3002static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
3003				 u8 fw_event_code, __le16 echo,
3004				 union event_ring_data *data,
3005				 u8 fw_return_code)
3006{
3007	struct regpair *fw_handle = &data->rdma_data.async_handle;
3008	struct qed_iwarp_ep *ep = NULL;
3009	u16 cid;
3010
3011	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3012						       fw_handle->lo);
3013
3014	switch (fw_event_code) {
3015	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3016		/* Async completion after TCP 3-way handshake */
3017		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3018			return -EINVAL;
3019		DP_VERBOSE(p_hwfn,
3020			   QED_MSG_RDMA,
3021			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3022			   ep->tcp_cid, fw_return_code);
3023		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3024		break;
3025	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3026		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3027			return -EINVAL;
3028		DP_VERBOSE(p_hwfn,
3029			   QED_MSG_RDMA,
3030			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3031			   ep->cid, fw_return_code);
3032		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3033		break;
3034	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3035		/* Async completion for Close Connection ramrod */
3036		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3037			return -EINVAL;
3038		DP_VERBOSE(p_hwfn,
3039			   QED_MSG_RDMA,
3040			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3041			   ep->cid, fw_return_code);
3042		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3043		break;
3044	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3045		/* Async event for active side only */
3046		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3047			return -EINVAL;
3048		DP_VERBOSE(p_hwfn,
3049			   QED_MSG_RDMA,
3050			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3051			   ep->cid, fw_return_code);
3052		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3053		break;
3054	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3055		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3056			return -EINVAL;
3057		DP_VERBOSE(p_hwfn,
3058			   QED_MSG_RDMA,
3059			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3060			   ep->cid, fw_return_code);
3061		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3062		break;
3063	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3064		cid = (u16)le32_to_cpu(fw_handle->lo);
3065		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3066			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3067		qed_iwarp_cid_cleaned(p_hwfn, cid);
3068
3069		break;
3070	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3071		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3072
3073		p_hwfn->p_rdma_info->events.affiliated_event(
3074			p_hwfn->p_rdma_info->events.context,
3075			QED_IWARP_EVENT_CQ_OVERFLOW,
3076			(void *)fw_handle);
3077		break;
3078	default:
3079		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3080		       fw_event_code);
3081		return -EINVAL;
3082	}
3083	return 0;
3084}
3085
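/* Create a listener object for a 4-tuple + vlan and add it to the listen
 * list. Incoming SYNs are matched against this list in
 * qed_iwarp_ll2_comp_syn_pkt().
 */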
3086int
3087qed_iwarp_create_listen(void *rdma_cxt,
3088			struct qed_iwarp_listen_in *iparams,
3089			struct qed_iwarp_listen_out *oparams)
3090{
3091	struct qed_hwfn *p_hwfn = rdma_cxt;
3092	struct qed_iwarp_listener *listener;
3093
3094	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3095	if (!listener)
3096		return -ENOMEM;
3097
3098	listener->ip_version = iparams->ip_version;
3099	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3100	listener->port = iparams->port;
3101	listener->vlan = iparams->vlan;
3102
3103	listener->event_cb = iparams->event_cb;
3104	listener->cb_context = iparams->cb_context;
3105	listener->max_backlog = iparams->max_backlog;
3106	oparams->handle = listener;
3107
3108	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3109	list_add_tail(&listener->list_entry,
3110		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3111	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3112
3113	DP_VERBOSE(p_hwfn,
3114		   QED_MSG_RDMA,
3115		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3116		   listener->event_cb,
3117		   listener,
3118		   listener->ip_addr[0],
3119		   listener->ip_addr[1],
3120		   listener->ip_addr[2],
3121		   listener->ip_addr[3], listener->port, listener->vlan);
3122
3123	return 0;
3124}
3125
3126int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3127{
3128	struct qed_iwarp_listener *listener = handle;
3129	struct qed_hwfn *p_hwfn = rdma_cxt;
3130
3131	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3132
3133	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3134	list_del(&listener->list_entry);
3135	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3136
3137	kfree(listener);
3138
3139	return 0;
3140}
3141
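/* Post the MPA_OFFLOAD_SEND_RTR ramrod for the ep's QP, typically used as
 * part of MPA peer-to-peer (RTR) negotiation.
 */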
3142int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3143{
3144	struct qed_hwfn *p_hwfn = rdma_cxt;
3145	struct qed_sp_init_data init_data;
3146	struct qed_spq_entry *p_ent;
3147	struct qed_iwarp_ep *ep;
3148	struct qed_rdma_qp *qp;
3149	int rc;
3150
3151	ep = iparams->ep_context;
3152	if (!ep) {
3153		DP_ERR(p_hwfn, "EP context received in send_rtr is NULL\n");
3154		return -EINVAL;
3155	}
3156
3157	qp = ep->qp;
3158
3159	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3160		   qp->icid, ep->tcp_cid);
3161
3162	memset(&init_data, 0, sizeof(init_data));
3163	init_data.cid = qp->icid;
3164	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3165	init_data.comp_mode = QED_SPQ_MODE_CB;
3166
3167	rc = qed_sp_init_request(p_hwfn, &p_ent,
3168				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3169				 PROTOCOLID_IWARP, &init_data);
3170
3171	if (rc)
3172		return rc;
3173
3174	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3175
3176	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3177
3178	return rc;
3179}
3180
3181void
3182qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3183		   struct qed_rdma_query_qp_out_params *out_params)
3184{
3185	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3186}