   1/* QLogic qed NIC Driver
   2 * Copyright (c) 2015-2017  QLogic Corporation
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *        copyright notice, this list of conditions and the following
  16 *        disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *        copyright notice, this list of conditions and the following
  20 *        disclaimer in the documentation and /or other materials
  21 *        provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#include <linux/if_ether.h>
  33#include <linux/if_vlan.h>
  34#include <linux/ip.h>
  35#include <linux/ipv6.h>
  36#include <linux/spinlock.h>
  37#include <linux/tcp.h>
  38#include "qed_cxt.h"
  39#include "qed_hw.h"
  40#include "qed_ll2.h"
  41#include "qed_rdma.h"
  42#include "qed_reg_addr.h"
  43#include "qed_sp.h"
  44#include "qed_ooo.h"
  45
  46#define QED_IWARP_ORD_DEFAULT		32
  47#define QED_IWARP_IRD_DEFAULT		32
  48#define QED_IWARP_MAX_FW_MSS		4120
  49
  50#define QED_EP_SIG 0xecabcdef
  51
  52struct mpa_v2_hdr {
  53	__be16 ird;
  54	__be16 ord;
  55};
  56
  57#define MPA_V2_PEER2PEER_MODEL  0x8000
  58#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  59#define MPA_V2_READ_RTR         0x4000	/* on ord */
  60#define MPA_V2_WRITE_RTR        0x8000
  61#define MPA_V2_IRD_ORD_MASK     0x3FFF
  62
  63#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  64
  65#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  66#define QED_IWARP_RCV_WND_SIZE_DEF	(256 * 1024)
  67#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  68#define TIMESTAMP_HEADER_SIZE		(12)
  69#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  70
  71#define QED_IWARP_TS_EN			BIT(0)
  72#define QED_IWARP_DA_EN			BIT(1)
  73#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  74#define QED_IWARP_PARAM_P2P		(1)
  75
  76#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  77#define QED_IWARP_DEF_CWND_FACTOR	(4)
  78#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  79#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  80#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  81
  82static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
  83				 u8 fw_event_code, u16 echo,
  84				 union event_ring_data *data,
  85				 u8 fw_return_code);
  86
  87/* Override devinfo with iWARP specific values */
  88void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  89{
  90	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  91
  92	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  93	dev->max_qp = min_t(u32,
  94			    IWARP_MAX_QPS,
  95			    p_hwfn->p_rdma_info->num_qps) -
  96		      QED_IWARP_PREALLOC_CNT;
  97
  98	dev->max_cq = dev->max_qp;
  99
 100	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
 101	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
 102}
 103
 104void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 105{
 106	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
 107	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 108	p_hwfn->b_rdma_enabled_in_prs = true;
 109}
 110
  111/* We have two cid maps: one for tcp, which should be used only for passive
  112 * syn processing and for replacing a pre-allocated ep in the list; the second
  113 * for active tcp connections and for QPs.
  114 */
 115static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 116{
 117	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 118
 119	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 120
 121	if (cid < QED_IWARP_PREALLOC_CNT)
 122		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 123				    cid);
 124	else
 125		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 126
 127	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 128}
 129
 130void
 131qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 132			 struct iwarp_init_func_ramrod_data *p_ramrod)
 133{
 134	p_ramrod->iwarp.ll2_ooo_q_index =
 135		RESC_START(p_hwfn, QED_LL2_QUEUE) +
 136		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 137
 138	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 139
 140	return;
 141}
 142
 143static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 144{
 145	int rc;
 146
 147	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 148	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 149	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 150	if (rc) {
 151		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 152		return rc;
 153	}
 154	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 155
 156	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 157	if (rc)
 158		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 159
 160	return rc;
 161}
 162
 163static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 164{
 165	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 166
 167	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 168	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 169	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 170}
 171
  172/* This function allocates a cid for passive tcp (called from syn receive).
  173 * It is separate from the regular cid allocation because these cids are
  174 * assured to already have ilt allocated; they are preallocated to ensure
  175 * that we won't need to allocate memory during syn processing.
  176 */
 177static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 178{
 179	int rc;
 180
 181	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 182
 183	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 184				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 185
 186	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 187
 188	if (rc) {
 189		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 190			   "can't allocate iwarp tcp cid max-count=%d\n",
 191			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 192
 193		*cid = QED_IWARP_INVALID_TCP_CID;
 194		return rc;
 195	}
 196
 197	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 198					    p_hwfn->p_rdma_info->proto);
 199	return 0;
 200}
 201
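     /* Allocate the host shared-queue page for the QP, acquire a cid and
      * post an IWARP_RAMROD_CMD_ID_CREATE_QP ramrod; on failure the cid
      * and the shared queue are released again.
      */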
 202int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 203			struct qed_rdma_qp *qp,
 204			struct qed_rdma_create_qp_out_params *out_params)
 205{
 206	struct iwarp_create_qp_ramrod_data *p_ramrod;
 207	struct qed_sp_init_data init_data;
 208	struct qed_spq_entry *p_ent;
 209	u16 physical_queue;
 210	u32 cid;
 211	int rc;
 212
 213	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 214					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 215					      &qp->shared_queue_phys_addr,
 216					      GFP_KERNEL);
 217	if (!qp->shared_queue)
 218		return -ENOMEM;
 219
 220	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 221	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 222	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 223	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 224	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 225	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 226	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 227	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 228
 229	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 230	if (rc)
 231		goto err1;
 232
 233	qp->icid = (u16)cid;
 234
 235	memset(&init_data, 0, sizeof(init_data));
 236	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 237	init_data.cid = qp->icid;
 238	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 239
 240	rc = qed_sp_init_request(p_hwfn, &p_ent,
 241				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 242				 PROTOCOLID_IWARP, &init_data);
 243	if (rc)
 244		goto err2;
 245
 246	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 247
 248	SET_FIELD(p_ramrod->flags,
 249		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 250		  qp->fmr_and_reserved_lkey);
 251
 252	SET_FIELD(p_ramrod->flags,
 253		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 254
 255	SET_FIELD(p_ramrod->flags,
 256		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 257		  qp->incoming_rdma_read_en);
 258
 259	SET_FIELD(p_ramrod->flags,
 260		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 261		  qp->incoming_rdma_write_en);
 262
 263	SET_FIELD(p_ramrod->flags,
 264		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 265		  qp->incoming_atomic_en);
 266
 267	SET_FIELD(p_ramrod->flags,
 268		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 269
 270	p_ramrod->pd = qp->pd;
 271	p_ramrod->sq_num_pages = qp->sq_num_pages;
 272	p_ramrod->rq_num_pages = qp->rq_num_pages;
 273
 274	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 275	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 276
 277	p_ramrod->cq_cid_for_sq =
 278	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 279	p_ramrod->cq_cid_for_rq =
 280	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 281
 282	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 283
 284	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 285	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 286	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 287	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 288
 289	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 290	if (rc)
 291		goto err2;
 292
 293	return rc;
 294
 295err2:
 296	qed_iwarp_cid_cleaned(p_hwfn, cid);
 297err1:
 298	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 299			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 300			  qp->shared_queue, qp->shared_queue_phys_addr);
 301
 302	return rc;
 303}
 304
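     /* Post an IWARP_RAMROD_CMD_ID_MODIFY_QP ramrod that moves the FW QP
      * to CLOSING or ERROR, depending on the currently requested
      * iwarp_state.
      */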
 305static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 306{
 307	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 308	struct qed_sp_init_data init_data;
 309	struct qed_spq_entry *p_ent;
 310	int rc;
 311
 312	/* Get SPQ entry */
 313	memset(&init_data, 0, sizeof(init_data));
 314	init_data.cid = qp->icid;
 315	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 316	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 317
 318	rc = qed_sp_init_request(p_hwfn, &p_ent,
 319				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 320				 p_hwfn->p_rdma_info->proto, &init_data);
 321	if (rc)
 322		return rc;
 323
 324	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 325	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
 326		  0x1);
 327	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 328		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 329	else
 330		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 331
 332	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 333
 334	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
 335
 336	return rc;
 337}
 338
 339enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 340{
 341	switch (state) {
 342	case QED_ROCE_QP_STATE_RESET:
 343	case QED_ROCE_QP_STATE_INIT:
 344	case QED_ROCE_QP_STATE_RTR:
 345		return QED_IWARP_QP_STATE_IDLE;
 346	case QED_ROCE_QP_STATE_RTS:
 347		return QED_IWARP_QP_STATE_RTS;
 348	case QED_ROCE_QP_STATE_SQD:
 349		return QED_IWARP_QP_STATE_CLOSING;
 350	case QED_ROCE_QP_STATE_ERR:
 351		return QED_IWARP_QP_STATE_ERROR;
 352	case QED_ROCE_QP_STATE_SQE:
 353		return QED_IWARP_QP_STATE_TERMINATE;
 354	default:
 355		return QED_IWARP_QP_STATE_ERROR;
 356	}
 357}
 358
 359static enum qed_roce_qp_state
 360qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 361{
 362	switch (state) {
 363	case QED_IWARP_QP_STATE_IDLE:
 364		return QED_ROCE_QP_STATE_INIT;
 365	case QED_IWARP_QP_STATE_RTS:
 366		return QED_ROCE_QP_STATE_RTS;
 367	case QED_IWARP_QP_STATE_TERMINATE:
 368		return QED_ROCE_QP_STATE_SQE;
 369	case QED_IWARP_QP_STATE_CLOSING:
 370		return QED_ROCE_QP_STATE_SQD;
 371	case QED_IWARP_QP_STATE_ERROR:
 372		return QED_ROCE_QP_STATE_ERR;
 373	default:
 374		return QED_ROCE_QP_STATE_ERR;
 375	}
 376}
 377
 378const char *iwarp_state_names[] = {
 379	"IDLE",
 380	"RTS",
 381	"TERMINATE",
 382	"CLOSING",
 383	"ERROR",
 384};
 385
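     /* Drive the host-side iWARP QP state machine under qp_lock; a FW
      * modify ramrod is posted only for transitions that require it and
      * that were not triggered internally.
      */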
 386int
 387qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 388		    struct qed_rdma_qp *qp,
 389		    enum qed_iwarp_qp_state new_state, bool internal)
 390{
 391	enum qed_iwarp_qp_state prev_iw_state;
 392	bool modify_fw = false;
 393	int rc = 0;
 394
  395	/* Modify QP can be called from the upper layer or as a result of an
  396	 * async RST/FIN... therefore we need to protect against races.
  397	 */
 398	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 399	prev_iw_state = qp->iwarp_state;
 400
 401	if (prev_iw_state == new_state) {
 402		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 403		return 0;
 404	}
 405
 406	switch (prev_iw_state) {
 407	case QED_IWARP_QP_STATE_IDLE:
 408		switch (new_state) {
 409		case QED_IWARP_QP_STATE_RTS:
 410			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 411			break;
 412		case QED_IWARP_QP_STATE_ERROR:
 413			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 414			if (!internal)
 415				modify_fw = true;
 416			break;
 417		default:
 418			break;
 419		}
 420		break;
 421	case QED_IWARP_QP_STATE_RTS:
 422		switch (new_state) {
 423		case QED_IWARP_QP_STATE_CLOSING:
 424			if (!internal)
 425				modify_fw = true;
 426
 427			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 428			break;
 429		case QED_IWARP_QP_STATE_ERROR:
 430			if (!internal)
 431				modify_fw = true;
 432			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 433			break;
 434		default:
 435			break;
 436		}
 437		break;
 438	case QED_IWARP_QP_STATE_ERROR:
 439		switch (new_state) {
 440		case QED_IWARP_QP_STATE_IDLE:
 441
 442			qp->iwarp_state = new_state;
 443			break;
 444		case QED_IWARP_QP_STATE_CLOSING:
 445			/* could happen due to race... do nothing.... */
 446			break;
 447		default:
 448			rc = -EINVAL;
 449		}
 450		break;
 451	case QED_IWARP_QP_STATE_TERMINATE:
 452	case QED_IWARP_QP_STATE_CLOSING:
 453		qp->iwarp_state = new_state;
 454		break;
 455	default:
 456		break;
 457	}
 458
 459	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 460		   qp->icid,
 461		   iwarp_state_names[prev_iw_state],
 462		   iwarp_state_names[qp->iwarp_state],
 463		   internal ? "internal" : "");
 464
 465	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 466
 467	if (modify_fw)
 468		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 469
 470	return rc;
 471}
 472
 473int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 474{
 475	struct qed_sp_init_data init_data;
 476	struct qed_spq_entry *p_ent;
 477	int rc;
 478
 479	/* Get SPQ entry */
 480	memset(&init_data, 0, sizeof(init_data));
 481	init_data.cid = qp->icid;
 482	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 483	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 484
 485	rc = qed_sp_init_request(p_hwfn, &p_ent,
 486				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 487				 p_hwfn->p_rdma_info->proto, &init_data);
 488	if (rc)
 489		return rc;
 490
 491	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 492
 493	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 494
 495	return rc;
 496}
 497
 498static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 499				 struct qed_iwarp_ep *ep,
 500				 bool remove_from_active_list)
 501{
 502	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 503			  sizeof(*ep->ep_buffer_virt),
 504			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 505
 506	if (remove_from_active_list) {
 507		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 508		list_del(&ep->list_entry);
 509		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 510	}
 511
 512	if (ep->qp)
 513		ep->qp->ep = NULL;
 514
 515	kfree(ep);
 516}
 517
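     /* Move the QP to ERROR if needed, wait (up to ~20 seconds) for the
      * ep to reach CLOSED, then destroy the FW QP and free the shared
      * queue.
      */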
 518int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 519{
 520	struct qed_iwarp_ep *ep = qp->ep;
 521	int wait_count = 0;
 522	int rc = 0;
 523
 524	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 525		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 526					 QED_IWARP_QP_STATE_ERROR, false);
 527		if (rc)
 528			return rc;
 529	}
 530
 531	/* Make sure ep is closed before returning and freeing memory. */
 532	if (ep) {
 533		while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
 534			msleep(100);
 535
 536		if (ep->state != QED_IWARP_EP_CLOSED)
 537			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 538				  ep->state);
 539
 540		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 541	}
 542
 543	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 544
 545	if (qp->shared_queue)
 546		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 547				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 548				  qp->shared_queue, qp->shared_queue_phys_addr);
 549
 550	return rc;
 551}
 552
 553static int
 554qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 555{
 556	struct qed_iwarp_ep *ep;
 557	int rc;
 558
 559	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 560	if (!ep)
 561		return -ENOMEM;
 562
 563	ep->state = QED_IWARP_EP_INIT;
 564
 565	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 566						sizeof(*ep->ep_buffer_virt),
 567						&ep->ep_buffer_phys,
 568						GFP_KERNEL);
 569	if (!ep->ep_buffer_virt) {
 570		rc = -ENOMEM;
 571		goto err;
 572	}
 573
 574	ep->sig = QED_EP_SIG;
 575
 576	*ep_out = ep;
 577
 578	return 0;
 579
 580err:
 581	kfree(ep);
 582	return rc;
 583}
 584
 585static void
 586qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 587			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 588{
 589	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 590		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 591		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 592		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 593		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 594		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 595		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 596
 597	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 598		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 599			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 600			   p_tcp_ramrod->tcp.local_ip,
 601			   p_tcp_ramrod->tcp.local_port,
 602			   p_tcp_ramrod->tcp.remote_ip,
 603			   p_tcp_ramrod->tcp.remote_port,
 604			   p_tcp_ramrod->tcp.vlan_id);
 605	} else {
 606		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 607			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 608			   p_tcp_ramrod->tcp.local_ip,
 609			   p_tcp_ramrod->tcp.local_port,
 610			   p_tcp_ramrod->tcp.remote_ip,
 611			   p_tcp_ramrod->tcp.remote_port,
 612			   p_tcp_ramrod->tcp.vlan_id);
 613	}
 614
 615	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 616		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 617		   p_tcp_ramrod->tcp.flow_label,
 618		   p_tcp_ramrod->tcp.ttl,
 619		   p_tcp_ramrod->tcp.tos_or_tc,
 620		   p_tcp_ramrod->tcp.mss,
 621		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 622		   p_tcp_ramrod->tcp.connect_mode,
 623		   p_tcp_ramrod->tcp.flags);
 624
 625	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 626		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 627		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 628		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 629}
 630
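     /* Build and post the IWARP_RAMROD_CMD_ID_TCP_OFFLOAD ramrod for this
      * ep: ULP/async buffers, MAC/IP/port tuple, MSS, keepalive and cwnd
      * defaults. Passive connections complete via callback, active ones
      * block on the SPQ entry.
      */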
 631static int
 632qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 633{
 634	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 635	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 636	struct tcp_offload_params_opt2 *tcp;
 637	struct qed_sp_init_data init_data;
 638	struct qed_spq_entry *p_ent;
 639	dma_addr_t async_output_phys;
 640	dma_addr_t in_pdata_phys;
 641	u16 physical_q;
 642	u8 tcp_flags;
 643	int rc;
 644	int i;
 645
 646	memset(&init_data, 0, sizeof(init_data));
 647	init_data.cid = ep->tcp_cid;
 648	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 649	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 650		init_data.comp_mode = QED_SPQ_MODE_CB;
 651	else
 652		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 653
 654	rc = qed_sp_init_request(p_hwfn, &p_ent,
 655				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 656				 PROTOCOLID_IWARP, &init_data);
 657	if (rc)
 658		return rc;
 659
 660	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 661
 662	in_pdata_phys = ep->ep_buffer_phys +
 663			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 664	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 665		       in_pdata_phys);
 666
 667	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 668	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 669
 670	async_output_phys = ep->ep_buffer_phys +
 671			    offsetof(struct qed_iwarp_ep_memory, async_output);
 672	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 673		       async_output_phys);
 674
 675	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 676	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 677
 678	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 679	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 680	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 681	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 682	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 683
 684	tcp = &p_tcp_ramrod->tcp;
 685	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 686			    &tcp->remote_mac_addr_mid,
 687			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 688	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 689			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 690
 691	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 692
 693	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 694	tcp->flags = 0;
 695	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 696		  !!(tcp_flags & QED_IWARP_TS_EN));
 697
 698	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 699		  !!(tcp_flags & QED_IWARP_DA_EN));
 700
 701	tcp->ip_version = ep->cm_info.ip_version;
 702
 703	for (i = 0; i < 4; i++) {
 704		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 705		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 706	}
 707
 708	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 709	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 710	tcp->mss = cpu_to_le16(ep->mss);
 711	tcp->flow_label = 0;
 712	tcp->ttl = 0x40;
 713	tcp->tos_or_tc = 0;
 714
 715	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
 716	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR *  tcp->mss;
 717	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 718	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
 719	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
 720
 721	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 722	tcp->connect_mode = ep->connect_mode;
 723
 724	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 725		tcp->syn_ip_payload_length =
 726			cpu_to_le16(ep->syn_ip_payload_length);
 727		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 728		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 729	}
 730
 731	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 732
 733	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 734
 735	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 736		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 737
 738	return rc;
 739}
 740
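     /* Handle an incoming MPA request on the passive side: parse the MPA
      * v2 header (if negotiated) for ord/ird and RTR types, strip it from
      * the private data and report QED_IWARP_EVENT_MPA_REQUEST to the
      * upper layer.
      */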
 741static void
 742qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 743{
 744	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 745	struct qed_iwarp_cm_event_params params;
 746	struct mpa_v2_hdr *mpa_v2;
 747	union async_output *async_data;
 748	u16 mpa_ord, mpa_ird;
 749	u8 mpa_hdr_size = 0;
 750	u8 mpa_rev;
 751
 752	async_data = &ep->ep_buffer_virt->async_output;
 753
 754	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 755	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 756		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 757		   async_data->mpa_request.ulp_data_len,
 758		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 759
 760	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 761		/* Read ord/ird values from private data buffer */
 762		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 763		mpa_hdr_size = sizeof(*mpa_v2);
 764
 765		mpa_ord = ntohs(mpa_v2->ord);
 766		mpa_ird = ntohs(mpa_v2->ird);
 767
  768		/* Temporarily store the requested incoming ord/ird in cm_info;
  769		 * they are replaced with the negotiated values during accept.
  770		 */
 771		ep->cm_info.ord = (u8)min_t(u16,
 772					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 773					    QED_IWARP_ORD_DEFAULT);
 774
 775		ep->cm_info.ird = (u8)min_t(u16,
 776					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 777					    QED_IWARP_IRD_DEFAULT);
 778
 779		/* Peer2Peer negotiation */
 780		ep->rtr_type = MPA_RTR_TYPE_NONE;
 781		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 782			if (mpa_ord & MPA_V2_WRITE_RTR)
 783				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 784
 785			if (mpa_ord & MPA_V2_READ_RTR)
 786				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 787
 788			if (mpa_ird & MPA_V2_SEND_RTR)
 789				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 790
 791			ep->rtr_type &= iwarp_info->rtr_type;
 792
 793			/* if we're left with no match send our capabilities */
 794			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 795				ep->rtr_type = iwarp_info->rtr_type;
 796		}
 797
 798		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 799	} else {
 800		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 801		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 802		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 803	}
 804
 805	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 806		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 807		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 808		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 809
 810	/* Strip mpa v2 hdr from private data before sending to upper layer */
 811	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 812
 813	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
 814				       mpa_hdr_size;
 815
 816	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 817	params.cm_info = &ep->cm_info;
 818	params.ep_context = ep;
 819	params.status = 0;
 820
 821	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 822	ep->event_cb(ep->cb_context, &params);
 823}
 824
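     /* Post the IWARP_RAMROD_CMD_ID_MPA_OFFLOAD ramrod carrying the
      * (possibly MPA v2) private data; a NULL qp on the ep means the
      * connection is being rejected.
      */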
 825static int
 826qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 827{
 828	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 829	struct qed_iwarp_info *iwarp_info;
 830	struct qed_sp_init_data init_data;
 831	dma_addr_t async_output_phys;
 832	struct qed_spq_entry *p_ent;
 833	dma_addr_t out_pdata_phys;
 834	dma_addr_t in_pdata_phys;
 835	struct qed_rdma_qp *qp;
 836	bool reject;
 837	int rc;
 838
 839	if (!ep)
 840		return -EINVAL;
 841
 842	qp = ep->qp;
 843	reject = !qp;
 844
 845	memset(&init_data, 0, sizeof(init_data));
 846	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 847	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 848
 849	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 850		init_data.comp_mode = QED_SPQ_MODE_CB;
 851	else
 852		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 853
 854	rc = qed_sp_init_request(p_hwfn, &p_ent,
 855				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 856				 PROTOCOLID_IWARP, &init_data);
 857	if (rc)
 858		return rc;
 859
 860	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 861	out_pdata_phys = ep->ep_buffer_phys +
 862			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 863	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
 864		       out_pdata_phys);
 865	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
 866	    ep->cm_info.private_data_len;
 867	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 868
 869	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
 870	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
 871
 872	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 873
 874	in_pdata_phys = ep->ep_buffer_phys +
 875			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 876	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 877	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 878		       in_pdata_phys);
 879	p_mpa_ramrod->incoming_ulp_buffer.len =
 880	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 881	async_output_phys = ep->ep_buffer_phys +
 882			    offsetof(struct qed_iwarp_ep_memory, async_output);
 883	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 884		       async_output_phys);
 885	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 886	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 887
 888	if (!reject) {
 889		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 890			       qp->shared_queue_phys_addr);
 891		p_mpa_ramrod->stats_counter_id =
 892		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 893	} else {
 894		p_mpa_ramrod->common.reject = 1;
 895	}
 896
 897	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 898	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 899	p_mpa_ramrod->mode = ep->mpa_rev;
 900	SET_FIELD(p_mpa_ramrod->rtr_pref,
 901		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 902
 903	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 904	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 905	if (!reject)
 906		ep->cid = qp->icid;	/* Now they're migrated. */
 907
 908	DP_VERBOSE(p_hwfn,
 909		   QED_MSG_RDMA,
 910		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 911		   reject ? 0xffff : qp->icid,
 912		   ep->tcp_cid,
 913		   rc,
 914		   ep->cm_info.ird,
 915		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 916	return rc;
 917}
 918
 919static void
 920qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 921{
 922	ep->state = QED_IWARP_EP_INIT;
 923	if (ep->qp)
 924		ep->qp->ep = NULL;
 925	ep->qp = NULL;
 926	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 927
 928	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
  929		/* We don't care about the return code; it's ok if tcp_cid
  930		 * remains invalid... in this case we'll defer allocation.
  931		 */
 932		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 933	}
 934	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 935
 936	list_del(&ep->list_entry);
 937	list_add_tail(&ep->list_entry,
 938		      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 939
 940	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 941}
 942
 943void
 944qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 945{
 946	struct mpa_v2_hdr *mpa_v2_params;
 947	union async_output *async_data;
 948	u16 mpa_ird, mpa_ord;
 949	u8 mpa_data_size = 0;
 950
 951	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 952		mpa_v2_params =
 953			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 954		mpa_data_size = sizeof(*mpa_v2_params);
 955		mpa_ird = ntohs(mpa_v2_params->ird);
 956		mpa_ord = ntohs(mpa_v2_params->ord);
 957
 958		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 959		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 960	}
 961	async_data = &ep->ep_buffer_virt->async_output;
 962
 963	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 964	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
 965				       mpa_data_size;
 966}
 967
 968void
 969qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 970{
 971	struct qed_iwarp_cm_event_params params;
 972
 973	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 974		DP_NOTICE(p_hwfn,
 975			  "MPA reply event not expected on passive side!\n");
 976		return;
 977	}
 978
 979	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 980
 981	qed_iwarp_parse_private_data(p_hwfn, ep);
 982
 983	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 984		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 985		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 986
 987	params.cm_info = &ep->cm_info;
 988	params.ep_context = ep;
 989	params.status = 0;
 990
 991	ep->mpa_reply_processed = true;
 992
 993	ep->event_cb(ep->cb_context, &params);
 994}
 995
 996#define QED_IWARP_CONNECT_MODE_STRING(ep) \
 997	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
 998
 999/* Called as a result of the event:
1000 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1001 */
1002static void
1003qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1004		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1005{
1006	struct qed_iwarp_cm_event_params params;
1007
1008	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1009		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1010	else
1011		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1012
1013	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1014		qed_iwarp_parse_private_data(p_hwfn, ep);
1015
1016	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1017		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1018		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1019
1020	params.cm_info = &ep->cm_info;
1021
1022	params.ep_context = ep;
1023
1024	ep->state = QED_IWARP_EP_CLOSED;
1025
1026	switch (fw_return_code) {
1027	case RDMA_RETURN_OK:
1028		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1029		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1030		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1031		ep->state = QED_IWARP_EP_ESTABLISHED;
1032		params.status = 0;
1033		break;
1034	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1035		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1036			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1037		params.status = -EBUSY;
1038		break;
1039	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1040		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1041			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1042		params.status = -ECONNREFUSED;
1043		break;
1044	case IWARP_CONN_ERROR_MPA_RST:
1045		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1046			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1047			  ep->tcp_cid);
1048		params.status = -ECONNRESET;
1049		break;
1050	case IWARP_CONN_ERROR_MPA_FIN:
1051		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1052			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1053		params.status = -ECONNREFUSED;
1054		break;
1055	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1056		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1057			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1058		params.status = -ECONNREFUSED;
1059		break;
1060	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1061		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1062			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1063		params.status = -ECONNREFUSED;
1064		break;
1065	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1066		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1067			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1068		params.status = -ECONNREFUSED;
1069		break;
1070	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1071		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1072			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1073		params.status = -ECONNREFUSED;
1074		break;
1075	case IWARP_CONN_ERROR_MPA_TERMINATE:
1076		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1077			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1078		params.status = -ECONNREFUSED;
1079		break;
1080	default:
1081		params.status = -ECONNRESET;
1082		break;
1083	}
1084
1085	ep->event_cb(ep->cb_context, &params);
1086
 1087	/* On the passive side, if there is no associated QP (REJECT) we need to
 1088	 * return the ep to the pool (in the regular case we add an element in
 1089	 * accept instead of this one).
 1090	 * In both cases we need to remove it from the ep_list.
 1091	 */
1092	if (fw_return_code != RDMA_RETURN_OK) {
1093		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1094		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1095		    (!ep->qp)) {	/* Rejected */
1096			qed_iwarp_return_ep(p_hwfn, ep);
1097		} else {
1098			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1099			list_del(&ep->list_entry);
1100			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1101		}
1102	}
1103}
1104
1105static void
1106qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1107			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1108{
1109	struct mpa_v2_hdr *mpa_v2_params;
1110	u16 mpa_ird, mpa_ord;
1111
1112	*mpa_data_size = 0;
1113	if (MPA_REV2(ep->mpa_rev)) {
1114		mpa_v2_params =
1115		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1116		*mpa_data_size = sizeof(*mpa_v2_params);
1117
1118		mpa_ird = (u16)ep->cm_info.ird;
1119		mpa_ord = (u16)ep->cm_info.ord;
1120
1121		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1122			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1123
1124			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1125				mpa_ird |= MPA_V2_SEND_RTR;
1126
1127			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1128				mpa_ord |= MPA_V2_WRITE_RTR;
1129
1130			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1131				mpa_ord |= MPA_V2_READ_RTR;
1132		}
1133
1134		mpa_v2_params->ird = htons(mpa_ird);
1135		mpa_v2_params->ord = htons(mpa_ord);
1136
1137		DP_VERBOSE(p_hwfn,
1138			   QED_MSG_RDMA,
1139			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1140			   mpa_v2_params->ird,
1141			   mpa_v2_params->ord,
1142			   *((u32 *)mpa_v2_params),
1143			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1144			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1145			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1146			   !!(mpa_ird & MPA_V2_SEND_RTR),
1147			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1148			   !!(mpa_ord & MPA_V2_READ_RTR));
1149	}
1150}
1151
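     /* Active-side connect: validate ord/ird, allocate a cid and an ep,
      * copy cm_info and MAC addresses, prepare the MPA v2 private data
      * and start the TCP offload.
      */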
1152int qed_iwarp_connect(void *rdma_cxt,
1153		      struct qed_iwarp_connect_in *iparams,
1154		      struct qed_iwarp_connect_out *oparams)
1155{
1156	struct qed_hwfn *p_hwfn = rdma_cxt;
1157	struct qed_iwarp_info *iwarp_info;
1158	struct qed_iwarp_ep *ep;
1159	u8 mpa_data_size = 0;
1160	u8 ts_hdr_size = 0;
1161	u32 cid;
1162	int rc;
1163
1164	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1165	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1166		DP_NOTICE(p_hwfn,
1167			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1168			  iparams->qp->icid, iparams->cm_info.ord,
1169			  iparams->cm_info.ird);
1170
1171		return -EINVAL;
1172	}
1173
1174	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1175
1176	/* Allocate ep object */
1177	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1178	if (rc)
1179		return rc;
1180
1181	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1182	if (rc)
1183		goto err;
1184
1185	ep->tcp_cid = cid;
1186
1187	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1188	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1189	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1190
1191	ep->qp = iparams->qp;
1192	ep->qp->ep = ep;
1193	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1194	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1195	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1196
1197	ep->cm_info.ord = iparams->cm_info.ord;
1198	ep->cm_info.ird = iparams->cm_info.ird;
1199
1200	ep->rtr_type = iwarp_info->rtr_type;
1201	if (!iwarp_info->peer2peer)
1202		ep->rtr_type = MPA_RTR_TYPE_NONE;
1203
1204	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1205		ep->cm_info.ord = 1;
1206
1207	ep->mpa_rev = iwarp_info->mpa_rev;
1208
1209	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1210
1211	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1212	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1213				       mpa_data_size;
1214
1215	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1216	       iparams->cm_info.private_data,
1217	       iparams->cm_info.private_data_len);
1218
1219	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
1220		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
1221
1222	ep->mss = iparams->mss - ts_hdr_size;
1223	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1224
1225	ep->event_cb = iparams->event_cb;
1226	ep->cb_context = iparams->cb_context;
1227	ep->connect_mode = TCP_CONNECT_ACTIVE;
1228
1229	oparams->ep_context = ep;
1230
1231	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1232
1233	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1234		   iparams->qp->icid, ep->tcp_cid, rc);
1235
1236	if (rc) {
1237		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1238		goto err;
1239	}
1240
1241	return rc;
1242err:
1243	qed_iwarp_cid_cleaned(p_hwfn, cid);
1244
1245	return rc;
1246}
1247
1248static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1249{
1250	struct qed_iwarp_ep *ep = NULL;
1251	int rc;
1252
1253	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1254
1255	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1256		DP_ERR(p_hwfn, "Ep list is empty\n");
1257		goto out;
1258	}
1259
1260	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1261			      struct qed_iwarp_ep, list_entry);
1262
 1263	/* In some cases we could have failed to allocate a tcp cid when the ep
 1264	 * was added from accept / failure... retry now. This is not the common case.
 1265	 */
1266	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1267		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1268
1269		/* if we fail we could look for another entry with a valid
1270		 * tcp_cid, but since we don't expect to reach this anyway
1271		 * it's not worth the handling
1272		 */
1273		if (rc) {
1274			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1275			ep = NULL;
1276			goto out;
1277		}
1278	}
1279
1280	list_del(&ep->list_entry);
1281
1282out:
1283	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1284	return ep;
1285}
1286
1287#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1288#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1289
 1290/* This function waits for all the bits of a bmap to be cleared; as long as
 1291 * there is progress (i.e. the number of bits left to be cleared decreases)
 1292 * the function keeps waiting.
 1293 */
1294static int
1295qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1296{
1297	int prev_weight = 0;
1298	int wait_count = 0;
1299	int weight = 0;
1300
1301	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1302	prev_weight = weight;
1303
1304	while (weight) {
1305		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1306
1307		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1308
1309		if (prev_weight == weight) {
1310			wait_count++;
1311		} else {
1312			prev_weight = weight;
1313			wait_count = 0;
1314		}
1315
1316		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1317			DP_NOTICE(p_hwfn,
1318				  "%s bitmap wait timed out (%d cids pending)\n",
1319				  bmap->name, weight);
1320			return -EBUSY;
1321		}
1322	}
1323	return 0;
1324}
1325
1326static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1327{
1328	int rc;
1329	int i;
1330
1331	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1332					    &p_hwfn->p_rdma_info->tcp_cid_map);
1333	if (rc)
1334		return rc;
1335
1336	/* Now free the tcp cids from the main cid map */
1337	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1338		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1339
1340	/* Now wait for all cids to be completed */
1341	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1342					      &p_hwfn->p_rdma_info->cid_map);
1343}
1344
1345static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1346{
1347	struct qed_iwarp_ep *ep;
1348
1349	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1350		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1351
1352		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1353				      struct qed_iwarp_ep, list_entry);
1354
1355		if (!ep) {
1356			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1357			break;
1358		}
1359		list_del(&ep->list_entry);
1360
1361		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362
1363		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1364			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1365
1366		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1367	}
1368}
1369
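     /* Pre-allocate ep objects onto ep_free_list. During init,
      * QED_IWARP_PREALLOC_CNT entries are created with cids taken from
      * the main cid map; afterwards a single ep is added with a cid from
      * the tcp cid map (allocation of which may be deferred).
      */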
1370static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1371{
1372	struct qed_iwarp_ep *ep;
1373	int rc = 0;
1374	int count;
1375	u32 cid;
1376	int i;
1377
1378	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1379	for (i = 0; i < count; i++) {
1380		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1381		if (rc)
1382			return rc;
1383
1384		/* During initialization we allocate from the main pool,
1385		 * afterwards we allocate only from the tcp_cid.
1386		 */
1387		if (init) {
1388			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1389			if (rc)
1390				goto err;
1391			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1392		} else {
 1393			/* We don't care about the return code; it's ok if
 1394			 * tcp_cid remains invalid... in this case we'll
 1395			 * defer allocation.
 1396			 */
1397			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1398		}
1399
1400		ep->tcp_cid = cid;
1401
1402		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1403		list_add_tail(&ep->list_entry,
1404			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1405		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1406	}
1407
1408	return rc;
1409
1410err:
1411	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1412
1413	return rc;
1414}
1415
1416int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1417{
1418	int rc;
1419
 1420	/* Allocate a bitmap for tcp cids. These are used by the passive side
 1421	 * to ensure that, during dpc, it can allocate a tcp cid that was
 1422	 * pre-acquired and doesn't require dynamic allocation of ilt.
 1423	 */
1424	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1425				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1426	if (rc) {
1427		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1428			   "Failed to allocate tcp cid, rc = %d\n", rc);
1429		return rc;
1430	}
1431
1432	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1433	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1434
1435	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1436	if (rc)
1437		return rc;
1438
1439	return qed_ooo_alloc(p_hwfn);
1440}
1441
1442void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1443{
1444	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1445
1446	qed_ooo_free(p_hwfn);
1447	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1448	kfree(iwarp_info->mpa_bufs);
1449	kfree(iwarp_info->partial_fpdus);
1450	kfree(iwarp_info->mpa_intermediate_buf);
1451}
1452
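     /* Passive-side accept: negotiate ord/ird against the values received
      * in the MPA request, build the reply private data and post the MPA
      * offload ramrod.
      */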
1453int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1454{
1455	struct qed_hwfn *p_hwfn = rdma_cxt;
1456	struct qed_iwarp_ep *ep;
1457	u8 mpa_data_size = 0;
1458	int rc;
1459
1460	ep = iparams->ep_context;
1461	if (!ep) {
 1462		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
1463		return -EINVAL;
1464	}
1465
1466	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1467		   iparams->qp->icid, ep->tcp_cid);
1468
1469	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1470	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1471		DP_VERBOSE(p_hwfn,
1472			   QED_MSG_RDMA,
1473			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1474			   iparams->qp->icid,
 1475			   ep->tcp_cid, iparams->ord, iparams->ird);
1476		return -EINVAL;
1477	}
1478
1479	qed_iwarp_prealloc_ep(p_hwfn, false);
1480
1481	ep->cb_context = iparams->cb_context;
1482	ep->qp = iparams->qp;
1483	ep->qp->ep = ep;
1484
1485	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
1486		/* Negotiate ord/ird: if upperlayer requested ord larger than
1487		 * ird advertised by remote, we need to decrease our ord
1488		 */
1489		if (iparams->ord > ep->cm_info.ird)
1490			iparams->ord = ep->cm_info.ird;
1491
1492		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1493		    (iparams->ird == 0))
1494			iparams->ird = 1;
1495	}
1496
1497	/* Update cm_info ord/ird to be negotiated values */
1498	ep->cm_info.ord = iparams->ord;
1499	ep->cm_info.ird = iparams->ird;
1500
1501	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1502
1503	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1504	ep->cm_info.private_data_len = iparams->private_data_len +
1505				       mpa_data_size;
1506
1507	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1508	       iparams->private_data, iparams->private_data_len);
1509
1510	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1511	if (rc)
1512		qed_iwarp_modify_qp(p_hwfn,
1513				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1514
1515	return rc;
1516}
1517
1518int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1519{
1520	struct qed_hwfn *p_hwfn = rdma_cxt;
1521	struct qed_iwarp_ep *ep;
1522	u8 mpa_data_size = 0;
1523
1524	ep = iparams->ep_context;
1525	if (!ep) {
 1526		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
1527		return -EINVAL;
1528	}
1529
1530	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1531
1532	ep->cb_context = iparams->cb_context;
1533	ep->qp = NULL;
1534
1535	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1536
1537	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1538	ep->cm_info.private_data_len = iparams->private_data_len +
1539				       mpa_data_size;
1540
1541	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1542	       iparams->private_data, iparams->private_data_len);
1543
1544	return qed_iwarp_mpa_offload(p_hwfn, ep);
1545}
1546
1547static void
1548qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1549			struct qed_iwarp_cm_info *cm_info)
1550{
1551	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1552		   cm_info->ip_version);
1553
1554	if (cm_info->ip_version == QED_TCP_IPV4)
1555		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1556			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1557			   cm_info->remote_ip, cm_info->remote_port,
1558			   cm_info->local_ip, cm_info->local_port,
1559			   cm_info->vlan);
1560	else
1561		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1562			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1563			   cm_info->remote_ip, cm_info->remote_port,
1564			   cm_info->local_ip, cm_info->local_port,
1565			   cm_info->vlan);
1566
1567	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1568		   "private_data_len = %x ord = %d, ird = %d\n",
1569		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1570}
1571
1572static int
1573qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1574		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1575{
1576	int rc;
1577
1578	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1579				    (u16)buf->buff_size, buf, 1);
1580	if (rc) {
1581		DP_NOTICE(p_hwfn,
1582			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1583			  rc, handle);
1584		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1585				  buf->data, buf->data_phys_addr);
1586		kfree(buf);
1587	}
1588
1589	return rc;
1590}
1591
1592static bool
1593qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1594{
1595	struct qed_iwarp_ep *ep = NULL;
1596	bool found = false;
1597
1598	list_for_each_entry(ep,
1599			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1600			    list_entry) {
1601		if ((ep->cm_info.local_port == cm_info->local_port) &&
1602		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1603		    (ep->cm_info.vlan == cm_info->vlan) &&
1604		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1605			    sizeof(cm_info->local_ip)) &&
1606		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1607			    sizeof(cm_info->remote_ip))) {
1608			found = true;
1609			break;
1610		}
1611	}
1612
1613	if (found) {
1614		DP_NOTICE(p_hwfn,
1615			  "SYN received on active connection - dropping\n");
1616		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1617
1618		return true;
1619	}
1620
1621	return false;
1622}
1623
1624static struct qed_iwarp_listener *
1625qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1626		       struct qed_iwarp_cm_info *cm_info)
1627{
1628	struct qed_iwarp_listener *listener = NULL;
1629	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1630	bool found = false;
1631
1632	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1633
1634	list_for_each_entry(listener,
1635			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1636			    list_entry) {
1637		if (listener->port == cm_info->local_port) {
1638			if (!memcmp(listener->ip_addr,
1639				    ip_zero, sizeof(ip_zero))) {
1640				found = true;
1641				break;
1642			}
1643
1644			if (!memcmp(listener->ip_addr,
1645				    cm_info->local_ip,
1646				    sizeof(cm_info->local_ip)) &&
1647			    (listener->vlan == cm_info->vlan)) {
1648				found = true;
1649				break;
1650			}
1651		}
1652	}
1653
1654	if (found) {
1655		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1656			   listener);
1657		return listener;
1658	}
1659
1660	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1661	return NULL;
1662}
1663
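     /* Parse an LL2 SYN packet: extract the MAC addresses, optional VLAN
      * tag, IPv4/IPv6 addresses and TCP ports into cm_info, and return
      * the TCP payload length and the offset of the TCP header.
      */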
1664static int
1665qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1666		       struct qed_iwarp_cm_info *cm_info,
1667		       void *buf,
1668		       u8 *remote_mac_addr,
1669		       u8 *local_mac_addr,
1670		       int *payload_len, int *tcp_start_offset)
1671{
1672	struct vlan_ethhdr *vethh;
1673	bool vlan_valid = false;
1674	struct ipv6hdr *ip6h;
1675	struct ethhdr *ethh;
1676	struct tcphdr *tcph;
1677	struct iphdr *iph;
1678	int eth_hlen;
1679	int ip_hlen;
1680	int eth_type;
1681	int i;
1682
1683	ethh = buf;
1684	eth_type = ntohs(ethh->h_proto);
1685	if (eth_type == ETH_P_8021Q) {
1686		vlan_valid = true;
1687		vethh = (struct vlan_ethhdr *)ethh;
1688		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1689		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1690	}
1691
1692	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1693
1694	ether_addr_copy(remote_mac_addr, ethh->h_source);
1695	ether_addr_copy(local_mac_addr, ethh->h_dest);
1696
1697	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1698		   eth_type, ethh->h_source);
1699
1700	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1701		   eth_hlen, ethh->h_dest);
1702
1703	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1704
1705	if (eth_type == ETH_P_IP) {
1706		if (iph->protocol != IPPROTO_TCP) {
1707			DP_NOTICE(p_hwfn,
1708				  "Unexpected ip protocol on ll2 %x\n",
1709				  iph->protocol);
1710			return -EINVAL;
1711		}
1712
1713		cm_info->local_ip[0] = ntohl(iph->daddr);
1714		cm_info->remote_ip[0] = ntohl(iph->saddr);
1715		cm_info->ip_version = TCP_IPV4;
1716
1717		ip_hlen = (iph->ihl) * sizeof(u32);
1718		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1719	} else if (eth_type == ETH_P_IPV6) {
1720		ip6h = (struct ipv6hdr *)iph;
1721
1722		if (ip6h->nexthdr != IPPROTO_TCP) {
1723			DP_NOTICE(p_hwfn,
1724				  "Unexpected ip protocol on ll2 %x\n",
 1725				  ip6h->nexthdr);
1726			return -EINVAL;
1727		}
1728
1729		for (i = 0; i < 4; i++) {
1730			cm_info->local_ip[i] =
1731			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1732			cm_info->remote_ip[i] =
1733			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1734		}
1735		cm_info->ip_version = TCP_IPV6;
1736
1737		ip_hlen = sizeof(*ip6h);
1738		*payload_len = ntohs(ip6h->payload_len);
1739	} else {
1740		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1741		return -EINVAL;
1742	}
1743
1744	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1745
1746	if (!tcph->syn) {
1747		DP_NOTICE(p_hwfn,
1748			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1749			  iph->ihl, tcph->source, tcph->dest);
1750		return -EINVAL;
1751	}
1752
1753	cm_info->local_port = ntohs(tcph->dest);
1754	cm_info->remote_port = ntohs(tcph->source);
1755
1756	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1757
1758	*tcp_start_offset = eth_hlen + ip_hlen;
1759
1760	return 0;
1761}
1762
1763static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1764						      u16 cid)
1765{
1766	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1767	struct qed_iwarp_fpdu *partial_fpdu;
1768	u32 idx;
1769
1770	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1771	if (idx >= iwarp_info->max_num_partial_fpdus) {
1772		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1773		       iwarp_info->max_num_partial_fpdus);
1774		return NULL;
1775	}
1776
1777	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1778
1779	return partial_fpdu;
1780}
1781
1782enum qed_iwarp_mpa_pkt_type {
1783	QED_IWARP_MPA_PKT_PACKED,
1784	QED_IWARP_MPA_PKT_PARTIAL,
1785	QED_IWARP_MPA_PKT_UNALIGNED
1786};
1787
1788#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1789#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1790#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1791
1792/* Pad to multiple of 4 */
1793#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1794#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1795	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1796					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1797					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
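
/* Worked example (illustrative): for an MPA payload length of 21 bytes,
 * QED_IWARP_FPDU_LEN_WITH_PAD(21) = ALIGN(21 + 2, 4) + 4 = 24 + 4 = 28, i.e.
 * the 2-byte length field plus payload, padded to a 4-byte boundary, followed
 * by the 4-byte CRC32 digest.
 */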
1798
1799/* fpdu can be fragmented over a maximum of 3 bds: header, partial mpa, unaligned */
1800#define QED_IWARP_MAX_BDS_PER_FPDU 3
1801
1802static const char * const pkt_type_str[] = {
1803	"QED_IWARP_MPA_PKT_PACKED",
1804	"QED_IWARP_MPA_PKT_PARTIAL",
1805	"QED_IWARP_MPA_PKT_UNALIGNED"
1806};
1807
1808static int
1809qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1810		      struct qed_iwarp_fpdu *fpdu,
1811		      struct qed_iwarp_ll2_buff *buf);
1812
1813static enum qed_iwarp_mpa_pkt_type
1814qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1815		       struct qed_iwarp_fpdu *fpdu,
1816		       u16 tcp_payload_len, u8 *mpa_data)
1817{
1818	enum qed_iwarp_mpa_pkt_type pkt_type;
1819	u16 mpa_len;
1820
1821	if (fpdu->incomplete_bytes) {
1822		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1823		goto out;
1824	}
1825
1826	/* special case of one byte remaining...
1827	 * lower byte will be read in the next packet
1828	 */
1829	if (tcp_payload_len == 1) {
1830		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1831		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1832		goto out;
1833	}
1834
1835	mpa_len = ntohs(*((u16 *)(mpa_data)));
1836	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1837
1838	if (fpdu->fpdu_length <= tcp_payload_len)
1839		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1840	else
1841		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1842
1843out:
1844	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1845		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1846		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1847
1848	return pkt_type;
1849}
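
/* Classification sketch (informal, added for clarity): once fpdu_length is
 * known, fpdu_length <= tcp_payload_len yields PACKED (the whole FPDU is in
 * this segment) and fpdu_length > tcp_payload_len yields PARTIAL; a non-zero
 * fpdu->incomplete_bytes on entry means this segment continues a previous
 * FPDU and is classified UNALIGNED. E.g. fpdu_length = 132 with
 * tcp_payload_len = 100 is PARTIAL, leaving 32 bytes for the next segment.
 */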
1850
1851static void
1852qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1853		    struct qed_iwarp_fpdu *fpdu,
1854		    struct unaligned_opaque_data *pkt_data,
1855		    u16 tcp_payload_size, u8 placement_offset)
1856{
1857	fpdu->mpa_buf = buf;
1858	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1859	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1860	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1861	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1862
1863	if (tcp_payload_size == 1)
1864		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1865	else if (tcp_payload_size < fpdu->fpdu_length)
1866		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1867	else
1868		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1869
1870	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1871}
1872
1873static int
1874qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1875		 struct qed_iwarp_fpdu *fpdu,
1876		 struct unaligned_opaque_data *pkt_data,
1877		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1878{
1879	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1880	int rc;
1881
1882	/* Need to copy the data from the partial packet stored in the fpdu
1883	 * to the new buf; for this we also need to move the data currently
1884	 * placed in the buf. The assumption is that the buffer is big enough,
1885	 * since fpdu_length <= mss. We use an intermediate buffer since
1886	 * we may need to copy the new data to an overlapping location.
1887	 */
1888	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1889		DP_ERR(p_hwfn,
1890		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1891		       buf->buff_size, fpdu->mpa_frag_len,
1892		       tcp_payload_size, fpdu->incomplete_bytes);
1893		return -EINVAL;
1894	}
1895
1896	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1897		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1898		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1899		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1900		   tcp_payload_size);
1901
1902	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1903	memcpy(tmp_buf + fpdu->mpa_frag_len,
1904	       (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1905	       tcp_payload_size);
1906
1907	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1908	if (rc)
1909		return rc;
1910
1911	/* If we managed to post the buffer, copy the data to the new buffer;
1912	 * otherwise this will occur in the next round...
1913	 */
1914	memcpy((u8 *)(buf->data), tmp_buf,
1915	       fpdu->mpa_frag_len + tcp_payload_size);
1916
1917	fpdu->mpa_buf = buf;
1918	/* fpdu->pkt_hdr remains as is */
1919	/* fpdu->mpa_frag is overridden with new buf */
1920	fpdu->mpa_frag = buf->data_phys_addr;
1921	fpdu->mpa_frag_virt = buf->data;
1922	fpdu->mpa_frag_len += tcp_payload_size;
1923
1924	fpdu->incomplete_bytes -= tcp_payload_size;
1925
1926	DP_VERBOSE(p_hwfn,
1927		   QED_MSG_RDMA,
1928		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1929		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1930		   fpdu->incomplete_bytes);
1931
1932	return 0;
1933}
1934
1935static void
1936qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1937			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1938{
1939	u16 mpa_len;
1940
1941	/* Update incomplete packets if needed */
1942	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1943		/* Missing lower byte is now available */
1944		mpa_len = fpdu->fpdu_length | *mpa_data;
1945		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1946		/* one byte of hdr */
1947		fpdu->mpa_frag_len = 1;
1948		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1949		DP_VERBOSE(p_hwfn,
1950			   QED_MSG_RDMA,
1951			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1952			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1953	}
1954}
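
/* Worked example (illustrative): if a segment ends with only the high byte
 * of the MPA length field (say 0x00), qed_iwarp_mpa_classify() stores it
 * shifted left by 8 bits; when the next segment supplies the low byte (say
 * 0x84), mpa_len becomes 0x0084 = 132 and fpdu_length becomes
 * QED_IWARP_FPDU_LEN_WITH_PAD(132) = 140.
 */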
1955
1956#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1957	(GET_FIELD((_curr_pkt)->flags,	   \
1958		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1959
1960/* This function is used to recycle a buffer using the ll2 drop option. It
1961 * uses this mechanism to ensure that all buffers posted to tx before this
1962 * one were completed. The buffer sent here will be sent as a cookie in the tx
1963 * completion function and can then be reposted to the rx chain when done. The
1964 * flow that requires this is the flow where an FPDU splits over more than 3
1965 * tcp segments. In this case the driver needs to re-post an rx buffer instead
1966 * of the one received, but the driver can't simply repost a buffer it copied
1967 * from, as there is a case where the buffer was originally a packed FPDU and
1968 * is partially posted to FW. The driver needs to ensure the FW is done with it.
1969 */
1970static int
1971qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1972		      struct qed_iwarp_fpdu *fpdu,
1973		      struct qed_iwarp_ll2_buff *buf)
1974{
1975	struct qed_ll2_tx_pkt_info tx_pkt;
1976	u8 ll2_handle;
1977	int rc;
1978
1979	memset(&tx_pkt, 0, sizeof(tx_pkt));
1980	tx_pkt.num_of_bds = 1;
1981	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1982	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1983	tx_pkt.first_frag = fpdu->pkt_hdr;
1984	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1985	buf->piggy_buf = NULL;
1986	tx_pkt.cookie = buf;
1987
1988	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
1989
1990	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
1991	if (rc)
1992		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1993			   "Can't drop packet rc=%d\n", rc);
1994
1995	DP_VERBOSE(p_hwfn,
1996		   QED_MSG_RDMA,
1997		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
1998		   (unsigned long int)tx_pkt.first_frag,
1999		   tx_pkt.first_frag_len, buf, rc);
2000
2001	return rc;
2002}
2003
2004static int
2005qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2006{
2007	struct qed_ll2_tx_pkt_info tx_pkt;
2008	u8 ll2_handle;
2009	int rc;
2010
2011	memset(&tx_pkt, 0, sizeof(tx_pkt));
2012	tx_pkt.num_of_bds = 1;
2013	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2014	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2015
2016	tx_pkt.first_frag = fpdu->pkt_hdr;
2017	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2018	tx_pkt.enable_ip_cksum = true;
2019	tx_pkt.enable_l4_cksum = true;
2020	tx_pkt.calc_ip_len = true;
2021	/* vlan overload with enum iwarp_ll2_tx_queues */
2022	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2023
2024	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2025
2026	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2027	if (rc)
2028		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2029			   "Can't send right edge rc=%d\n", rc);
2030	DP_VERBOSE(p_hwfn,
2031		   QED_MSG_RDMA,
2032		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2033		   tx_pkt.num_of_bds,
2034		   (unsigned long int)tx_pkt.first_frag,
2035		   tx_pkt.first_frag_len, rc);
2036
2037	return rc;
2038}
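
/* Sketch (informal, added for clarity): this send carries only the stored
 * packet header (one BD, first_frag_len = pkt_hdr_size) back to the device in
 * loopback, with tx_pkt.vlan overloaded to select the ALIGNED_RIGHT_TRIMMED
 * tx queue; it is used when a partial FPDU has reached the TCP window right
 * edge (see the callers in qed_iwarp_process_mpa_pkt()).
 */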
2039
2040static int
2041qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2042		    struct qed_iwarp_fpdu *fpdu,
2043		    struct unaligned_opaque_data *curr_pkt,
2044		    struct qed_iwarp_ll2_buff *buf,
2045		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2046{
2047	struct qed_ll2_tx_pkt_info tx_pkt;
2048	u8 ll2_handle;
2049	int rc;
2050
2051	memset(&tx_pkt, 0, sizeof(tx_pkt));
2052
2053	/* An unaligned packet means it's split over two tcp segments. So the
2054	 * complete packet requires 3 bds, one for the header, one for the
2055	 * part of the fpdu of the first tcp segment, and the last fragment
2056	 * will point to the remainder of the fpdu. A packed pdu, requires only
2057	 * will point to the remainder of the fpdu. A packed pdu requires only
2058	 */
2059	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2060	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2061	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2062
2063	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2064	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2065	    tcp_payload_size <= fpdu->fpdu_length)
2066		tx_pkt.cookie = fpdu->mpa_buf;
2067
2068	tx_pkt.first_frag = fpdu->pkt_hdr;
2069	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2070	tx_pkt.enable_ip_cksum = true;
2071	tx_pkt.enable_l4_cksum = true;
2072	tx_pkt.calc_ip_len = true;
2073	/* vlan overload with enum iwarp_ll2_tx_queues */
2074	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2075
2076	/* special case of unaligned packet and not packed, need to send
2077	 * both buffers as cookie to release.
2078	 */
2079	if (tcp_payload_size == fpdu->incomplete_bytes)
2080		fpdu->mpa_buf->piggy_buf = buf;
2081
2082	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2083
2084	/* Set first fragment to header */
2085	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2086	if (rc)
2087		goto out;
2088
2089	/* Set second fragment to first part of packet */
2090	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2091					       fpdu->mpa_frag,
2092					       fpdu->mpa_frag_len);
2093	if (rc)
2094		goto out;
2095
2096	if (!fpdu->incomplete_bytes)
2097		goto out;
2098
2099	/* Set third fragment to second part of the packet */
2100	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2101					       ll2_handle,
2102					       buf->data_phys_addr +
2103					       curr_pkt->first_mpa_offset,
2104					       fpdu->incomplete_bytes);
2105out:
2106	DP_VERBOSE(p_hwfn,
2107		   QED_MSG_RDMA,
2108		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2109		   tx_pkt.num_of_bds,
2110		   tx_pkt.first_frag_len,
2111		   fpdu->mpa_frag_len,
2112		   fpdu->incomplete_bytes, rc);
2113
2114	return rc;
2115}
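
/* BD layout sketch (informal, added for clarity): for an UNALIGNED FPDU of
 * 132 bytes whose first 100 bytes arrived in the previous segment, the send
 * uses three BDs: the stored packet header (pkt_hdr_size bytes), the 100-byte
 * fragment addressed by fpdu->mpa_frag, and the 32 remaining bytes taken from
 * the newly arrived buffer at curr_pkt->first_mpa_offset. A PACKED FPDU uses
 * only the first two BDs.
 */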
2116
2117static void
2118qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2119		       struct unaligned_opaque_data *curr_pkt,
2120		       u32 opaque_data0, u32 opaque_data1)
2121{
2122	u64 opaque_data;
2123
2124	opaque_data = HILO_64(opaque_data1, opaque_data0);
2125	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2126
2127	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2128				     le16_to_cpu(curr_pkt->first_mpa_offset);
2129	curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2130}
2131
2132/* This function is called when an unaligned or incomplete MPA packet arrives;
2133 * the driver needs to align the packet, perhaps using previous data, and send
2134 * it down to FW once it is aligned.
2135 */
2136static int
2137qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2138			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2139{
2140	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2141	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2142	enum qed_iwarp_mpa_pkt_type pkt_type;
2143	struct qed_iwarp_fpdu *fpdu;
2144	int rc = -EINVAL;
2145	u8 *mpa_data;
2146
2147	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2148	if (!fpdu) { /* something corrupt with cid, post rx back */
2149		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2150		       curr_pkt->cid);
2151		goto err;
2152	}
2153
2154	do {
2155		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2156
2157		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2158						  mpa_buf->tcp_payload_len,
2159						  mpa_data);
2160
2161		switch (pkt_type) {
2162		case QED_IWARP_MPA_PKT_PARTIAL:
2163			qed_iwarp_init_fpdu(buf, fpdu,
2164					    curr_pkt,
2165					    mpa_buf->tcp_payload_len,
2166					    mpa_buf->placement_offset);
2167
2168			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2169				mpa_buf->tcp_payload_len = 0;
2170				break;
2171			}
2172
2173			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2174
2175			if (rc) {
2176				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2177					   "Can't send FPDU:reset rc=%d\n", rc);
2178				memset(fpdu, 0, sizeof(*fpdu));
2179				break;
2180			}
2181
2182			mpa_buf->tcp_payload_len = 0;
2183			break;
2184		case QED_IWARP_MPA_PKT_PACKED:
2185			qed_iwarp_init_fpdu(buf, fpdu,
2186					    curr_pkt,
2187					    mpa_buf->tcp_payload_len,
2188					    mpa_buf->placement_offset);
2189
2190			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2191						 mpa_buf->tcp_payload_len,
2192						 pkt_type);
2193			if (rc) {
2194				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2195					   "Can't send FPDU:reset rc=%d\n", rc);
2196				memset(fpdu, 0, sizeof(*fpdu));
2197				break;
2198			}
2199
2200			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2201			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2202			break;
2203		case QED_IWARP_MPA_PKT_UNALIGNED:
2204			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2205			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2206				/* special handling of fpdu split over more
2207				 * than 2 segments
2208				 */
2209				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2210					rc = qed_iwarp_win_right_edge(p_hwfn,
2211								      fpdu);
2212					/* packet will be re-processed later */
2213					if (rc)
2214						return rc;
2215				}
2216
2217				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2218						      buf,
2219						      mpa_buf->tcp_payload_len);
2220				if (rc) /* packet will be re-processed later */
2221					return rc;
2222
2223				mpa_buf->tcp_payload_len = 0;
2224				break;
2225			}
2226
2227			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2228						 mpa_buf->tcp_payload_len,
2229						 pkt_type);
2230			if (rc) {
2231				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2232					   "Can't send FPDU:delay rc=%d\n", rc);
2233				/* don't reset fpdu -> we need it for next
2234				 * classify
2235				 */
2236				break;
2237			}
2238
2239			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2240			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2241			/* The framed PDU was sent - no more incomplete bytes */
2242			fpdu->incomplete_bytes = 0;
2243			break;
2244		}
2245	} while (mpa_buf->tcp_payload_len && !rc);
2246
2247	return rc;
2248
2249err:
2250	qed_iwarp_ll2_post_rx(p_hwfn,
2251			      buf,
2252			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2253	return rc;
2254}
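
/* Processing-loop sketch (informal, added for clarity): each iteration of the
 * do/while loop above consumes one FPDU (or the available part of one) from
 * the Rx buffer, advancing curr_pkt->first_mpa_offset and shrinking
 * mpa_buf->tcp_payload_len, until the payload is exhausted or an error defers
 * the buffer for later re-processing.
 */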
2255
2256static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2257{
2258	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2259	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2260	int rc;
2261
2262	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2263		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2264					   struct qed_iwarp_ll2_mpa_buf,
2265					   list_entry);
2266
2267		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2268
2269		/* busy means break and continue processing later, don't
2270		 * remove the buf from the pending list.
2271		 */
2272		if (rc == -EBUSY)
2273			break;
2274
2275		list_del(&mpa_buf->list_entry);
2276		list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
2277
2278		if (rc) {	/* different error, don't continue */
2279			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2280			break;
2281		}
2282	}
2283}
2284
2285static void
2286qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2287{
2288	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2289	struct qed_iwarp_info *iwarp_info;
2290	struct qed_hwfn *p_hwfn = cxt;
2291
2292	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2293	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2294				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2295	if (!mpa_buf) {
2296		DP_ERR(p_hwfn, "No free mpa buf\n");
2297		goto err;
2298	}
2299
2300	list_del(&mpa_buf->list_entry);
2301	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2302			       data->opaque_data_0, data->opaque_data_1);
2303
2304	DP_VERBOSE(p_hwfn,
2305		   QED_MSG_RDMA,
2306		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2307		   data->length.packet_length, mpa_buf->data.first_mpa_offset,
2308		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2309		   mpa_buf->data.cid);
2310
2311	mpa_buf->ll2_buf = data->cookie;
2312	mpa_buf->tcp_payload_len = data->length.packet_length -
2313				   mpa_buf->data.first_mpa_offset;
2314	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2315	mpa_buf->placement_offset = data->u.placement_offset;
2316
2317	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2318
2319	qed_iwarp_process_pending_pkts(p_hwfn);
2320	return;
2321err:
2322	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2323			      iwarp_info->ll2_mpa_handle);
2324}
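
/* Note (added for clarity): tcp_payload_len computed above is the number of
 * MPA bytes carried by this completion (packet_length minus the MPA start
 * offset), and first_mpa_offset is then rebased by the Rx placement offset so
 * that later processing can index directly into buf->data.
 */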
2325
2326static void
2327qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2328{
2329	struct qed_iwarp_ll2_buff *buf = data->cookie;
2330	struct qed_iwarp_listener *listener;
2331	struct qed_ll2_tx_pkt_info tx_pkt;
2332	struct qed_iwarp_cm_info cm_info;
2333	struct qed_hwfn *p_hwfn = cxt;
2334	u8 remote_mac_addr[ETH_ALEN];
2335	u8 local_mac_addr[ETH_ALEN];
2336	struct qed_iwarp_ep *ep;
2337	int tcp_start_offset;
2338	u8 ts_hdr_size = 0;
2339	u8 ll2_syn_handle;
2340	int payload_len;
2341	u32 hdr_size;
2342	int rc;
2343
2344	memset(&cm_info, 0, sizeof(cm_info));
2345	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2346
2347	/* Check if packet was received with errors... */
2348	if (data->err_flags) {
2349		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2350			  data->err_flags);
2351		goto err;
2352	}
2353
2354	if (GET_FIELD(data->parse_flags,
2355		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2356	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2357		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2358		goto err;
2359	}
2360
2361	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2362				    data->u.placement_offset, remote_mac_addr,
2363				    local_mac_addr, &payload_len,
2364				    &tcp_start_offset);
2365	if (rc)
2366		goto err;
2367
2368	/* Check if there is a listener for this 4-tuple+vlan */
2369	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2370	if (!listener) {
2371		DP_VERBOSE(p_hwfn,
2372			   QED_MSG_RDMA,
2373			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2374			   data->parse_flags, data->length.packet_length);
2375
2376		memset(&tx_pkt, 0, sizeof(tx_pkt));
2377		tx_pkt.num_of_bds = 1;
2378		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2379		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2380		tx_pkt.first_frag = buf->data_phys_addr +
2381				    data->u.placement_offset;
2382		tx_pkt.first_frag_len = data->length.packet_length;
2383		tx_pkt.cookie = buf;
2384
2385		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2386					       &tx_pkt, true);
2387
2388		if (rc) {
2389			DP_NOTICE(p_hwfn,
2390				  "Can't post SYN back to chip rc=%d\n", rc);
2391			goto err;
2392		}
2393		return;
2394	}
2395
2396	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2397	/* There may be an open ep on this connection if this is a syn
2398	 * retransmit... need to make sure there isn't...
2399	 */
2400	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2401		goto err;
2402
2403	ep = qed_iwarp_get_free_ep(p_hwfn);
2404	if (!ep)
2405		goto err;
2406
2407	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2408	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2409	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2410
2411	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2412	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2413
2414	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2415
2416	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
2417		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
2418
2419	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
2420		   ts_hdr_size;
2421	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2422	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
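	/* Illustrative numbers (not from the original source): for an IPv4
	 * connection without TCP timestamps and max_mtu = 1500, hdr_size is
	 * 40, giving an MSS of 1460, well below the QED_IWARP_MAX_FW_MSS cap
	 * of 4120.
	 */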
2423
2424	ep->event_cb = listener->event_cb;
2425	ep->cb_context = listener->cb_context;
2426	ep->connect_mode = TCP_CONNECT_PASSIVE;
2427
2428	ep->syn = buf;
2429	ep->syn_ip_payload_length = (u16)payload_len;
2430	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2431			   tcp_start_offset;
2432
2433	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2434	if (rc) {
2435		qed_iwarp_return_ep(p_hwfn, ep);
2436		goto err;
2437	}
2438
2439	return;
2440err:
2441	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2442}
2443
2444static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2445				     void *cookie, dma_addr_t rx_buf_addr,
2446				     bool b_last_packet)
2447{
2448	struct qed_iwarp_ll2_buff *buffer = cookie;
2449	struct qed_hwfn *p_hwfn = cxt;
2450
2451	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2452			  buffer->data, buffer->data_phys_addr);
2453	kfree(buffer);
2454}
2455
2456static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2457				      void *cookie, dma_addr_t first_frag_addr,
2458				      bool b_last_fragment, bool b_last_packet)
2459{
2460	struct qed_iwarp_ll2_buff *buffer = cookie;
2461	struct qed_iwarp_ll2_buff *piggy;
2462	struct qed_hwfn *p_hwfn = cxt;
2463
2464	if (!buffer)		/* can happen in packed mpa unaligned... */
2465		return;
2466
2467	/* this was originally an rx packet, post it back */
2468	piggy = buffer->piggy_buf;
2469	if (piggy) {
2470		buffer->piggy_buf = NULL;
2471		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2472	}
2473
2474	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2475
2476	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2477		qed_iwarp_process_pending_pkts(p_hwfn);
2478
2479	return;
2480}
2481
2482static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2483				     void *cookie, dma_addr_t first_frag_addr,
2484				     bool b_last_fragment, bool b_last_packet)
2485{
2486	struct qed_iwarp_ll2_buff *buffer = cookie;
2487	struct qed_hwfn *p_hwfn = cxt;
2488
2489	if (!buffer)
2490		return;
2491
2492	if (buffer->piggy_buf) {
2493		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2494				  buffer->piggy_buf->buff_size,
2495				  buffer->piggy_buf->data,
2496				  buffer->piggy_buf->data_phys_addr);
2497
2498		kfree(buffer->piggy_buf);
2499	}
2500
2501	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2502			  buffer->data, buffer->data_phys_addr);
2503
2504	kfree(buffer);
2505}
2506
2507/* The only slowpath for iwarp ll2 is unaligned flush. When this completion
2508 * is received, the driver needs to reset the FPDU.
2509 */
2510void
2511qed_iwarp_ll2_slowpath(void *cxt,
2512		       u8 connection_handle,
2513		       u32 opaque_data_0, u32 opaque_data_1)
2514{
2515	struct unaligned_opaque_data unalign_data;
2516	struct qed_hwfn *p_hwfn = cxt;
2517	struct qed_iwarp_fpdu *fpdu;
2518
2519	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2520			       opaque_data_0, opaque_data_1);
2521
2522	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2523		   unalign_data.cid);
2524
2525	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2526	if (fpdu)
2527		memset(fpdu, 0, sizeof(*fpdu));
2528}
2529
2530static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2531{
2532	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2533	int rc = 0;
2534
2535	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2536		rc = qed_ll2_terminate_connection(p_hwfn,
2537						  iwarp_info->ll2_syn_handle);
2538		if (rc)
2539			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2540
2541		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2542		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2543	}
2544
2545	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2546		rc = qed_ll2_terminate_connection(p_hwfn,
2547						  iwarp_info->ll2_ooo_handle);
2548		if (rc)
2549			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2550
2551		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2552		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2553	}
2554
2555	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2556		rc = qed_ll2_terminate_connection(p_hwfn,
2557						  iwarp_info->ll2_mpa_handle);
2558		if (rc)
2559			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2560
2561		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2562		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2563	}
2564
2565	qed_llh_remove_mac_filter(p_hwfn,
2566				  p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
2567	return rc;
2568}
2569
2570static int
2571qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2572			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2573{
2574	struct qed_iwarp_ll2_buff *buffer;
2575	int rc = 0;
2576	int i;
2577
2578	for (i = 0; i < num_rx_bufs; i++) {
2579		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2580		if (!buffer) {
2581			rc = -ENOMEM;
2582			break;
2583		}
2584
2585		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2586						  buff_size,
2587						  &buffer->data_phys_addr,
2588						  GFP_KERNEL);
2589		if (!buffer->data) {
2590			kfree(buffer);
2591			rc = -ENOMEM;
2592			break;
2593		}
2594
2595		buffer->buff_size = buff_size;
2596		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2597		if (rc)
2598			/* buffers will be deallocated by qed_ll2 */
2599			break;
2600	}
2601	return rc;
2602}
2603
2604#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2605	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2606		ETH_CACHE_LINE_SIZE)
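
/* Worked example (illustrative, assuming a 64-byte ETH_CACHE_LINE_SIZE): for
 * an MTU of 1500 this evaluates to ALIGN(1500 + 14 + 8 + 2 + 64, 64) =
 * ALIGN(1588, 64) = 1600 bytes per MPA rx buffer, leaving room for the
 * Ethernet header, two VLAN tags and cache-line alignment.
 */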
2607
2608static int
2609qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2610		    struct qed_rdma_start_in_params *params,
2611		    struct qed_ptt *p_ptt)
2612{
2613	struct qed_iwarp_info *iwarp_info;
2614	struct qed_ll2_acquire_data data;
2615	struct qed_ll2_cbs cbs;
2616	u32 mpa_buff_size;
2617	u16 n_ooo_bufs;
2618	int rc = 0;
2619	int i;
2620
2621	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2622	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2623	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2624	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2625
2626	iwarp_info->max_mtu = params->max_mtu;
2627
2628	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2629
2630	rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2631	if (rc)
2632		return rc;
2633
2634	/* Start SYN connection */
2635	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2636	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2637	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2638	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2639	cbs.cookie = p_hwfn;
2640
2641	memset(&data, 0, sizeof(data));
2642	data.input.conn_type = QED_LL2_TYPE_IWARP;
2643	data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE;
2644	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2645	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2646	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2647	data.input.tx_tc = PKT_LB_TC;
2648	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2649	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2650	data.cbs = &cbs;
2651
2652	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2653	if (rc) {
2654		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2655		qed_llh_remove_mac_filter(p_hwfn, p_ptt, params->mac_addr);
2656		return rc;
2657	}
2658
2659	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2660	if (rc) {
2661		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2662		goto err;
2663	}
2664	}
2665	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2666					 QED_IWARP_LL2_SYN_RX_SIZE,
2667					 QED_IWARP_MAX_SYN_PKT_SIZE,
2668					 iwarp_info->ll2_syn_handle);
2669	if (rc)
2670		goto err;
2671
2672	/* Start OOO connection */
2673	data.input.conn_type = QED_LL2_TYPE_OOO;
2674	data.input.mtu = params->max_mtu;
2675
2676	n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) /
2677		     iwarp_info->max_mtu;
2678	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
2679
2680	data.input.rx_num_desc = n_ooo_bufs;
2681	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2682
2683	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2684	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2685	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2686
2687	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2688	if (rc)
2689		goto err;
2690
2691	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2692	if (rc)
2693		goto err;
2694
2695	/* Start Unaligned MPA connection */
2696	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2697	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2698
2699	memset(&data, 0, sizeof(data));
2700	data.input.conn_type = QED_LL2_TYPE_IWARP;
2701	data.input.mtu = params->max_mtu;
2702	/* FW requires that once a packet arrives OOO, there must be at
2703	 * least 2 rx buffers available on the unaligned connection
2704	 * for handling the case that it is a partial fpdu.
2705	 */
2706	data.input.rx_num_desc = n_ooo_bufs * 2;
2707	data.input.tx_num_desc = data.input.rx_num_desc;
2708	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2709	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2710	data.input.secondary_queue = true;
2711	data.cbs = &cbs;
2712
2713	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2714	if (rc)
2715		goto err;
2716
2717	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2718	if (rc)
2719		goto err;
2720
2721	mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2722	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2723					 data.input.rx_num_desc,
2724					 mpa_buff_size,
2725					 iwarp_info->ll2_mpa_handle);
2726	if (rc)
2727		goto err;
2728
2729	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2730					    sizeof(*iwarp_info->partial_fpdus),
2731					    GFP_KERNEL);
2732	if (!iwarp_info->partial_fpdus)
2733		goto err;
2734
2735	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2736
2737	iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
2738	if (!iwarp_info->mpa_intermediate_buf)
2739		goto err;
2740
2741	/* The mpa_bufs array serves for pending RX packets received on the
2742	 * mpa ll2 that don't have room on the tx ring and require later
2743	 * processing. We can't fail on allocation of such a struct, therefore
2744	 * we allocate enough to take care of all rx packets.
2745	 */
2746	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2747				       sizeof(*iwarp_info->mpa_bufs),
2748				       GFP_KERNEL);
2749	if (!iwarp_info->mpa_bufs)
2750		goto err;
2751
2752	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2753	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2754	for (i = 0; i < data.input.rx_num_desc; i++)
2755		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2756			      &iwarp_info->mpa_buf_list);
2757	return rc;
2758err:
2759	qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2760
2761	return rc;
2762}
2763
2764int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2765		    struct qed_rdma_start_in_params *params)
2766{
2767	struct qed_iwarp_info *iwarp_info;
2768	u32 rcv_wnd_size;
2769
2770	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2771
2772	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2773	rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF;
2774
2775	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
2776	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2777	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2778	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
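	/* Illustrative: with the 256KB default window and the 0xffff minimum,
	 * ilog2(256 * 1024) = 18 and ilog2(0xffff) = 15, so rcv_wnd_scale is 3
	 * and the window value programmed here is 256KB >> 3 = 32KB.
	 */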
2779	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2780	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2781
2782	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2783
2784	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2785				MPA_RTR_TYPE_ZERO_WRITE |
2786				MPA_RTR_TYPE_ZERO_READ;
2787
2788	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2789	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2790	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2791
2792	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2793				  qed_iwarp_async_event);
2794	qed_ooo_setup(p_hwfn);
2795
2796	return qed_iwarp_ll2_start(p_hwfn, params, p_ptt);
2797}
2798
2799int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2800{
2801	int rc;
2802
2803	qed_iwarp_free_prealloc_ep(p_hwfn);
2804	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2805	if (rc)
2806		return rc;
2807
2808	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
2809
2810	return qed_iwarp_ll2_stop(p_hwfn, p_ptt);
2811}
2812
2813void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2814			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2815{
2816	struct qed_iwarp_cm_event_params params;
2817
2818	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2819
2820	params.event = QED_IWARP_EVENT_CLOSE;
2821	params.ep_context = ep;
2822	params.cm_info = &ep->cm_info;
2823	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2824			 0 : -ECONNRESET;
2825
2826	ep->state = QED_IWARP_EP_CLOSED;
2827	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2828	list_del(&ep->list_entry);
2829	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2830
2831	ep->event_cb(ep->cb_context, &params);
2832}
2833
2834void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2835				  struct qed_iwarp_ep *ep, int fw_ret_code)
2836{
2837	struct qed_iwarp_cm_event_params params;
2838	bool event_cb = false;
2839
2840	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2841		   ep->cid, fw_ret_code);
2842
2843	switch (fw_ret_code) {
2844	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2845		params.status = 0;
2846		params.event = QED_IWARP_EVENT_DISCONNECT;
2847		event_cb = true;
2848		break;
2849	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2850		params.status = -ECONNRESET;
2851		params.event = QED_IWARP_EVENT_DISCONNECT;
2852		event_cb = true;
2853		break;
2854	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2855		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2856		event_cb = true;
2857		break;
2858	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2859		params.event = QED_IWARP_EVENT_IRQ_FULL;
2860		event_cb = true;
2861		break;
2862	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2863		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2864		event_cb = true;
2865		break;
2866	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2867		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2868		event_cb = true;
2869		break;
2870	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2871		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2872		event_cb = true;
2873		break;
2874	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2875		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2876		event_cb = true;
2877		break;
2878	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2879		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2880		event_cb = true;
2881		break;
2882	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2883		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2884		event_cb = true;
2885		break;
2886	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2887		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2888		event_cb = true;
2889		break;
2890	default:
2891		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2892			   "Unhandled exception received...fw_ret_code=%d\n",
2893			   fw_ret_code);
2894		break;
2895	}
2896
2897	if (event_cb) {
2898		params.ep_context = ep;
2899		params.cm_info = &ep->cm_info;
2900		ep->event_cb(ep->cb_context, &params);
2901	}
2902}
2903
2904static void
2905qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2906				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2907{
2908	struct qed_iwarp_cm_event_params params;
2909
2910	memset(&params, 0, sizeof(params));
2911	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2912	params.ep_context = ep;
2913	params.cm_info = &ep->cm_info;
2914	ep->state = QED_IWARP_EP_CLOSED;
2915
2916	switch (fw_return_code) {
2917	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2918		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2919			   "%s(0x%x) TCP connect got invalid packet\n",
2920			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2921		params.status = -ECONNRESET;
2922		break;
2923	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2924		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2925			   "%s(0x%x) TCP Connection Reset\n",
2926			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2927		params.status = -ECONNRESET;
2928		break;
2929	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2930		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2931			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2932		params.status = -EBUSY;
2933		break;
2934	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2935		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2936			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2937		params.status = -ECONNREFUSED;
2938		break;
2939	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2940		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2941			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2942		params.status = -ECONNRESET;
2943		break;
2944	default:
2945		DP_ERR(p_hwfn,
2946		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
2947		       QED_IWARP_CONNECT_MODE_STRING(ep),
2948		       ep->tcp_cid, fw_return_code);
2949		params.status = -ECONNRESET;
2950		break;
2951	}
2952
2953	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2954		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
2955		qed_iwarp_return_ep(p_hwfn, ep);
2956	} else {
2957		ep->event_cb(ep->cb_context, &params);
2958		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2959		list_del(&ep->list_entry);
2960		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2961	}
2962}
2963
2964void
2965qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
2966			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2967{
2968	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2969
2970	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2971		/* Done with the SYN packet, post back to ll2 rx */
2972		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
2973
2974		ep->syn = NULL;
2975
2976		/* If connect failed - upper layer doesn't know about it */
2977		if (fw_return_code == RDMA_RETURN_OK)
2978			qed_iwarp_mpa_received(p_hwfn, ep);
2979		else
2980			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2981							   fw_return_code);
2982	} else {
2983		if (fw_return_code == RDMA_RETURN_OK)
2984			qed_iwarp_mpa_offload(p_hwfn, ep);
2985		else
2986			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
2987							   fw_return_code);
2988	}
2989}
2990
2991static inline bool
2992qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
2993{
2994	if (!ep || (ep->sig != QED_EP_SIG)) {
2995		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
2996		return false;
2997	}
2998
2999	return true;
3000}
3001
3002static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
3003				 u8 fw_event_code, u16 echo,
3004				 union event_ring_data *data,
3005				 u8 fw_return_code)
3006{
3007	struct regpair *fw_handle = &data->rdma_data.async_handle;
3008	struct qed_iwarp_ep *ep = NULL;
3009	u16 cid;
3010
3011	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3012						       fw_handle->lo);
3013
3014	switch (fw_event_code) {
3015	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3016		/* Async completion after TCP 3-way handshake */
3017		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3018			return -EINVAL;
3019		DP_VERBOSE(p_hwfn,
3020			   QED_MSG_RDMA,
3021			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3022			   ep->tcp_cid, fw_return_code);
3023		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3024		break;
3025	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3026		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3027			return -EINVAL;
3028		DP_VERBOSE(p_hwfn,
3029			   QED_MSG_RDMA,
3030			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3031			   ep->cid, fw_return_code);
3032		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3033		break;
3034	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3035		/* Async completion for Close Connection ramrod */
3036		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3037			return -EINVAL;
3038		DP_VERBOSE(p_hwfn,
3039			   QED_MSG_RDMA,
3040			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3041			   ep->cid, fw_return_code);
3042		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3043		break;
3044	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3045		/* Async event for active side only */
3046		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3047			return -EINVAL;
3048		DP_VERBOSE(p_hwfn,
3049			   QED_MSG_RDMA,
3050			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3051			   ep->cid, fw_return_code);
3052		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3053		break;
3054	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3055		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3056			return -EINVAL;
3057		DP_VERBOSE(p_hwfn,
3058			   QED_MSG_RDMA,
3059			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3060			   ep->cid, fw_return_code);
3061		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3062		break;
3063	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3064		cid = (u16)le32_to_cpu(fw_handle->lo);
3065		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3066			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3067		qed_iwarp_cid_cleaned(p_hwfn, cid);
3068
3069		break;
3070	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3071		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3072
3073		p_hwfn->p_rdma_info->events.affiliated_event(
3074			p_hwfn->p_rdma_info->events.context,
3075			QED_IWARP_EVENT_CQ_OVERFLOW,
3076			(void *)fw_handle);
3077		break;
3078	default:
3079		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3080		       fw_event_code);
3081		return -EINVAL;
3082	}
3083	return 0;
3084}
3085
3086int
3087qed_iwarp_create_listen(void *rdma_cxt,
3088			struct qed_iwarp_listen_in *iparams,
3089			struct qed_iwarp_listen_out *oparams)
3090{
3091	struct qed_hwfn *p_hwfn = rdma_cxt;
3092	struct qed_iwarp_listener *listener;
3093
3094	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3095	if (!listener)
3096		return -ENOMEM;
3097
3098	listener->ip_version = iparams->ip_version;
3099	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3100	listener->port = iparams->port;
3101	listener->vlan = iparams->vlan;
3102
3103	listener->event_cb = iparams->event_cb;
3104	listener->cb_context = iparams->cb_context;
3105	listener->max_backlog = iparams->max_backlog;
3106	oparams->handle = listener;
3107
3108	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3109	list_add_tail(&listener->list_entry,
3110		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3111	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3112
3113	DP_VERBOSE(p_hwfn,
3114		   QED_MSG_RDMA,
3115		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3116		   listener->event_cb,
3117		   listener,
3118		   listener->ip_addr[0],
3119		   listener->ip_addr[1],
3120		   listener->ip_addr[2],
3121		   listener->ip_addr[3], listener->port, listener->vlan);
3122
3123	return 0;
3124}
3125
3126int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3127{
3128	struct qed_iwarp_listener *listener = handle;
3129	struct qed_hwfn *p_hwfn = rdma_cxt;
3130
3131	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3132
3133	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3134	list_del(&listener->list_entry);
3135	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3136
3137	kfree(listener);
3138
3139	return 0;
3140}
3141
3142int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3143{
3144	struct qed_hwfn *p_hwfn = rdma_cxt;
3145	struct qed_sp_init_data init_data;
3146	struct qed_spq_entry *p_ent;
3147	struct qed_iwarp_ep *ep;
3148	struct qed_rdma_qp *qp;
3149	int rc;
3150
3151	ep = iparams->ep_context;
3152	if (!ep) {
3153		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
3154		return -EINVAL;
3155	}
3156
3157	qp = ep->qp;
3158
3159	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3160		   qp->icid, ep->tcp_cid);
3161
3162	memset(&init_data, 0, sizeof(init_data));
3163	init_data.cid = qp->icid;
3164	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3165	init_data.comp_mode = QED_SPQ_MODE_CB;
3166
3167	rc = qed_sp_init_request(p_hwfn, &p_ent,
3168				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3169				 PROTOCOLID_IWARP, &init_data);
3170
3171	if (rc)
3172		return rc;
3173
3174	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3175
3176	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3177
3178	return rc;
3179}
3180
3181void
3182qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3183		   struct qed_rdma_query_qp_out_params *out_params)
3184{
3185	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3186}
v5.4
  32#include <linux/if_ether.h>
  33#include <linux/if_vlan.h>
  34#include <linux/ip.h>
  35#include <linux/ipv6.h>
  36#include <linux/spinlock.h>
  37#include <linux/tcp.h>
  38#include "qed_cxt.h"
  39#include "qed_hw.h"
  40#include "qed_ll2.h"
  41#include "qed_rdma.h"
  42#include "qed_reg_addr.h"
  43#include "qed_sp.h"
  44#include "qed_ooo.h"
  45
  46#define QED_IWARP_ORD_DEFAULT		32
  47#define QED_IWARP_IRD_DEFAULT		32
  48#define QED_IWARP_MAX_FW_MSS		4120
  49
  50#define QED_EP_SIG 0xecabcdef
  51
  52struct mpa_v2_hdr {
  53	__be16 ird;
  54	__be16 ord;
  55};
  56
  57#define MPA_V2_PEER2PEER_MODEL  0x8000
  58#define MPA_V2_SEND_RTR         0x4000	/* on ird */
  59#define MPA_V2_READ_RTR         0x4000	/* on ord */
  60#define MPA_V2_WRITE_RTR        0x8000
  61#define MPA_V2_IRD_ORD_MASK     0x3FFF
  62
  63#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
  64
  65#define QED_IWARP_INVALID_TCP_CID	0xffffffff
  66
  67#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
  68#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
  69#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
  70#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
  71
  72#define QED_IWARP_RCV_WND_SIZE_MIN	(0xffff)
  73#define TIMESTAMP_HEADER_SIZE		(12)
  74#define QED_IWARP_MAX_FIN_RT_DEFAULT	(2)
  75
  76#define QED_IWARP_TS_EN			BIT(0)
  77#define QED_IWARP_DA_EN			BIT(1)
  78#define QED_IWARP_PARAM_CRC_NEEDED	(1)
  79#define QED_IWARP_PARAM_P2P		(1)
  80
  81#define QED_IWARP_DEF_MAX_RT_TIME	(0)
  82#define QED_IWARP_DEF_CWND_FACTOR	(4)
  83#define QED_IWARP_DEF_KA_MAX_PROBE_CNT	(5)
  84#define QED_IWARP_DEF_KA_TIMEOUT	(1200000)	/* 20 min */
  85#define QED_IWARP_DEF_KA_INTERVAL	(1000)		/* 1 sec */
  86
  87static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
  88				 u8 fw_event_code, u16 echo,
  89				 union event_ring_data *data,
  90				 u8 fw_return_code);
  91
  92/* Override devinfo with iWARP specific values */
  93void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
  94{
  95	struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
  96
  97	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
  98	dev->max_qp = min_t(u32,
  99			    IWARP_MAX_QPS,
 100			    p_hwfn->p_rdma_info->num_qps) -
 101		      QED_IWARP_PREALLOC_CNT;
 102
 103	dev->max_cq = dev->max_qp;
 104
 105	dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
 106	dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
 107}
 108
 109void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 110{
 111	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
 112	qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
 113	p_hwfn->b_rdma_enabled_in_prs = true;
 114}
 115
 116/* We have two cid maps, one for tcp which should be used only from passive
 117 * syn processing and replacing a pre-allocated ep in the list. The second
 118 * for active tcp and for QPs.
 119 */
 120static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
 121{
 122	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 123
 124	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 125
 126	if (cid < QED_IWARP_PREALLOC_CNT)
 127		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
 128				    cid);
 129	else
 130		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 131
 132	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 133}
 134
 135void
 136qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
 137			 struct iwarp_init_func_ramrod_data *p_ramrod)
 138{
 139	p_ramrod->iwarp.ll2_ooo_q_index =
 140		RESC_START(p_hwfn, QED_LL2_QUEUE) +
 141		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
 142
 143	p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
 144
 145	return;
 146}
 147
 148static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 149{
 150	int rc;
 151
 152	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 153	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
 154	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 155	if (rc) {
 156		DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
 157		return rc;
 158	}
 159	*cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 160
 161	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
 162	if (rc)
 163		qed_iwarp_cid_cleaned(p_hwfn, *cid);
 164
 165	return rc;
 166}
 167
 168static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
 169{
 170	cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
 171
 172	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 173	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 174	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 175}
 176
 177/* This function allocates a cid for passive tcp (called from syn receive)
 178 * the reason it's separate from the regular cid allocation is because it
 179 * is assured that these cids already have ilt allocated. They are preallocated
 180 * to ensure that we won't need to allocate memory during syn processing
 181 */
 182static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
 183{
 184	int rc;
 185
 186	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
 187
 188	rc = qed_rdma_bmap_alloc_id(p_hwfn,
 189				    &p_hwfn->p_rdma_info->tcp_cid_map, cid);
 190
 191	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 192
 193	if (rc) {
 194		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 195			   "can't allocate iwarp tcp cid max-count=%d\n",
 196			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
 197
 198		*cid = QED_IWARP_INVALID_TCP_CID;
 199		return rc;
 200	}
 201
 202	*cid += qed_cxt_get_proto_cid_start(p_hwfn,
 203					    p_hwfn->p_rdma_info->proto);
 204	return 0;
 205}
 206
 207int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
 208			struct qed_rdma_qp *qp,
 209			struct qed_rdma_create_qp_out_params *out_params)
 210{
 211	struct iwarp_create_qp_ramrod_data *p_ramrod;
 212	struct qed_sp_init_data init_data;
 213	struct qed_spq_entry *p_ent;
 214	u16 physical_queue;
 215	u32 cid;
 216	int rc;
 217
 218	qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 219					      IWARP_SHARED_QUEUE_PAGE_SIZE,
 220					      &qp->shared_queue_phys_addr,
 221					      GFP_KERNEL);
 222	if (!qp->shared_queue)
 223		return -ENOMEM;
 224
 225	out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
 226	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 227	out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
 228	    IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
 229	out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
 230	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 231	out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
 232	    IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
 233
 234	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
 235	if (rc)
 236		goto err1;
 237
 238	qp->icid = (u16)cid;
 239
 240	memset(&init_data, 0, sizeof(init_data));
 241	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 242	init_data.cid = qp->icid;
 243	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 244
 245	rc = qed_sp_init_request(p_hwfn, &p_ent,
 246				 IWARP_RAMROD_CMD_ID_CREATE_QP,
 247				 PROTOCOLID_IWARP, &init_data);
 248	if (rc)
 249		goto err2;
 250
 251	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
 252
 253	SET_FIELD(p_ramrod->flags,
 254		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
 255		  qp->fmr_and_reserved_lkey);
 256
 257	SET_FIELD(p_ramrod->flags,
 258		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
 259
 260	SET_FIELD(p_ramrod->flags,
 261		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
 262		  qp->incoming_rdma_read_en);
 263
 264	SET_FIELD(p_ramrod->flags,
 265		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
 266		  qp->incoming_rdma_write_en);
 267
 268	SET_FIELD(p_ramrod->flags,
 269		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
 270		  qp->incoming_atomic_en);
 271
 272	SET_FIELD(p_ramrod->flags,
 273		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 274
 275	p_ramrod->pd = qp->pd;
 276	p_ramrod->sq_num_pages = qp->sq_num_pages;
 277	p_ramrod->rq_num_pages = qp->rq_num_pages;
 278
 279	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
 280	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
 281	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 282	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
 283
 284	p_ramrod->cq_cid_for_sq =
 285	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 286	p_ramrod->cq_cid_for_rq =
 287	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
 288
 289	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 290
 291	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 292	p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
 293	physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 294	p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
 295
 296	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 297	if (rc)
 298		goto err2;
 299
 300	return rc;
 301
 302err2:
 303	qed_iwarp_cid_cleaned(p_hwfn, cid);
 304err1:
 305	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 306			  IWARP_SHARED_QUEUE_PAGE_SIZE,
 307			  qp->shared_queue, qp->shared_queue_phys_addr);
 308
 309	return rc;
 310}
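
/* Layout note: the single IWARP_SHARED_QUEUE_PAGE_SIZE allocation above is
 * carved up at fixed offsets - the SQ and RQ PBLs live at
 * IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET and
 * IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET respectively - and both the virtual
 * and DMA addresses of those regions are handed back through out_params.
 */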
 311
 312static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 313{
 314	struct iwarp_modify_qp_ramrod_data *p_ramrod;
 315	struct qed_sp_init_data init_data;
 316	struct qed_spq_entry *p_ent;
 317	int rc;
 318
 319	/* Get SPQ entry */
 320	memset(&init_data, 0, sizeof(init_data));
 321	init_data.cid = qp->icid;
 322	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 323	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 324
 325	rc = qed_sp_init_request(p_hwfn, &p_ent,
 326				 IWARP_RAMROD_CMD_ID_MODIFY_QP,
 327				 p_hwfn->p_rdma_info->proto, &init_data);
 328	if (rc)
 329		return rc;
 330
 331	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
 332	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
 333		  0x1);
 334	if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
 335		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
 336	else
 337		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
 338
 339	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 340
 341	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
 342
 343	return rc;
 344}
 345
 346enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
 347{
 348	switch (state) {
 349	case QED_ROCE_QP_STATE_RESET:
 350	case QED_ROCE_QP_STATE_INIT:
 351	case QED_ROCE_QP_STATE_RTR:
 352		return QED_IWARP_QP_STATE_IDLE;
 353	case QED_ROCE_QP_STATE_RTS:
 354		return QED_IWARP_QP_STATE_RTS;
 355	case QED_ROCE_QP_STATE_SQD:
 356		return QED_IWARP_QP_STATE_CLOSING;
 357	case QED_ROCE_QP_STATE_ERR:
 358		return QED_IWARP_QP_STATE_ERROR;
 359	case QED_ROCE_QP_STATE_SQE:
 360		return QED_IWARP_QP_STATE_TERMINATE;
 361	default:
 362		return QED_IWARP_QP_STATE_ERROR;
 363	}
 364}
 365
 366static enum qed_roce_qp_state
 367qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
 368{
 369	switch (state) {
 370	case QED_IWARP_QP_STATE_IDLE:
 371		return QED_ROCE_QP_STATE_INIT;
 372	case QED_IWARP_QP_STATE_RTS:
 373		return QED_ROCE_QP_STATE_RTS;
 374	case QED_IWARP_QP_STATE_TERMINATE:
 375		return QED_ROCE_QP_STATE_SQE;
 376	case QED_IWARP_QP_STATE_CLOSING:
 377		return QED_ROCE_QP_STATE_SQD;
 378	case QED_IWARP_QP_STATE_ERROR:
 379		return QED_ROCE_QP_STATE_ERR;
 380	default:
 381		return QED_ROCE_QP_STATE_ERR;
 382	}
 383}
 384
 385static const char * const iwarp_state_names[] = {
 386	"IDLE",
 387	"RTS",
 388	"TERMINATE",
 389	"CLOSING",
 390	"ERROR",
 391};
 392
 393int
 394qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
 395		    struct qed_rdma_qp *qp,
 396		    enum qed_iwarp_qp_state new_state, bool internal)
 397{
 398	enum qed_iwarp_qp_state prev_iw_state;
 399	bool modify_fw = false;
 400	int rc = 0;
 401
 402	/* modify QP can be called from upper-layer or as a result of async
 403	 * RST/FIN... therefore need to protect
 404	 */
 405	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 406	prev_iw_state = qp->iwarp_state;
 407
 408	if (prev_iw_state == new_state) {
 409		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 410		return 0;
 411	}
 412
 413	switch (prev_iw_state) {
 414	case QED_IWARP_QP_STATE_IDLE:
 415		switch (new_state) {
 416		case QED_IWARP_QP_STATE_RTS:
 417			qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
 418			break;
 419		case QED_IWARP_QP_STATE_ERROR:
 420			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 421			if (!internal)
 422				modify_fw = true;
 423			break;
 424		default:
 425			break;
 426		}
 427		break;
 428	case QED_IWARP_QP_STATE_RTS:
 429		switch (new_state) {
 430		case QED_IWARP_QP_STATE_CLOSING:
 431			if (!internal)
 432				modify_fw = true;
 433
 434			qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
 435			break;
 436		case QED_IWARP_QP_STATE_ERROR:
 437			if (!internal)
 438				modify_fw = true;
 439			qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
 440			break;
 441		default:
 442			break;
 443		}
 444		break;
 445	case QED_IWARP_QP_STATE_ERROR:
 446		switch (new_state) {
 447		case QED_IWARP_QP_STATE_IDLE:
 448
 449			qp->iwarp_state = new_state;
 450			break;
 451		case QED_IWARP_QP_STATE_CLOSING:
  452		/* could happen due to a race... do nothing */
 453			break;
 454		default:
 455			rc = -EINVAL;
 456		}
 457		break;
 458	case QED_IWARP_QP_STATE_TERMINATE:
 459	case QED_IWARP_QP_STATE_CLOSING:
 460		qp->iwarp_state = new_state;
 461		break;
 462	default:
 463		break;
 464	}
 465
 466	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
 467		   qp->icid,
 468		   iwarp_state_names[prev_iw_state],
 469		   iwarp_state_names[qp->iwarp_state],
  470		   internal ? " (internal)" : "");
 471
 472	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
 473
 474	if (modify_fw)
 475		rc = qed_iwarp_modify_fw(p_hwfn, qp);
 476
 477	return rc;
 478}
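
/* Rough summary of the transition handling above:
 *   IDLE    -> RTS             state recorded, no FW ramrod
 *   IDLE    -> ERROR           state recorded, FW modify unless internal
 *   RTS     -> CLOSING/ERROR   state recorded, FW modify unless internal
 *   ERROR   -> IDLE            state recorded only
 *   ERROR   -> CLOSING         ignored (can happen due to a race)
 *   ERROR   -> anything else   rejected with -EINVAL
 *   CLOSING/TERMINATE -> any   state recorded only
 * Any other combination is silently ignored.
 */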
 479
 480int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 481{
 482	struct qed_sp_init_data init_data;
 483	struct qed_spq_entry *p_ent;
 484	int rc;
 485
 486	/* Get SPQ entry */
 487	memset(&init_data, 0, sizeof(init_data));
 488	init_data.cid = qp->icid;
 489	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 490	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 491
 492	rc = qed_sp_init_request(p_hwfn, &p_ent,
 493				 IWARP_RAMROD_CMD_ID_DESTROY_QP,
 494				 p_hwfn->p_rdma_info->proto, &init_data);
 495	if (rc)
 496		return rc;
 497
 498	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 499
 500	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
 501
 502	return rc;
 503}
 504
 505static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
 506				 struct qed_iwarp_ep *ep,
 507				 bool remove_from_active_list)
 508{
 509	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 510			  sizeof(*ep->ep_buffer_virt),
 511			  ep->ep_buffer_virt, ep->ep_buffer_phys);
 512
 513	if (remove_from_active_list) {
 514		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 515		list_del(&ep->list_entry);
 516		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 517	}
 518
 519	if (ep->qp)
 520		ep->qp->ep = NULL;
 521
 522	kfree(ep);
 523}
 524
 525int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 526{
 527	struct qed_iwarp_ep *ep = qp->ep;
 528	int wait_count = 0;
 529	int rc = 0;
 530
 531	if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
 532		rc = qed_iwarp_modify_qp(p_hwfn, qp,
 533					 QED_IWARP_QP_STATE_ERROR, false);
 534		if (rc)
 535			return rc;
 536	}
 537
 538	/* Make sure ep is closed before returning and freeing memory. */
 539	if (ep) {
 540		while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
 541		       wait_count++ < 200)
 542			msleep(100);
 543
 544		if (ep->state != QED_IWARP_EP_CLOSED)
 545			DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
 546				  ep->state);
 547
 548		qed_iwarp_destroy_ep(p_hwfn, ep, false);
 549	}
 550
 551	rc = qed_iwarp_fw_destroy(p_hwfn, qp);
 552
 553	if (qp->shared_queue)
 554		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
 555				  IWARP_SHARED_QUEUE_PAGE_SIZE,
 556				  qp->shared_queue, qp->shared_queue_phys_addr);
 557
 558	return rc;
 559}
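
/* Note on the wait above: the ep is polled up to 200 times at 100 ms
 * intervals (roughly 20 seconds) for it to reach QED_IWARP_EP_CLOSED; on
 * timeout a notice is logged and the teardown continues anyway.
 */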
 560
 561static int
 562qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
 563{
 564	struct qed_iwarp_ep *ep;
 565	int rc;
 566
 567	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 568	if (!ep)
 569		return -ENOMEM;
 570
 571	ep->state = QED_IWARP_EP_INIT;
 572
 573	ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
 574						sizeof(*ep->ep_buffer_virt),
 575						&ep->ep_buffer_phys,
 576						GFP_KERNEL);
 577	if (!ep->ep_buffer_virt) {
 578		rc = -ENOMEM;
 579		goto err;
 580	}
 581
 582	ep->sig = QED_EP_SIG;
 583
 584	*ep_out = ep;
 585
 586	return 0;
 587
 588err:
 589	kfree(ep);
 590	return rc;
 591}
 592
 593static void
 594qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
 595			   struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
 596{
 597	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
 598		   p_tcp_ramrod->tcp.local_mac_addr_lo,
 599		   p_tcp_ramrod->tcp.local_mac_addr_mid,
 600		   p_tcp_ramrod->tcp.local_mac_addr_hi,
 601		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
 602		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
 603		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
 604
 605	if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
 606		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 607			   "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
 608			   p_tcp_ramrod->tcp.local_ip,
 609			   p_tcp_ramrod->tcp.local_port,
 610			   p_tcp_ramrod->tcp.remote_ip,
 611			   p_tcp_ramrod->tcp.remote_port,
 612			   p_tcp_ramrod->tcp.vlan_id);
 613	} else {
 614		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 615			   "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
 616			   p_tcp_ramrod->tcp.local_ip,
 617			   p_tcp_ramrod->tcp.local_port,
 618			   p_tcp_ramrod->tcp.remote_ip,
 619			   p_tcp_ramrod->tcp.remote_port,
 620			   p_tcp_ramrod->tcp.vlan_id);
 621	}
 622
 623	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 624		   "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
 625		   p_tcp_ramrod->tcp.flow_label,
 626		   p_tcp_ramrod->tcp.ttl,
 627		   p_tcp_ramrod->tcp.tos_or_tc,
 628		   p_tcp_ramrod->tcp.mss,
 629		   p_tcp_ramrod->tcp.rcv_wnd_scale,
 630		   p_tcp_ramrod->tcp.connect_mode,
 631		   p_tcp_ramrod->tcp.flags);
 632
 633	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
 634		   p_tcp_ramrod->tcp.syn_ip_payload_length,
 635		   p_tcp_ramrod->tcp.syn_phy_addr_lo,
 636		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
 637}
 638
 639static int
 640qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 641{
 642	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 643	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
 644	struct tcp_offload_params_opt2 *tcp;
 645	struct qed_sp_init_data init_data;
 646	struct qed_spq_entry *p_ent;
 647	dma_addr_t async_output_phys;
 648	dma_addr_t in_pdata_phys;
 649	u16 physical_q;
 650	u8 tcp_flags;
 651	int rc;
 652	int i;
 653
 654	memset(&init_data, 0, sizeof(init_data));
 655	init_data.cid = ep->tcp_cid;
 656	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 657	if (ep->connect_mode == TCP_CONNECT_PASSIVE)
 658		init_data.comp_mode = QED_SPQ_MODE_CB;
 659	else
 660		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 661
 662	rc = qed_sp_init_request(p_hwfn, &p_ent,
 663				 IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
 664				 PROTOCOLID_IWARP, &init_data);
 665	if (rc)
 666		return rc;
 667
 668	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
 669
 670	in_pdata_phys = ep->ep_buffer_phys +
 671			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 672	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
 673		       in_pdata_phys);
 674
 675	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
 676	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 677
 678	async_output_phys = ep->ep_buffer_phys +
 679			    offsetof(struct qed_iwarp_ep_memory, async_output);
 680	DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
 681		       async_output_phys);
 682
 683	p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 684	p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 685
 686	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 687	p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
 688	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
 689	p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
 690	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
 691
 692	tcp = &p_tcp_ramrod->tcp;
 693	qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
 694			    &tcp->remote_mac_addr_mid,
 695			    &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
 696	qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
 697			    &tcp->local_mac_addr_lo, ep->local_mac_addr);
 698
 699	tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 700
 701	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
 702	tcp->flags = 0;
 703	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
 704		  !!(tcp_flags & QED_IWARP_TS_EN));
 705
 706	SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
 707		  !!(tcp_flags & QED_IWARP_DA_EN));
 708
 709	tcp->ip_version = ep->cm_info.ip_version;
 710
 711	for (i = 0; i < 4; i++) {
 712		tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
 713		tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
 714	}
 715
 716	tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
 717	tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
 718	tcp->mss = cpu_to_le16(ep->mss);
 719	tcp->flow_label = 0;
 720	tcp->ttl = 0x40;
 721	tcp->tos_or_tc = 0;
 722
 723	tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
  724	tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
 725	tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
 726	tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
 727	tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
 728
 729	tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
 730	tcp->connect_mode = ep->connect_mode;
 731
 732	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 733		tcp->syn_ip_payload_length =
 734			cpu_to_le16(ep->syn_ip_payload_length);
 735		tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
 736		tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
 737	}
 738
 739	qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
 740
 741	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 742
 743	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 744		   "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
 745
 746	return rc;
 747}
 748
 749static void
 750qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 751{
 752	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 753	struct qed_iwarp_cm_event_params params;
 754	struct mpa_v2_hdr *mpa_v2;
 755	union async_output *async_data;
 756	u16 mpa_ord, mpa_ird;
 757	u8 mpa_hdr_size = 0;
 758	u8 mpa_rev;
 759
 760	async_data = &ep->ep_buffer_virt->async_output;
 761
 762	mpa_rev = async_data->mpa_request.mpa_handshake_mode;
 763	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 764		   "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
 765		   async_data->mpa_request.ulp_data_len,
 766		   mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
 767
 768	if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 769		/* Read ord/ird values from private data buffer */
 770		mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
 771		mpa_hdr_size = sizeof(*mpa_v2);
 772
 773		mpa_ord = ntohs(mpa_v2->ord);
 774		mpa_ird = ntohs(mpa_v2->ird);
 775
  776		/* Temporarily store the incoming ord/ird requested in cm_info;
  777		 * they are replaced with the negotiated values during accept
  778		 */
 779		ep->cm_info.ord = (u8)min_t(u16,
 780					    (mpa_ord & MPA_V2_IRD_ORD_MASK),
 781					    QED_IWARP_ORD_DEFAULT);
 782
 783		ep->cm_info.ird = (u8)min_t(u16,
 784					    (mpa_ird & MPA_V2_IRD_ORD_MASK),
 785					    QED_IWARP_IRD_DEFAULT);
 786
 787		/* Peer2Peer negotiation */
 788		ep->rtr_type = MPA_RTR_TYPE_NONE;
 789		if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
 790			if (mpa_ord & MPA_V2_WRITE_RTR)
 791				ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
 792
 793			if (mpa_ord & MPA_V2_READ_RTR)
 794				ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
 795
 796			if (mpa_ird & MPA_V2_SEND_RTR)
 797				ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
 798
 799			ep->rtr_type &= iwarp_info->rtr_type;
 800
 801			/* if we're left with no match send our capabilities */
 802			if (ep->rtr_type == MPA_RTR_TYPE_NONE)
 803				ep->rtr_type = iwarp_info->rtr_type;
 804		}
 805
 806		ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
 807	} else {
 808		ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
 809		ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
 810		ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
 811	}
 812
 813	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 814		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
 815		   mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
 816		   async_data->mpa_request.ulp_data_len, mpa_hdr_size);
 817
 818	/* Strip mpa v2 hdr from private data before sending to upper layer */
 819	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 820
 821	ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
 822				       mpa_hdr_size;
 823
 824	params.event = QED_IWARP_EVENT_MPA_REQUEST;
 825	params.cm_info = &ep->cm_info;
 826	params.ep_context = ep;
 827	params.status = 0;
 828
 829	ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
 830	ep->event_cb(ep->cb_context, &params);
 831}
 832
 833static int
 834qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 835{
 836	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
 837	struct qed_iwarp_info *iwarp_info;
 838	struct qed_sp_init_data init_data;
 839	dma_addr_t async_output_phys;
 840	struct qed_spq_entry *p_ent;
 841	dma_addr_t out_pdata_phys;
 842	dma_addr_t in_pdata_phys;
 843	struct qed_rdma_qp *qp;
 844	bool reject;
 845	int rc;
 846
 847	if (!ep)
 848		return -EINVAL;
 849
 850	qp = ep->qp;
 851	reject = !qp;
 852
 853	memset(&init_data, 0, sizeof(init_data));
 854	init_data.cid = reject ? ep->tcp_cid : qp->icid;
 855	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 856
 857	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
 858		init_data.comp_mode = QED_SPQ_MODE_CB;
 859	else
 860		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
 861
 862	rc = qed_sp_init_request(p_hwfn, &p_ent,
 863				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
 864				 PROTOCOLID_IWARP, &init_data);
 865	if (rc)
 866		return rc;
 867
 868	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
 869	out_pdata_phys = ep->ep_buffer_phys +
 870			 offsetof(struct qed_iwarp_ep_memory, out_pdata);
 871	DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
 872		       out_pdata_phys);
 873	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
 874	    ep->cm_info.private_data_len;
 875	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 876
 877	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
 878	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
 879
 880	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
 881
 882	in_pdata_phys = ep->ep_buffer_phys +
 883			offsetof(struct qed_iwarp_ep_memory, in_pdata);
 884	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
 885	DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
 886		       in_pdata_phys);
 887	p_mpa_ramrod->incoming_ulp_buffer.len =
 888	    cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
 889	async_output_phys = ep->ep_buffer_phys +
 890			    offsetof(struct qed_iwarp_ep_memory, async_output);
 891	DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
 892		       async_output_phys);
 893	p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
 894	p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
 895
 896	if (!reject) {
 897		DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
 898			       qp->shared_queue_phys_addr);
 899		p_mpa_ramrod->stats_counter_id =
 900		    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
 901	} else {
 902		p_mpa_ramrod->common.reject = 1;
 903	}
 904
 905	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
 906	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
 907	p_mpa_ramrod->mode = ep->mpa_rev;
 908	SET_FIELD(p_mpa_ramrod->rtr_pref,
 909		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
 910
 911	ep->state = QED_IWARP_EP_MPA_OFFLOADED;
 912	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 913	if (!reject)
 914		ep->cid = qp->icid;	/* Now they're migrated. */
 915
 916	DP_VERBOSE(p_hwfn,
 917		   QED_MSG_RDMA,
 918		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
 919		   reject ? 0xffff : qp->icid,
 920		   ep->tcp_cid,
 921		   rc,
 922		   ep->cm_info.ird,
 923		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
 924	return rc;
 925}
 926
 927static void
 928qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 929{
 930	ep->state = QED_IWARP_EP_INIT;
 931	if (ep->qp)
 932		ep->qp->ep = NULL;
 933	ep->qp = NULL;
 934	memset(&ep->cm_info, 0, sizeof(ep->cm_info));
 935
 936	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
  937		/* We don't care about the return code; it's ok if tcp_cid
  938		 * remains invalid - in that case allocation is simply deferred
  939		 */
 940		qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
 941	}
 942	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 943
 944	list_move_tail(&ep->list_entry,
 945		       &p_hwfn->p_rdma_info->iwarp.ep_free_list);
 946
 947	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
 948}
 949
 950static void
 951qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 952{
 953	struct mpa_v2_hdr *mpa_v2_params;
 954	union async_output *async_data;
 955	u16 mpa_ird, mpa_ord;
 956	u8 mpa_data_size = 0;
 957
 958	if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
 959		mpa_v2_params =
 960			(struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
 961		mpa_data_size = sizeof(*mpa_v2_params);
 962		mpa_ird = ntohs(mpa_v2_params->ird);
 963		mpa_ord = ntohs(mpa_v2_params->ord);
 964
 965		ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
 966		ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
 967	}
 968	async_data = &ep->ep_buffer_virt->async_output;
 969
 970	ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
 971	ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
 972				       mpa_data_size;
 973}
 974
 975static void
 976qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 977{
 978	struct qed_iwarp_cm_event_params params;
 979
 980	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
 981		DP_NOTICE(p_hwfn,
 982			  "MPA reply event not expected on passive side!\n");
 983		return;
 984	}
 985
 986	params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
 987
 988	qed_iwarp_parse_private_data(p_hwfn, ep);
 989
 990	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
 991		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
 992		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
 993
 994	params.cm_info = &ep->cm_info;
 995	params.ep_context = ep;
 996	params.status = 0;
 997
 998	ep->mpa_reply_processed = true;
 999
1000	ep->event_cb(ep->cb_context, &params);
1001}
1002
1003#define QED_IWARP_CONNECT_MODE_STRING(ep) \
1004	((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
1005
1006/* Called as a result of the event:
1007 * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
1008 */
1009static void
1010qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
1011		       struct qed_iwarp_ep *ep, u8 fw_return_code)
1012{
1013	struct qed_iwarp_cm_event_params params;
1014
1015	if (ep->connect_mode == TCP_CONNECT_ACTIVE)
1016		params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
1017	else
1018		params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
1019
1020	if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
1021		qed_iwarp_parse_private_data(p_hwfn, ep);
1022
1023	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1024		   "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
1025		   ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
1026
1027	params.cm_info = &ep->cm_info;
1028
1029	params.ep_context = ep;
1030
1031	switch (fw_return_code) {
1032	case RDMA_RETURN_OK:
1033		ep->qp->max_rd_atomic_req = ep->cm_info.ord;
1034		ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
1035		qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
1036		ep->state = QED_IWARP_EP_ESTABLISHED;
1037		params.status = 0;
1038		break;
1039	case IWARP_CONN_ERROR_MPA_TIMEOUT:
1040		DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
1041			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1042		params.status = -EBUSY;
1043		break;
1044	case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
1045		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
1046			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1047		params.status = -ECONNREFUSED;
1048		break;
1049	case IWARP_CONN_ERROR_MPA_RST:
1050		DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
1051			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
1052			  ep->tcp_cid);
1053		params.status = -ECONNRESET;
1054		break;
1055	case IWARP_CONN_ERROR_MPA_FIN:
1056		DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
1057			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1058		params.status = -ECONNREFUSED;
1059		break;
1060	case IWARP_CONN_ERROR_MPA_INSUF_IRD:
1061		DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
1062			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1063		params.status = -ECONNREFUSED;
1064		break;
1065	case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
1066		DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
1067			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1068		params.status = -ECONNREFUSED;
1069		break;
1070	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
1071		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
1072			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1073		params.status = -ECONNREFUSED;
1074		break;
1075	case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
1076		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
1077			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1078		params.status = -ECONNREFUSED;
1079		break;
1080	case IWARP_CONN_ERROR_MPA_TERMINATE:
1081		DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
1082			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
1083		params.status = -ECONNREFUSED;
1084		break;
1085	default:
1086		params.status = -ECONNRESET;
1087		break;
1088	}
1089
1090	if (fw_return_code != RDMA_RETURN_OK)
1091		/* paired with READ_ONCE in destroy_qp */
1092		smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
1093
1094	ep->event_cb(ep->cb_context, &params);
1095
 1096	/* On the passive side, if there is no associated QP (REJECT) we need to
 1097	 * return the ep to the pool; in the regular case a fresh element is
 1098	 * added to the pool in accept instead of returning this one.
 1099	 * In both cases we need to remove the ep from the ep_list.
 1100	 */
1101	if (fw_return_code != RDMA_RETURN_OK) {
1102		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1103		if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
1104		    (!ep->qp)) {	/* Rejected */
1105			qed_iwarp_return_ep(p_hwfn, ep);
1106		} else {
1107			spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1108			list_del(&ep->list_entry);
1109			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1110		}
1111	}
1112}
1113
1114static void
1115qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
1116			     struct qed_iwarp_ep *ep, u8 *mpa_data_size)
1117{
1118	struct mpa_v2_hdr *mpa_v2_params;
1119	u16 mpa_ird, mpa_ord;
1120
1121	*mpa_data_size = 0;
1122	if (MPA_REV2(ep->mpa_rev)) {
1123		mpa_v2_params =
1124		    (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
1125		*mpa_data_size = sizeof(*mpa_v2_params);
1126
1127		mpa_ird = (u16)ep->cm_info.ird;
1128		mpa_ord = (u16)ep->cm_info.ord;
1129
1130		if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
1131			mpa_ird |= MPA_V2_PEER2PEER_MODEL;
1132
1133			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
1134				mpa_ird |= MPA_V2_SEND_RTR;
1135
1136			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
1137				mpa_ord |= MPA_V2_WRITE_RTR;
1138
1139			if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
1140				mpa_ord |= MPA_V2_READ_RTR;
1141		}
1142
1143		mpa_v2_params->ird = htons(mpa_ird);
1144		mpa_v2_params->ord = htons(mpa_ord);
1145
1146		DP_VERBOSE(p_hwfn,
1147			   QED_MSG_RDMA,
1148			   "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
1149			   mpa_v2_params->ird,
1150			   mpa_v2_params->ord,
1151			   *((u32 *)mpa_v2_params),
1152			   mpa_ord & MPA_V2_IRD_ORD_MASK,
1153			   mpa_ird & MPA_V2_IRD_ORD_MASK,
1154			   !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
1155			   !!(mpa_ird & MPA_V2_SEND_RTR),
1156			   !!(mpa_ord & MPA_V2_WRITE_RTR),
1157			   !!(mpa_ord & MPA_V2_READ_RTR));
1158	}
1159}
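
/* Example of the resulting MPA v2 header (sketch, numbers made up): with
 * cm_info.ird = 8, cm_info.ord = 16 and rtr_type = MPA_RTR_TYPE_ZERO_SEND |
 * MPA_RTR_TYPE_ZERO_READ, the code above produces
 *
 *	mpa_v2_params->ird = htons(8  | MPA_V2_PEER2PEER_MODEL | MPA_V2_SEND_RTR);
 *	mpa_v2_params->ord = htons(16 | MPA_V2_READ_RTR);
 *
 * i.e. the low 14 bits (MPA_V2_IRD_ORD_MASK) carry the ird/ord counts and
 * the top bits advertise the peer-to-peer RTR capabilities.
 */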
1160
1161int qed_iwarp_connect(void *rdma_cxt,
1162		      struct qed_iwarp_connect_in *iparams,
1163		      struct qed_iwarp_connect_out *oparams)
1164{
1165	struct qed_hwfn *p_hwfn = rdma_cxt;
1166	struct qed_iwarp_info *iwarp_info;
1167	struct qed_iwarp_ep *ep;
1168	u8 mpa_data_size = 0;
1169	u32 cid;
1170	int rc;
1171
1172	if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
1173	    (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
1174		DP_NOTICE(p_hwfn,
1175			  "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1176			  iparams->qp->icid, iparams->cm_info.ord,
1177			  iparams->cm_info.ird);
1178
1179		return -EINVAL;
1180	}
1181
1182	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1183
1184	/* Allocate ep object */
1185	rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1186	if (rc)
1187		return rc;
1188
1189	rc = qed_iwarp_create_ep(p_hwfn, &ep);
1190	if (rc)
1191		goto err;
1192
1193	ep->tcp_cid = cid;
1194
1195	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1196	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
1197	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1198
1199	ep->qp = iparams->qp;
1200	ep->qp->ep = ep;
1201	ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
1202	ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
1203	memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
1204
1205	ep->cm_info.ord = iparams->cm_info.ord;
1206	ep->cm_info.ird = iparams->cm_info.ird;
1207
1208	ep->rtr_type = iwarp_info->rtr_type;
1209	if (!iwarp_info->peer2peer)
1210		ep->rtr_type = MPA_RTR_TYPE_NONE;
1211
1212	if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
1213		ep->cm_info.ord = 1;
1214
1215	ep->mpa_rev = iwarp_info->mpa_rev;
1216
1217	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1218
1219	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1220	ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
1221				       mpa_data_size;
1222
1223	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1224	       iparams->cm_info.private_data,
1225	       iparams->cm_info.private_data_len);
1226
1227	ep->mss = iparams->mss;
1228	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
1229
1230	ep->event_cb = iparams->event_cb;
1231	ep->cb_context = iparams->cb_context;
1232	ep->connect_mode = TCP_CONNECT_ACTIVE;
1233
1234	oparams->ep_context = ep;
1235
1236	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
1237
1238	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
1239		   iparams->qp->icid, ep->tcp_cid, rc);
1240
1241	if (rc) {
1242		qed_iwarp_destroy_ep(p_hwfn, ep, true);
1243		goto err;
1244	}
1245
1246	return rc;
1247err:
1248	qed_iwarp_cid_cleaned(p_hwfn, cid);
1249
1250	return rc;
1251}
1252
1253static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
1254{
1255	struct qed_iwarp_ep *ep = NULL;
1256	int rc;
1257
1258	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1259
1260	if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1261		DP_ERR(p_hwfn, "Ep list is empty\n");
1262		goto out;
1263	}
1264
1265	ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1266			      struct qed_iwarp_ep, list_entry);
1267
 1268	/* In some cases we could have failed to allocate a tcp cid when the ep
 1269	 * was added from accept / failure... retry now. This is not the common case.
 1270	 */
1271	if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
1272		rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
1273
1274		/* if we fail we could look for another entry with a valid
1275		 * tcp_cid, but since we don't expect to reach this anyway
1276		 * it's not worth the handling
1277		 */
1278		if (rc) {
1279			ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
1280			ep = NULL;
1281			goto out;
1282		}
1283	}
1284
1285	list_del(&ep->list_entry);
1286
1287out:
1288	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1289	return ep;
1290}
1291
1292#define QED_IWARP_MAX_CID_CLEAN_TIME  100
1293#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
1294
 1295/* This function waits for all the bits of a bmap to be cleared. As long as
 1296 * there is progress (i.e. the number of bits left to be cleared decreases)
 1297 * the function keeps waiting.
 1298 */
1299static int
1300qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
1301{
1302	int prev_weight = 0;
1303	int wait_count = 0;
1304	int weight = 0;
1305
1306	weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1307	prev_weight = weight;
1308
1309	while (weight) {
1310		msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
1311
1312		weight = bitmap_weight(bmap->bitmap, bmap->max_count);
1313
1314		if (prev_weight == weight) {
1315			wait_count++;
1316		} else {
1317			prev_weight = weight;
1318			wait_count = 0;
1319		}
1320
1321		if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
1322			DP_NOTICE(p_hwfn,
1323				  "%s bitmap wait timed out (%d cids pending)\n",
1324				  bmap->name, weight);
1325			return -EBUSY;
1326		}
1327	}
1328	return 0;
1329}
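
/* With the values above this polls every QED_IWARP_MAX_CID_CLEAN_TIME ms and
 * gives up with -EBUSY after QED_IWARP_MAX_NO_PROGRESS_CNT + 1 consecutive
 * polls (roughly 600 ms) in which the number of pending cids did not drop.
 */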
1330
1331static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
1332{
1333	int rc;
1334	int i;
1335
1336	rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
1337					    &p_hwfn->p_rdma_info->tcp_cid_map);
1338	if (rc)
1339		return rc;
1340
1341	/* Now free the tcp cids from the main cid map */
1342	for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
1343		qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
1344
1345	/* Now wait for all cids to be completed */
1346	return qed_iwarp_wait_cid_map_cleared(p_hwfn,
1347					      &p_hwfn->p_rdma_info->cid_map);
1348}
1349
1350static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
1351{
1352	struct qed_iwarp_ep *ep;
1353
1354	while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
1355		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1356
1357		ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
1358				      struct qed_iwarp_ep, list_entry);
1359
1360		if (!ep) {
1361			spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1362			break;
1363		}
1364		list_del(&ep->list_entry);
1365
1366		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1367
1368		if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
1369			qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
1370
1371		qed_iwarp_destroy_ep(p_hwfn, ep, false);
1372	}
1373}
1374
1375static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
1376{
1377	struct qed_iwarp_ep *ep;
1378	int rc = 0;
1379	int count;
1380	u32 cid;
1381	int i;
1382
1383	count = init ? QED_IWARP_PREALLOC_CNT : 1;
1384	for (i = 0; i < count; i++) {
1385		rc = qed_iwarp_create_ep(p_hwfn, &ep);
1386		if (rc)
1387			return rc;
1388
1389		/* During initialization we allocate from the main pool,
1390		 * afterwards we allocate only from the tcp_cid.
1391		 */
1392		if (init) {
1393			rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
1394			if (rc)
1395				goto err;
1396			qed_iwarp_set_tcp_cid(p_hwfn, cid);
1397		} else {
 1398			/* We don't care about the return code; it's ok if
 1399			 * tcp_cid remains invalid - in that case allocation
 1400			 * is simply deferred
 1401			 */
1402			qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
1403		}
1404
1405		ep->tcp_cid = cid;
1406
1407		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1408		list_add_tail(&ep->list_entry,
1409			      &p_hwfn->p_rdma_info->iwarp.ep_free_list);
1410		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1411	}
1412
1413	return rc;
1414
1415err:
1416	qed_iwarp_destroy_ep(p_hwfn, ep, false);
1417
1418	return rc;
1419}
1420
1421int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
1422{
1423	int rc;
1424
 1425	/* Allocate a bitmap for tcp cids. These are used by the passive side
 1426	 * to ensure it can allocate a tcp cid during dpc from a pool that was
 1427	 * pre-acquired and doesn't require dynamic allocation of ilt
 1428	 */
1429	rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
1430				 QED_IWARP_PREALLOC_CNT, "TCP_CID");
1431	if (rc) {
1432		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1433			   "Failed to allocate tcp cid, rc = %d\n", rc);
1434		return rc;
1435	}
1436
1437	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
1438	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
1439
1440	rc = qed_iwarp_prealloc_ep(p_hwfn, true);
1441	if (rc)
1442		return rc;
1443
1444	return qed_ooo_alloc(p_hwfn);
1445}
1446
1447void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
1448{
1449	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1450
1451	qed_ooo_free(p_hwfn);
1452	qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
1453	kfree(iwarp_info->mpa_bufs);
1454	kfree(iwarp_info->partial_fpdus);
1455	kfree(iwarp_info->mpa_intermediate_buf);
1456}
1457
1458int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
1459{
1460	struct qed_hwfn *p_hwfn = rdma_cxt;
1461	struct qed_iwarp_ep *ep;
1462	u8 mpa_data_size = 0;
1463	int rc;
1464
1465	ep = iparams->ep_context;
1466	if (!ep) {
 1467		DP_ERR(p_hwfn, "Ep Context received in accept is NULL\n");
1468		return -EINVAL;
1469	}
1470
1471	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
1472		   iparams->qp->icid, ep->tcp_cid);
1473
1474	if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
1475	    (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
1476		DP_VERBOSE(p_hwfn,
1477			   QED_MSG_RDMA,
1478			   "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
1479			   iparams->qp->icid,
 1480			   ep->tcp_cid, iparams->ord, iparams->ird);
1481		return -EINVAL;
1482	}
1483
1484	qed_iwarp_prealloc_ep(p_hwfn, false);
1485
1486	ep->cb_context = iparams->cb_context;
1487	ep->qp = iparams->qp;
1488	ep->qp->ep = ep;
1489
1490	if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
 1491		/* Negotiate ord/ird: if the upper layer requested an ord larger
 1492		 * than the ird advertised by the remote, we need to decrease our ord
 1493		 */
1494		if (iparams->ord > ep->cm_info.ird)
1495			iparams->ord = ep->cm_info.ird;
1496
1497		if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
1498		    (iparams->ird == 0))
1499			iparams->ird = 1;
1500	}
1501
1502	/* Update cm_info ord/ird to be negotiated values */
1503	ep->cm_info.ord = iparams->ord;
1504	ep->cm_info.ird = iparams->ird;
1505
1506	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1507
1508	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1509	ep->cm_info.private_data_len = iparams->private_data_len +
1510				       mpa_data_size;
1511
1512	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1513	       iparams->private_data, iparams->private_data_len);
1514
1515	rc = qed_iwarp_mpa_offload(p_hwfn, ep);
1516	if (rc)
1517		qed_iwarp_modify_qp(p_hwfn,
1518				    iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
1519
1520	return rc;
1521}
1522
1523int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
1524{
1525	struct qed_hwfn *p_hwfn = rdma_cxt;
1526	struct qed_iwarp_ep *ep;
1527	u8 mpa_data_size = 0;
1528
1529	ep = iparams->ep_context;
1530	if (!ep) {
 1531		DP_ERR(p_hwfn, "Ep Context received in reject is NULL\n");
1532		return -EINVAL;
1533	}
1534
1535	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
1536
1537	ep->cb_context = iparams->cb_context;
1538	ep->qp = NULL;
1539
1540	qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
1541
1542	ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
1543	ep->cm_info.private_data_len = iparams->private_data_len +
1544				       mpa_data_size;
1545
1546	memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
1547	       iparams->private_data, iparams->private_data_len);
1548
1549	return qed_iwarp_mpa_offload(p_hwfn, ep);
1550}
1551
1552static void
1553qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
1554			struct qed_iwarp_cm_info *cm_info)
1555{
1556	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
1557		   cm_info->ip_version);
1558
1559	if (cm_info->ip_version == QED_TCP_IPV4)
1560		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1561			   "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
1562			   cm_info->remote_ip, cm_info->remote_port,
1563			   cm_info->local_ip, cm_info->local_port,
1564			   cm_info->vlan);
1565	else
1566		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1567			   "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
1568			   cm_info->remote_ip, cm_info->remote_port,
1569			   cm_info->local_ip, cm_info->local_port,
1570			   cm_info->vlan);
1571
1572	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1573		   "private_data_len = %x ord = %d, ird = %d\n",
1574		   cm_info->private_data_len, cm_info->ord, cm_info->ird);
1575}
1576
1577static int
1578qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
1579		      struct qed_iwarp_ll2_buff *buf, u8 handle)
1580{
1581	int rc;
1582
1583	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
1584				    (u16)buf->buff_size, buf, 1);
1585	if (rc) {
1586		DP_NOTICE(p_hwfn,
1587			  "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
1588			  rc, handle);
1589		dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
1590				  buf->data, buf->data_phys_addr);
1591		kfree(buf);
1592	}
1593
1594	return rc;
1595}
1596
1597static bool
1598qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
1599{
1600	struct qed_iwarp_ep *ep = NULL;
1601	bool found = false;
1602
1603	list_for_each_entry(ep,
1604			    &p_hwfn->p_rdma_info->iwarp.ep_list,
1605			    list_entry) {
1606		if ((ep->cm_info.local_port == cm_info->local_port) &&
1607		    (ep->cm_info.remote_port == cm_info->remote_port) &&
1608		    (ep->cm_info.vlan == cm_info->vlan) &&
1609		    !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
1610			    sizeof(cm_info->local_ip)) &&
1611		    !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
1612			    sizeof(cm_info->remote_ip))) {
1613			found = true;
1614			break;
1615		}
1616	}
1617
1618	if (found) {
1619		DP_NOTICE(p_hwfn,
1620			  "SYN received on active connection - dropping\n");
1621		qed_iwarp_print_cm_info(p_hwfn, cm_info);
1622
1623		return true;
1624	}
1625
1626	return false;
1627}
1628
1629static struct qed_iwarp_listener *
1630qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
1631		       struct qed_iwarp_cm_info *cm_info)
1632{
1633	struct qed_iwarp_listener *listener = NULL;
1634	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
1635	bool found = false;
1636
1637	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1638
1639	list_for_each_entry(listener,
1640			    &p_hwfn->p_rdma_info->iwarp.listen_list,
1641			    list_entry) {
1642		if (listener->port == cm_info->local_port) {
1643			if (!memcmp(listener->ip_addr,
1644				    ip_zero, sizeof(ip_zero))) {
1645				found = true;
1646				break;
1647			}
1648
1649			if (!memcmp(listener->ip_addr,
1650				    cm_info->local_ip,
1651				    sizeof(cm_info->local_ip)) &&
1652			    (listener->vlan == cm_info->vlan)) {
1653				found = true;
1654				break;
1655			}
1656		}
1657	}
1658
1659	if (found) {
1660		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
1661			   listener);
1662		return listener;
1663	}
1664
1665	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
1666	return NULL;
1667}
1668
1669static int
1670qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1671		       struct qed_iwarp_cm_info *cm_info,
1672		       void *buf,
1673		       u8 *remote_mac_addr,
1674		       u8 *local_mac_addr,
1675		       int *payload_len, int *tcp_start_offset)
1676{
1677	struct vlan_ethhdr *vethh;
1678	bool vlan_valid = false;
1679	struct ipv6hdr *ip6h;
1680	struct ethhdr *ethh;
1681	struct tcphdr *tcph;
1682	struct iphdr *iph;
1683	int eth_hlen;
1684	int ip_hlen;
1685	int eth_type;
1686	int i;
1687
1688	ethh = buf;
1689	eth_type = ntohs(ethh->h_proto);
1690	if (eth_type == ETH_P_8021Q) {
1691		vlan_valid = true;
1692		vethh = (struct vlan_ethhdr *)ethh;
1693		cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
1694		eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
1695	}
1696
1697	eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
1698
1699	if (!ether_addr_equal(ethh->h_dest,
1700			      p_hwfn->p_rdma_info->iwarp.mac_addr)) {
1701		DP_VERBOSE(p_hwfn,
1702			   QED_MSG_RDMA,
1703			   "Got unexpected mac %pM instead of %pM\n",
1704			   ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
1705		return -EINVAL;
1706	}
1707
1708	ether_addr_copy(remote_mac_addr, ethh->h_source);
1709	ether_addr_copy(local_mac_addr, ethh->h_dest);
1710
1711	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
1712		   eth_type, ethh->h_source);
1713
1714	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
1715		   eth_hlen, ethh->h_dest);
1716
1717	iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1718
1719	if (eth_type == ETH_P_IP) {
1720		if (iph->protocol != IPPROTO_TCP) {
1721			DP_NOTICE(p_hwfn,
1722				  "Unexpected ip protocol on ll2 %x\n",
1723				  iph->protocol);
1724			return -EINVAL;
1725		}
1726
1727		cm_info->local_ip[0] = ntohl(iph->daddr);
1728		cm_info->remote_ip[0] = ntohl(iph->saddr);
1729		cm_info->ip_version = QED_TCP_IPV4;
1730
1731		ip_hlen = (iph->ihl) * sizeof(u32);
1732		*payload_len = ntohs(iph->tot_len) - ip_hlen;
1733	} else if (eth_type == ETH_P_IPV6) {
1734		ip6h = (struct ipv6hdr *)iph;
1735
1736		if (ip6h->nexthdr != IPPROTO_TCP) {
1737			DP_NOTICE(p_hwfn,
1738				  "Unexpected ip protocol on ll2 %x\n",
 1739				  ip6h->nexthdr);
1740			return -EINVAL;
1741		}
1742
1743		for (i = 0; i < 4; i++) {
1744			cm_info->local_ip[i] =
1745			    ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
1746			cm_info->remote_ip[i] =
1747			    ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
1748		}
1749		cm_info->ip_version = QED_TCP_IPV6;
1750
1751		ip_hlen = sizeof(*ip6h);
1752		*payload_len = ntohs(ip6h->payload_len);
1753	} else {
1754		DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
1755		return -EINVAL;
1756	}
1757
1758	tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
1759
1760	if (!tcph->syn) {
1761		DP_NOTICE(p_hwfn,
1762			  "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
1763			  iph->ihl, tcph->source, tcph->dest);
1764		return -EINVAL;
1765	}
1766
1767	cm_info->local_port = ntohs(tcph->dest);
1768	cm_info->remote_port = ntohs(tcph->source);
1769
1770	qed_iwarp_print_cm_info(p_hwfn, cm_info);
1771
1772	*tcp_start_offset = eth_hlen + ip_hlen;
1773
1774	return 0;
1775}
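
/* The SYN packets handled above are expected to be laid out as
 *
 *	[ ETH header (+ optional 802.1Q tag) ][ IPv4 or IPv6 header ][ TCP SYN ]
 *
 * and *tcp_start_offset ends up as eth_hlen + ip_hlen, i.e. the offset of
 * the TCP header within the ll2 buffer.
 */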
1776
1777static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
1778						      u16 cid)
1779{
1780	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
1781	struct qed_iwarp_fpdu *partial_fpdu;
1782	u32 idx;
1783
1784	idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
1785	if (idx >= iwarp_info->max_num_partial_fpdus) {
1786		DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
1787		       iwarp_info->max_num_partial_fpdus);
1788		return NULL;
1789	}
1790
1791	partial_fpdu = &iwarp_info->partial_fpdus[idx];
1792
1793	return partial_fpdu;
1794}
1795
1796enum qed_iwarp_mpa_pkt_type {
1797	QED_IWARP_MPA_PKT_PACKED,
1798	QED_IWARP_MPA_PKT_PARTIAL,
1799	QED_IWARP_MPA_PKT_UNALIGNED
1800};
1801
1802#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
1803#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
1804#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
1805
1806/* Pad to multiple of 4 */
1807#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
1808#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len)				   \
1809	(QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) +			   \
1810					 QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
1811					 QED_IWARP_MPA_CRC32_DIGEST_SIZE)
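
/* Worked example (illustrative): for an MPA payload length of 0x1d bytes the
 * padded on-wire FPDU is ALIGN(0x1d + QED_IWARP_MPA_FPDU_LENGTH_SIZE, 4) +
 * QED_IWARP_MPA_CRC32_DIGEST_SIZE = ALIGN(0x1f, 4) + 4 = 0x24 bytes.
 */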
1812
1813/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
1814#define QED_IWARP_MAX_BDS_PER_FPDU 3
1815
1816static const char * const pkt_type_str[] = {
1817	"QED_IWARP_MPA_PKT_PACKED",
1818	"QED_IWARP_MPA_PKT_PARTIAL",
1819	"QED_IWARP_MPA_PKT_UNALIGNED"
1820};
1821
1822static int
1823qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1824		      struct qed_iwarp_fpdu *fpdu,
1825		      struct qed_iwarp_ll2_buff *buf);
1826
1827static enum qed_iwarp_mpa_pkt_type
1828qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
1829		       struct qed_iwarp_fpdu *fpdu,
1830		       u16 tcp_payload_len, u8 *mpa_data)
1831{
1832	enum qed_iwarp_mpa_pkt_type pkt_type;
1833	u16 mpa_len;
1834
1835	if (fpdu->incomplete_bytes) {
1836		pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
1837		goto out;
1838	}
1839
 1840	/* Special case of one byte remaining...
 1841	 * the lower byte will be read in the next packet
 1842	 */
1843	if (tcp_payload_len == 1) {
1844		fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
1845		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1846		goto out;
1847	}
1848
1849	mpa_len = ntohs(*((u16 *)(mpa_data)));
1850	fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1851
1852	if (fpdu->fpdu_length <= tcp_payload_len)
1853		pkt_type = QED_IWARP_MPA_PKT_PACKED;
1854	else
1855		pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
1856
1857out:
1858	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1859		   "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
1860		   pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
1861
1862	return pkt_type;
1863}
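
/* Classification summary (sketch): a segment that continues a previously
 * incomplete FPDU is UNALIGNED; a segment carrying only one byte of a new
 * FPDU is PARTIAL (only the upper length byte is known so far); a segment
 * whose payload covers the whole padded FPDU is PACKED; anything shorter is
 * PARTIAL.
 */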
1864
1865static void
1866qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
1867		    struct qed_iwarp_fpdu *fpdu,
1868		    struct unaligned_opaque_data *pkt_data,
1869		    u16 tcp_payload_size, u8 placement_offset)
1870{
1871	fpdu->mpa_buf = buf;
1872	fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
1873	fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
1874	fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
1875	fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
1876
1877	if (tcp_payload_size == 1)
1878		fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
1879	else if (tcp_payload_size < fpdu->fpdu_length)
1880		fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
1881	else
1882		fpdu->incomplete_bytes = 0;	/* complete fpdu */
1883
1884	fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
1885}
1886
1887static int
1888qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
1889		 struct qed_iwarp_fpdu *fpdu,
1890		 struct unaligned_opaque_data *pkt_data,
1891		 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
1892{
1893	u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
1894	int rc;
1895
 1896	/* We need to copy the data from the partial packet stored in the fpdu
 1897	 * to the new buf; for this we also need to move the data currently
 1898	 * placed in the buf. The assumption is that the buffer is big enough
 1899	 * since fpdu_length <= mss. We use an intermediate buffer since we may
 1900	 * need to copy the new data to an overlapping location.
 1901	 */
1902	if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
1903		DP_ERR(p_hwfn,
1904		       "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1905		       buf->buff_size, fpdu->mpa_frag_len,
1906		       tcp_payload_size, fpdu->incomplete_bytes);
1907		return -EINVAL;
1908	}
1909
1910	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1911		   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
1912		   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
1913		   (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1914		   tcp_payload_size);
1915
1916	memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
1917	memcpy(tmp_buf + fpdu->mpa_frag_len,
1918	       (u8 *)(buf->data) + pkt_data->first_mpa_offset,
1919	       tcp_payload_size);
1920
1921	rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
1922	if (rc)
1923		return rc;
1924
 1925	/* If we managed to post the buffer, copy the data to the new buffer;
 1926	 * otherwise this will occur in the next round...
 1927	 */
1928	memcpy((u8 *)(buf->data), tmp_buf,
1929	       fpdu->mpa_frag_len + tcp_payload_size);
1930
1931	fpdu->mpa_buf = buf;
1932	/* fpdu->pkt_hdr remains as is */
1933	/* fpdu->mpa_frag is overridden with new buf */
1934	fpdu->mpa_frag = buf->data_phys_addr;
1935	fpdu->mpa_frag_virt = buf->data;
1936	fpdu->mpa_frag_len += tcp_payload_size;
1937
1938	fpdu->incomplete_bytes -= tcp_payload_size;
1939
1940	DP_VERBOSE(p_hwfn,
1941		   QED_MSG_RDMA,
1942		   "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
1943		   buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
1944		   fpdu->incomplete_bytes);
1945
1946	return 0;
1947}
1948
1949static void
1950qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1951			     struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
1952{
1953	u16 mpa_len;
1954
1955	/* Update incomplete packets if needed */
1956	if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
1957		/* Missing lower byte is now available */
1958		mpa_len = fpdu->fpdu_length | *mpa_data;
1959		fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1960		/* one byte of hdr */
1961		fpdu->mpa_frag_len = 1;
1962		fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1963		DP_VERBOSE(p_hwfn,
1964			   QED_MSG_RDMA,
1965			   "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
1966			   mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
1967	}
1968}
1969
1970#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
1971	(GET_FIELD((_curr_pkt)->flags,	   \
1972		   UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
1973
 1974/* This function is used to recycle a buffer using the ll2 drop option. It
 1975 * uses this mechanism to ensure that all buffers posted to tx before this
 1976 * one were completed. The buffer sent here will be returned as a cookie in
 1977 * the tx completion function and can then be reposted to the rx chain when
 1978 * done. The flow that requires this is the one where an FPDU splits over
 1979 * more than 3 tcp segments: the driver needs to re-post an rx buffer instead
 1980 * of the one received, but it can't simply repost a buffer it copied from,
 1981 * as there is a case where the buffer was originally a packed FPDU and is
 1982 * partially posted to FW. The driver needs to ensure FW is done with it.
 1983 */
1984static int
1985qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
1986		      struct qed_iwarp_fpdu *fpdu,
1987		      struct qed_iwarp_ll2_buff *buf)
1988{
1989	struct qed_ll2_tx_pkt_info tx_pkt;
1990	u8 ll2_handle;
1991	int rc;
1992
1993	memset(&tx_pkt, 0, sizeof(tx_pkt));
1994	tx_pkt.num_of_bds = 1;
1995	tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
1996	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
1997	tx_pkt.first_frag = fpdu->pkt_hdr;
1998	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
1999	buf->piggy_buf = NULL;
2000	tx_pkt.cookie = buf;
2001
2002	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2003
2004	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2005	if (rc)
2006		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2007			   "Can't drop packet rc=%d\n", rc);
2008
2009	DP_VERBOSE(p_hwfn,
2010		   QED_MSG_RDMA,
2011		   "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
2012		   (unsigned long int)tx_pkt.first_frag,
2013		   tx_pkt.first_frag_len, buf, rc);
2014
2015	return rc;
2016}
2017
2018static int
2019qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
2020{
2021	struct qed_ll2_tx_pkt_info tx_pkt;
2022	u8 ll2_handle;
2023	int rc;
2024
2025	memset(&tx_pkt, 0, sizeof(tx_pkt));
2026	tx_pkt.num_of_bds = 1;
2027	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2028	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
2029
2030	tx_pkt.first_frag = fpdu->pkt_hdr;
2031	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2032	tx_pkt.enable_ip_cksum = true;
2033	tx_pkt.enable_l4_cksum = true;
2034	tx_pkt.calc_ip_len = true;
2035	/* vlan overload with enum iwarp_ll2_tx_queues */
2036	tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
2037
2038	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2039
2040	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2041	if (rc)
2042		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2043			   "Can't send right edge rc=%d\n", rc);
2044	DP_VERBOSE(p_hwfn,
2045		   QED_MSG_RDMA,
2046		   "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
2047		   tx_pkt.num_of_bds,
2048		   (unsigned long int)tx_pkt.first_frag,
2049		   tx_pkt.first_frag_len, rc);
2050
2051	return rc;
2052}
2053
2054static int
2055qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
2056		    struct qed_iwarp_fpdu *fpdu,
2057		    struct unaligned_opaque_data *curr_pkt,
2058		    struct qed_iwarp_ll2_buff *buf,
2059		    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
2060{
2061	struct qed_ll2_tx_pkt_info tx_pkt;
2062	u8 ll2_handle;
2063	int rc;
2064
2065	memset(&tx_pkt, 0, sizeof(tx_pkt));
2066
2067	/* An unaligned packet means it's split over two tcp segments. So the
2068	 * complete packet requires 3 bds, one for the header, one for the
2069	 * part of the fpdu carried in the first tcp segment, and the last
2070	 * fragment will point to the remainder of the fpdu. A packed pdu
2071	 * requires only two bds, one for the header and one for the data.
2072	 */
2073	tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
2074	tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2075	tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
2076
2077	/* Send the mpa_buf only with the last fpdu (in case of packed) */
2078	if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
2079	    tcp_payload_size <= fpdu->fpdu_length)
2080		tx_pkt.cookie = fpdu->mpa_buf;
2081
2082	tx_pkt.first_frag = fpdu->pkt_hdr;
2083	tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
2084	tx_pkt.enable_ip_cksum = true;
2085	tx_pkt.enable_l4_cksum = true;
2086	tx_pkt.calc_ip_len = true;
2087	/* vlan overload with enum iwarp_ll2_tx_queues */
2088	tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
2089
2090	/* special case of an unaligned packet that is not packed: both
2091	 * buffers need to be sent as the cookie so both are released when done.
2092	 */
2093	if (tcp_payload_size == fpdu->incomplete_bytes)
2094		fpdu->mpa_buf->piggy_buf = buf;
2095
2096	ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
2097
2098	/* Set first fragment to header */
2099	rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
2100	if (rc)
2101		goto out;
2102
2103	/* Set second fragment to first part of packet */
2104	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
2105					       fpdu->mpa_frag,
2106					       fpdu->mpa_frag_len);
2107	if (rc)
2108		goto out;
2109
2110	if (!fpdu->incomplete_bytes)
2111		goto out;
2112
2113	/* Set third fragment to second part of the packet */
2114	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
2115					       ll2_handle,
2116					       buf->data_phys_addr +
2117					       curr_pkt->first_mpa_offset,
2118					       fpdu->incomplete_bytes);
2119out:
2120	DP_VERBOSE(p_hwfn,
2121		   QED_MSG_RDMA,
2122		   "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
2123		   tx_pkt.num_of_bds,
2124		   tx_pkt.first_frag_len,
2125		   fpdu->mpa_frag_len,
2126		   fpdu->incomplete_bytes, rc);
2127
2128	return rc;
2129}
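
/* BD layout produced by qed_iwarp_send_fpdu() above (illustrative):
 *
 *   packed FPDU (2 BDs):
 *     [ pkt_hdr ][ complete FPDU at mpa_frag ]
 *
 *   unaligned FPDU (3 BDs):
 *     [ pkt_hdr ][ start of FPDU at mpa_frag, from the earlier segment ]
 *               [ remaining incomplete_bytes from the current buffer ]
 */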
2130
2131static void
2132qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
2133		       struct unaligned_opaque_data *curr_pkt,
2134		       u32 opaque_data0, u32 opaque_data1)
2135{
2136	u64 opaque_data;
2137
2138	opaque_data = HILO_64(opaque_data1, opaque_data0);
2139	*curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
2140
2141	curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
2142				     le16_to_cpu(curr_pkt->first_mpa_offset);
2143	curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
2144}
2145
2146/* This function is called when an unaligned or incomplete MPA packet arrives;
2147 * the driver needs to align the packet, perhaps using previously received
2148 * data, and send it down to FW once it is aligned.
2149 */
2150static int
2151qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
2152			  struct qed_iwarp_ll2_mpa_buf *mpa_buf)
2153{
2154	struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
2155	struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
2156	enum qed_iwarp_mpa_pkt_type pkt_type;
2157	struct qed_iwarp_fpdu *fpdu;
2158	int rc = -EINVAL;
2159	u8 *mpa_data;
2160
2161	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
2162	if (!fpdu) { /* something corrupt with cid, post rx back */
2163		DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
2164		       curr_pkt->cid);
2165		goto err;
2166	}
2167
2168	do {
2169		mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
2170
2171		pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
2172						  mpa_buf->tcp_payload_len,
2173						  mpa_data);
2174
2175		switch (pkt_type) {
2176		case QED_IWARP_MPA_PKT_PARTIAL:
2177			qed_iwarp_init_fpdu(buf, fpdu,
2178					    curr_pkt,
2179					    mpa_buf->tcp_payload_len,
2180					    mpa_buf->placement_offset);
2181
2182			if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2183				mpa_buf->tcp_payload_len = 0;
2184				break;
2185			}
2186
2187			rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
2188
2189			if (rc) {
2190				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2191					   "Can't send FPDU:reset rc=%d\n", rc);
2192				memset(fpdu, 0, sizeof(*fpdu));
2193				break;
2194			}
2195
2196			mpa_buf->tcp_payload_len = 0;
2197			break;
2198		case QED_IWARP_MPA_PKT_PACKED:
2199			qed_iwarp_init_fpdu(buf, fpdu,
2200					    curr_pkt,
2201					    mpa_buf->tcp_payload_len,
2202					    mpa_buf->placement_offset);
2203
2204			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2205						 mpa_buf->tcp_payload_len,
2206						 pkt_type);
2207			if (rc) {
2208				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2209					   "Can't send FPDU:reset rc=%d\n", rc);
2210				memset(fpdu, 0, sizeof(*fpdu));
2211				break;
2212			}
2213
2214			mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
2215			curr_pkt->first_mpa_offset += fpdu->fpdu_length;
2216			break;
2217		case QED_IWARP_MPA_PKT_UNALIGNED:
2218			qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
2219			if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
2220				/* special handling of fpdu split over more
2221				 * than 2 segments
2222				 */
2223				if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
2224					rc = qed_iwarp_win_right_edge(p_hwfn,
2225								      fpdu);
2226					/* packet will be re-processed later */
2227					if (rc)
2228						return rc;
2229				}
2230
2231				rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
2232						      buf,
2233						      mpa_buf->tcp_payload_len);
2234				if (rc) /* packet will be re-processed later */
2235					return rc;
2236
2237				mpa_buf->tcp_payload_len = 0;
2238				break;
2239			}
2240
2241			rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
2242						 mpa_buf->tcp_payload_len,
2243						 pkt_type);
2244			if (rc) {
2245				DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2246					   "Can't send FPDU:delay rc=%d\n", rc);
2247				/* don't reset fpdu -> we need it for next
2248				 * classify
2249				 */
2250				break;
2251			}
2252
2253			mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
2254			curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
2255			/* The framed PDU was sent - no more incomplete bytes */
2256			fpdu->incomplete_bytes = 0;
2257			break;
2258		}
2259	} while (mpa_buf->tcp_payload_len && !rc);
2260
2261	return rc;
2262
2263err:
2264	qed_iwarp_ll2_post_rx(p_hwfn,
2265			      buf,
2266			      p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
2267	return rc;
2268}
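
/* Summary of the classification handling above, for reference:
 *   PARTIAL   - not enough data to complete the FPDU; it is cached in the
 *               fpdu struct, and if the window right edge was reached a
 *               header-only packet is sent via qed_iwarp_win_right_edge().
 *   PACKED    - the segment holds one or more complete FPDUs; each is sent
 *               aligned with qed_iwarp_send_fpdu() and the offsets advance
 *               past it.
 *   UNALIGNED - the FPDU continues into following segment(s); either the
 *               remainder is at hand and the FPDU is sent now, or the partial
 *               data is copied aside with qed_iwarp_cp_pkt() until the rest
 *               arrives.
 */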
2269
2270static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
2271{
2272	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2273	struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
2274	int rc;
2275
2276	while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
2277		mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
2278					   struct qed_iwarp_ll2_mpa_buf,
2279					   list_entry);
2280
2281		rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
2282
2283		/* busy means break and continue processing later, don't
2284		 * remove the buf from the pending list.
2285		 */
2286		if (rc == -EBUSY)
2287			break;
2288
2289		list_move_tail(&mpa_buf->list_entry,
2290			       &iwarp_info->mpa_buf_list);
2291
2292		if (rc) {	/* different error, don't continue */
2293			DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
2294			break;
2295		}
2296	}
2297}
2298
2299static void
2300qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2301{
2302	struct qed_iwarp_ll2_mpa_buf *mpa_buf;
2303	struct qed_iwarp_info *iwarp_info;
2304	struct qed_hwfn *p_hwfn = cxt;
2305
2306	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2307	mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
2308				   struct qed_iwarp_ll2_mpa_buf, list_entry);
2309	if (!mpa_buf) {
2310		DP_ERR(p_hwfn, "No free mpa buf\n");
2311		goto err;
2312	}
2313
2314	list_del(&mpa_buf->list_entry);
2315	qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
2316			       data->opaque_data_0, data->opaque_data_1);
2317
2318	DP_VERBOSE(p_hwfn,
2319		   QED_MSG_RDMA,
2320		   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
2321		   data->length.packet_length, mpa_buf->data.first_mpa_offset,
2322		   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
2323		   mpa_buf->data.cid);
2324
2325	mpa_buf->ll2_buf = data->cookie;
2326	mpa_buf->tcp_payload_len = data->length.packet_length -
2327				   mpa_buf->data.first_mpa_offset;
2328	mpa_buf->data.first_mpa_offset += data->u.placement_offset;
2329	mpa_buf->placement_offset = data->u.placement_offset;
2330
2331	list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
2332
2333	qed_iwarp_process_pending_pkts(p_hwfn);
2334	return;
2335err:
2336	qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
2337			      iwarp_info->ll2_mpa_handle);
2338}
2339
2340static void
2341qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
2342{
2343	struct qed_iwarp_ll2_buff *buf = data->cookie;
2344	struct qed_iwarp_listener *listener;
2345	struct qed_ll2_tx_pkt_info tx_pkt;
2346	struct qed_iwarp_cm_info cm_info;
2347	struct qed_hwfn *p_hwfn = cxt;
2348	u8 remote_mac_addr[ETH_ALEN];
2349	u8 local_mac_addr[ETH_ALEN];
2350	struct qed_iwarp_ep *ep;
2351	int tcp_start_offset;
2352	u8 ll2_syn_handle;
2353	int payload_len;
2354	u32 hdr_size;
2355	int rc;
2356
2357	memset(&cm_info, 0, sizeof(cm_info));
2358	ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
2359
2360	/* Check if packet was received with errors... */
2361	if (data->err_flags) {
2362		DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
2363			  data->err_flags);
2364		goto err;
2365	}
2366
2367	if (GET_FIELD(data->parse_flags,
2368		      PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
2369	    GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
2370		DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
2371		goto err;
2372	}
2373
2374	rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
2375				    data->u.placement_offset, remote_mac_addr,
2376				    local_mac_addr, &payload_len,
2377				    &tcp_start_offset);
2378	if (rc)
2379		goto err;
2380
2381	/* Check if there is a listener for this 4-tuple+vlan */
2382	listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
2383	if (!listener) {
2384		DP_VERBOSE(p_hwfn,
2385			   QED_MSG_RDMA,
2386			   "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
2387			   data->parse_flags, data->length.packet_length);
2388
2389		memset(&tx_pkt, 0, sizeof(tx_pkt));
2390		tx_pkt.num_of_bds = 1;
2391		tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
2392		tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
2393		tx_pkt.first_frag = buf->data_phys_addr +
2394				    data->u.placement_offset;
2395		tx_pkt.first_frag_len = data->length.packet_length;
2396		tx_pkt.cookie = buf;
2397
2398		rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
2399					       &tx_pkt, true);
2400
2401		if (rc) {
2402			DP_NOTICE(p_hwfn,
2403				  "Can't post SYN back to chip rc=%d\n", rc);
2404			goto err;
2405		}
2406		return;
2407	}
2408
2409	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
2410	/* There may be an open ep on this connection if this is a syn
2411	 * retransmit... need to make sure there isn't...
2412	 */
2413	if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
2414		goto err;
2415
2416	ep = qed_iwarp_get_free_ep(p_hwfn);
2417	if (!ep)
2418		goto err;
2419
2420	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2421	list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
2422	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2423
2424	ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
2425	ether_addr_copy(ep->local_mac_addr, local_mac_addr);
2426
2427	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
2428
2429	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
2430	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
2431	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
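	/* Worked example (MTU assumed): with max_mtu == 1500 and an IPv4
	 * peer, hdr_size is 40 (20B IP + 20B TCP), so ep->mss becomes 1460;
	 * the min_t() above only kicks in when max_mtu - hdr_size exceeds
	 * QED_IWARP_MAX_FW_MSS, e.g. for jumbo frames.
	 */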
2432
2433	ep->event_cb = listener->event_cb;
2434	ep->cb_context = listener->cb_context;
2435	ep->connect_mode = TCP_CONNECT_PASSIVE;
2436
2437	ep->syn = buf;
2438	ep->syn_ip_payload_length = (u16)payload_len;
2439	ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
2440			   tcp_start_offset;
2441
2442	rc = qed_iwarp_tcp_offload(p_hwfn, ep);
2443	if (rc) {
2444		qed_iwarp_return_ep(p_hwfn, ep);
2445		goto err;
2446	}
2447
2448	return;
2449err:
2450	qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
2451}
2452
2453static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
2454				     void *cookie, dma_addr_t rx_buf_addr,
2455				     bool b_last_packet)
2456{
2457	struct qed_iwarp_ll2_buff *buffer = cookie;
2458	struct qed_hwfn *p_hwfn = cxt;
2459
2460	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2461			  buffer->data, buffer->data_phys_addr);
2462	kfree(buffer);
2463}
2464
2465static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
2466				      void *cookie, dma_addr_t first_frag_addr,
2467				      bool b_last_fragment, bool b_last_packet)
2468{
2469	struct qed_iwarp_ll2_buff *buffer = cookie;
2470	struct qed_iwarp_ll2_buff *piggy;
2471	struct qed_hwfn *p_hwfn = cxt;
2472
2473	if (!buffer)	/* can happen on the unaligned mpa flow: not every tx carries a cookie */
2474		return;
2475
2476	/* this was originally an rx packet, post it back */
2477	piggy = buffer->piggy_buf;
2478	if (piggy) {
2479		buffer->piggy_buf = NULL;
2480		qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
2481	}
2482
2483	qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
2484
2485	if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
2486		qed_iwarp_process_pending_pkts(p_hwfn);
2487
2488	return;
2489}
2490
2491static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
2492				     void *cookie, dma_addr_t first_frag_addr,
2493				     bool b_last_fragment, bool b_last_packet)
2494{
2495	struct qed_iwarp_ll2_buff *buffer = cookie;
2496	struct qed_hwfn *p_hwfn = cxt;
2497
2498	if (!buffer)
2499		return;
2500
2501	if (buffer->piggy_buf) {
2502		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
2503				  buffer->piggy_buf->buff_size,
2504				  buffer->piggy_buf->data,
2505				  buffer->piggy_buf->data_phys_addr);
2506
2507		kfree(buffer->piggy_buf);
2508	}
2509
2510	dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
2511			  buffer->data, buffer->data_phys_addr);
2512
2513	kfree(buffer);
2514}
2515
2516/* The only slowpath for iwarp ll2 is unalign flush. When this completion
2517 * is received, need to reset the FPDU.
2518 */
2519static void
2520qed_iwarp_ll2_slowpath(void *cxt,
2521		       u8 connection_handle,
2522		       u32 opaque_data_0, u32 opaque_data_1)
2523{
2524	struct unaligned_opaque_data unalign_data;
2525	struct qed_hwfn *p_hwfn = cxt;
2526	struct qed_iwarp_fpdu *fpdu;
2527
2528	qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
2529			       opaque_data_0, opaque_data_1);
2530
2531	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
2532		   unalign_data.cid);
2533
2534	fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
2535	if (fpdu)
2536		memset(fpdu, 0, sizeof(*fpdu));
2537}
2538
2539static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
2540{
2541	struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2542	int rc = 0;
2543
2544	if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
2545		rc = qed_ll2_terminate_connection(p_hwfn,
2546						  iwarp_info->ll2_syn_handle);
2547		if (rc)
2548			DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
2549
2550		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2551		iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2552	}
2553
2554	if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
2555		rc = qed_ll2_terminate_connection(p_hwfn,
2556						  iwarp_info->ll2_ooo_handle);
2557		if (rc)
2558			DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
2559
2560		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2561		iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2562	}
2563
2564	if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
2565		rc = qed_ll2_terminate_connection(p_hwfn,
2566						  iwarp_info->ll2_mpa_handle);
2567		if (rc)
2568			DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
2569
2570		qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2571		iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2572	}
2573
2574	qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
2575				  p_hwfn->p_rdma_info->iwarp.mac_addr);
2576
2577	return rc;
2578}
2579
2580static int
2581qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
2582			    int num_rx_bufs, int buff_size, u8 ll2_handle)
2583{
2584	struct qed_iwarp_ll2_buff *buffer;
2585	int rc = 0;
2586	int i;
2587
2588	for (i = 0; i < num_rx_bufs; i++) {
2589		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
2590		if (!buffer) {
2591			rc = -ENOMEM;
2592			break;
2593		}
2594
2595		buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
2596						  buff_size,
2597						  &buffer->data_phys_addr,
2598						  GFP_KERNEL);
2599		if (!buffer->data) {
2600			kfree(buffer);
2601			rc = -ENOMEM;
2602			break;
2603		}
2604
2605		buffer->buff_size = buff_size;
2606		rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
2607		if (rc)
2608			/* buffers will be deallocated by qed_ll2 */
2609			break;
2610	}
2611	return rc;
2612}
2613
2614#define QED_IWARP_MAX_BUF_SIZE(mtu)				     \
2615	ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
2616		ETH_CACHE_LINE_SIZE)
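
/* Worked example for the macro above (constant values assumed): with
 * mtu == 1500, ETH_HLEN == 14, VLAN_HLEN == 4 and ETH_CACHE_LINE_SIZE == 64,
 * the expression is ALIGN(1500 + 14 + 8 + 2 + 64, 64) = ALIGN(1588, 64),
 * i.e. each rx buffer is 1600 bytes.
 */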
2617
2618static int
2619qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
2620		    struct qed_rdma_start_in_params *params,
2621		    u32 rcv_wnd_size)
2622{
2623	struct qed_iwarp_info *iwarp_info;
2624	struct qed_ll2_acquire_data data;
2625	struct qed_ll2_cbs cbs;
2626	u32 buff_size;
2627	u16 n_ooo_bufs;
2628	int rc = 0;
2629	int i;
2630
2631	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2632	iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
2633	iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
2634	iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
2635
2636	iwarp_info->max_mtu = params->max_mtu;
2637
2638	ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
2639
2640	rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2641	if (rc)
2642		return rc;
2643
2644	/* Start SYN connection */
2645	cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
2646	cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
2647	cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
2648	cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
2649	cbs.slowpath_cb = NULL;
2650	cbs.cookie = p_hwfn;
2651
2652	memset(&data, 0, sizeof(data));
2653	data.input.conn_type = QED_LL2_TYPE_IWARP;
2654	data.input.mtu = params->max_mtu;
2655	data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
2656	data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
2657	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2658	data.input.tx_tc = PKT_LB_TC;
2659	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2660	data.p_connection_handle = &iwarp_info->ll2_syn_handle;
2661	data.cbs = &cbs;
2662
2663	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2664	if (rc) {
2665		DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
2666		qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
2667		return rc;
2668	}
2669
2670	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
2671	if (rc) {
2672		DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
2673		goto err;
2674	}
2675
2676	buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
2677	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2678					 QED_IWARP_LL2_SYN_RX_SIZE,
2679					 buff_size,
2680					 iwarp_info->ll2_syn_handle);
2681	if (rc)
2682		goto err;
2683
2684	/* Start OOO connection */
2685	data.input.conn_type = QED_LL2_TYPE_OOO;
2686	data.input.mtu = params->max_mtu;
2687
2688	n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
2689		     iwarp_info->max_mtu;
2690	n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
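	/* Worked example (values assumed): with QED_IWARP_MAX_OOO == 16, a
	 * 256 KB receive window and a 1500-byte max_mtu, this gives
	 * (16 * 262144) / 1500 = 2796 OOO buffers before the clamp to
	 * QED_IWARP_LL2_OOO_MAX_RX_SIZE.
	 */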
2691
2692	data.input.rx_num_desc = n_ooo_bufs;
2693	data.input.rx_num_ooo_buffers = n_ooo_bufs;
2694
2695	data.input.tx_max_bds_per_packet = 1;	/* will never be fragmented */
2696	data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
2697	data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
2698
2699	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2700	if (rc)
2701		goto err;
2702
2703	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
2704	if (rc)
2705		goto err;
2706
2707	/* Start Unaligned MPA connection */
2708	cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
2709	cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
2710
2711	memset(&data, 0, sizeof(data));
2712	data.input.conn_type = QED_LL2_TYPE_IWARP;
2713	data.input.mtu = params->max_mtu;
2714	/* FW requires that once a packet arrives OOO, it must have at
2715	 * least 2 rx buffers available on the unaligned connection
2716	 * for handling the case that it is a partial fpdu.
2717	 */
2718	data.input.rx_num_desc = n_ooo_bufs * 2;
2719	data.input.tx_num_desc = data.input.rx_num_desc;
2720	data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
2721	data.input.tx_tc = PKT_LB_TC;
2722	data.input.tx_dest = QED_LL2_TX_DEST_LB;
2723	data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
2724	data.input.secondary_queue = true;
2725	data.cbs = &cbs;
2726
2727	rc = qed_ll2_acquire_connection(p_hwfn, &data);
2728	if (rc)
2729		goto err;
2730
2731	rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
2732	if (rc)
2733		goto err;
2734	
2735	rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
2736					 data.input.rx_num_desc,
2737					 buff_size,
2738					 iwarp_info->ll2_mpa_handle);
2739	if (rc)
2740		goto err;
2741
2742	iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
2743					    sizeof(*iwarp_info->partial_fpdus),
2744					    GFP_KERNEL);
2745	if (!iwarp_info->partial_fpdus)
2746		goto err;
2747
2748	iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
2749
2750	iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
2751	if (!iwarp_info->mpa_intermediate_buf)
2752		goto err;
2753
2754	/* The mpa_bufs array serves for pending RX packets received on the
2755	 * mpa ll2 that don't have room on the tx ring and require later
2756	 * processing. We can't fail on allocation of such a struct at runtime,
2757	 * so we allocate enough entries up front to cover all rx packets.
2758	 */
2759	iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
2760				       sizeof(*iwarp_info->mpa_bufs),
2761				       GFP_KERNEL);
2762	if (!iwarp_info->mpa_bufs)
2763		goto err;
2764
2765	INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
2766	INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
2767	for (i = 0; i < data.input.rx_num_desc; i++)
2768		list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
2769			      &iwarp_info->mpa_buf_list);
2770	return rc;
2771err:
2772	qed_iwarp_ll2_stop(p_hwfn);
2773
2774	return rc;
2775}
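
/* The three LL2 connections set up by qed_iwarp_ll2_start() above, in short:
 *   ll2_syn_handle - receives TCP SYNs for passive connects; in this file its
 *                    tx side is only used to bounce SYNs no listener claims.
 *   ll2_ooo_handle - provides FW with buffers for out-of-order TCP segments.
 *   ll2_mpa_handle - receives unaligned MPA traffic; re-aligned FPDUs are
 *                    sent back to FW through its loopback tx queues.
 */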
2776
2777static struct {
2778	u32 two_ports;
2779	u32 four_ports;
2780} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
2781	{QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
2782	{QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
2783};
2784
2785int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
2786		    struct qed_rdma_start_in_params *params)
2787{
2788	struct qed_dev *cdev = p_hwfn->cdev;
2789	struct qed_iwarp_info *iwarp_info;
2790	enum chip_ids chip_id;
2791	u32 rcv_wnd_size;
2792
2793	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
2794
2795	iwarp_info->tcp_flags = QED_IWARP_TS_EN;
2796
2797	chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
2798	rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
2799		qed_iwarp_rcv_wnd_size[chip_id].four_ports :
2800		qed_iwarp_rcv_wnd_size[chip_id].two_ports;
2801
2802	/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
2803	iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
2804	    ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
2805	iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
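	/* Example (default window assumed): say the per-chip default window
	 * is 1 MB; with QED_IWARP_RCV_WND_SIZE_MIN == 0xffff (ilog2 == 15)
	 * this gives rcv_wnd_scale = 20 - 15 = 5 and an advertised
	 * rcv_wnd_size of 1 MB >> 5 = 32 KB, paired with a TCP window-scale
	 * option of 5.
	 */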
2806	iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
2807	iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
2808
2809	iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
2810
2811	iwarp_info->rtr_type =  MPA_RTR_TYPE_ZERO_SEND |
2812				MPA_RTR_TYPE_ZERO_WRITE |
2813				MPA_RTR_TYPE_ZERO_READ;
2814
2815	spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
2816	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
2817	INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
2818
2819	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
2820				  qed_iwarp_async_event);
2821	qed_ooo_setup(p_hwfn);
2822
2823	return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
2824}
2825
2826int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
2827{
2828	int rc;
2829
2830	qed_iwarp_free_prealloc_ep(p_hwfn);
2831	rc = qed_iwarp_wait_for_all_cids(p_hwfn);
2832	if (rc)
2833		return rc;
2834
2835	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
2836
2837	return qed_iwarp_ll2_stop(p_hwfn);
2838}
2839
2840static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
2841				  struct qed_iwarp_ep *ep,
2842				  u8 fw_return_code)
2843{
2844	struct qed_iwarp_cm_event_params params;
2845
2846	qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
2847
2848	params.event = QED_IWARP_EVENT_CLOSE;
2849	params.ep_context = ep;
2850	params.cm_info = &ep->cm_info;
2851	params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
2852			 0 : -ECONNRESET;
2853
2854	/* paired with READ_ONCE in destroy_qp */
2855	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2856
2857	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2858	list_del(&ep->list_entry);
2859	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2860
2861	ep->event_cb(ep->cb_context, &params);
2862}
2863
2864static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
2865					 struct qed_iwarp_ep *ep,
2866					 int fw_ret_code)
2867{
2868	struct qed_iwarp_cm_event_params params;
2869	bool event_cb = false;
2870
2871	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
2872		   ep->cid, fw_ret_code);
2873
2874	switch (fw_ret_code) {
2875	case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
2876		params.status = 0;
2877		params.event = QED_IWARP_EVENT_DISCONNECT;
2878		event_cb = true;
2879		break;
2880	case IWARP_EXCEPTION_DETECTED_LLP_RESET:
2881		params.status = -ECONNRESET;
2882		params.event = QED_IWARP_EVENT_DISCONNECT;
2883		event_cb = true;
2884		break;
2885	case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
2886		params.event = QED_IWARP_EVENT_RQ_EMPTY;
2887		event_cb = true;
2888		break;
2889	case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
2890		params.event = QED_IWARP_EVENT_IRQ_FULL;
2891		event_cb = true;
2892		break;
2893	case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
2894		params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
2895		event_cb = true;
2896		break;
2897	case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
2898		params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
2899		event_cb = true;
2900		break;
2901	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
2902		params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
2903		event_cb = true;
2904		break;
2905	case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
2906		params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
2907		event_cb = true;
2908		break;
2909	case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
2910		params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
2911		event_cb = true;
2912		break;
2913	case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
2914		params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
2915		event_cb = true;
2916		break;
2917	case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
2918		params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
2919		event_cb = true;
2920		break;
2921	default:
2922		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2923			   "Unhandled exception received...fw_ret_code=%d\n",
2924			   fw_ret_code);
2925		break;
2926	}
2927
2928	if (event_cb) {
2929		params.ep_context = ep;
2930		params.cm_info = &ep->cm_info;
2931		ep->event_cb(ep->cb_context, &params);
2932	}
2933}
2934
2935static void
2936qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
2937				   struct qed_iwarp_ep *ep, u8 fw_return_code)
2938{
2939	struct qed_iwarp_cm_event_params params;
2940
2941	memset(&params, 0, sizeof(params));
2942	params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
2943	params.ep_context = ep;
2944	params.cm_info = &ep->cm_info;
2945	/* paired with READ_ONCE in destroy_qp */
2946	smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
2947
2948	switch (fw_return_code) {
2949	case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
2950		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2951			   "%s(0x%x) TCP connect got invalid packet\n",
2952			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2953		params.status = -ECONNRESET;
2954		break;
2955	case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
2956		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2957			   "%s(0x%x) TCP Connection Reset\n",
2958			   QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2959		params.status = -ECONNRESET;
2960		break;
2961	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
2962		DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
2963			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2964		params.status = -EBUSY;
2965		break;
2966	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
2967		DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
2968			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2969		params.status = -ECONNREFUSED;
2970		break;
2971	case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
2972		DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
2973			  QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
2974		params.status = -ECONNRESET;
2975		break;
2976	default:
2977		DP_ERR(p_hwfn,
2978		       "%s(0x%x) Unexpected return code tcp connect: %d\n",
2979		       QED_IWARP_CONNECT_MODE_STRING(ep),
2980		       ep->tcp_cid, fw_return_code);
2981		params.status = -ECONNRESET;
2982		break;
2983	}
2984
2985	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
2986		ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
2987		qed_iwarp_return_ep(p_hwfn, ep);
2988	} else {
2989		ep->event_cb(ep->cb_context, &params);
2990		spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2991		list_del(&ep->list_entry);
2992		spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
2993	}
2994}
2995
2996static void
2997qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
2998			   struct qed_iwarp_ep *ep, u8 fw_return_code)
2999{
3000	u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
3001
3002	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
3003		/* Done with the SYN packet, post back to ll2 rx */
3004		qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
3005
3006		ep->syn = NULL;
3007
3008		/* If connect failed - upper layer doesn't know about it */
3009		if (fw_return_code == RDMA_RETURN_OK)
3010			qed_iwarp_mpa_received(p_hwfn, ep);
3011		else
3012			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3013							   fw_return_code);
3014	} else {
3015		if (fw_return_code == RDMA_RETURN_OK)
3016			qed_iwarp_mpa_offload(p_hwfn, ep);
3017		else
3018			qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
3019							   fw_return_code);
3020	}
3021}
3022
3023static inline bool
3024qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
3025{
3026	if (!ep || (ep->sig != QED_EP_SIG)) {
3027		DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
3028		return false;
3029	}
3030
3031	return true;
3032}
3033
3034static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
3035				 u8 fw_event_code, u16 echo,
3036				 union event_ring_data *data,
3037				 u8 fw_return_code)
3038{
3039	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
3040	struct regpair *fw_handle = &data->rdma_data.async_handle;
3041	struct qed_iwarp_ep *ep = NULL;
3042	u16 srq_offset;
3043	u16 srq_id;
3044	u16 cid;
3045
3046	ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
3047						       fw_handle->lo);
3048
3049	switch (fw_event_code) {
3050	case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
3051		/* Async completion after TCP 3-way handshake */
3052		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3053			return -EINVAL;
3054		DP_VERBOSE(p_hwfn,
3055			   QED_MSG_RDMA,
3056			   "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
3057			   ep->tcp_cid, fw_return_code);
3058		qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
3059		break;
3060	case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
3061		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3062			return -EINVAL;
3063		DP_VERBOSE(p_hwfn,
3064			   QED_MSG_RDMA,
3065			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
3066			   ep->cid, fw_return_code);
3067		qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
3068		break;
3069	case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
3070		/* Async completion for Close Connection ramrod */
3071		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3072			return -EINVAL;
3073		DP_VERBOSE(p_hwfn,
3074			   QED_MSG_RDMA,
3075			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
3076			   ep->cid, fw_return_code);
3077		qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
3078		break;
3079	case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
3080		/* Async event for active side only */
3081		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3082			return -EINVAL;
3083		DP_VERBOSE(p_hwfn,
3084			   QED_MSG_RDMA,
3085			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
3086			   ep->cid, fw_return_code);
3087		qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
3088		break;
3089	case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
3090		if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
3091			return -EINVAL;
3092		DP_VERBOSE(p_hwfn,
3093			   QED_MSG_RDMA,
3094			   "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
3095			   ep->cid, fw_return_code);
3096		qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
3097		break;
3098	case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
3099		cid = (u16)le32_to_cpu(fw_handle->lo);
3100		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
3101			   "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
3102		qed_iwarp_cid_cleaned(p_hwfn, cid);
3103
3104		break;
3105	case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
3106		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
3107		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3108		/* FW assigns a value that fits in a u16 */
3109		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3110		events.affiliated_event(events.context,
3111					QED_IWARP_EVENT_SRQ_EMPTY,
3112					&srq_id);
3113		break;
3114	case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
3115		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
3116		srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
3117		/* FW assigns a value that fits in a u16 */
3118		srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
3119		events.affiliated_event(events.context,
3120					QED_IWARP_EVENT_SRQ_LIMIT,
3121					&srq_id);
3122		break;
3123	case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
3124		DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
3125
3126		p_hwfn->p_rdma_info->events.affiliated_event(
3127			p_hwfn->p_rdma_info->events.context,
3128			QED_IWARP_EVENT_CQ_OVERFLOW,
3129			(void *)fw_handle);
3130		break;
3131	default:
3132		DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
3133		       fw_event_code);
3134		return -EINVAL;
3135	}
3136	return 0;
3137}
3138
3139int
3140qed_iwarp_create_listen(void *rdma_cxt,
3141			struct qed_iwarp_listen_in *iparams,
3142			struct qed_iwarp_listen_out *oparams)
3143{
3144	struct qed_hwfn *p_hwfn = rdma_cxt;
3145	struct qed_iwarp_listener *listener;
3146
3147	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
3148	if (!listener)
3149		return -ENOMEM;
3150
3151	listener->ip_version = iparams->ip_version;
3152	memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
3153	listener->port = iparams->port;
3154	listener->vlan = iparams->vlan;
3155
3156	listener->event_cb = iparams->event_cb;
3157	listener->cb_context = iparams->cb_context;
3158	listener->max_backlog = iparams->max_backlog;
3159	oparams->handle = listener;
3160
3161	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3162	list_add_tail(&listener->list_entry,
3163		      &p_hwfn->p_rdma_info->iwarp.listen_list);
3164	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3165
3166	DP_VERBOSE(p_hwfn,
3167		   QED_MSG_RDMA,
3168		   "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
3169		   listener->event_cb,
3170		   listener,
3171		   listener->ip_addr[0],
3172		   listener->ip_addr[1],
3173		   listener->ip_addr[2],
3174		   listener->ip_addr[3], listener->port, listener->vlan);
3175
3176	return 0;
3177}
3178
3179int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
3180{
3181	struct qed_iwarp_listener *listener = handle;
3182	struct qed_hwfn *p_hwfn = rdma_cxt;
3183
3184	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
3185
3186	spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3187	list_del(&listener->list_entry);
3188	spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
3189
3190	kfree(listener);
3191
3192	return 0;
3193}
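
/* Illustrative caller sketch for the two listener entry points above. This is
 * not code from the upper-layer driver; the callback signature and the
 * wildcard-address convention are assumptions made for the example.
 */
#if 0	/* example only, not compiled */
static int example_cm_event_cb(void *context,
			       struct qed_iwarp_cm_event_params *params);

static int example_iwarp_listen(struct qed_hwfn *p_hwfn, u16 port)
{
	struct qed_iwarp_listen_out out = {};
	struct qed_iwarp_listen_in in = {};
	int rc;

	in.ip_version = QED_TCP_IPV4;
	/* listen on any local address: ip_addr stays all-zero */
	in.port = port;
	in.vlan = 0;
	in.event_cb = example_cm_event_cb;
	in.cb_context = p_hwfn;
	in.max_backlog = 8;

	rc = qed_iwarp_create_listen(p_hwfn, &in, &out);
	if (rc)
		return rc;

	/* ... connection requests now arrive via example_cm_event_cb() ... */

	return qed_iwarp_destroy_listen(p_hwfn, out.handle);
}
#endif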
3194
3195int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
3196{
3197	struct qed_hwfn *p_hwfn = rdma_cxt;
3198	struct qed_sp_init_data init_data;
3199	struct qed_spq_entry *p_ent;
3200	struct qed_iwarp_ep *ep;
3201	struct qed_rdma_qp *qp;
3202	int rc;
3203
3204	ep = iparams->ep_context;
3205	if (!ep) {
3206		DP_ERR(p_hwfn, "Ep Context received in send_rtr is NULL\n");
3207		return -EINVAL;
3208	}
3209
3210	qp = ep->qp;
3211
3212	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
3213		   qp->icid, ep->tcp_cid);
3214
3215	memset(&init_data, 0, sizeof(init_data));
3216	init_data.cid = qp->icid;
3217	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
3218	init_data.comp_mode = QED_SPQ_MODE_CB;
3219
3220	rc = qed_sp_init_request(p_hwfn, &p_ent,
3221				 IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
3222				 PROTOCOLID_IWARP, &init_data);
3223
3224	if (rc)
3225		return rc;
3226
3227	rc = qed_spq_post(p_hwfn, p_ent, NULL);
3228
3229	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
3230
3231	return rc;
3232}
3233
3234void
3235qed_iwarp_query_qp(struct qed_rdma_qp *qp,
3236		   struct qed_rdma_query_qp_out_params *out_params)
3237{
3238	out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
3239}