/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

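/*
 * States of the responder state machine. rxe_responder() below walks
 * these states for every inbound request packet; the RESPST_ERR_*
 * values feed the NAK and completion handling at the bottom of that
 * function.
 */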
enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};

static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_dev *rxe, struct rxe_qp *qp,
			struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}

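/*
 * Fetch the next request packet. The packet is only peeked here; it
 * stays on qp->req_pkts until cleanup() dequeues and frees it. If a
 * read or atomic response is still in progress (qp->resp.res is set),
 * go straight back to RESPST_READ_REPLY to continue it.
 */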
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		skb = skb_dequeue(&qp->req_pkts);
		if (skb) {
			/* drain request packet queue */
			rxe_drop_ref(qp);
			kfree_skb(skb);
			return RESPST_GET_REQ;
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}

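/*
 * Compare the packet PSN against the expected PSN. For RC, a packet
 * ahead of the expected PSN gets one out-of-sequence NAK (further ones
 * are silently cleaned up) and a packet behind it is handled as a
 * duplicate. For UC, anything unexpected drops the current message
 * until the next first/only packet arrives.
 */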
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}

static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}

static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}

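/*
 * Pull a receive WQE from the shared receive queue. The WQE is copied
 * under the consumer lock so the responder can keep using it after the
 * slot is returned to the SRQ, and an IB_EVENT_SRQ_LIMIT_REACHED event
 * is generated once the queue drops below the armed limit.
 */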
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}

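/*
 * Make sure a resource is available for this request: a responder
 * read/atomic slot for RDMA read and atomic operations, or a receive
 * WQE (from the RQ or SRQ) for operations that consume one. Running
 * out of receive WQEs is reported as an RNR NAK.
 */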
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* It is the requester's job not to send too many
		 * read/atomic operations; we just recycle the
		 * responder resource queue.
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}

static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}

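/*
 * Validate the rkey, base address and length carried in the RETH or
 * ATMETH header against the memory region and the access rights it was
 * registered with, and sanity-check the payload length against the
 * residual transfer length and the MTU. On success the MR reference is
 * stashed in qp->resp.mr for the data-copy phase.
 */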
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err1;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err2;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}

			qp->resp.resid = mtu;
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* A bad pad count is not exactly a length
				 * error, but no other error code fits better.
				 */
				state = RESPST_ERR_LENGTH;
				goto err2;
			}
		}
	}

	WARN_ON(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err2:
	rxe_drop_ref(mem);
err1:
	return state;
}

static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	err = copy_data(rxe, qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}

static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

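/*
 * Execute an atomic operation on the target buffer while holding
 * atomic_ops_lock, saving the original value for the ATOMIC ACK.
 * Roughly:
 *
 *	orig = *vaddr;
 *	if (compare and swap)
 *		*vaddr = (orig == compare) ? swap : orig;
 *	else
 *		*vaddr = orig + add;	(fetch and add)
 *
 * The target address must be naturally aligned to 8 bytes.
 */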
static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}

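/*
 * Build an ACK, atomic ACK or read response packet. The BTH is copied
 * from the request so routing fields are preserved, then the opcode,
 * PSN, pad count and (if present) AETH/ATMACK fields are filled in.
 * If crcp is NULL the ICRC is finalized here; otherwise the partial
 * CRC is handed back so the caller can fold in the payload.
 */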
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe->ifc_ops->init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe->ifc_ops->prepare(rxe, ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}

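/*
 * A read response is emitted one MTU-sized packet per pass through the
 * state machine, using the ONLY/FIRST/MIDDLE/LAST response opcodes.
 * For example (hypothetical numbers), a 10000-byte read with a 4096-byte
 * MTU spans three response packets and three consecutive PSNs, which is
 * how res->last_psn is derived from reth_len() below.
 */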
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process this request.
		 * Claim a responder resource for it.
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		kfree_skb(skb);
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}

static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}

/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else
		/* Unreachable */
		WARN_ON(1);

	/* We successfully processed this new request. */
	qp->resp.msn++;

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK)
		return RESPST_COMPLETE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

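/*
 * Post a completion for the consumed receive WQE. Only the fields up to
 * status are needed for flush errors; for successful completions the
 * immediate data, invalidate rkey and network header type are filled in,
 * with separate layouts for user-space (ib_uverbs_wc) and kernel (ib_wc)
 * consumers of the CQ.
 */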
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;

	if (unlikely(!wqe))
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	wc->wr_id		= wqe->wr_id;
	wc->status		= qp->resp.status;
	wc->qp			= &qp->ibqp;

	/* fields after status are not required for errors */
	if (wc->status == IB_WC_SUCCESS) {
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data =
					(__u32 __force)immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}

static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
	if (err) {
		pr_err_ratelimited("Failed sending ack\n");
		kfree_skb(skb);
	}

err1:
	return err;
}

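/*
 * Send the ATOMIC ACK and cache it in a responder resource. The skb is
 * cloned so one copy can be transmitted now while the original is kept
 * in res->atomic.skb for retransmission if the atomic request is seen
 * again (see duplicate_request()).
 */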
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct sk_buff *skb_copy;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	skb_copy = skb_clone(skb, GFP_ATOMIC);
	if (skb_copy)
		rxe_add_ref(qp); /* for the new SKB */
	else {
		pr_warn("Could not clone atomic response\n");
		rc = -ENOMEM;
		goto out;
	}

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));

	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn  = ack_pkt.psn;
	res->cur_psn   = ack_pkt.psn;

	rc = rxe_xmit_packet(rxe, qp, &ack_pkt, skb_copy);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
		kfree_skb(skb_copy);
	}

out:
	return rc;
}

static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}

static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}

static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}

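/*
 * Handle a request whose PSN is behind the expected PSN. Duplicate
 * sends and writes are simply acknowledged again; duplicate reads are
 * replayed from the saved responder resource, provided they stay within
 * the bounds of the original request; duplicate atomics are answered by
 * resending the cached atomic ACK rather than re-executing the operation.
 */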
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			struct sk_buff *skb_copy;

			skb_copy = skb_clone(res->atomic.skb, GFP_ATOMIC);
			if (skb_copy) {
				rxe_add_ref(qp); /* for the new SKB */
			} else {
				pr_warn("Couldn't clone atomic resp\n");
				rc = RESPST_CLEANUP;
				goto out;
			}

			/* Resend the result. */
			rc = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp,
					     pkt, skb_copy);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rxe_drop_ref(qp);
				kfree_skb(skb_copy);
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}

/* Process a class A or C error. Both are treated the same in this
 * implementation.
 */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}

static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored; reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}

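/*
 * Main entry point of the responder, run via rxe_run_task() from
 * rxe_resp_queue_pkt(). It loops over the state machine until the
 * current request has been fully handled (RESPST_DONE, returns 0) or
 * there is nothing more to do (RESPST_EXIT, returns -EAGAIN, which the
 * task framework treats as "no more work for now").
 */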
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET: {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&qp->req_pkts))) {
				rxe_drop_ref(qp);
				kfree_skb(skb);
			}

			while (!qp->srq && qp->rq.queue &&
			       queue_head(qp->rq.queue))
				advance_consumer(qp->rq.queue);

			qp->resp.wqe = NULL;
			goto exit;
		}

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));
			rxe_qp_error(qp);
			goto exit;

		default:
			WARN_ON(1);
		}
	}

exit:
	ret = -EAGAIN;
done:
	rxe_drop_ref(qp);
	return ret;
}