Note: this file does not exist in v6.8; it was removed from later kernels.
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one CQ entry from cxio and map it to openib.
 *
 * Returns:
 *	0			EMPTY
 *	1			CQE returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

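	/* Peek at the next valid CQE; NULL means the CQ is empty. */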
	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

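	/*
	 * Look up the QP this CQE belongs to.  If the QP is no longer
	 * around, poll with a NULL WQ so the CQE can still be consumed
	 * from the CQ.
	 */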
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
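	/* On T3A devices, return any accumulated CQ credits to the hardware. */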
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __func__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);

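	/*
	 * Type 0 CQEs are receive completions; anything else completed
	 * a send-queue work request.
	 */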
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

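	/* Translate the T3 completion status into an ib_wc status code. */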
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

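/*
 * Poll up to num_entries completions from the CQ into the wc array.
 * Returns the number of completions polled, or a negative errno.
 */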
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			BUG_ON(++i > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;
	else {
		return npolled;
	}
}
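
For context, here is a minimal, hypothetical consumer-side sketch (not part of this file): a kernel RDMA user drains a CQ through the generic ib_poll_cq() verb, which for a cxgb3 device dispatches to iwch_poll_cq() above. The function name example_drain_cq and the batch size of 8 are illustrative assumptions, not part of the driver.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

/* Hypothetical example: drain a CQ and report any failed completions. */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];
	int n, i;

	/* ib_poll_cq() returns how many completions were written to wc. */
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_err("cqe error: wr_id 0x%llx status %d\n",
				       (unsigned long long)wc[i].wr_id,
				       wc[i].status);
		}
	}
}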