/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

static int __iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			      struct iwch_qp *qhp, struct ib_wc *wc)
{
	struct t3_wq *wq = qhp ? &qhp->wq : NULL;
	struct t3_cqe cqe;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
				   &credit);
	if (t3a_device(chp->rhp) && credit) {
		pr_debug("%s updating %d cq credits on id %d\n", __func__,
			 credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = qhp ? &qhp->ibqp : NULL;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 __func__,
		 CQE_QPID(cqe), CQE_TYPE(cqe),
		 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
		 CQE_WRID_LOW(cqe), (unsigned long long)cookie);

	/* CQE_TYPE 0 marks an RQ (receive) completion; nonzero marks SQ. */
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	/* A flushed CQE always completes as a flush error, whatever its
	 * hardware status says.
	 */
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	return ret;
}
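
/*
 * Illustrative sketch, not part of this driver: roughly how a kernel ULP
 * might consume the ib_wc that __iwch_poll_cq_one() fills in above.  The
 * helper name and the pr_info() reporting are assumptions made for the
 * example; only the ib_wc fields and IB_WC_* constants come from the
 * RDMA core API (<rdma/ib_verbs.h>).
 */
#if 0	/* example only, never compiled */
static void example_handle_wc(struct ib_wc *wc)
{
	if (wc->status != IB_WC_SUCCESS) {
		pr_info("wr_id 0x%llx failed, status %d vendor_err 0x%x\n",
			wc->wr_id, wc->status, wc->vendor_err);
		return;
	}
	if (wc->opcode == IB_WC_RECV) {
		pr_info("received %u bytes for wr_id 0x%llx\n",
			wc->byte_len, wc->wr_id);
		/* Set above for T3_SEND_WITH_INV/T3_SEND_WITH_SE_INV. */
		if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
			pr_info("peer invalidated rkey 0x%x\n",
				wc->ex.invalidate_rkey);
	}
}
#endif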

/*
 * Get one CQ entry from cxio and map it to an openib work completion.
 *
 * Returns:
 *	0			cq empty
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp;
	struct t3_cqe *rd_cqe;
	int ret;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (qhp) {
		spin_lock(&qhp->lock);
		ret = __iwch_poll_cq_one(rhp, chp, qhp, wc);
		spin_unlock(&qhp->lock);
	} else {
		ret = __iwch_poll_cq_one(rhp, chp, NULL, wc);
	}
	return ret;
}

int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;
	else
		return npolled;
}
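
/*
 * Illustrative sketch, not part of this driver: consumers do not call
 * iwch_poll_cq() directly.  They call ib_poll_cq() from <rdma/ib_verbs.h>,
 * and the RDMA core dispatches to this provider's poll_cq verb.  The
 * helper name, the batch size of 16 and the pr_info() reporting are
 * assumptions made for the example.
 */
#if 0	/* example only, never compiled */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[16];
	int n, i;

	/* ib_poll_cq() returns the number of completions reaped, or < 0. */
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
		for (i = 0; i < n; i++)
			pr_info("wr_id 0x%llx completed, status %d\n",
				wc[i].wr_id, wc[i].status);

	if (n < 0)
		pr_info("ib_poll_cq() failed: %d\n", n);
}
#endif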