/*
 * Copyright (c) 2012 - 2019 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}

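/*
 * Worked example (editor's note, not driver code): a QPN decomposes
 * into a map-page index and a bit offset, and mk_qpn() inverts the
 * decomposition that qib_alloc_qpn() performs below:
 *
 *	offset = qpn & RVT_BITS_PER_PAGE_MASK;
 *	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 *	qpn == mk_qpn(qpt, map, offset);
 *
 * find_next_offset() advances the offset either with the qpt_mask
 * stride (when QPN bits are used to steer packets to kernel receive
 * contexts) or to the next zero bit in the current page's bitmap.
 */
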
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};

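/*
 * Editor's sketch, not driver code: rdmavt consults this table when a
 * work request is posted.  The opcode indexes the table, .length bounds
 * the work-request size to copy, and .qpt_support gates the opcode per
 * QP type, along these lines (the real check lives in
 * drivers/infiniband/sw/rdmavt/qp.c):
 *
 *	if (!(qib_post_parms[wr->opcode].qpt_support &
 *	      BIT(qp->ibqp.qp_type)))
 *		return -EINVAL;
 *
 * The RVT_OPERATION_ATOMIC* flags similarly let rdmavt apply the extra
 * restrictions that atomic operations carry.
 */
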
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u32 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		u32 n;

		ret = type == IB_QPT_GSI;
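		/*
		 * Each port owns two flag bits: bit 2 * (port - 1)
		 * marks the SMI QP (QPN 0) allocated, and the bit above
		 * it the GSI QP (QPN 1).  Port 1 uses bits 0 and 1,
		 * port 2 bits 2 and 3, and so on.
		 */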
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
				dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

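/*
 * Editor's sketch of the expected calling pattern: qib registers this
 * function with rdmavt, which invokes it while creating a QP.  The
 * surrounding code below is hypothetical:
 *
 *	int qpn = qib_alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
 *				IB_QPT_RC, 1);
 *	if (qpn < 0)
 *		return qpn;
 *	qp->ibqp.qp_num = qpn;
 */
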
/*
 * qib_free_all_qps - check for QPs still in use
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

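		/*
		 * qp[0] and qp[1] are the per-port SMI and GSI QPs; a
		 * non-NULL entry means that special QP is still in use.
		 */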
		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}
	return qp_inuse;
}

void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}

void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

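	/*
	 * Only reap send-side resources while the send engine is idle;
	 * when RVT_S_BUSY is set the active sender still owns them.
	 */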
	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}

static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}

int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}

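/*
 * Worked example (editor's note): with a port ibmtu of 2048, a request
 * for IB_MTU_4096 is clamped and qib_get_pmtu_from_attr() returns
 * IB_MTU_2048, while a request for IB_MTU_1024 is returned unchanged.
 */
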
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}

u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}

void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}

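/*
 * Editor's sketch of the caller's error handling: rdmavt reaches this
 * through its qp_priv_alloc driver hook and treats the result as an
 * ERR_PTR-style value, so a caller would check it along these lines
 * (illustrative only):
 *
 *	priv = qib_qp_priv_alloc(rdi, qp);
 *	if (IS_ERR(priv))
 *		return PTR_ERR(priv);
 *	qp->priv = priv;
 */
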
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}

void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}

void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}

void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp: The qp
 * @wqe: The built wqe
 * @call_send: Determine if the send should be posted or scheduled
 *
 * Returns 0 on success, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe, bool *call_send)
{
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		if (wqe->length > qp->pmtu)
			*call_send = false;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = rvt_get_swqe_ah(wqe);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		*call_send = true;
		break;
	default:
		break;
	}
	return 0;
}

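/*
 * Worked example (editor's note): on an RC QP with a pmtu of 1024, a
 * 256-byte send leaves *call_send as the caller set it, a 1 MB send
 * forces *call_send false so the send is scheduled on the workqueue,
 * and any length above 0x80000000 (2 GB) fails with -EINVAL.  On a UD
 * QP the length is checked against the AH's path MTU instead.
 */
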
#ifdef CONFIG_DEBUG_FS

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

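/*
 * Index order matches enum ib_qp_type: IB_QPT_SMI = 0, IB_QPT_GSI = 1,
 * IB_QPT_RC = 2, IB_QPT_UC = 3, IB_QPT_UD = 4.
 */
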
/**
 * qib_qp_iter_print - print information to seq_file
 * @s: the seq_file
 * @iter: the iterator
 */
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}

#endif