v3.1 (drivers/infiniband/core/verbs.c)
  1/*
  2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
  3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
  4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
  5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
  7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  8 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
  9 *
 10 * This software is available to you under a choice of one of two
 11 * licenses.  You may choose to be licensed under the terms of the GNU
 12 * General Public License (GPL) Version 2, available from the file
 13 * COPYING in the main directory of this source tree, or the
 14 * OpenIB.org BSD license below:
 15 *
 16 *     Redistribution and use in source and binary forms, with or
 17 *     without modification, are permitted provided that the following
 18 *     conditions are met:
 19 *
 20 *      - Redistributions of source code must retain the above
 21 *        copyright notice, this list of conditions and the following
 22 *        disclaimer.
 23 *
 24 *      - Redistributions in binary form must reproduce the above
 25 *        copyright notice, this list of conditions and the following
 26 *        disclaimer in the documentation and/or other materials
 27 *        provided with the distribution.
 28 *
 29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 36 * SOFTWARE.
 37 */
 38
 39#include <linux/errno.h>
 40#include <linux/err.h>
 41#include <linux/string.h>
 42
 43#include <rdma/ib_verbs.h>
 44#include <rdma/ib_cache.h>
 45
 46int ib_rate_to_mult(enum ib_rate rate)
 47{
 48	switch (rate) {
 49	case IB_RATE_2_5_GBPS: return  1;
 50	case IB_RATE_5_GBPS:   return  2;
 51	case IB_RATE_10_GBPS:  return  4;
 52	case IB_RATE_20_GBPS:  return  8;
 53	case IB_RATE_30_GBPS:  return 12;
 54	case IB_RATE_40_GBPS:  return 16;
 55	case IB_RATE_60_GBPS:  return 24;
 56	case IB_RATE_80_GBPS:  return 32;
 57	case IB_RATE_120_GBPS: return 48;
 58	default:	       return -1;
 59	}
 60}
 61EXPORT_SYMBOL(ib_rate_to_mult);
 62
 63enum ib_rate mult_to_ib_rate(int mult)
 64{
 65	switch (mult) {
 66	case 1:  return IB_RATE_2_5_GBPS;
 67	case 2:  return IB_RATE_5_GBPS;
 68	case 4:  return IB_RATE_10_GBPS;
 69	case 8:  return IB_RATE_20_GBPS;
 70	case 12: return IB_RATE_30_GBPS;
 71	case 16: return IB_RATE_40_GBPS;
 72	case 24: return IB_RATE_60_GBPS;
 73	case 32: return IB_RATE_80_GBPS;
 74	case 48: return IB_RATE_120_GBPS;
 75	default: return IB_RATE_PORT_CURRENT;
 76	}
 77}
 78EXPORT_SYMBOL(mult_to_ib_rate);
 79
 80enum rdma_transport_type
 81rdma_node_get_transport(enum rdma_node_type node_type)
 82{
 83	switch (node_type) {
 84	case RDMA_NODE_IB_CA:
 85	case RDMA_NODE_IB_SWITCH:
 86	case RDMA_NODE_IB_ROUTER:
 87		return RDMA_TRANSPORT_IB;
 88	case RDMA_NODE_RNIC:
 89		return RDMA_TRANSPORT_IWARP;
 90	default:
 91		BUG();
 92		return 0;
 93	}
 94}
 95EXPORT_SYMBOL(rdma_node_get_transport);
 96
 97enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
 98{
 99	if (device->get_link_layer)
100		return device->get_link_layer(device, port_num);
101
102	switch (rdma_node_get_transport(device->node_type)) {
103	case RDMA_TRANSPORT_IB:
104		return IB_LINK_LAYER_INFINIBAND;
105	case RDMA_TRANSPORT_IWARP:
106		return IB_LINK_LAYER_ETHERNET;
107	default:
108		return IB_LINK_LAYER_UNSPECIFIED;
109	}
110}
111EXPORT_SYMBOL(rdma_port_get_link_layer);
112
113/* Protection domains */
114
115struct ib_pd *ib_alloc_pd(struct ib_device *device)
116{
117	struct ib_pd *pd;
118
119	pd = device->alloc_pd(device, NULL, NULL);
120
121	if (!IS_ERR(pd)) {
122		pd->device  = device;
123		pd->uobject = NULL;
124		atomic_set(&pd->usecnt, 0);
125	}
126
127	return pd;
128}
129EXPORT_SYMBOL(ib_alloc_pd);
130
131int ib_dealloc_pd(struct ib_pd *pd)
132{
133	if (atomic_read(&pd->usecnt))
134		return -EBUSY;
135
136	return pd->device->dealloc_pd(pd);
137}
138EXPORT_SYMBOL(ib_dealloc_pd);
139
140/* Address handles */
141
142struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
143{
144	struct ib_ah *ah;
145
146	ah = pd->device->create_ah(pd, ah_attr);
147
148	if (!IS_ERR(ah)) {
149		ah->device  = pd->device;
150		ah->pd      = pd;
151		ah->uobject = NULL;
152		atomic_inc(&pd->usecnt);
153	}
154
155	return ah;
156}
157EXPORT_SYMBOL(ib_create_ah);
158
159int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
160		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
161{
162	u32 flow_class;
163	u16 gid_index;
164	int ret;
165
166	memset(ah_attr, 0, sizeof *ah_attr);
167	ah_attr->dlid = wc->slid;
168	ah_attr->sl = wc->sl;
169	ah_attr->src_path_bits = wc->dlid_path_bits;
170	ah_attr->port_num = port_num;
171
172	if (wc->wc_flags & IB_WC_GRH) {
173		ah_attr->ah_flags = IB_AH_GRH;
174		ah_attr->grh.dgid = grh->sgid;
175
176		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
177					 &gid_index);
178		if (ret)
179			return ret;
180
181		ah_attr->grh.sgid_index = (u8) gid_index;
182		flow_class = be32_to_cpu(grh->version_tclass_flow);
183		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
184		ah_attr->grh.hop_limit = 0xFF;
185		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
186	}
187	return 0;
188}
189EXPORT_SYMBOL(ib_init_ah_from_wc);
190
191struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
192				   struct ib_grh *grh, u8 port_num)
193{
194	struct ib_ah_attr ah_attr;
195	int ret;
196
197	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
198	if (ret)
199		return ERR_PTR(ret);
200
201	return ib_create_ah(pd, &ah_attr);
202}
203EXPORT_SYMBOL(ib_create_ah_from_wc);
204
205int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
206{
207	return ah->device->modify_ah ?
208		ah->device->modify_ah(ah, ah_attr) :
209		-ENOSYS;
210}
211EXPORT_SYMBOL(ib_modify_ah);
212
213int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
214{
215	return ah->device->query_ah ?
216		ah->device->query_ah(ah, ah_attr) :
217		-ENOSYS;
218}
219EXPORT_SYMBOL(ib_query_ah);
220
221int ib_destroy_ah(struct ib_ah *ah)
222{
223	struct ib_pd *pd;
224	int ret;
225
226	pd = ah->pd;
227	ret = ah->device->destroy_ah(ah);
228	if (!ret)
229		atomic_dec(&pd->usecnt);
230
231	return ret;
232}
233EXPORT_SYMBOL(ib_destroy_ah);
234
235/* Shared receive queues */
236
237struct ib_srq *ib_create_srq(struct ib_pd *pd,
238			     struct ib_srq_init_attr *srq_init_attr)
239{
240	struct ib_srq *srq;
241
242	if (!pd->device->create_srq)
243		return ERR_PTR(-ENOSYS);
244
245	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
246
247	if (!IS_ERR(srq)) {
248		srq->device    	   = pd->device;
249		srq->pd        	   = pd;
250		srq->uobject       = NULL;
251		srq->event_handler = srq_init_attr->event_handler;
252		srq->srq_context   = srq_init_attr->srq_context;
253		atomic_inc(&pd->usecnt);
254		atomic_set(&srq->usecnt, 0);
255	}
256
257	return srq;
258}
259EXPORT_SYMBOL(ib_create_srq);
260
261int ib_modify_srq(struct ib_srq *srq,
262		  struct ib_srq_attr *srq_attr,
263		  enum ib_srq_attr_mask srq_attr_mask)
264{
265	return srq->device->modify_srq ?
266		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
267		-ENOSYS;
268}
269EXPORT_SYMBOL(ib_modify_srq);
270
271int ib_query_srq(struct ib_srq *srq,
272		 struct ib_srq_attr *srq_attr)
273{
274	return srq->device->query_srq ?
275		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
276}
277EXPORT_SYMBOL(ib_query_srq);
278
279int ib_destroy_srq(struct ib_srq *srq)
280{
281	struct ib_pd *pd;
282	int ret;
283
284	if (atomic_read(&srq->usecnt))
285		return -EBUSY;
286
287	pd = srq->pd;
288
289	ret = srq->device->destroy_srq(srq);
290	if (!ret)
291		atomic_dec(&pd->usecnt);
292
293	return ret;
294}
295EXPORT_SYMBOL(ib_destroy_srq);
296
297/* Queue pairs */
298
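/*
 * Editor's note (not in the original source): ib_create_qp() takes a
 * reference on the PD, on both completion queues and, if one is used, on
 * the SRQ; ib_destroy_qp() drops those references again once the driver
 * has destroyed the QP.
 */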
299struct ib_qp *ib_create_qp(struct ib_pd *pd,
300			   struct ib_qp_init_attr *qp_init_attr)
301{
302	struct ib_qp *qp;
303
304	qp = pd->device->create_qp(pd, qp_init_attr, NULL);
305
306	if (!IS_ERR(qp)) {
307		qp->device     	  = pd->device;
308		qp->pd         	  = pd;
309		qp->send_cq    	  = qp_init_attr->send_cq;
310		qp->recv_cq    	  = qp_init_attr->recv_cq;
311		qp->srq	       	  = qp_init_attr->srq;
312		qp->uobject       = NULL;
313		qp->event_handler = qp_init_attr->event_handler;
314		qp->qp_context    = qp_init_attr->qp_context;
315		qp->qp_type	  = qp_init_attr->qp_type;
316		atomic_inc(&pd->usecnt);
317		atomic_inc(&qp_init_attr->send_cq->usecnt);
318		atomic_inc(&qp_init_attr->recv_cq->usecnt);
319		if (qp_init_attr->srq)
320			atomic_inc(&qp_init_attr->srq->usecnt);
321	}
322
323	return qp;
324}
325EXPORT_SYMBOL(ib_create_qp);
326
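/*
 * Editor's note (not in the original source): qp_state_table is indexed as
 * qp_state_table[current_state][next_state].  Each entry records whether
 * the transition is legal (.valid) and, per QP type, which ib_qp_attr_mask
 * bits a caller of ib_modify_qp() must supply (.req_param) or may supply
 * (.opt_param).  ib_modify_qp_is_ok() below checks a caller's mask against
 * this table.
 */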
327static const struct {
328	int			valid;
329	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETHERTYPE + 1];
330	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETHERTYPE + 1];
331} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
332	[IB_QPS_RESET] = {
333		[IB_QPS_RESET] = { .valid = 1 },
334		[IB_QPS_INIT]  = {
335			.valid = 1,
336			.req_param = {
337				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
338						IB_QP_PORT			|
339						IB_QP_QKEY),
340				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
341						IB_QP_PORT			|
342						IB_QP_ACCESS_FLAGS),
343				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
344						IB_QP_PORT			|
345						IB_QP_ACCESS_FLAGS),
346				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
347						IB_QP_QKEY),
348				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
349						IB_QP_QKEY),
350			}
351		},
352	},
353	[IB_QPS_INIT]  = {
354		[IB_QPS_RESET] = { .valid = 1 },
355		[IB_QPS_ERR] =   { .valid = 1 },
356		[IB_QPS_INIT]  = {
357			.valid = 1,
358			.opt_param = {
359				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
360						IB_QP_PORT			|
361						IB_QP_QKEY),
362				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
363						IB_QP_PORT			|
364						IB_QP_ACCESS_FLAGS),
365				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
366						IB_QP_PORT			|
367						IB_QP_ACCESS_FLAGS),
368				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
369						IB_QP_QKEY),
370				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
371						IB_QP_QKEY),
372			}
373		},
374		[IB_QPS_RTR]   = {
375			.valid = 1,
376			.req_param = {
377				[IB_QPT_UC]  = (IB_QP_AV			|
378						IB_QP_PATH_MTU			|
379						IB_QP_DEST_QPN			|
380						IB_QP_RQ_PSN),
381				[IB_QPT_RC]  = (IB_QP_AV			|
382						IB_QP_PATH_MTU			|
383						IB_QP_DEST_QPN			|
384						IB_QP_RQ_PSN			|
385						IB_QP_MAX_DEST_RD_ATOMIC	|
386						IB_QP_MIN_RNR_TIMER),
387			},
388			.opt_param = {
389				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
390						 IB_QP_QKEY),
391				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
392						 IB_QP_ACCESS_FLAGS		|
393						 IB_QP_PKEY_INDEX),
394				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
395						 IB_QP_ACCESS_FLAGS		|
396						 IB_QP_PKEY_INDEX),
397				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
398						 IB_QP_QKEY),
399				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
400						 IB_QP_QKEY),
401			 }
402		}
403	},
404	[IB_QPS_RTR]   = {
405		[IB_QPS_RESET] = { .valid = 1 },
406		[IB_QPS_ERR] =   { .valid = 1 },
407		[IB_QPS_RTS]   = {
408			.valid = 1,
409			.req_param = {
410				[IB_QPT_UD]  = IB_QP_SQ_PSN,
411				[IB_QPT_UC]  = IB_QP_SQ_PSN,
412				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
413						IB_QP_RETRY_CNT			|
414						IB_QP_RNR_RETRY			|
415						IB_QP_SQ_PSN			|
416						IB_QP_MAX_QP_RD_ATOMIC),
417				[IB_QPT_SMI] = IB_QP_SQ_PSN,
418				[IB_QPT_GSI] = IB_QP_SQ_PSN,
419			},
420			.opt_param = {
421				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
422						 IB_QP_QKEY),
423				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
424						 IB_QP_ALT_PATH			|
425						 IB_QP_ACCESS_FLAGS		|
426						 IB_QP_PATH_MIG_STATE),
427				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
428						 IB_QP_ALT_PATH			|
429						 IB_QP_ACCESS_FLAGS		|
430						 IB_QP_MIN_RNR_TIMER		|
431						 IB_QP_PATH_MIG_STATE),
432				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
433						 IB_QP_QKEY),
434				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
435						 IB_QP_QKEY),
436			 }
437		}
438	},
439	[IB_QPS_RTS]   = {
440		[IB_QPS_RESET] = { .valid = 1 },
441		[IB_QPS_ERR] =   { .valid = 1 },
442		[IB_QPS_RTS]   = {
443			.valid = 1,
444			.opt_param = {
445				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
446						IB_QP_QKEY),
447				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
448						IB_QP_ACCESS_FLAGS		|
449						IB_QP_ALT_PATH			|
450						IB_QP_PATH_MIG_STATE),
451				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
452						IB_QP_ACCESS_FLAGS		|
453						IB_QP_ALT_PATH			|
454						IB_QP_PATH_MIG_STATE		|
455						IB_QP_MIN_RNR_TIMER),
456				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
457						IB_QP_QKEY),
458				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
459						IB_QP_QKEY),
460			}
461		},
462		[IB_QPS_SQD]   = {
463			.valid = 1,
464			.opt_param = {
465				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
466				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
467				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
468				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
469				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
470			}
471		},
472	},
473	[IB_QPS_SQD]   = {
474		[IB_QPS_RESET] = { .valid = 1 },
475		[IB_QPS_ERR] =   { .valid = 1 },
476		[IB_QPS_RTS]   = {
477			.valid = 1,
478			.opt_param = {
479				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
480						IB_QP_QKEY),
481				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
482						IB_QP_ALT_PATH			|
483						IB_QP_ACCESS_FLAGS		|
484						IB_QP_PATH_MIG_STATE),
485				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
486						IB_QP_ALT_PATH			|
487						IB_QP_ACCESS_FLAGS		|
488						IB_QP_MIN_RNR_TIMER		|
489						IB_QP_PATH_MIG_STATE),
490				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
491						IB_QP_QKEY),
492				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
493						IB_QP_QKEY),
494			}
495		},
496		[IB_QPS_SQD]   = {
497			.valid = 1,
498			.opt_param = {
499				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
500						IB_QP_QKEY),
501				[IB_QPT_UC]  = (IB_QP_AV			|
502						IB_QP_ALT_PATH			|
503						IB_QP_ACCESS_FLAGS		|
504						IB_QP_PKEY_INDEX		|
505						IB_QP_PATH_MIG_STATE),
506				[IB_QPT_RC]  = (IB_QP_PORT			|
507						IB_QP_AV			|
508						IB_QP_TIMEOUT			|
509						IB_QP_RETRY_CNT			|
510						IB_QP_RNR_RETRY			|
511						IB_QP_MAX_QP_RD_ATOMIC		|
512						IB_QP_MAX_DEST_RD_ATOMIC	|
513						IB_QP_ALT_PATH			|
514						IB_QP_ACCESS_FLAGS		|
515						IB_QP_PKEY_INDEX		|
516						IB_QP_MIN_RNR_TIMER		|
517						IB_QP_PATH_MIG_STATE),
518				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
519						IB_QP_QKEY),
520				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
521						IB_QP_QKEY),
522			}
523		}
524	},
525	[IB_QPS_SQE]   = {
526		[IB_QPS_RESET] = { .valid = 1 },
527		[IB_QPS_ERR] =   { .valid = 1 },
528		[IB_QPS_RTS]   = {
529			.valid = 1,
530			.opt_param = {
531				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
532						IB_QP_QKEY),
533				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
534						IB_QP_ACCESS_FLAGS),
535				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
536						IB_QP_QKEY),
537				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
538						IB_QP_QKEY),
539			}
540		}
541	},
542	[IB_QPS_ERR] = {
543		[IB_QPS_RESET] = { .valid = 1 },
544		[IB_QPS_ERR] =   { .valid = 1 }
545	}
546};
547
548int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
549		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
550{
551	enum ib_qp_attr_mask req_param, opt_param;
552
553	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
554	    next_state < 0 || next_state > IB_QPS_ERR)
555		return 0;
556
557	if (mask & IB_QP_CUR_STATE  &&
558	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
559	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
560		return 0;
561
562	if (!qp_state_table[cur_state][next_state].valid)
563		return 0;
564
565	req_param = qp_state_table[cur_state][next_state].req_param[type];
566	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
567
568	if ((mask & req_param) != req_param)
569		return 0;
570
571	if (mask & ~(req_param | opt_param | IB_QP_STATE))
572		return 0;
573
574	return 1;
575}
576EXPORT_SYMBOL(ib_modify_qp_is_ok);
577
578int ib_modify_qp(struct ib_qp *qp,
579		 struct ib_qp_attr *qp_attr,
580		 int qp_attr_mask)
581{
582	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
583}
584EXPORT_SYMBOL(ib_modify_qp);
585
586int ib_query_qp(struct ib_qp *qp,
587		struct ib_qp_attr *qp_attr,
588		int qp_attr_mask,
589		struct ib_qp_init_attr *qp_init_attr)
590{
591	return qp->device->query_qp ?
592		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
593		-ENOSYS;
594}
595EXPORT_SYMBOL(ib_query_qp);
596
597int ib_destroy_qp(struct ib_qp *qp)
598{
599	struct ib_pd *pd;
600	struct ib_cq *scq, *rcq;
601	struct ib_srq *srq;
602	int ret;
603
604	pd  = qp->pd;
605	scq = qp->send_cq;
606	rcq = qp->recv_cq;
607	srq = qp->srq;
608
609	ret = qp->device->destroy_qp(qp);
610	if (!ret) {
611		atomic_dec(&pd->usecnt);
612		atomic_dec(&scq->usecnt);
613		atomic_dec(&rcq->usecnt);
614		if (srq)
615			atomic_dec(&srq->usecnt);
616	}
617
618	return ret;
619}
620EXPORT_SYMBOL(ib_destroy_qp);
621
622/* Completion queues */
623
624struct ib_cq *ib_create_cq(struct ib_device *device,
625			   ib_comp_handler comp_handler,
626			   void (*event_handler)(struct ib_event *, void *),
627			   void *cq_context, int cqe, int comp_vector)
628{
629	struct ib_cq *cq;
630
631	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
632
633	if (!IS_ERR(cq)) {
634		cq->device        = device;
635		cq->uobject       = NULL;
636		cq->comp_handler  = comp_handler;
637		cq->event_handler = event_handler;
638		cq->cq_context    = cq_context;
639		atomic_set(&cq->usecnt, 0);
640	}
641
642	return cq;
643}
644EXPORT_SYMBOL(ib_create_cq);
645
646int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
647{
648	return cq->device->modify_cq ?
649		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
650}
651EXPORT_SYMBOL(ib_modify_cq);
652
653int ib_destroy_cq(struct ib_cq *cq)
654{
655	if (atomic_read(&cq->usecnt))
656		return -EBUSY;
657
658	return cq->device->destroy_cq(cq);
659}
660EXPORT_SYMBOL(ib_destroy_cq);
661
662int ib_resize_cq(struct ib_cq *cq, int cqe)
663{
664	return cq->device->resize_cq ?
665		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
666}
667EXPORT_SYMBOL(ib_resize_cq);
668
669/* Memory regions */
670
671struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
672{
673	struct ib_mr *mr;
674
675	mr = pd->device->get_dma_mr(pd, mr_access_flags);
676
677	if (!IS_ERR(mr)) {
678		mr->device  = pd->device;
679		mr->pd      = pd;
680		mr->uobject = NULL;
681		atomic_inc(&pd->usecnt);
682		atomic_set(&mr->usecnt, 0);
683	}
684
685	return mr;
686}
687EXPORT_SYMBOL(ib_get_dma_mr);
688
689struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
690			     struct ib_phys_buf *phys_buf_array,
691			     int num_phys_buf,
692			     int mr_access_flags,
693			     u64 *iova_start)
694{
695	struct ib_mr *mr;
696
697	if (!pd->device->reg_phys_mr)
698		return ERR_PTR(-ENOSYS);
699
700	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
701				     mr_access_flags, iova_start);
702
703	if (!IS_ERR(mr)) {
704		mr->device  = pd->device;
705		mr->pd      = pd;
706		mr->uobject = NULL;
707		atomic_inc(&pd->usecnt);
708		atomic_set(&mr->usecnt, 0);
709	}
710
711	return mr;
712}
713EXPORT_SYMBOL(ib_reg_phys_mr);
714
715int ib_rereg_phys_mr(struct ib_mr *mr,
716		     int mr_rereg_mask,
717		     struct ib_pd *pd,
718		     struct ib_phys_buf *phys_buf_array,
719		     int num_phys_buf,
720		     int mr_access_flags,
721		     u64 *iova_start)
722{
723	struct ib_pd *old_pd;
724	int ret;
725
726	if (!mr->device->rereg_phys_mr)
727		return -ENOSYS;
728
729	if (atomic_read(&mr->usecnt))
730		return -EBUSY;
731
732	old_pd = mr->pd;
733
734	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
735					phys_buf_array, num_phys_buf,
736					mr_access_flags, iova_start);
737
738	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
739		atomic_dec(&old_pd->usecnt);
740		atomic_inc(&pd->usecnt);
741	}
742
743	return ret;
744}
745EXPORT_SYMBOL(ib_rereg_phys_mr);
746
747int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
748{
749	return mr->device->query_mr ?
750		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
751}
752EXPORT_SYMBOL(ib_query_mr);
753
754int ib_dereg_mr(struct ib_mr *mr)
755{
756	struct ib_pd *pd;
757	int ret;
758
759	if (atomic_read(&mr->usecnt))
760		return -EBUSY;
761
762	pd = mr->pd;
763	ret = mr->device->dereg_mr(mr);
764	if (!ret)
765		atomic_dec(&pd->usecnt);
766
767	return ret;
768}
769EXPORT_SYMBOL(ib_dereg_mr);
770
771struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
772{
773	struct ib_mr *mr;
774
775	if (!pd->device->alloc_fast_reg_mr)
776		return ERR_PTR(-ENOSYS);
777
778	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);
779
780	if (!IS_ERR(mr)) {
781		mr->device  = pd->device;
782		mr->pd      = pd;
783		mr->uobject = NULL;
784		atomic_inc(&pd->usecnt);
785		atomic_set(&mr->usecnt, 0);
786	}
787
788	return mr;
789}
790EXPORT_SYMBOL(ib_alloc_fast_reg_mr);
791
792struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
793							  int max_page_list_len)
794{
795	struct ib_fast_reg_page_list *page_list;
796
797	if (!device->alloc_fast_reg_page_list)
798		return ERR_PTR(-ENOSYS);
799
800	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
801
802	if (!IS_ERR(page_list)) {
803		page_list->device = device;
804		page_list->max_page_list_len = max_page_list_len;
805	}
806
807	return page_list;
808}
809EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
810
811void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
812{
813	page_list->device->free_fast_reg_page_list(page_list);
814}
815EXPORT_SYMBOL(ib_free_fast_reg_page_list);
816
817/* Memory windows */
818
819struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
820{
821	struct ib_mw *mw;
822
823	if (!pd->device->alloc_mw)
824		return ERR_PTR(-ENOSYS);
825
826	mw = pd->device->alloc_mw(pd);
827	if (!IS_ERR(mw)) {
828		mw->device  = pd->device;
829		mw->pd      = pd;
830		mw->uobject = NULL;
831		atomic_inc(&pd->usecnt);
832	}
833
834	return mw;
835}
836EXPORT_SYMBOL(ib_alloc_mw);
837
838int ib_dealloc_mw(struct ib_mw *mw)
839{
840	struct ib_pd *pd;
841	int ret;
842
843	pd = mw->pd;
844	ret = mw->device->dealloc_mw(mw);
845	if (!ret)
846		atomic_dec(&pd->usecnt);
847
848	return ret;
849}
850EXPORT_SYMBOL(ib_dealloc_mw);
851
852/* "Fast" memory regions */
853
854struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
855			    int mr_access_flags,
856			    struct ib_fmr_attr *fmr_attr)
857{
858	struct ib_fmr *fmr;
859
860	if (!pd->device->alloc_fmr)
861		return ERR_PTR(-ENOSYS);
862
863	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
864	if (!IS_ERR(fmr)) {
865		fmr->device = pd->device;
866		fmr->pd     = pd;
867		atomic_inc(&pd->usecnt);
868	}
869
870	return fmr;
871}
872EXPORT_SYMBOL(ib_alloc_fmr);
873
874int ib_unmap_fmr(struct list_head *fmr_list)
875{
876	struct ib_fmr *fmr;
877
878	if (list_empty(fmr_list))
879		return 0;
880
881	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
882	return fmr->device->unmap_fmr(fmr_list);
883}
884EXPORT_SYMBOL(ib_unmap_fmr);
885
886int ib_dealloc_fmr(struct ib_fmr *fmr)
887{
888	struct ib_pd *pd;
889	int ret;
890
891	pd = fmr->pd;
892	ret = fmr->device->dealloc_fmr(fmr);
893	if (!ret)
894		atomic_dec(&pd->usecnt);
895
896	return ret;
897}
898EXPORT_SYMBOL(ib_dealloc_fmr);
899
900/* Multicast groups */
901
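/*
 * Editor's note (not in the original source): multicast attach/detach is
 * only defined for UD QPs, and every IB multicast GID begins with 0xff,
 * which is what the gid->raw[0] checks below enforce.
 */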
902int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
903{
904	if (!qp->device->attach_mcast)
905		return -ENOSYS;
906	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
907		return -EINVAL;
908
909	return qp->device->attach_mcast(qp, gid, lid);
910}
911EXPORT_SYMBOL(ib_attach_mcast);
912
913int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
914{
915	if (!qp->device->detach_mcast)
916		return -ENOSYS;
917	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
918		return -EINVAL;
919
920	return qp->device->detach_mcast(qp, gid, lid);
921}
922EXPORT_SYMBOL(ib_detach_mcast);
v3.15 (drivers/infiniband/core/verbs.c)
   1/*
   2 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
   3 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
   4 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
   5 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
   6 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
   7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
   8 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
   9 *
  10 * This software is available to you under a choice of one of two
  11 * licenses.  You may choose to be licensed under the terms of the GNU
  12 * General Public License (GPL) Version 2, available from the file
  13 * COPYING in the main directory of this source tree, or the
  14 * OpenIB.org BSD license below:
  15 *
  16 *     Redistribution and use in source and binary forms, with or
  17 *     without modification, are permitted provided that the following
  18 *     conditions are met:
  19 *
  20 *      - Redistributions of source code must retain the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer.
  23 *
  24 *      - Redistributions in binary form must reproduce the above
  25 *        copyright notice, this list of conditions and the following
  26 *        disclaimer in the documentation and/or other materials
  27 *        provided with the distribution.
  28 *
  29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  36 * SOFTWARE.
  37 */
  38
  39#include <linux/errno.h>
  40#include <linux/err.h>
  41#include <linux/export.h>
  42#include <linux/string.h>
  43#include <linux/slab.h>
  44
  45#include <rdma/ib_verbs.h>
  46#include <rdma/ib_cache.h>
  47#include <rdma/ib_addr.h>
  48
  49#include "core_priv.h"
  50
  51int ib_rate_to_mult(enum ib_rate rate)
  52{
  53	switch (rate) {
  54	case IB_RATE_2_5_GBPS: return  1;
  55	case IB_RATE_5_GBPS:   return  2;
  56	case IB_RATE_10_GBPS:  return  4;
  57	case IB_RATE_20_GBPS:  return  8;
  58	case IB_RATE_30_GBPS:  return 12;
  59	case IB_RATE_40_GBPS:  return 16;
  60	case IB_RATE_60_GBPS:  return 24;
  61	case IB_RATE_80_GBPS:  return 32;
  62	case IB_RATE_120_GBPS: return 48;
  63	default:	       return -1;
  64	}
  65}
  66EXPORT_SYMBOL(ib_rate_to_mult);
  67
  68enum ib_rate mult_to_ib_rate(int mult)
  69{
  70	switch (mult) {
  71	case 1:  return IB_RATE_2_5_GBPS;
  72	case 2:  return IB_RATE_5_GBPS;
  73	case 4:  return IB_RATE_10_GBPS;
  74	case 8:  return IB_RATE_20_GBPS;
  75	case 12: return IB_RATE_30_GBPS;
  76	case 16: return IB_RATE_40_GBPS;
  77	case 24: return IB_RATE_60_GBPS;
  78	case 32: return IB_RATE_80_GBPS;
  79	case 48: return IB_RATE_120_GBPS;
  80	default: return IB_RATE_PORT_CURRENT;
  81	}
  82}
  83EXPORT_SYMBOL(mult_to_ib_rate);
  84
  85int ib_rate_to_mbps(enum ib_rate rate)
  86{
  87	switch (rate) {
  88	case IB_RATE_2_5_GBPS: return 2500;
  89	case IB_RATE_5_GBPS:   return 5000;
  90	case IB_RATE_10_GBPS:  return 10000;
  91	case IB_RATE_20_GBPS:  return 20000;
  92	case IB_RATE_30_GBPS:  return 30000;
  93	case IB_RATE_40_GBPS:  return 40000;
  94	case IB_RATE_60_GBPS:  return 60000;
  95	case IB_RATE_80_GBPS:  return 80000;
  96	case IB_RATE_120_GBPS: return 120000;
  97	case IB_RATE_14_GBPS:  return 14062;
  98	case IB_RATE_56_GBPS:  return 56250;
  99	case IB_RATE_112_GBPS: return 112500;
 100	case IB_RATE_168_GBPS: return 168750;
 101	case IB_RATE_25_GBPS:  return 25781;
 102	case IB_RATE_100_GBPS: return 103125;
 103	case IB_RATE_200_GBPS: return 206250;
 104	case IB_RATE_300_GBPS: return 309375;
 105	default:	       return -1;
 106	}
 107}
 108EXPORT_SYMBOL(ib_rate_to_mbps);
 109
 110enum rdma_transport_type
 111rdma_node_get_transport(enum rdma_node_type node_type)
 112{
 113	switch (node_type) {
 114	case RDMA_NODE_IB_CA:
 115	case RDMA_NODE_IB_SWITCH:
 116	case RDMA_NODE_IB_ROUTER:
 117		return RDMA_TRANSPORT_IB;
 118	case RDMA_NODE_RNIC:
 119		return RDMA_TRANSPORT_IWARP;
 120	case RDMA_NODE_USNIC:
 121		return RDMA_TRANSPORT_USNIC;
 122	case RDMA_NODE_USNIC_UDP:
 123		return RDMA_TRANSPORT_USNIC_UDP;
 124	default:
 125		BUG();
 126		return 0;
 127	}
 128}
 129EXPORT_SYMBOL(rdma_node_get_transport);
 130
 131enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
 132{
 133	if (device->get_link_layer)
 134		return device->get_link_layer(device, port_num);
 135
 136	switch (rdma_node_get_transport(device->node_type)) {
 137	case RDMA_TRANSPORT_IB:
 138		return IB_LINK_LAYER_INFINIBAND;
 139	case RDMA_TRANSPORT_IWARP:
 140	case RDMA_TRANSPORT_USNIC:
 141	case RDMA_TRANSPORT_USNIC_UDP:
 142		return IB_LINK_LAYER_ETHERNET;
 143	default:
 144		return IB_LINK_LAYER_UNSPECIFIED;
 145	}
 146}
 147EXPORT_SYMBOL(rdma_port_get_link_layer);
 148
 149/* Protection domains */
 150
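/*
 * Editor's note (not in the original source): a typical kernel ULP
 * allocates a PD, creates CQs and QPs against it, and drives each QP
 * through RESET -> INIT -> RTR -> RTS with ib_modify_qp().  Child objects
 * take a reference on their parents (the usecnt atomics below), so
 * ib_dealloc_pd() keeps returning -EBUSY until every AH, QP, SRQ and MR
 * that uses the PD has been destroyed.
 */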
 151struct ib_pd *ib_alloc_pd(struct ib_device *device)
 152{
 153	struct ib_pd *pd;
 154
 155	pd = device->alloc_pd(device, NULL, NULL);
 156
 157	if (!IS_ERR(pd)) {
 158		pd->device  = device;
 159		pd->uobject = NULL;
 160		atomic_set(&pd->usecnt, 0);
 161	}
 162
 163	return pd;
 164}
 165EXPORT_SYMBOL(ib_alloc_pd);
 166
 167int ib_dealloc_pd(struct ib_pd *pd)
 168{
 169	if (atomic_read(&pd->usecnt))
 170		return -EBUSY;
 171
 172	return pd->device->dealloc_pd(pd);
 173}
 174EXPORT_SYMBOL(ib_dealloc_pd);
 175
 176/* Address handles */
 177
 178struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 179{
 180	struct ib_ah *ah;
 181
 182	ah = pd->device->create_ah(pd, ah_attr);
 183
 184	if (!IS_ERR(ah)) {
 185		ah->device  = pd->device;
 186		ah->pd      = pd;
 187		ah->uobject = NULL;
 188		atomic_inc(&pd->usecnt);
 189	}
 190
 191	return ah;
 192}
 193EXPORT_SYMBOL(ib_create_ah);
 194
 195int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
 196		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
 197{
 198	u32 flow_class;
 199	u16 gid_index;
 200	int ret;
 201	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
 202			IB_LINK_LAYER_ETHERNET);
 203
 204	memset(ah_attr, 0, sizeof *ah_attr);
 205	if (is_eth) {
 206		if (!(wc->wc_flags & IB_WC_GRH))
 207			return -EPROTOTYPE;
 208
 209		if (wc->wc_flags & IB_WC_WITH_SMAC &&
 210		    wc->wc_flags & IB_WC_WITH_VLAN) {
 211			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
 212			ah_attr->vlan_id = wc->vlan_id;
 213		} else {
 214			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
 215					ah_attr->dmac, &ah_attr->vlan_id);
 216			if (ret)
 217				return ret;
 218		}
 219	} else {
 220		ah_attr->vlan_id = 0xffff;
 221	}
 222
 223	ah_attr->dlid = wc->slid;
 224	ah_attr->sl = wc->sl;
 225	ah_attr->src_path_bits = wc->dlid_path_bits;
 226	ah_attr->port_num = port_num;
 227
 228	if (wc->wc_flags & IB_WC_GRH) {
 229		ah_attr->ah_flags = IB_AH_GRH;
 230		ah_attr->grh.dgid = grh->sgid;
 231
 232		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
 233					 &gid_index);
 234		if (ret)
 235			return ret;
 236
 237		ah_attr->grh.sgid_index = (u8) gid_index;
 238		flow_class = be32_to_cpu(grh->version_tclass_flow);
 239		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
 240		ah_attr->grh.hop_limit = 0xFF;
 241		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
 242	}
 243	return 0;
 244}
 245EXPORT_SYMBOL(ib_init_ah_from_wc);
 246
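/*
 * Editor's note (not in the original source): ib_init_ah_from_wc() builds
 * the address-handle attributes needed to reply to the sender of a
 * received packet: the remote LID, SL and path bits are taken from the
 * work completion, and when a GRH is present it is reversed, so the
 * sender's source GID becomes the reply's destination GID.  On RoCE ports
 * the destination MAC and VLAN are resolved as well.
 * ib_create_ah_from_wc() below is the convenience wrapper that also
 * allocates the address handle.
 */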
 247struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
 248				   struct ib_grh *grh, u8 port_num)
 249{
 250	struct ib_ah_attr ah_attr;
 251	int ret;
 252
 253	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
 254	if (ret)
 255		return ERR_PTR(ret);
 256
 257	return ib_create_ah(pd, &ah_attr);
 258}
 259EXPORT_SYMBOL(ib_create_ah_from_wc);
 260
 261int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 262{
 263	return ah->device->modify_ah ?
 264		ah->device->modify_ah(ah, ah_attr) :
 265		-ENOSYS;
 266}
 267EXPORT_SYMBOL(ib_modify_ah);
 268
 269int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 270{
 271	return ah->device->query_ah ?
 272		ah->device->query_ah(ah, ah_attr) :
 273		-ENOSYS;
 274}
 275EXPORT_SYMBOL(ib_query_ah);
 276
 277int ib_destroy_ah(struct ib_ah *ah)
 278{
 279	struct ib_pd *pd;
 280	int ret;
 281
 282	pd = ah->pd;
 283	ret = ah->device->destroy_ah(ah);
 284	if (!ret)
 285		atomic_dec(&pd->usecnt);
 286
 287	return ret;
 288}
 289EXPORT_SYMBOL(ib_destroy_ah);
 290
 291/* Shared receive queues */
 292
 293struct ib_srq *ib_create_srq(struct ib_pd *pd,
 294			     struct ib_srq_init_attr *srq_init_attr)
 295{
 296	struct ib_srq *srq;
 297
 298	if (!pd->device->create_srq)
 299		return ERR_PTR(-ENOSYS);
 300
 301	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
 302
 303	if (!IS_ERR(srq)) {
 304		srq->device    	   = pd->device;
 305		srq->pd        	   = pd;
 306		srq->uobject       = NULL;
 307		srq->event_handler = srq_init_attr->event_handler;
 308		srq->srq_context   = srq_init_attr->srq_context;
 309		srq->srq_type      = srq_init_attr->srq_type;
 310		if (srq->srq_type == IB_SRQT_XRC) {
 311			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
 312			srq->ext.xrc.cq   = srq_init_attr->ext.xrc.cq;
 313			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
 314			atomic_inc(&srq->ext.xrc.cq->usecnt);
 315		}
 316		atomic_inc(&pd->usecnt);
 317		atomic_set(&srq->usecnt, 0);
 318	}
 319
 320	return srq;
 321}
 322EXPORT_SYMBOL(ib_create_srq);
 323
 324int ib_modify_srq(struct ib_srq *srq,
 325		  struct ib_srq_attr *srq_attr,
 326		  enum ib_srq_attr_mask srq_attr_mask)
 327{
 328	return srq->device->modify_srq ?
 329		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
 330		-ENOSYS;
 331}
 332EXPORT_SYMBOL(ib_modify_srq);
 333
 334int ib_query_srq(struct ib_srq *srq,
 335		 struct ib_srq_attr *srq_attr)
 336{
 337	return srq->device->query_srq ?
 338		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
 339}
 340EXPORT_SYMBOL(ib_query_srq);
 341
 342int ib_destroy_srq(struct ib_srq *srq)
 343{
 344	struct ib_pd *pd;
 345	enum ib_srq_type srq_type;
 346	struct ib_xrcd *uninitialized_var(xrcd);
 347	struct ib_cq *uninitialized_var(cq);
 348	int ret;
 349
 350	if (atomic_read(&srq->usecnt))
 351		return -EBUSY;
 352
 353	pd = srq->pd;
 354	srq_type = srq->srq_type;
 355	if (srq_type == IB_SRQT_XRC) {
 356		xrcd = srq->ext.xrc.xrcd;
 357		cq = srq->ext.xrc.cq;
 358	}
 359
 360	ret = srq->device->destroy_srq(srq);
 361	if (!ret) {
 362		atomic_dec(&pd->usecnt);
 363		if (srq_type == IB_SRQT_XRC) {
 364			atomic_dec(&xrcd->usecnt);
 365			atomic_dec(&cq->usecnt);
 366		}
 367	}
 368
 369	return ret;
 370}
 371EXPORT_SYMBOL(ib_destroy_srq);
 372
 373/* Queue pairs */
 374
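/*
 * Editor's note (not in the original source): XRC target QPs are owned by
 * their XRC domain and may be shared.  ib_create_qp()/ib_open_qp() hand
 * out lightweight ib_qp handles whose ->real_qp points at the shared QP,
 * and __ib_shared_qp_event_handler() fans asynchronous events out to every
 * open handle's event_handler.
 */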
 375static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 376{
 377	struct ib_qp *qp = context;
 378	unsigned long flags;
 379
 380	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
 381	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
 382		if (event->element.qp->event_handler)
 383			event->element.qp->event_handler(event, event->element.qp->qp_context);
 384	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
 385}
 386
 387static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
 388{
 389	mutex_lock(&xrcd->tgt_qp_mutex);
 390	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
 391	mutex_unlock(&xrcd->tgt_qp_mutex);
 392}
 393
 394static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 395				  void (*event_handler)(struct ib_event *, void *),
 396				  void *qp_context)
 397{
 398	struct ib_qp *qp;
 399	unsigned long flags;
 400
 401	qp = kzalloc(sizeof *qp, GFP_KERNEL);
 402	if (!qp)
 403		return ERR_PTR(-ENOMEM);
 404
 405	qp->real_qp = real_qp;
 406	atomic_inc(&real_qp->usecnt);
 407	qp->device = real_qp->device;
 408	qp->event_handler = event_handler;
 409	qp->qp_context = qp_context;
 410	qp->qp_num = real_qp->qp_num;
 411	qp->qp_type = real_qp->qp_type;
 412
 413	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
 414	list_add(&qp->open_list, &real_qp->open_list);
 415	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 416
 417	return qp;
 418}
 419
 420struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
 421			 struct ib_qp_open_attr *qp_open_attr)
 422{
 423	struct ib_qp *qp, *real_qp;
 424
 425	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
 426		return ERR_PTR(-EINVAL);
 427
 428	qp = ERR_PTR(-EINVAL);
 429	mutex_lock(&xrcd->tgt_qp_mutex);
 430	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
 431		if (real_qp->qp_num == qp_open_attr->qp_num) {
 432			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
 433					  qp_open_attr->qp_context);
 434			break;
 435		}
 436	}
 437	mutex_unlock(&xrcd->tgt_qp_mutex);
 438	return qp;
 439}
 440EXPORT_SYMBOL(ib_open_qp);
 441
 442struct ib_qp *ib_create_qp(struct ib_pd *pd,
 443			   struct ib_qp_init_attr *qp_init_attr)
 444{
 445	struct ib_qp *qp, *real_qp;
 446	struct ib_device *device;
 447
 448	device = pd ? pd->device : qp_init_attr->xrcd->device;
 449	qp = device->create_qp(pd, qp_init_attr, NULL);
 450
 451	if (!IS_ERR(qp)) {
 452		qp->device     = device;
 453		qp->real_qp    = qp;
 454		qp->uobject    = NULL;
 455		qp->qp_type    = qp_init_attr->qp_type;
 456
 457		atomic_set(&qp->usecnt, 0);
 458		if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
 459			qp->event_handler = __ib_shared_qp_event_handler;
 460			qp->qp_context = qp;
 461			qp->pd = NULL;
 462			qp->send_cq = qp->recv_cq = NULL;
 463			qp->srq = NULL;
 464			qp->xrcd = qp_init_attr->xrcd;
 465			atomic_inc(&qp_init_attr->xrcd->usecnt);
 466			INIT_LIST_HEAD(&qp->open_list);
 467
 468			real_qp = qp;
 469			qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
 470					  qp_init_attr->qp_context);
 471			if (!IS_ERR(qp))
 472				__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
 473			else
 474				real_qp->device->destroy_qp(real_qp);
 475		} else {
 476			qp->event_handler = qp_init_attr->event_handler;
 477			qp->qp_context = qp_init_attr->qp_context;
 478			if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
 479				qp->recv_cq = NULL;
 480				qp->srq = NULL;
 481			} else {
 482				qp->recv_cq = qp_init_attr->recv_cq;
 483				atomic_inc(&qp_init_attr->recv_cq->usecnt);
 484				qp->srq = qp_init_attr->srq;
 485				if (qp->srq)
 486					atomic_inc(&qp_init_attr->srq->usecnt);
 487			}
 488
 489			qp->pd	    = pd;
 490			qp->send_cq = qp_init_attr->send_cq;
 491			qp->xrcd    = NULL;
 492
 493			atomic_inc(&pd->usecnt);
 494			atomic_inc(&qp_init_attr->send_cq->usecnt);
 495		}
 496	}
 497
 498	return qp;
 499}
 500EXPORT_SYMBOL(ib_create_qp);
 501
 502static const struct {
 503	int			valid;
 504	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
 505	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
 506	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
 507	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
 508} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 509	[IB_QPS_RESET] = {
 510		[IB_QPS_RESET] = { .valid = 1 },
 511		[IB_QPS_INIT]  = {
 512			.valid = 1,
 513			.req_param = {
 514				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 515						IB_QP_PORT			|
 516						IB_QP_QKEY),
 517				[IB_QPT_RAW_PACKET] = IB_QP_PORT,
 518				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
 519						IB_QP_PORT			|
 520						IB_QP_ACCESS_FLAGS),
 521				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
 522						IB_QP_PORT			|
 523						IB_QP_ACCESS_FLAGS),
 524				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
 525						IB_QP_PORT			|
 526						IB_QP_ACCESS_FLAGS),
 527				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
 528						IB_QP_PORT			|
 529						IB_QP_ACCESS_FLAGS),
 530				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
 531						IB_QP_QKEY),
 532				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 533						IB_QP_QKEY),
 534			}
 535		},
 536	},
 537	[IB_QPS_INIT]  = {
 538		[IB_QPS_RESET] = { .valid = 1 },
 539		[IB_QPS_ERR] =   { .valid = 1 },
 540		[IB_QPS_INIT]  = {
 541			.valid = 1,
 542			.opt_param = {
 543				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 544						IB_QP_PORT			|
 545						IB_QP_QKEY),
 546				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX		|
 547						IB_QP_PORT			|
 548						IB_QP_ACCESS_FLAGS),
 549				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX		|
 550						IB_QP_PORT			|
 551						IB_QP_ACCESS_FLAGS),
 552				[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX		|
 553						IB_QP_PORT			|
 554						IB_QP_ACCESS_FLAGS),
 555				[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX		|
 556						IB_QP_PORT			|
 557						IB_QP_ACCESS_FLAGS),
 558				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
 559						IB_QP_QKEY),
 560				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 561						IB_QP_QKEY),
 562			}
 563		},
 564		[IB_QPS_RTR]   = {
 565			.valid = 1,
 566			.req_param = {
 567				[IB_QPT_UC]  = (IB_QP_AV			|
 568						IB_QP_PATH_MTU			|
 569						IB_QP_DEST_QPN			|
 570						IB_QP_RQ_PSN),
 571				[IB_QPT_RC]  = (IB_QP_AV			|
 572						IB_QP_PATH_MTU			|
 573						IB_QP_DEST_QPN			|
 574						IB_QP_RQ_PSN			|
 575						IB_QP_MAX_DEST_RD_ATOMIC	|
 576						IB_QP_MIN_RNR_TIMER),
 577				[IB_QPT_XRC_INI] = (IB_QP_AV			|
 578						IB_QP_PATH_MTU			|
 579						IB_QP_DEST_QPN			|
 580						IB_QP_RQ_PSN),
 581				[IB_QPT_XRC_TGT] = (IB_QP_AV			|
 582						IB_QP_PATH_MTU			|
 583						IB_QP_DEST_QPN			|
 584						IB_QP_RQ_PSN			|
 585						IB_QP_MAX_DEST_RD_ATOMIC	|
 586						IB_QP_MIN_RNR_TIMER),
 587			},
 588			.req_param_add_eth = {
 589				[IB_QPT_RC]  = (IB_QP_SMAC),
 590				[IB_QPT_UC]  = (IB_QP_SMAC),
 591				[IB_QPT_XRC_INI]  = (IB_QP_SMAC),
 592				[IB_QPT_XRC_TGT]  = (IB_QP_SMAC)
 593			},
 594			.opt_param = {
 595				 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 596						 IB_QP_QKEY),
 597				 [IB_QPT_UC]  = (IB_QP_ALT_PATH			|
 598						 IB_QP_ACCESS_FLAGS		|
 599						 IB_QP_PKEY_INDEX),
 600				 [IB_QPT_RC]  = (IB_QP_ALT_PATH			|
 601						 IB_QP_ACCESS_FLAGS		|
 602						 IB_QP_PKEY_INDEX),
 603				 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH		|
 604						 IB_QP_ACCESS_FLAGS		|
 605						 IB_QP_PKEY_INDEX),
 606				 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH		|
 607						 IB_QP_ACCESS_FLAGS		|
 608						 IB_QP_PKEY_INDEX),
 609				 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
 610						 IB_QP_QKEY),
 611				 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 612						 IB_QP_QKEY),
 613			 },
 614			.opt_param_add_eth = {
 615				[IB_QPT_RC]  = (IB_QP_ALT_SMAC			|
 616						IB_QP_VID			|
 617						IB_QP_ALT_VID),
 618				[IB_QPT_UC]  = (IB_QP_ALT_SMAC			|
 619						IB_QP_VID			|
 620						IB_QP_ALT_VID),
 621				[IB_QPT_XRC_INI]  = (IB_QP_ALT_SMAC			|
 622						IB_QP_VID			|
 623						IB_QP_ALT_VID),
 624				[IB_QPT_XRC_TGT]  = (IB_QP_ALT_SMAC			|
 625						IB_QP_VID			|
 626						IB_QP_ALT_VID)
 627			}
 628		}
 629	},
 630	[IB_QPS_RTR]   = {
 631		[IB_QPS_RESET] = { .valid = 1 },
 632		[IB_QPS_ERR] =   { .valid = 1 },
 633		[IB_QPS_RTS]   = {
 634			.valid = 1,
 635			.req_param = {
 636				[IB_QPT_UD]  = IB_QP_SQ_PSN,
 637				[IB_QPT_UC]  = IB_QP_SQ_PSN,
 638				[IB_QPT_RC]  = (IB_QP_TIMEOUT			|
 639						IB_QP_RETRY_CNT			|
 640						IB_QP_RNR_RETRY			|
 641						IB_QP_SQ_PSN			|
 642						IB_QP_MAX_QP_RD_ATOMIC),
 643				[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT		|
 644						IB_QP_RETRY_CNT			|
 645						IB_QP_RNR_RETRY			|
 646						IB_QP_SQ_PSN			|
 647						IB_QP_MAX_QP_RD_ATOMIC),
 648				[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT		|
 649						IB_QP_SQ_PSN),
 650				[IB_QPT_SMI] = IB_QP_SQ_PSN,
 651				[IB_QPT_GSI] = IB_QP_SQ_PSN,
 652			},
 653			.opt_param = {
 654				 [IB_QPT_UD]  = (IB_QP_CUR_STATE		|
 655						 IB_QP_QKEY),
 656				 [IB_QPT_UC]  = (IB_QP_CUR_STATE		|
 657						 IB_QP_ALT_PATH			|
 658						 IB_QP_ACCESS_FLAGS		|
 659						 IB_QP_PATH_MIG_STATE),
 660				 [IB_QPT_RC]  = (IB_QP_CUR_STATE		|
 661						 IB_QP_ALT_PATH			|
 662						 IB_QP_ACCESS_FLAGS		|
 663						 IB_QP_MIN_RNR_TIMER		|
 664						 IB_QP_PATH_MIG_STATE),
 665				 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
 666						 IB_QP_ALT_PATH			|
 667						 IB_QP_ACCESS_FLAGS		|
 668						 IB_QP_PATH_MIG_STATE),
 669				 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
 670						 IB_QP_ALT_PATH			|
 671						 IB_QP_ACCESS_FLAGS		|
 672						 IB_QP_MIN_RNR_TIMER		|
 673						 IB_QP_PATH_MIG_STATE),
 674				 [IB_QPT_SMI] = (IB_QP_CUR_STATE		|
 675						 IB_QP_QKEY),
 676				 [IB_QPT_GSI] = (IB_QP_CUR_STATE		|
 677						 IB_QP_QKEY),
 678			 }
 679		}
 680	},
 681	[IB_QPS_RTS]   = {
 682		[IB_QPS_RESET] = { .valid = 1 },
 683		[IB_QPS_ERR] =   { .valid = 1 },
 684		[IB_QPS_RTS]   = {
 685			.valid = 1,
 686			.opt_param = {
 687				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
 688						IB_QP_QKEY),
 689				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
 690						IB_QP_ACCESS_FLAGS		|
 691						IB_QP_ALT_PATH			|
 692						IB_QP_PATH_MIG_STATE),
 693				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
 694						IB_QP_ACCESS_FLAGS		|
 695						IB_QP_ALT_PATH			|
 696						IB_QP_PATH_MIG_STATE		|
 697						IB_QP_MIN_RNR_TIMER),
 698				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
 699						IB_QP_ACCESS_FLAGS		|
 700						IB_QP_ALT_PATH			|
 701						IB_QP_PATH_MIG_STATE),
 702				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
 703						IB_QP_ACCESS_FLAGS		|
 704						IB_QP_ALT_PATH			|
 705						IB_QP_PATH_MIG_STATE		|
 706						IB_QP_MIN_RNR_TIMER),
 707				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
 708						IB_QP_QKEY),
 709				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
 710						IB_QP_QKEY),
 711			}
 712		},
 713		[IB_QPS_SQD]   = {
 714			.valid = 1,
 715			.opt_param = {
 716				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
 717				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
 718				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
 719				[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
 720				[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
 721				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
 722				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
 723			}
 724		},
 725	},
 726	[IB_QPS_SQD]   = {
 727		[IB_QPS_RESET] = { .valid = 1 },
 728		[IB_QPS_ERR] =   { .valid = 1 },
 729		[IB_QPS_RTS]   = {
 730			.valid = 1,
 731			.opt_param = {
 732				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
 733						IB_QP_QKEY),
 734				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
 735						IB_QP_ALT_PATH			|
 736						IB_QP_ACCESS_FLAGS		|
 737						IB_QP_PATH_MIG_STATE),
 738				[IB_QPT_RC]  = (IB_QP_CUR_STATE			|
 739						IB_QP_ALT_PATH			|
 740						IB_QP_ACCESS_FLAGS		|
 741						IB_QP_MIN_RNR_TIMER		|
 742						IB_QP_PATH_MIG_STATE),
 743				[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE		|
 744						IB_QP_ALT_PATH			|
 745						IB_QP_ACCESS_FLAGS		|
 746						IB_QP_PATH_MIG_STATE),
 747				[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE		|
 748						IB_QP_ALT_PATH			|
 749						IB_QP_ACCESS_FLAGS		|
 750						IB_QP_MIN_RNR_TIMER		|
 751						IB_QP_PATH_MIG_STATE),
 752				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
 753						IB_QP_QKEY),
 754				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
 755						IB_QP_QKEY),
 756			}
 757		},
 758		[IB_QPS_SQD]   = {
 759			.valid = 1,
 760			.opt_param = {
 761				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 762						IB_QP_QKEY),
 763				[IB_QPT_UC]  = (IB_QP_AV			|
 764						IB_QP_ALT_PATH			|
 765						IB_QP_ACCESS_FLAGS		|
 766						IB_QP_PKEY_INDEX		|
 767						IB_QP_PATH_MIG_STATE),
 768				[IB_QPT_RC]  = (IB_QP_PORT			|
 769						IB_QP_AV			|
 770						IB_QP_TIMEOUT			|
 771						IB_QP_RETRY_CNT			|
 772						IB_QP_RNR_RETRY			|
 773						IB_QP_MAX_QP_RD_ATOMIC		|
 774						IB_QP_MAX_DEST_RD_ATOMIC	|
 775						IB_QP_ALT_PATH			|
 776						IB_QP_ACCESS_FLAGS		|
 777						IB_QP_PKEY_INDEX		|
 778						IB_QP_MIN_RNR_TIMER		|
 779						IB_QP_PATH_MIG_STATE),
 780				[IB_QPT_XRC_INI] = (IB_QP_PORT			|
 781						IB_QP_AV			|
 782						IB_QP_TIMEOUT			|
 783						IB_QP_RETRY_CNT			|
 784						IB_QP_RNR_RETRY			|
 785						IB_QP_MAX_QP_RD_ATOMIC		|
 786						IB_QP_ALT_PATH			|
 787						IB_QP_ACCESS_FLAGS		|
 788						IB_QP_PKEY_INDEX		|
 789						IB_QP_PATH_MIG_STATE),
 790				[IB_QPT_XRC_TGT] = (IB_QP_PORT			|
 791						IB_QP_AV			|
 792						IB_QP_TIMEOUT			|
 793						IB_QP_MAX_DEST_RD_ATOMIC	|
 794						IB_QP_ALT_PATH			|
 795						IB_QP_ACCESS_FLAGS		|
 796						IB_QP_PKEY_INDEX		|
 797						IB_QP_MIN_RNR_TIMER		|
 798						IB_QP_PATH_MIG_STATE),
 799				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX		|
 800						IB_QP_QKEY),
 801				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 802						IB_QP_QKEY),
 803			}
 804		}
 805	},
 806	[IB_QPS_SQE]   = {
 807		[IB_QPS_RESET] = { .valid = 1 },
 808		[IB_QPS_ERR] =   { .valid = 1 },
 809		[IB_QPS_RTS]   = {
 810			.valid = 1,
 811			.opt_param = {
 812				[IB_QPT_UD]  = (IB_QP_CUR_STATE			|
 813						IB_QP_QKEY),
 814				[IB_QPT_UC]  = (IB_QP_CUR_STATE			|
 815						IB_QP_ACCESS_FLAGS),
 816				[IB_QPT_SMI] = (IB_QP_CUR_STATE			|
 817						IB_QP_QKEY),
 818				[IB_QPT_GSI] = (IB_QP_CUR_STATE			|
 819						IB_QP_QKEY),
 820			}
 821		}
 822	},
 823	[IB_QPS_ERR] = {
 824		[IB_QPS_RESET] = { .valid = 1 },
 825		[IB_QPS_ERR] =   { .valid = 1 }
 826	}
 827};
 828
 829int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 830		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
 831		       enum rdma_link_layer ll)
 832{
 833	enum ib_qp_attr_mask req_param, opt_param;
 834
 835	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
 836	    next_state < 0 || next_state > IB_QPS_ERR)
 837		return 0;
 838
 839	if (mask & IB_QP_CUR_STATE  &&
 840	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
 841	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
 842		return 0;
 843
 844	if (!qp_state_table[cur_state][next_state].valid)
 845		return 0;
 846
 847	req_param = qp_state_table[cur_state][next_state].req_param[type];
 848	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
 849
 850	if (ll == IB_LINK_LAYER_ETHERNET) {
 851		req_param |= qp_state_table[cur_state][next_state].
 852			req_param_add_eth[type];
 853		opt_param |= qp_state_table[cur_state][next_state].
 854			opt_param_add_eth[type];
 855	}
 856
 857	if ((mask & req_param) != req_param)
 858		return 0;
 859
 860	if (mask & ~(req_param | opt_param | IB_QP_STATE))
 861		return 0;
 862
 863	return 1;
 864}
 865EXPORT_SYMBOL(ib_modify_qp_is_ok);
 866
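/*
 * Editor's note (illustrative only, not part of this file): a provider
 * driver's modify_qp hook typically validates the caller's attribute mask
 * with this helper before touching hardware, roughly:
 *
 *	enum rdma_link_layer ll =
 *		rdma_port_get_link_layer(ibqp->device, port);
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask, ll))
 *		return -EINVAL;
 *
 * cur_state, new_state and port are hypothetical locals the driver derives
 * from its own QP state and the attribute mask.
 */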
 867int ib_resolve_eth_l2_attrs(struct ib_qp *qp,
 868			    struct ib_qp_attr *qp_attr, int *qp_attr_mask)
 869{
 870	int           ret = 0;
 871	union ib_gid  sgid;
 872
 873	if ((*qp_attr_mask & IB_QP_AV)  &&
 874	    (rdma_port_get_link_layer(qp->device, qp_attr->ah_attr.port_num) == IB_LINK_LAYER_ETHERNET)) {
 875		ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num,
 876				   qp_attr->ah_attr.grh.sgid_index, &sgid);
 877		if (ret)
 878			goto out;
 879		if (rdma_link_local_addr((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw)) {
 880			rdma_get_ll_mac((struct in6_addr *)qp_attr->ah_attr.grh.dgid.raw, qp_attr->ah_attr.dmac);
 881			rdma_get_ll_mac((struct in6_addr *)sgid.raw, qp_attr->smac);
 882			qp_attr->vlan_id = rdma_get_vlan_id(&sgid);
 883		} else {
 884			ret = rdma_addr_find_dmac_by_grh(&sgid, &qp_attr->ah_attr.grh.dgid,
 885					qp_attr->ah_attr.dmac, &qp_attr->vlan_id);
 886			if (ret)
 887				goto out;
 888			ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr->smac, NULL);
 889			if (ret)
 890				goto out;
 891		}
 892		*qp_attr_mask |= IB_QP_SMAC;
 893		if (qp_attr->vlan_id < 0xFFFF)
 894			*qp_attr_mask |= IB_QP_VID;
 895	}
 896out:
 897	return ret;
 898}
 899EXPORT_SYMBOL(ib_resolve_eth_l2_attrs);
 900
 901
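/*
 * Editor's note (not in the original source): on RoCE ports the address
 * vector only carries GIDs, so ib_modify_qp() first calls
 * ib_resolve_eth_l2_attrs() above to derive the source/destination MACs
 * (and the VLAN, when one applies) from those GIDs and to add IB_QP_SMAC /
 * IB_QP_VID to the attribute mask before invoking the driver's modify_qp.
 */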
 902int ib_modify_qp(struct ib_qp *qp,
 903		 struct ib_qp_attr *qp_attr,
 904		 int qp_attr_mask)
 905{
 906	int ret;
 907
 908	ret = ib_resolve_eth_l2_attrs(qp, qp_attr, &qp_attr_mask);
 909	if (ret)
 910		return ret;
 911
 912	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 913}
 914EXPORT_SYMBOL(ib_modify_qp);
 915
 916int ib_query_qp(struct ib_qp *qp,
 917		struct ib_qp_attr *qp_attr,
 918		int qp_attr_mask,
 919		struct ib_qp_init_attr *qp_init_attr)
 920{
 921	return qp->device->query_qp ?
 922		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
 923		-ENOSYS;
 924}
 925EXPORT_SYMBOL(ib_query_qp);
 926
 927int ib_close_qp(struct ib_qp *qp)
 928{
 929	struct ib_qp *real_qp;
 930	unsigned long flags;
 931
 932	real_qp = qp->real_qp;
 933	if (real_qp == qp)
 934		return -EINVAL;
 935
 936	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
 937	list_del(&qp->open_list);
 938	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 939
 940	atomic_dec(&real_qp->usecnt);
 941	kfree(qp);
 942
 943	return 0;
 944}
 945EXPORT_SYMBOL(ib_close_qp);
 946
 947static int __ib_destroy_shared_qp(struct ib_qp *qp)
 948{
 949	struct ib_xrcd *xrcd;
 950	struct ib_qp *real_qp;
 951	int ret;
 952
 953	real_qp = qp->real_qp;
 954	xrcd = real_qp->xrcd;
 955
 956	mutex_lock(&xrcd->tgt_qp_mutex);
 957	ib_close_qp(qp);
 958	if (atomic_read(&real_qp->usecnt) == 0)
 959		list_del(&real_qp->xrcd_list);
 960	else
 961		real_qp = NULL;
 962	mutex_unlock(&xrcd->tgt_qp_mutex);
 963
 964	if (real_qp) {
 965		ret = ib_destroy_qp(real_qp);
 966		if (!ret)
 967			atomic_dec(&xrcd->usecnt);
 968		else
 969			__ib_insert_xrcd_qp(xrcd, real_qp);
 970	}
 971
 972	return 0;
 973}
 974
 975int ib_destroy_qp(struct ib_qp *qp)
 976{
 977	struct ib_pd *pd;
 978	struct ib_cq *scq, *rcq;
 979	struct ib_srq *srq;
 980	int ret;
 981
 982	if (atomic_read(&qp->usecnt))
 983		return -EBUSY;
 984
 985	if (qp->real_qp != qp)
 986		return __ib_destroy_shared_qp(qp);
 987
 988	pd   = qp->pd;
 989	scq  = qp->send_cq;
 990	rcq  = qp->recv_cq;
 991	srq  = qp->srq;
 992
 993	ret = qp->device->destroy_qp(qp);
 994	if (!ret) {
 995		if (pd)
 996			atomic_dec(&pd->usecnt);
 997		if (scq)
 998			atomic_dec(&scq->usecnt);
 999		if (rcq)
1000			atomic_dec(&rcq->usecnt);
1001		if (srq)
1002			atomic_dec(&srq->usecnt);
1003	}
1004
1005	return ret;
1006}
1007EXPORT_SYMBOL(ib_destroy_qp);
1008
1009/* Completion queues */
1010
1011struct ib_cq *ib_create_cq(struct ib_device *device,
1012			   ib_comp_handler comp_handler,
1013			   void (*event_handler)(struct ib_event *, void *),
1014			   void *cq_context, int cqe, int comp_vector)
1015{
1016	struct ib_cq *cq;
1017
1018	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);
1019
1020	if (!IS_ERR(cq)) {
1021		cq->device        = device;
1022		cq->uobject       = NULL;
1023		cq->comp_handler  = comp_handler;
1024		cq->event_handler = event_handler;
1025		cq->cq_context    = cq_context;
1026		atomic_set(&cq->usecnt, 0);
1027	}
1028
1029	return cq;
1030}
1031EXPORT_SYMBOL(ib_create_cq);
1032
1033int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1034{
1035	return cq->device->modify_cq ?
1036		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
1037}
1038EXPORT_SYMBOL(ib_modify_cq);
1039
1040int ib_destroy_cq(struct ib_cq *cq)
1041{
1042	if (atomic_read(&cq->usecnt))
1043		return -EBUSY;
1044
1045	return cq->device->destroy_cq(cq);
1046}
1047EXPORT_SYMBOL(ib_destroy_cq);
1048
1049int ib_resize_cq(struct ib_cq *cq, int cqe)
1050{
1051	return cq->device->resize_cq ?
1052		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
1053}
1054EXPORT_SYMBOL(ib_resize_cq);
1055
1056/* Memory regions */
1057
1058struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
1059{
1060	struct ib_mr *mr;
1061	int err;
1062
1063	err = ib_check_mr_access(mr_access_flags);
1064	if (err)
1065		return ERR_PTR(err);
1066
1067	mr = pd->device->get_dma_mr(pd, mr_access_flags);
1068
1069	if (!IS_ERR(mr)) {
1070		mr->device  = pd->device;
1071		mr->pd      = pd;
1072		mr->uobject = NULL;
1073		atomic_inc(&pd->usecnt);
1074		atomic_set(&mr->usecnt, 0);
1075	}
1076
1077	return mr;
1078}
1079EXPORT_SYMBOL(ib_get_dma_mr);
1080
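/*
 * ib_reg_phys_mr - register a region described by an array of
 * physical buffers
 *
 * Access flags are checked first; -ENOSYS is returned if the driver
 * has no reg_phys_mr op.  On success the MR is initialized and the PD
 * use count is bumped.
 */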
1081struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1082			     struct ib_phys_buf *phys_buf_array,
1083			     int num_phys_buf,
1084			     int mr_access_flags,
1085			     u64 *iova_start)
1086{
1087	struct ib_mr *mr;
1088	int err;
1089
1090	err = ib_check_mr_access(mr_access_flags);
1091	if (err)
1092		return ERR_PTR(err);
1093
1094	if (!pd->device->reg_phys_mr)
1095		return ERR_PTR(-ENOSYS);
1096
1097	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
1098				     mr_access_flags, iova_start);
1099
1100	if (!IS_ERR(mr)) {
1101		mr->device  = pd->device;
1102		mr->pd      = pd;
1103		mr->uobject = NULL;
1104		atomic_inc(&pd->usecnt);
1105		atomic_set(&mr->usecnt, 0);
1106	}
1107
1108	return mr;
1109}
1110EXPORT_SYMBOL(ib_reg_phys_mr);
1111
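/*
 * ib_rereg_phys_mr - modify an existing physical MR
 *
 * Fails with -ENOSYS if the driver has no rereg_phys_mr op and with
 * -EBUSY while the MR is in use.  When IB_MR_REREG_PD is set and the
 * driver call succeeds, the PD reference is moved from the old PD to
 * the new one.
 */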
1112int ib_rereg_phys_mr(struct ib_mr *mr,
1113		     int mr_rereg_mask,
1114		     struct ib_pd *pd,
1115		     struct ib_phys_buf *phys_buf_array,
1116		     int num_phys_buf,
1117		     int mr_access_flags,
1118		     u64 *iova_start)
1119{
1120	struct ib_pd *old_pd;
1121	int ret;
1122
1123	ret = ib_check_mr_access(mr_access_flags);
1124	if (ret)
1125		return ret;
1126
1127	if (!mr->device->rereg_phys_mr)
1128		return -ENOSYS;
1129
1130	if (atomic_read(&mr->usecnt))
1131		return -EBUSY;
1132
1133	old_pd = mr->pd;
1134
1135	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
1136					phys_buf_array, num_phys_buf,
1137					mr_access_flags, iova_start);
1138
1139	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
1140		atomic_dec(&old_pd->usecnt);
1141		atomic_inc(&pd->usecnt);
1142	}
1143
1144	return ret;
1145}
1146EXPORT_SYMBOL(ib_rereg_phys_mr);
1147
1148int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
1149{
1150	return mr->device->query_mr ?
1151		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
1152}
1153EXPORT_SYMBOL(ib_query_mr);
1154
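/*
 * ib_dereg_mr - deregister a memory region
 *
 * Refuses with -EBUSY while the MR still has users; on successful
 * driver deregistration the reference on the MR's PD is dropped.
 */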
1155int ib_dereg_mr(struct ib_mr *mr)
1156{
1157	struct ib_pd *pd;
1158	int ret;
1159
1160	if (atomic_read(&mr->usecnt))
1161		return -EBUSY;
1162
1163	pd = mr->pd;
1164	ret = mr->device->dereg_mr(mr);
1165	if (!ret)
1166		atomic_dec(&pd->usecnt);
1167
1168	return ret;
1169}
1170EXPORT_SYMBOL(ib_dereg_mr);
1171
1172struct ib_mr *ib_create_mr(struct ib_pd *pd,
1173			   struct ib_mr_init_attr *mr_init_attr)
1174{
1175	struct ib_mr *mr;
1176
1177	if (!pd->device->create_mr)
1178		return ERR_PTR(-ENOSYS);
1179
1180	mr = pd->device->create_mr(pd, mr_init_attr);
1181
1182	if (!IS_ERR(mr)) {
1183		mr->device  = pd->device;
1184		mr->pd      = pd;
1185		mr->uobject = NULL;
1186		atomic_inc(&pd->usecnt);
1187		atomic_set(&mr->usecnt, 0);
1188	}
1189
1190	return mr;
1191}
1192EXPORT_SYMBOL(ib_create_mr);
1193
1194int ib_destroy_mr(struct ib_mr *mr)
1195{
1196	struct ib_pd *pd;
1197	int ret;
1198
1199	if (atomic_read(&mr->usecnt))
1200		return -EBUSY;
1201
1202	pd = mr->pd;
1203	ret = mr->device->destroy_mr(mr);
1204	if (!ret)
1205		atomic_dec(&pd->usecnt);
1206
1207	return ret;
1208}
1209EXPORT_SYMBOL(ib_destroy_mr);
1210
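/*
 * ib_alloc_fast_reg_mr - allocate an MR for fast-registration work
 * requests covering up to max_page_list_len pages
 *
 * Returns -ENOSYS if the driver has no alloc_fast_reg_mr op; on
 * success the MR is initialized and a PD reference is taken.  Used
 * together with a page list from ib_alloc_fast_reg_page_list() below.
 */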
1211struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
1212{
1213	struct ib_mr *mr;
1214
1215	if (!pd->device->alloc_fast_reg_mr)
1216		return ERR_PTR(-ENOSYS);
1217
1218	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);
1219
1220	if (!IS_ERR(mr)) {
1221		mr->device  = pd->device;
1222		mr->pd      = pd;
1223		mr->uobject = NULL;
1224		atomic_inc(&pd->usecnt);
1225		atomic_set(&mr->usecnt, 0);
1226	}
1227
1228	return mr;
1229}
1230EXPORT_SYMBOL(ib_alloc_fast_reg_mr);
1231
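/*
 * ib_alloc_fast_reg_page_list - allocate a driver page list of up to
 * max_page_list_len entries for fast-registration work requests;
 * freed again with ib_free_fast_reg_page_list().
 */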
1232struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
1233							  int max_page_list_len)
1234{
1235	struct ib_fast_reg_page_list *page_list;
1236
1237	if (!device->alloc_fast_reg_page_list)
1238		return ERR_PTR(-ENOSYS);
1239
1240	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);
1241
1242	if (!IS_ERR(page_list)) {
1243		page_list->device = device;
1244		page_list->max_page_list_len = max_page_list_len;
1245	}
1246
1247	return page_list;
1248}
1249EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);
1250
1251void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
1252{
1253	page_list->device->free_fast_reg_page_list(page_list);
1254}
1255EXPORT_SYMBOL(ib_free_fast_reg_page_list);
1256
1257/* Memory windows */
1258
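/*
 * ib_alloc_mw - allocate a memory window of the given type on a PD
 *
 * Returns -ENOSYS without driver support; on success the window is
 * initialized and a PD reference is taken (dropped again by
 * ib_dealloc_mw()).
 */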
1259struct ib_mw *ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
1260{
1261	struct ib_mw *mw;
1262
1263	if (!pd->device->alloc_mw)
1264		return ERR_PTR(-ENOSYS);
1265
1266	mw = pd->device->alloc_mw(pd, type);
1267	if (!IS_ERR(mw)) {
1268		mw->device  = pd->device;
1269		mw->pd      = pd;
1270		mw->uobject = NULL;
1271		mw->type    = type;
1272		atomic_inc(&pd->usecnt);
1273	}
1274
1275	return mw;
1276}
1277EXPORT_SYMBOL(ib_alloc_mw);
1278
1279int ib_dealloc_mw(struct ib_mw *mw)
1280{
1281	struct ib_pd *pd;
1282	int ret;
1283
1284	pd = mw->pd;
1285	ret = mw->device->dealloc_mw(mw);
1286	if (!ret)
1287		atomic_dec(&pd->usecnt);
1288
1289	return ret;
1290}
1291EXPORT_SYMBOL(ib_dealloc_mw);
1292
1293/* "Fast" memory regions */
1294
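/*
 * ib_alloc_fmr - allocate a fast memory region on a PD
 *
 * Returns -ENOSYS without driver support; takes a PD reference on
 * success, released by ib_dealloc_fmr().
 */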
1295struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1296			    int mr_access_flags,
1297			    struct ib_fmr_attr *fmr_attr)
1298{
1299	struct ib_fmr *fmr;
1300
1301	if (!pd->device->alloc_fmr)
1302		return ERR_PTR(-ENOSYS);
1303
1304	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
1305	if (!IS_ERR(fmr)) {
1306		fmr->device = pd->device;
1307		fmr->pd     = pd;
1308		atomic_inc(&pd->usecnt);
1309	}
1310
1311	return fmr;
1312}
1313EXPORT_SYMBOL(ib_alloc_fmr);
1314
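/*
 * ib_unmap_fmr - unmap a list of FMRs in a single driver call
 *
 * The driver op is taken from the first entry, so all FMRs on the
 * list are expected to belong to the same device.  An empty list is a
 * no-op.
 */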
1315int ib_unmap_fmr(struct list_head *fmr_list)
1316{
1317	struct ib_fmr *fmr;
1318
1319	if (list_empty(fmr_list))
1320		return 0;
1321
1322	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
1323	return fmr->device->unmap_fmr(fmr_list);
1324}
1325EXPORT_SYMBOL(ib_unmap_fmr);
1326
1327int ib_dealloc_fmr(struct ib_fmr *fmr)
1328{
1329	struct ib_pd *pd;
1330	int ret;
1331
1332	pd = fmr->pd;
1333	ret = fmr->device->dealloc_fmr(fmr);
1334	if (!ret)
1335		atomic_dec(&pd->usecnt);
1336
1337	return ret;
1338}
1339EXPORT_SYMBOL(ib_dealloc_fmr);
1340
1341/* Multicast groups */
1342
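/*
 * ib_attach_mcast - attach a UD QP to a multicast group
 *
 * The GID must be a multicast GID (gid->raw[0] == 0xff) and the QP
 * must be of type IB_QPT_UD, otherwise -EINVAL; -ENOSYS without
 * driver support.  The QP use count is bumped on success and dropped
 * again by ib_detach_mcast().
 *
 * Illustrative sketch only (ud_qp, mcast_gid and mcast_lid are
 * placeholder names):
 *
 *	ret = ib_attach_mcast(ud_qp, &mcast_gid, mcast_lid);
 *	if (ret)
 *		return ret;
 */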
1343int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1344{
1345	int ret;
1346
1347	if (!qp->device->attach_mcast)
1348		return -ENOSYS;
1349	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1350		return -EINVAL;
1351
1352	ret = qp->device->attach_mcast(qp, gid, lid);
1353	if (!ret)
1354		atomic_inc(&qp->usecnt);
1355	return ret;
1356}
1357EXPORT_SYMBOL(ib_attach_mcast);
1358
1359int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
1360{
1361	int ret;
1362
1363	if (!qp->device->detach_mcast)
1364		return -ENOSYS;
1365	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
1366		return -EINVAL;
1367
1368	ret = qp->device->detach_mcast(qp, gid, lid);
1369	if (!ret)
1370		atomic_dec(&qp->usecnt);
1371	return ret;
1372}
1373EXPORT_SYMBOL(ib_detach_mcast);
1374
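/*
 * ib_alloc_xrcd - allocate an XRC domain
 *
 * Returns -ENOSYS without driver support.  On success the XRCD is
 * initialized with a zero use count and an empty target-QP list.
 */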
1375struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
1376{
1377	struct ib_xrcd *xrcd;
1378
1379	if (!device->alloc_xrcd)
1380		return ERR_PTR(-ENOSYS);
1381
1382	xrcd = device->alloc_xrcd(device, NULL, NULL);
1383	if (!IS_ERR(xrcd)) {
1384		xrcd->device = device;
1385		xrcd->inode = NULL;
1386		atomic_set(&xrcd->usecnt, 0);
1387		mutex_init(&xrcd->tgt_qp_mutex);
1388		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
1389	}
1390
1391	return xrcd;
1392}
1393EXPORT_SYMBOL(ib_alloc_xrcd);
1394
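/*
 * ib_dealloc_xrcd - deallocate an XRC domain
 *
 * Fails with -EBUSY while the XRCD is still referenced.  Any target
 * QPs remaining on tgt_qp_list are destroyed first, then the driver's
 * dealloc_xrcd op is called.
 */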
1395int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
1396{
1397	struct ib_qp *qp;
1398	int ret;
1399
1400	if (atomic_read(&xrcd->usecnt))
1401		return -EBUSY;
1402
1403	while (!list_empty(&xrcd->tgt_qp_list)) {
1404		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
1405		ret = ib_destroy_qp(qp);
1406		if (ret)
1407			return ret;
1408	}
1409
1410	return xrcd->device->dealloc_xrcd(xrcd);
1411}
1412EXPORT_SYMBOL(ib_dealloc_xrcd);
1413
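/*
 * ib_create_flow - attach a flow-steering rule to a QP
 *
 * Returns -ENOSYS without driver support.  A reference on the QP is
 * taken on success and released again by ib_destroy_flow().
 */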
1414struct ib_flow *ib_create_flow(struct ib_qp *qp,
1415			       struct ib_flow_attr *flow_attr,
1416			       int domain)
1417{
 1418	struct ib_flow *flow_id;

 1419	if (!qp->device->create_flow)
1420		return ERR_PTR(-ENOSYS);
1421
1422	flow_id = qp->device->create_flow(qp, flow_attr, domain);
1423	if (!IS_ERR(flow_id))
1424		atomic_inc(&qp->usecnt);
1425	return flow_id;
1426}
1427EXPORT_SYMBOL(ib_create_flow);
1428
1429int ib_destroy_flow(struct ib_flow *flow_id)
1430{
1431	int err;
1432	struct ib_qp *qp = flow_id->qp;
1433
1434	err = qp->device->destroy_flow(flow_id);
1435	if (!err)
1436		atomic_dec(&qp->usecnt);
1437	return err;
1438}
1439EXPORT_SYMBOL(ib_destroy_flow);
1440
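/*
 * ib_check_mr_status - query the current status of a memory region
 * for the bits set in check_mask; -ENOSYS if the driver does not
 * implement check_mr_status.
 */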
1441int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
1442		       struct ib_mr_status *mr_status)
1443{
1444	return mr->device->check_mr_status ?
1445		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
1446}
1447EXPORT_SYMBOL(ib_check_mr_status);