/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	   Redistribution and use in source and binary forms, with or
 *	   without modification, are permitted provided that the following
 *	   conditions are met:
 *
 *		- Redistributions of source code must retain the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer.
 *
 *		- Redistributions in binary form must reproduce the above
 *		  copyright notice, this list of conditions and the following
 *		  disclaimer in the documentation and/or other materials
 *		  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

char *rxe_qp_state_name[] = {
	[QP_STATE_RESET]	= "RESET",
	[QP_STATE_INIT]		= "INIT",
	[QP_STATE_READY]	= "READY",
	[QP_STATE_DRAIN]	= "DRAIN",
	[QP_STATE_DRAINED]	= "DRAINED",
	[QP_STATE_ERROR]	= "ERROR",
};

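/* Validate the requested queue pair capacities (work requests,
 * scatter/gather entries, inline data) against the device limits
 * advertised in rxe->attr.  The receive-side checks are skipped when
 * the QP is attached to an SRQ, since the SRQ owns the receive queue
 * in that case.
 */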
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

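/* Sanity-check an ib_qp_init_attr before a QP is created: both CQs must
 * be present, the capacities must fit the device, and the special SMI/GSI
 * QPs may exist only once per port (rxe exposes a single port, port 1).
 */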
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

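/* The responder keeps an array of resp_res slots so that incoming RDMA
 * READ and atomic requests can be replayed if the requester retries them;
 * the array is sized by max_dest_rd_atomic, one slot per outstanding
 * request, and consumed as a ring via res_head/res_tail.
 */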
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

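	/* per-QP defaults: the signalling type comes from the caller, and
	 * path_mtu starts at enum value 1 (IB_MTU_256) until modify_qp
	 * installs the negotiated path MTU
	 */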
	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

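	/* each send WQE must be able to hold either the scatter/gather
	 * list or the inline payload, whichever needs more room, so size
	 * the queue element for the larger of the two
	 */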
	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, udata, true,
			   context, qp->sq.queue->buf,
			   qp->sq.queue->buf_size, &qp->sq.queue->ip);

	if (err) {
		kvfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

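	/* older init_timer() interface (predating timer_setup()): the
	 * callback gets the qp pointer back through the timer's data field
	 */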
	init_timer(&qp->rnr_nak_timer);
	qp->rnr_nak_timer.function = rnr_nak_timer;
	qp->rnr_nak_timer.data = (unsigned long)qp;

	init_timer(&qp->retrans_timer);
	qp->retrans_timer.function = retransmit_timer;
	qp->retrans_timer.data = (unsigned long)qp;
	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */

	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context, struct ib_udata *udata)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, udata, false, context,
				   qp->rq.queue->buf,
				   qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			kvfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

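/* References are taken on the pd, both cqs and the optional srq before the
 * queues are built; if either init step fails they are dropped again under
 * err1/err2 below, so a failed create leaves no stale reference counts.
 */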
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, udata);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, udata);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* run each task once so the state machines reset themselves and
	 * drain their work and packet queues
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

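/* For RC QPs the completer task is scheduled to run asynchronously via
 * rxe_run_task(); for other QP types it is invoked directly in the
 * caller's context with __rxe_do_task().
 */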
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	union ib_gid sgid;
	struct ib_gid_attr sgid_attr;

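	/* both the initiator depth (max_rd_atomic) and the responder
	 * resources (max_dest_rd_atomic) are rounded up to the next
	 * power of two before being stored
	 */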
	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);
		rxe_av_from_attr(rxe, attr->port_num, &qp->pri_av,
				 &attr->ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->pri_av, &attr->ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);
	}

	if (mask & IB_QP_ALT_PATH) {
		ib_get_cached_gid(&rxe->ib_dev, 1,
				  attr->alt_ah_attr.grh.sgid_index, &sgid,
				  &sgid_attr);

		rxe_av_from_attr(rxe, attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(rxe, &qp->alt_av, &attr->alt_ah_attr,
				    &sgid_attr, &sgid);
		if (sgid_attr.ndev)
			dev_put(sgid_attr.ndev);

		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

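	/* e.g. attr->timeout = 14 gives 4096ns << 14 = ~67.1ms, which
	 * nsecs_to_jiffies() converts to roughly 67 jiffies at HZ=1000
	 */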
	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(rxe, &qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(rxe, &qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	del_timer_sync(&qp->retrans_timer);
	del_timer_sync(&qp->rnr_nak_timer);

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(void *arg)
{
	struct rxe_qp *qp = arg;

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}