// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		rxe_dbg(rxe, "invalid send wr = %u > %d\n",
			 cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		rxe_dbg(rxe, "invalid send sge = %u > %d\n",
			 cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			rxe_dbg(rxe, "invalid recv wr = %u > %d\n",
				 cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			rxe_dbg(rxe, "invalid recv sge = %u > %d\n",
				 cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		rxe_dbg(rxe, "invalid max inline data = %u > %d\n",
			 cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	switch (init->qp_type) {
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!init->recv_cq || !init->send_cq) {
		rxe_dbg(rxe, "missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			rxe_dbg(rxe, "invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (port->qp_gsi_index) {
			rxe_dbg(rxe, "GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

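/* resp.resources is responder-side bookkeeping for inbound RDMA Read and
 * Atomic operations: one resp_res slot per outstanding request, sized by
 * max_dest_rd_atomic (see rxe_qp_from_attr() below).
 */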
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

void free_rd_atomic_resource(struct resp_res *res)
{
	res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(res);
		}
	}
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);
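	/* path_mtu enum value 1 is IB_MTU_256, a conservative initial
	 * value until modify_qp programs the real path MTU
	 */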

	qpn			= qp->elem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	spin_lock_init(&qp->state_lock);

	spin_lock_init(&qp->sq.sq_lock);
	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
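	/* illustrative: assuming RXE_ROCE_V2_SPORT is 0xc000, the masked
	 * 14-bit hash picks an offset in [0, 0x3fff], so src_port always
	 * lands inside the dynamic range [0xc000, 0xffff] described above
	 */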
	qp->sq.max_wr		= init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);
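	/* each send WQE is a fixed header (struct rxe_send_wqe) followed
	 * by space shared between the SGE array and inline data, so the
	 * slot size is the larger of the two plus the header; the adjusted
	 * caps are written back to init->cap so the caller sees the
	 * capacities actually allocated
	 */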

	type = QUEUE_TYPE_FROM_CLIENT;
	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
				wqe_size, type);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		qp->sq.queue = NULL;
		return err;
	}

	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

	qp->req.state		= QP_STATE_RESET;
	qp->comp.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(&qp->req.task, qp, rxe_requester);
	rxe_init_task(&qp->comp.task, qp, rxe_completer);

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;
	enum queue_type type;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		type = QUEUE_TYPE_FROM_CLIENT;
		qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
					wqe_size, type);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			qp->rq.queue = NULL;
			return err;
		}
	}

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(&qp->resp.task, qp, rxe_responder);

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_get(pd);
	rxe_get(rcq);
	rxe_get(scq);
	if (srq)
		rxe_get(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	atomic_inc(&rcq->num_wq);
	atomic_inc(&scq->num_wq);

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
	qp->sq.queue = NULL;
err1:
	atomic_dec(&rcq->num_wq);
	atomic_dec(&scq->num_wq);

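	/* clear the qp's copies of these pointers before dropping the
	 * references so rxe_qp_cleanup() cannot drop them a second time
	 * when the qp itself is destroyed
	 */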
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_put(srq);
	rxe_put(scq);
	rxe_put(rcq);
	rxe_put(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

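	/* ib_modify_qp_is_ok() checks the requested transition against the
	 * IBA state machine (e.g. RESET -> INIT -> RTR -> RTS) and verifies
	 * that the attribute mask is legal for that transition
	 */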
	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		rxe_dbg_qp(qp, "invalid mask or state\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
				 attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
				 attr->max_rd_atomic,
				 rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
					attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->comp.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let the state machines reset themselves, draining work and
	 * packet queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.wait_for_rnr_timer = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_put(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
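			/* in the DRAIN state the requester stops fetching
			 * new WQEs; once the send queue empties it moves
			 * itself to DRAINED and raises the
			 * IB_EVENT_SQ_DRAINED async event
			 */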
			if (qp_type(qp) == IB_QPT_RC)
				rxe_sched_task(&qp->comp.task);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_sched_task(&qp->req.task);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->comp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_sched_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_sched_task(&qp->comp.task);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_sched_task(&qp->req.task);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}
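	/* note that roundup_pow_of_two() may grant more than requested,
	 * e.g. a request for 5 outstanding RDMA reads becomes 8; the verbs
	 * interface allows a device to round these limits up
	 */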

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
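			/* 4.096 us = 4096 ns, hence the shift below; e.g.
			 * attr->timeout = 14 gives 4096 ns << 14 ~= 67 ms
			 */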
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			rxe_dbg_qp(qp, "state -> RESET\n");
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			rxe_dbg_qp(qp, "state -> INIT\n");
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			qp->comp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			rxe_dbg_qp(qp, "state -> RTR\n");
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			rxe_dbg_qp(qp, "state -> RTS\n");
			qp->req.state = QP_STATE_READY;
			qp->comp.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			rxe_dbg_qp(qp, "state -> SQD\n");
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			rxe_dbg_qp(qp, "state -> SQE !!?\n");
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			rxe_dbg_qp(qp, "state -> ERR\n");
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that see this state typically
		 * spin on it; yield the processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
	/* See IBA o10-2.2.3
	 * An attempt to destroy a QP while attached to a mcast group
	 * will fail immediately.
	 */
	if (atomic_read(&qp->mcg_num)) {
		rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
		return -EBUSY;
	}

	return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	if (qp->req.task.func)
		__rxe_do_task(&qp->req.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_put(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		rxe_put(qp->scq);
	}

	if (qp->rcq) {
		atomic_dec(&qp->rcq->num_wq);
		rxe_put(qp->rcq);
	}

	if (qp->pd)
		rxe_put(qp->pd);

	if (qp->resp.mr)
		rxe_put(qp->resp.mr);

	free_rd_atomic_resources(qp);

	if (qp->sk) {
		if (qp_type(qp) == IB_QPT_RC)
			sk_dst_reset(qp->sk->sk);

		kernel_sock_shutdown(qp->sk, SHUT_RDWR);
		sock_release(qp->sk);
	}
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

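	/* execute_in_process_context() runs the cleanup inline when we are
	 * already in process context and defers it to a work item otherwise,
	 * since the final rxe_put() can come from softirq context
	 */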
	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}