Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
v6.8
  1/*
  2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/bug.h>
 34#include <linux/errno.h>
 
 35#include <linux/spinlock.h>
 36
 37#include "usnic_log.h"
 38#include "usnic_vnic.h"
 39#include "usnic_fwd.h"
 40#include "usnic_uiom.h"
 41#include "usnic_debugfs.h"
 42#include "usnic_ib_qp_grp.h"
 43#include "usnic_ib_sysfs.h"
 44#include "usnic_transport.h"
 45
 46#define DFLT_RQ_IDX	0
 47
 48const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
 49{
 50	switch (state) {
 51	case IB_QPS_RESET:
 52		return "Rst";
 53	case IB_QPS_INIT:
 54		return "Init";
 55	case IB_QPS_RTR:
 56		return "RTR";
 57	case IB_QPS_RTS:
 58		return "RTS";
 59	case IB_QPS_SQD:
 60		return "SQD";
 61	case IB_QPS_SQE:
 62		return "SQE";
 63	case IB_QPS_ERR:
 64		return "ERR";
 65	default:
 66		return "UNKNOWN STATE";
 67
 68	}
 69}
 70
/*
 * Write the column-header row for the QP-group debugfs table into @buf.
 * Columns match the row format of usnic_ib_qp_grp_dump_rows().
 * Returns the number of characters written (scnprintf semantics:
 * at most @buf_sz - 1, always NUL-terminated).
 */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
 75
/*
 * Format one debugfs table row describing @obj (a struct
 * usnic_ib_qp_grp) into @buf; a NULL @obj yields an "N/A" placeholder
 * row.  Returns the number of characters written.
 */
int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_qp_grp *qp_grp = obj;
	struct usnic_ib_qp_grp_flow *default_flow;
	if (obj) {
		/*
		 * The filter-ID column reports the first flow on the
		 * group's flow list.
		 * NOTE(review): assumes flows_lst is non-empty --
		 * list_first_entry() on an empty list yields a bogus
		 * pointer; confirm callers guarantee at least one flow.
		 */
		default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
					qp_grp->ibqp.qp_num,
					usnic_ib_qp_grp_state_to_string(
							qp_grp->state),
					qp_grp->owner_pid,
					usnic_vnic_get_index(qp_grp->vf->vnic),
					default_flow->flow->flow_id);
	} else {
		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
	}
}
 94
/*
 * Return the resource chunk from which this group's QP indices are
 * derived.  Caller must hold qp_grp->lock.  Returns an ERR_PTR on
 * failure (see usnic_ib_qp_grp_get_chunk()).
 */
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
	lockdep_assert_held(&qp_grp->lock);
	/*
	 * The QP res chunk, used to derive qp indices,
	 * are just indices of the RQs
	 */
	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
105
106static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
107{
108
109	int status;
110	int i, vnic_idx;
111	struct usnic_vnic_res_chunk *res_chunk;
112	struct usnic_vnic_res *res;
113
114	lockdep_assert_held(&qp_grp->lock);
115
116	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
117
118	res_chunk = get_qp_res_chunk(qp_grp);
119	if (IS_ERR(res_chunk)) {
120		usnic_err("Unable to get qp res with err %ld\n",
121				PTR_ERR(res_chunk));
122		return PTR_ERR(res_chunk);
123	}
124
125	for (i = 0; i < res_chunk->cnt; i++) {
126		res = res_chunk->res[i];
127		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
128						res->vnic_idx);
129		if (status) {
130			usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
131					res->vnic_idx, qp_grp->ufdev->name,
132					vnic_idx, status);
133			goto out_err;
134		}
135	}
136
137	return 0;
138
139out_err:
140	for (i--; i >= 0; i--) {
141		res = res_chunk->res[i];
142		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
143					res->vnic_idx);
144	}
145
146	return status;
147}
148
149static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
150{
151	int i, vnic_idx;
152	struct usnic_vnic_res_chunk *res_chunk;
153	struct usnic_vnic_res *res;
154	int status = 0;
155
156	lockdep_assert_held(&qp_grp->lock);
157	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
158
159	res_chunk = get_qp_res_chunk(qp_grp);
160	if (IS_ERR(res_chunk)) {
161		usnic_err("Unable to get qp res with err %ld\n",
162			PTR_ERR(res_chunk));
163		return PTR_ERR(res_chunk);
164	}
165
166	for (i = 0; i < res_chunk->cnt; i++) {
167		res = res_chunk->res[i];
168		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
169						res->vnic_idx);
170		if (status) {
171			usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
172					res->vnic_idx,
173					qp_grp->ufdev->name,
174					vnic_idx, status);
175		}
176	}
177
178	return status;
179
180}
181
/*
 * Fill in @uaction so that packets matching a filter are steered to
 * this group's default receive queue (DFLT_RQ_IDX) on its VF.
 * Returns 0 on success, or the PTR_ERR of the missing RQ chunk.
 */
static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
				struct usnic_filter_action *uaction)
{
	struct usnic_vnic_res_chunk *res_chunk;

	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR(res_chunk)) {
		usnic_err("Unable to get %s with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			PTR_ERR(res_chunk));
		return PTR_ERR(res_chunk);
	}

	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	uaction->action.type = FILTER_ACTION_RQ_STEERING;
	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;

	return 0;
}
201
/*
 * Build a usnic-RoCE flow for @qp_grp from @trans_spec: reserve the
 * requested (or an ephemeral) transport port, program a forwarding
 * filter steering matching packets to the group's default RQ, and wrap
 * the result in a qp_grp_flow handle.  Resources are unwound in reverse
 * order on failure.  Returns the handle or an ERR_PTR.
 */
static struct usnic_ib_qp_grp_flow*
create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
			struct usnic_transport_spec *trans_spec)
{
	uint16_t port_num;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;

	trans_type = trans_spec->trans_type;
	port_num = trans_spec->usnic_roce.port_num;

	/* Reserve Port; 0 indicates the reservation failed. */
	port_num = usnic_transport_rsrv_port(trans_type, port_num);
	if (port_num == 0)
		return ERR_PTR(-EINVAL);

	/* Create Flow */
	usnic_fwd_init_usnic_filter(&filter, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_unreserve_port;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_unreserve_port;
	}

	/*
	 * Create Flow Handle.
	 * NOTE(review): GFP_ATOMIC presumably because a caller holds a
	 * spinlock across this path -- confirm before relaxing to
	 * GFP_KERNEL.
	 */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->usnic_roce.port_num = port_num;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_unreserve_port:
	usnic_transport_unrsrv_port(trans_type, port_num);
	return ERR_PTR(err);
}
252
/*
 * Tear down a usnic-RoCE flow: free the forwarding flow, release the
 * reserved transport port, and free the handle.  Mirrors, in reverse,
 * the acquisitions made by create_roce_custom_flow().
 */
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_unrsrv_port(qp_flow->trans_type,
					qp_flow->usnic_roce.port_num);
	kfree(qp_flow);
}
260
/*
 * Build a UDP flow for @qp_grp from @trans_spec: take a reference on
 * the caller-supplied socket fd, validate it is bound UDP, and program
 * a forwarding filter on its address/port steering packets to the
 * group's default RQ.  The socket reference is held by the returned
 * handle.  Resources are unwound in reverse order on failure.
 * Returns the handle or an ERR_PTR.
 */
static struct usnic_ib_qp_grp_flow*
create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
		struct usnic_transport_spec *trans_spec)
{
	struct socket *sock;
	int sock_fd;
	int err;
	struct filter filter;
	struct usnic_filter_action uaction;
	struct usnic_ib_qp_grp_flow *qp_flow;
	struct usnic_fwd_flow *flow;
	enum usnic_transport_type trans_type;
	uint32_t addr;
	uint16_t port_num;
	int proto;

	trans_type = trans_spec->trans_type;
	sock_fd = trans_spec->udp.sock_fd;

	/* Get and check socket */
	sock = usnic_transport_get_socket(sock_fd);
	if (IS_ERR_OR_NULL(sock))
		return ERR_CAST(sock);

	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
	if (err)
		goto out_put_sock;

	/* Only UDP sockets may back a usnic UDP flow. */
	if (proto != IPPROTO_UDP) {
		usnic_err("Protocol for fd %d is not UDP", sock_fd);
		err = -EPERM;
		goto out_put_sock;
	}

	/* Create flow */
	usnic_fwd_init_udp_filter(&filter, addr, port_num);
	err = init_filter_action(qp_grp, &uaction);
	if (err)
		goto out_put_sock;

	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
	if (IS_ERR_OR_NULL(flow)) {
		err = flow ? PTR_ERR(flow) : -EFAULT;
		goto out_put_sock;
	}

	/*
	 * Create qp_flow.
	 * NOTE(review): GFP_ATOMIC presumably because a caller holds a
	 * spinlock across this path -- confirm before relaxing.
	 */
	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
	if (!qp_flow) {
		err = -ENOMEM;
		goto out_dealloc_flow;
	}
	qp_flow->flow = flow;
	qp_flow->trans_type = trans_type;
	qp_flow->udp.sock = sock;
	qp_flow->qp_grp = qp_grp;
	return qp_flow;

out_dealloc_flow:
	usnic_fwd_dealloc_flow(flow);
out_put_sock:
	usnic_transport_put_socket(sock);
	return ERR_PTR(err);
}
325
/*
 * Tear down a UDP flow: free the forwarding flow, drop the socket
 * reference taken by create_udp_flow(), and free the handle.
 */
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_fwd_dealloc_flow(qp_flow->flow);
	usnic_transport_put_socket(qp_flow->udp.sock);
	kfree(qp_flow);
}
332
333static struct usnic_ib_qp_grp_flow*
334create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
335			struct usnic_transport_spec *trans_spec)
336{
337	struct usnic_ib_qp_grp_flow *qp_flow;
338	enum usnic_transport_type trans_type;
339
340	trans_type = trans_spec->trans_type;
341	switch (trans_type) {
342	case USNIC_TRANSPORT_ROCE_CUSTOM:
343		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
344		break;
345	case USNIC_TRANSPORT_IPV4_UDP:
346		qp_flow = create_udp_flow(qp_grp, trans_spec);
347		break;
348	default:
349		usnic_err("Unsupported transport %u\n",
350				trans_spec->trans_type);
351		return ERR_PTR(-EINVAL);
352	}
353
354	if (!IS_ERR_OR_NULL(qp_flow)) {
355		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
356		usnic_debugfs_flow_add(qp_flow);
357	}
358
359
360	return qp_flow;
361}
362
/*
 * Inverse of create_and_add_flow(): unregister @qp_flow from debugfs,
 * unlink it from its group's flow list, then release it via the
 * transport-specific destructor (which frees @qp_flow).
 */
static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
	usnic_debugfs_flow_remove(qp_flow);
	list_del(&qp_flow->link);

	switch (qp_flow->trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		release_roce_custom_flow(qp_flow);
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		release_udp_flow(qp_flow);
		break;
	default:
		/* Unreachable for flows built by create_and_add_flow(). */
		WARN(1, "Unsupported transport %u\n",
				qp_flow->trans_type);
		break;
	}
}
381
/*
 * Release every flow attached to @qp_grp.  Uses the _safe iterator
 * because release_and_remove_flow() unlinks and frees each entry.
 */
static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
		release_and_remove_flow(qp_flow);
}
388
/*
 * Transition @qp_grp through the IB QP state machine to @new_state.
 *
 * @data is an optional struct usnic_transport_spec: on a transition
 * into IB_QPS_INIT it describes a flow (filter) to create and attach
 * to the group.  Unsupported transitions return -EINVAL.  Any move
 * into IB_QPS_ERR fires the QP's fatal-event handler.
 *
 * qp_grp->lock is held across the transition; qp_grp->state is only
 * advanced when the transition succeeds.  Returns 0 or negative errno.
 */
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			/* Queues never enabled; just drop the flows. */
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			/* Quiesce hardware queues, then drop the flows. */
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			/* INIT -> INIT attaches an additional flow. */
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			/* Ready-to-receive: turn the RQs on. */
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		/* Notify the consumer of the fatal transition. */
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
530
/*
 * Allocate a NULL-terminated array of resource chunks from @vnic, one
 * per entry in @res_spec (terminated by USNIC_VNIC_RES_TYPE_EOL).
 * @owner_obj is recorded as the owner of each chunk.  On failure every
 * chunk acquired so far is returned to the vnic and an ERR_PTR is
 * returned.  Free the result with free_qp_grp_res().
 */
static struct usnic_vnic_res_chunk**
alloc_res_chunk_list(struct usnic_vnic *vnic,
			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
{
	enum usnic_vnic_res_type res_type;
	struct usnic_vnic_res_chunk **res_chunk_list;
	int err, i, res_cnt, res_lst_sz;

	/* Count spec entries to size the array (+1 for NULL sentinel). */
	for (res_lst_sz = 0;
		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
		res_lst_sz++) {
		/* Do Nothing */
	}

	/*
	 * NOTE(review): GFP_ATOMIC presumably because a caller holds a
	 * lock across this path -- confirm before relaxing.
	 */
	res_chunk_list = kcalloc(res_lst_sz + 1, sizeof(*res_chunk_list),
					GFP_ATOMIC);
	if (!res_chunk_list)
		return ERR_PTR(-ENOMEM);

	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
		i++) {
		res_type = res_spec->resources[i].type;
		res_cnt = res_spec->resources[i].cnt;

		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
					res_cnt, owner_obj);
		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
			err = res_chunk_list[i] ?
					PTR_ERR(res_chunk_list[i]) : -ENOMEM;
			usnic_err("Failed to get %s from %s with err %d\n",
				usnic_vnic_res_type_to_str(res_type),
				usnic_vnic_pci_name(vnic),
				err);
			goto out_free_res;
		}
	}

	return res_chunk_list;

out_free_res:
	/* Return the chunks acquired before the failure. */
	for (i--; i >= 0; i--)
		usnic_vnic_put_resources(res_chunk_list[i]);
	kfree(res_chunk_list);
	return ERR_PTR(err);
}
576
/*
 * Return every chunk in the NULL-terminated @res_chunk_list to its
 * vnic and free the list itself.  Inverse of alloc_res_chunk_list().
 */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	struct usnic_vnic_res_chunk **chunk;

	for (chunk = res_chunk_list; *chunk; chunk++)
		usnic_vnic_put_resources(*chunk);
	kfree(res_chunk_list);
}
584
/*
 * Bind @qp_grp to @vf under protection domain @pd.  The first group
 * bound to a VF attaches the VF's PCI device to the PD's IOMMU domain
 * and records the PD on the VF; later groups just bump the refcount.
 * Caller must hold vf->lock.  Returns 0 or a negative errno.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
				struct usnic_ib_pd *pd,
				struct usnic_ib_qp_grp *qp_grp)
{
	int err;
	struct pci_dev *pdev;

	lockdep_assert_held(&vf->lock);

	pdev = usnic_vnic_get_pdev(vf->vnic);
	if (vf->qp_grp_ref_cnt == 0) {
		/* First group on this VF: attach device to the PD. */
		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
		if (err) {
			usnic_err("Failed to attach %s to domain\n",
					pci_name(pdev));
			return err;
		}
		vf->pd = pd;
	}
	vf->qp_grp_ref_cnt++;

	/* All groups on one VF must share the same PD. */
	WARN_ON(vf->pd != pd);
	qp_grp->vf = vf;

	return 0;
}
611
/*
 * Undo qp_grp_and_vf_bind(): drop the group's reference on its VF and,
 * when the last group goes away, detach the VF's PCI device from the
 * PD's IOMMU domain and clear the VF's PD.  Caller must hold the VF
 * lock.
 */
static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
{
	struct pci_dev *pdev;
	struct usnic_ib_pd *pd;

	lockdep_assert_held(&qp_grp->vf->lock);

	pd = qp_grp->vf->pd;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
		/* Last group on this VF: release the PD binding. */
		qp_grp->vf->pd = NULL;
		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
	}
	qp_grp->vf = NULL;
}
627
/* Dump @res_spec to the debug log via a stack-local text buffer. */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char buf[512];
	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
634
/*
 * Derive the group id (used as the QP number) from @qp_flow's
 * transport endpoint: the reserved port for usnic-RoCE, or the bound
 * UDP port for UDP flows.  Returns 0 on success or a negative errno.
 */
static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
				uint32_t *id)
{
	enum usnic_transport_type trans_type = qp_flow->trans_type;
	int err;
	uint16_t port_num = 0;

	switch (trans_type) {
	case USNIC_TRANSPORT_ROCE_CUSTOM:
		*id = qp_flow->usnic_roce.port_num;
		break;
	case USNIC_TRANSPORT_IPV4_UDP:
		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
							NULL, NULL,
							&port_num);
		if (err)
			return err;
		/*
		 * Copy port_num to stack first and then to *id,
		 * so that the short to int cast works for little
		 * and big endian systems.
		 */
		*id = port_num;
		break;
	default:
		usnic_err("Unsupported transport %u\n", trans_type);
		return -EINVAL;
	}

	return 0;
}
666
/*
 * Fully construct @qp_grp: validate @res_spec against the minimum
 * resources the transport needs, acquire vnic resource chunks from
 * @vf, bind the group to @vf under @pd, attach the initial flow
 * described by @transport_spec, derive the group id / QP number from
 * that flow, and register the group in sysfs.  Each stage is unwound
 * in reverse order on failure.  Caller must hold vf->lock.
 * Returns 0 or a negative errno.
 */
int usnic_ib_qp_grp_create(struct usnic_ib_qp_grp *qp_grp,
			   struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
			   struct usnic_ib_pd *pd,
			   struct usnic_vnic_res_spec *res_spec,
			   struct usnic_transport_spec *transport_spec)
{
	int err;
	enum usnic_transport_type transport = transport_spec->trans_type;
	struct usnic_ib_qp_grp_flow *qp_flow;

	lockdep_assert_held(&vf->lock);

	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
						res_spec);
	if (err) {
		usnic_err("Spec does not meet minimum req for transport %d\n",
				transport);
		log_spec(res_spec);
		return err;
	}

	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
							qp_grp);
	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list))
		return qp_grp->res_chunk_list ?
				     PTR_ERR(qp_grp->res_chunk_list) :
				     -ENOMEM;

	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
	if (err)
		goto out_free_res;

	INIT_LIST_HEAD(&qp_grp->flows_lst);
	spin_lock_init(&qp_grp->lock);
	qp_grp->ufdev = ufdev;
	qp_grp->state = IB_QPS_RESET;
	qp_grp->owner_pid = current->pid;

	/* The initial flow determines the group's id (QP number). */
	qp_flow = create_and_add_flow(qp_grp, transport_spec);
	if (IS_ERR_OR_NULL(qp_flow)) {
		usnic_err("Unable to create and add flow with err %ld\n",
				PTR_ERR(qp_flow));
		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
		goto out_qp_grp_vf_unbind;
	}

	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
	if (err)
		goto out_release_flow;
	qp_grp->ibqp.qp_num = qp_grp->grp_id;

	usnic_ib_sysfs_qpn_add(qp_grp);

	return 0;

out_release_flow:
	release_and_remove_flow(qp_flow);
out_qp_grp_vf_unbind:
	qp_grp_and_vf_unbind(qp_grp);
out_free_res:
	free_qp_grp_res(qp_grp->res_chunk_list);
	return err;
}
730
/*
 * Tear down a group built by usnic_ib_qp_grp_create(), in reverse
 * order of construction.  The group must already be in IB_QPS_RESET
 * (queues disabled) and the caller must hold the VF lock.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{

	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
}
742
743struct usnic_vnic_res_chunk*
744usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
745				enum usnic_vnic_res_type res_type)
746{
747	int i;
748
749	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
750		if (qp_grp->res_chunk_list[i]->type == res_type)
751			return qp_grp->res_chunk_list[i];
752	}
753
754	return ERR_PTR(-EINVAL);
755}
v4.10.11
  1/*
  2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
  3 *
  4 * This software is available to you under a choice of one of two
  5 * licenses.  You may choose to be licensed under the terms of the GNU
  6 * General Public License (GPL) Version 2, available from the file
  7 * COPYING in the main directory of this source tree, or the
  8 * BSD license below:
  9 *
 10 *     Redistribution and use in source and binary forms, with or
 11 *     without modification, are permitted provided that the following
 12 *     conditions are met:
 13 *
 14 *      - Redistributions of source code must retain the above
 15 *        copyright notice, this list of conditions and the following
 16 *        disclaimer.
 17 *
 18 *      - Redistributions in binary form must reproduce the above
 19 *        copyright notice, this list of conditions and the following
 20 *        disclaimer in the documentation and/or other materials
 21 *        provided with the distribution.
 22 *
 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 30 * SOFTWARE.
 31 *
 32 */
 33#include <linux/bug.h>
 34#include <linux/errno.h>
 35#include <linux/module.h>
 36#include <linux/spinlock.h>
 37
 38#include "usnic_log.h"
 39#include "usnic_vnic.h"
 40#include "usnic_fwd.h"
 41#include "usnic_uiom.h"
 42#include "usnic_debugfs.h"
 43#include "usnic_ib_qp_grp.h"
 44#include "usnic_ib_sysfs.h"
 45#include "usnic_transport.h"
 46
 47#define DFLT_RQ_IDX	0
 48
 49const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
 50{
 51	switch (state) {
 52	case IB_QPS_RESET:
 53		return "Rst";
 54	case IB_QPS_INIT:
 55		return "Init";
 56	case IB_QPS_RTR:
 57		return "RTR";
 58	case IB_QPS_RTS:
 59		return "RTS";
 60	case IB_QPS_SQD:
 61		return "SQD";
 62	case IB_QPS_SQE:
 63		return "SQE";
 64	case IB_QPS_ERR:
 65		return "ERR";
 66	default:
 67		return "UNKNOWN STATE";
 68
 69	}
 70}
 71
 72int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
 73{
 74	return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
 75}
 76
 77int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
 78{
 79	struct usnic_ib_qp_grp *qp_grp = obj;
 80	struct usnic_ib_qp_grp_flow *default_flow;
 81	if (obj) {
 82		default_flow = list_first_entry(&qp_grp->flows_lst,
 83					struct usnic_ib_qp_grp_flow, link);
 84		return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
 85					qp_grp->ibqp.qp_num,
 86					usnic_ib_qp_grp_state_to_string(
 87							qp_grp->state),
 88					qp_grp->owner_pid,
 89					usnic_vnic_get_index(qp_grp->vf->vnic),
 90					default_flow->flow->flow_id);
 91	} else {
 92		return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
 93	}
 94}
 95
 96static struct usnic_vnic_res_chunk *
 97get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
 98{
 99	lockdep_assert_held(&qp_grp->lock);
100	/*
101	 * The QP res chunk, used to derive qp indices,
102	 * are just indices of the RQs
103	 */
104	return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
105}
106
107static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
108{
109
110	int status;
111	int i, vnic_idx;
112	struct usnic_vnic_res_chunk *res_chunk;
113	struct usnic_vnic_res *res;
114
115	lockdep_assert_held(&qp_grp->lock);
116
117	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
118
119	res_chunk = get_qp_res_chunk(qp_grp);
120	if (IS_ERR(res_chunk)) {
121		usnic_err("Unable to get qp res with err %ld\n",
122				PTR_ERR(res_chunk));
123		return PTR_ERR(res_chunk);
124	}
125
126	for (i = 0; i < res_chunk->cnt; i++) {
127		res = res_chunk->res[i];
128		status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
129						res->vnic_idx);
130		if (status) {
131			usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
132					res->vnic_idx, qp_grp->ufdev->name,
133					vnic_idx, status);
134			goto out_err;
135		}
136	}
137
138	return 0;
139
140out_err:
141	for (i--; i >= 0; i--) {
142		res = res_chunk->res[i];
143		usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
144					res->vnic_idx);
145	}
146
147	return status;
148}
149
150static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
151{
152	int i, vnic_idx;
153	struct usnic_vnic_res_chunk *res_chunk;
154	struct usnic_vnic_res *res;
155	int status = 0;
156
157	lockdep_assert_held(&qp_grp->lock);
158	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
159
160	res_chunk = get_qp_res_chunk(qp_grp);
161	if (IS_ERR(res_chunk)) {
162		usnic_err("Unable to get qp res with err %ld\n",
163			PTR_ERR(res_chunk));
164		return PTR_ERR(res_chunk);
165	}
166
167	for (i = 0; i < res_chunk->cnt; i++) {
168		res = res_chunk->res[i];
169		status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
170						res->vnic_idx);
171		if (status) {
172			usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
173					res->vnic_idx,
174					qp_grp->ufdev->name,
175					vnic_idx, status);
176		}
177	}
178
179	return status;
180
181}
182
183static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
184				struct usnic_filter_action *uaction)
185{
186	struct usnic_vnic_res_chunk *res_chunk;
187
188	res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
189	if (IS_ERR(res_chunk)) {
190		usnic_err("Unable to get %s with err %ld\n",
191			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
192			PTR_ERR(res_chunk));
193		return PTR_ERR(res_chunk);
194	}
195
196	uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
197	uaction->action.type = FILTER_ACTION_RQ_STEERING;
198	uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
199
200	return 0;
201}
202
203static struct usnic_ib_qp_grp_flow*
204create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
205			struct usnic_transport_spec *trans_spec)
206{
207	uint16_t port_num;
208	int err;
209	struct filter filter;
210	struct usnic_filter_action uaction;
211	struct usnic_ib_qp_grp_flow *qp_flow;
212	struct usnic_fwd_flow *flow;
213	enum usnic_transport_type trans_type;
214
215	trans_type = trans_spec->trans_type;
216	port_num = trans_spec->usnic_roce.port_num;
217
218	/* Reserve Port */
219	port_num = usnic_transport_rsrv_port(trans_type, port_num);
220	if (port_num == 0)
221		return ERR_PTR(-EINVAL);
222
223	/* Create Flow */
224	usnic_fwd_init_usnic_filter(&filter, port_num);
225	err = init_filter_action(qp_grp, &uaction);
226	if (err)
227		goto out_unreserve_port;
228
229	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
230	if (IS_ERR_OR_NULL(flow)) {
231		err = flow ? PTR_ERR(flow) : -EFAULT;
232		goto out_unreserve_port;
233	}
234
235	/* Create Flow Handle */
236	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
237	if (!qp_flow) {
238		err = -ENOMEM;
239		goto out_dealloc_flow;
240	}
241	qp_flow->flow = flow;
242	qp_flow->trans_type = trans_type;
243	qp_flow->usnic_roce.port_num = port_num;
244	qp_flow->qp_grp = qp_grp;
245	return qp_flow;
246
247out_dealloc_flow:
248	usnic_fwd_dealloc_flow(flow);
249out_unreserve_port:
250	usnic_transport_unrsrv_port(trans_type, port_num);
251	return ERR_PTR(err);
252}
253
254static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
255{
256	usnic_fwd_dealloc_flow(qp_flow->flow);
257	usnic_transport_unrsrv_port(qp_flow->trans_type,
258					qp_flow->usnic_roce.port_num);
259	kfree(qp_flow);
260}
261
262static struct usnic_ib_qp_grp_flow*
263create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
264		struct usnic_transport_spec *trans_spec)
265{
266	struct socket *sock;
267	int sock_fd;
268	int err;
269	struct filter filter;
270	struct usnic_filter_action uaction;
271	struct usnic_ib_qp_grp_flow *qp_flow;
272	struct usnic_fwd_flow *flow;
273	enum usnic_transport_type trans_type;
274	uint32_t addr;
275	uint16_t port_num;
276	int proto;
277
278	trans_type = trans_spec->trans_type;
279	sock_fd = trans_spec->udp.sock_fd;
280
281	/* Get and check socket */
282	sock = usnic_transport_get_socket(sock_fd);
283	if (IS_ERR_OR_NULL(sock))
284		return ERR_CAST(sock);
285
286	err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
287	if (err)
288		goto out_put_sock;
289
290	if (proto != IPPROTO_UDP) {
291		usnic_err("Protocol for fd %d is not UDP", sock_fd);
292		err = -EPERM;
293		goto out_put_sock;
294	}
295
296	/* Create flow */
297	usnic_fwd_init_udp_filter(&filter, addr, port_num);
298	err = init_filter_action(qp_grp, &uaction);
299	if (err)
300		goto out_put_sock;
301
302	flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
303	if (IS_ERR_OR_NULL(flow)) {
304		err = flow ? PTR_ERR(flow) : -EFAULT;
305		goto out_put_sock;
306	}
307
308	/* Create qp_flow */
309	qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
310	if (!qp_flow) {
311		err = -ENOMEM;
312		goto out_dealloc_flow;
313	}
314	qp_flow->flow = flow;
315	qp_flow->trans_type = trans_type;
316	qp_flow->udp.sock = sock;
317	qp_flow->qp_grp = qp_grp;
318	return qp_flow;
319
320out_dealloc_flow:
321	usnic_fwd_dealloc_flow(flow);
322out_put_sock:
323	usnic_transport_put_socket(sock);
324	return ERR_PTR(err);
325}
326
327static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
328{
329	usnic_fwd_dealloc_flow(qp_flow->flow);
330	usnic_transport_put_socket(qp_flow->udp.sock);
331	kfree(qp_flow);
332}
333
334static struct usnic_ib_qp_grp_flow*
335create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
336			struct usnic_transport_spec *trans_spec)
337{
338	struct usnic_ib_qp_grp_flow *qp_flow;
339	enum usnic_transport_type trans_type;
340
341	trans_type = trans_spec->trans_type;
342	switch (trans_type) {
343	case USNIC_TRANSPORT_ROCE_CUSTOM:
344		qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
345		break;
346	case USNIC_TRANSPORT_IPV4_UDP:
347		qp_flow = create_udp_flow(qp_grp, trans_spec);
348		break;
349	default:
350		usnic_err("Unsupported transport %u\n",
351				trans_spec->trans_type);
352		return ERR_PTR(-EINVAL);
353	}
354
355	if (!IS_ERR_OR_NULL(qp_flow)) {
356		list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
357		usnic_debugfs_flow_add(qp_flow);
358	}
359
360
361	return qp_flow;
362}
363
364static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
365{
366	usnic_debugfs_flow_remove(qp_flow);
367	list_del(&qp_flow->link);
368
369	switch (qp_flow->trans_type) {
370	case USNIC_TRANSPORT_ROCE_CUSTOM:
371		release_roce_custom_flow(qp_flow);
372		break;
373	case USNIC_TRANSPORT_IPV4_UDP:
374		release_udp_flow(qp_flow);
375		break;
376	default:
377		WARN(1, "Unsupported transport %u\n",
378				qp_flow->trans_type);
379		break;
380	}
381}
382
383static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
384{
385	struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
386	list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
387		release_and_remove_flow(qp_flow);
388}
389
/*
 * Drive @qp_grp's QP state machine to @new_state.
 *
 * @data optionally carries a struct usnic_transport_spec; when moving
 * to INIT it describes an additional flow (filter) to add.  Unsupported
 * transitions return -EINVAL.  qp_grp->state is only advanced when the
 * transition succeeded.
 *
 * The whole transition runs under qp_grp->lock; flow creation on the
 * INIT paths therefore happens in atomic context (the creators use
 * GFP_ATOMIC allocations).
 */
int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
				enum ib_qp_state new_state,
				void *data)
{
	int status = 0;
	int vnic_idx;
	struct ib_event ib_event;
	enum ib_qp_state old_state;
	struct usnic_transport_spec *trans_spec;
	struct usnic_ib_qp_grp_flow *qp_flow;

	old_state = qp_grp->state;
	vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
	trans_spec = (struct usnic_transport_spec *) data;

	spin_lock(&qp_grp->lock);
	switch (new_state) {
	case IB_QPS_RESET:
		switch (old_state) {
		case IB_QPS_RESET:
			/* NO-OP */
			break;
		case IB_QPS_INIT:
			/* Not enabled yet: only the flows need tearing down. */
			release_and_remove_all_flows(qp_grp);
			status = 0;
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
		case IB_QPS_ERR:
			/* QP was enabled: disable first, then drop flows. */
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_INIT:
		switch (old_state) {
		case IB_QPS_RESET:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Optional to specify filters.
				 */
				status = 0;
			}
			break;
		case IB_QPS_INIT:
			if (trans_spec) {
				qp_flow = create_and_add_flow(qp_grp,
								trans_spec);
				if (IS_ERR_OR_NULL(qp_flow)) {
					status = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
					break;
				}
			} else {
				/*
				 * Doesn't make sense to go into INIT state
				 * from INIT state w/o adding filters.
				 */
				status = -EINVAL;
			}
			break;
		case IB_QPS_RTR:
			status = disable_qp_grp(qp_grp);
			break;
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTR:
		switch (old_state) {
		case IB_QPS_INIT:
			status = enable_qp_grp(qp_grp);
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_RTS:
		switch (old_state) {
		case IB_QPS_RTR:
			/* NO-OP FOR NOW */
			break;
		default:
			status = -EINVAL;
		}
		break;
	case IB_QPS_ERR:
		/* Fatal: notify the consumer after tearing down state. */
		ib_event.device = &qp_grp->vf->pf->ib_dev;
		ib_event.element.qp = &qp_grp->ibqp;
		ib_event.event = IB_EVENT_QP_FATAL;

		switch (old_state) {
		case IB_QPS_RESET:
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_INIT:
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		case IB_QPS_RTR:
		case IB_QPS_RTS:
			status = disable_qp_grp(qp_grp);
			release_and_remove_all_flows(qp_grp);
			qp_grp->ibqp.event_handler(&ib_event,
					qp_grp->ibqp.qp_context);
			break;
		default:
			status = -EINVAL;
		}
		break;
	default:
		status = -EINVAL;
	}
	spin_unlock(&qp_grp->lock);

	/* NOTE(review): vnic_idx is computed but never used here —
	 * presumably left over from an earlier revision. */
	if (!status) {
		qp_grp->state = new_state;
		usnic_info("Transitioned %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	} else {
		usnic_err("Failed to transition %u from %s to %s",
		qp_grp->grp_id,
		usnic_ib_qp_grp_state_to_string(old_state),
		usnic_ib_qp_grp_state_to_string(new_state));
	}

	return status;
}
533
534static struct usnic_vnic_res_chunk**
535alloc_res_chunk_list(struct usnic_vnic *vnic,
536			struct usnic_vnic_res_spec *res_spec, void *owner_obj)
537{
538	enum usnic_vnic_res_type res_type;
539	struct usnic_vnic_res_chunk **res_chunk_list;
540	int err, i, res_cnt, res_lst_sz;
541
542	for (res_lst_sz = 0;
543		res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
544		res_lst_sz++) {
545		/* Do Nothing */
546	}
547
548	res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
549					GFP_ATOMIC);
550	if (!res_chunk_list)
551		return ERR_PTR(-ENOMEM);
552
553	for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
554		i++) {
555		res_type = res_spec->resources[i].type;
556		res_cnt = res_spec->resources[i].cnt;
557
558		res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
559					res_cnt, owner_obj);
560		if (IS_ERR_OR_NULL(res_chunk_list[i])) {
561			err = res_chunk_list[i] ?
562					PTR_ERR(res_chunk_list[i]) : -ENOMEM;
563			usnic_err("Failed to get %s from %s with err %d\n",
564				usnic_vnic_res_type_to_str(res_type),
565				usnic_vnic_pci_name(vnic),
566				err);
567			goto out_free_res;
568		}
569	}
570
571	return res_chunk_list;
572
573out_free_res:
574	for (i--; i >= 0; i--)
575		usnic_vnic_put_resources(res_chunk_list[i]);
576	kfree(res_chunk_list);
577	return ERR_PTR(err);
578}
579
/* Release every chunk in a NULL-terminated list, then the list itself. */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	struct usnic_vnic_res_chunk **chunk;

	for (chunk = res_chunk_list; *chunk; chunk++)
		usnic_vnic_put_resources(*chunk);
	kfree(res_chunk_list);
}
587
588static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
589				struct usnic_ib_pd *pd,
590				struct usnic_ib_qp_grp *qp_grp)
591{
592	int err;
593	struct pci_dev *pdev;
594
595	lockdep_assert_held(&vf->lock);
596
597	pdev = usnic_vnic_get_pdev(vf->vnic);
598	if (vf->qp_grp_ref_cnt == 0) {
599		err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
600		if (err) {
601			usnic_err("Failed to attach %s to domain\n",
602					pci_name(pdev));
603			return err;
604		}
605		vf->pd = pd;
606	}
607	vf->qp_grp_ref_cnt++;
608
609	WARN_ON(vf->pd != pd);
610	qp_grp->vf = vf;
611
612	return 0;
613}
614
615static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
616{
617	struct pci_dev *pdev;
618	struct usnic_ib_pd *pd;
619
620	lockdep_assert_held(&qp_grp->vf->lock);
621
622	pd = qp_grp->vf->pd;
623	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
624	if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
625		qp_grp->vf->pd = NULL;
626		usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
627	}
628	qp_grp->vf = NULL;
629}
630
/* Dump @res_spec to the debug log in human-readable form. */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	char dump[512];

	usnic_vnic_spec_dump(dump, sizeof(dump), res_spec);
	usnic_dbg("%s\n", dump);
}
637
638static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
639				uint32_t *id)
640{
641	enum usnic_transport_type trans_type = qp_flow->trans_type;
642	int err;
643	uint16_t port_num = 0;
644
645	switch (trans_type) {
646	case USNIC_TRANSPORT_ROCE_CUSTOM:
647		*id = qp_flow->usnic_roce.port_num;
648		break;
649	case USNIC_TRANSPORT_IPV4_UDP:
650		err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
651							NULL, NULL,
652							&port_num);
653		if (err)
654			return err;
655		/*
656		 * Copy port_num to stack first and then to *id,
657		 * so that the short to int cast works for little
658		 * and big endian systems.
659		 */
660		*id = port_num;
661		break;
662	default:
663		usnic_err("Unsupported transport %u\n", trans_type);
664		return -EINVAL;
665	}
666
667	return 0;
668}
669
670struct usnic_ib_qp_grp *
671usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
672			struct usnic_ib_pd *pd,
673			struct usnic_vnic_res_spec *res_spec,
674			struct usnic_transport_spec *transport_spec)
675{
676	struct usnic_ib_qp_grp *qp_grp;
677	int err;
678	enum usnic_transport_type transport = transport_spec->trans_type;
679	struct usnic_ib_qp_grp_flow *qp_flow;
680
681	lockdep_assert_held(&vf->lock);
682
683	err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
684						res_spec);
685	if (err) {
686		usnic_err("Spec does not meet miniumum req for transport %d\n",
687				transport);
688		log_spec(res_spec);
689		return ERR_PTR(err);
690	}
691
692	qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
693	if (!qp_grp)
694		return NULL;
695
696	qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
697							qp_grp);
698	if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
699		err = qp_grp->res_chunk_list ?
700				PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
701		goto out_free_qp_grp;
702	}
703
704	err = qp_grp_and_vf_bind(vf, pd, qp_grp);
705	if (err)
706		goto out_free_res;
707
708	INIT_LIST_HEAD(&qp_grp->flows_lst);
709	spin_lock_init(&qp_grp->lock);
710	qp_grp->ufdev = ufdev;
711	qp_grp->state = IB_QPS_RESET;
712	qp_grp->owner_pid = current->pid;
713
714	qp_flow = create_and_add_flow(qp_grp, transport_spec);
715	if (IS_ERR_OR_NULL(qp_flow)) {
716		usnic_err("Unable to create and add flow with err %ld\n",
717				PTR_ERR(qp_flow));
718		err = qp_flow ? PTR_ERR(qp_flow) : -EFAULT;
719		goto out_qp_grp_vf_unbind;
720	}
721
722	err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
723	if (err)
724		goto out_release_flow;
725	qp_grp->ibqp.qp_num = qp_grp->grp_id;
726
727	usnic_ib_sysfs_qpn_add(qp_grp);
728
729	return qp_grp;
730
731out_release_flow:
732	release_and_remove_flow(qp_flow);
733out_qp_grp_vf_unbind:
734	qp_grp_and_vf_unbind(qp_grp);
735out_free_res:
736	free_qp_grp_res(qp_grp->res_chunk_list);
737out_free_qp_grp:
738	kfree(qp_grp);
739
740	return ERR_PTR(err);
741}
742
/*
 * Free a QP group created by usnic_ib_qp_grp_create().
 *
 * The group must already be in RESET state; any flows still attached
 * are released here.  Caller holds the owning VF's lock.  Teardown
 * mirrors creation in reverse order.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{

	WARN_ON(qp_grp->state != IB_QPS_RESET);
	lockdep_assert_held(&qp_grp->vf->lock);

	release_and_remove_all_flows(qp_grp);
	usnic_ib_sysfs_qpn_remove(qp_grp);
	qp_grp_and_vf_unbind(qp_grp);
	free_qp_grp_res(qp_grp->res_chunk_list);
	kfree(qp_grp);
}
755
756struct usnic_vnic_res_chunk*
757usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
758				enum usnic_vnic_res_type res_type)
759{
760	int i;
761
762	for (i = 0; qp_grp->res_chunk_list[i]; i++) {
763		if (qp_grp->res_chunk_list[i]->type == res_type)
764			return qp_grp->res_chunk_list[i];
765	}
766
767	return ERR_PTR(-EINVAL);
768}