/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is called once per device that registers with rdmavt.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to a mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
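	/*
	 * Hold a reference on the QP while it is linked to the group;
	 * rvt_mcast_qp_free() drops it and wakes any waiting QP destroy.
	 */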
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify the driver's destroy-QP path if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
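	/*
	 * The reference count covers the rb-tree insertion plus any readers
	 * returned by rvt_mcast_find(); rvt_detach_mcast() waits for it to
	 * drain before freeing the group.
	 */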
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

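	/* The _safe walk lets each entry be freed as it is visited. */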
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * rvt_mcast_find - search the port's mcast tree for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: the mcast group with its reference count incremented, or NULL if
 * not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			atomic_inc(&mcast->refcount);
			found = mcast;
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);

/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev struct
 * @ibp: the IB port on which to add the group
 * @mcast: the mcast group to add
 * @mqp: the QP to attach
 *
 * Return: zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if the attach or
 * group limit was exceeded.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

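	/* The rb-tree insertion below holds a reference on the new group. */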
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

/**
 * rvt_attach_mcast - attach a QP to a multicast group
 * @ibqp: InfiniBand QP
 * @gid: multicast GID
 * @lid: multicast LID
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

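	/* Special QPs (0 and 1) and QPs in the RESET state cannot be attached. */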
	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate the data structures up front since it is better to do
	 * this outside of the spin locks and they will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups or attached QPs. */
		ret = -ENOMEM;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}

/**
 * rvt_detach_mcast - remove a QP from a multicast group
 * @ibqp: InfiniBand QP
 * @gid: multicast GID
 * @lid: multicast LID
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
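		/* Drop the rb-tree's reference and wait for any remaining readers. */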
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}

/**
 * rvt_mcast_tree_empty - determine if any QPs are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: the number of ports with a non-empty mcast tree
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}