/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is called once per device that registers with rdmavt.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - allocate a struct to link a QP to a mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
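	/* Hold a reference on the QP for as long as it is linked to the group. */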
	rvt_get_qp(qp);

bail:
	return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);

	kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 * @lid: the multicast LID (host order)
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mcast_addr.mgid = *mgid;
	mcast->mcast_addr.lid = lid;

	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
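	/*
	 * The reference count starts at zero: rvt_mcast_add() takes the
	 * rb-tree's reference when the group is inserted and rvt_mcast_find()
	 * takes an additional reference for each successful lookup.
	 */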
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * rvt_mcast_find - search the global table for the given multicast GID/LID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 * @lid: the multicast LID portion of the multicast address (host order)
 *
 * NOTE: It is valid to have 1 MLID with multiple MGIDs.  It is not valid
 * to have 1 MGID with multiple MLIDs.
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
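	/* The per-port tree is ordered by a byte-wise compare of the raw MGID. */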
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(*mgid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID/MLID must match */
			if (mcast->mcast_addr.lid == lid) {
				atomic_inc(&mcast->refcount);
				found = mcast;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);

/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev info
 * @ibp: the IB port structure
 * @mcast: the mcast GID structure to insert
 * @mqp: the QP to attach
 *
 * Note that the values returned are positive status codes; the caller,
 * rvt_attach_mcast(), maps them to the final return value.
 *
 * Return: zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return EINVAL if the MGID was
 * found, but the MLID did NOT match.  Return ENOMEM if the maximum number
 * of QPs attached to the group or the maximum number of groups for the
 * device has been reached.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mcast_addr.mgid.raw,
			     tmcast->mcast_addr.mgid.raw,
			     sizeof(mcast->mcast_addr.mgid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
			ret = EINVAL;
			goto bail;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
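		/* The group already existed; only the QP link was added. */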
		ret = EEXIST;
		goto bail;
	}

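	/*
	 * A new group needs to be inserted.  n_mcast_grps_lock nests inside
	 * ibp->lock; check the per-device group limit before committing.
	 */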
	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}

/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate the data structures up front, since it's better to do this
	 * outside of the spin locks and they will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ret = -ENOMEM;
		goto bail_mqp;
	case EINVAL:
		/* Invalid MGID/MLID pair */
		ret = -EINVAL;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}

/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
342 */
343int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
344{
345	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
346	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
347	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
348	struct rvt_mcast *mcast = NULL;
349	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
350	struct rb_node *n;
351	int last = 0;
352	int ret = 0;
353
354	if (ibqp->qp_num <= 1)
355		return -EINVAL;
356
357	spin_lock_irq(&ibp->lock);
358
359	/* Find the GID in the mcast table. */
360	n = ibp->mcast_tree.rb_node;
361	while (1) {
362		if (!n) {
363			spin_unlock_irq(&ibp->lock);
364			return -EINVAL;
365		}
366
367		mcast = rb_entry(n, struct rvt_mcast, rb_node);
368		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
369			     sizeof(*gid));
370		if (ret < 0) {
371			n = n->rb_left;
372		} else if (ret > 0) {
373			n = n->rb_right;
374		} else {
375			/* MGID/MLID must match */
376			if (mcast->mcast_addr.lid != lid) {
377				spin_unlock_irq(&ibp->lock);
378				return -EINVAL;
379			}
380			break;
381		}
382	}
383
384	/* Search the QP list. */
385	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
386		if (p->qp != qp)
387			continue;
388		/*
389		 * We found it, so remove it, but don't poison the forward
390		 * link until we are sure there are no list walkers.
391		 */
392		list_del_rcu(&p->list);
393		mcast->n_attached--;
394		delp = p;
395
396		/* If this was the last attached QP, remove the GID too. */
397		if (list_empty(&mcast->qp_list)) {
398			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
399			last = 1;
400		}
401		break;
402	}
403
404	spin_unlock_irq(&ibp->lock);
405	/* QP not attached */
406	if (!delp)
407		return -EINVAL;
408
409	/*
410	 * Wait for any list walkers to finish before freeing the
411	 * list element.
412	 */
413	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
414	rvt_mcast_qp_free(delp);
415
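	/*
	 * If the group itself was removed from the tree, drop the tree's
	 * reference and wait for any remaining lookups to release theirs
	 * before freeing the group and updating the per-device count.
	 */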
	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}

/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: number of ports that still have at least one multicast group
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}