drivers/infiniband/sw/rxe/rxe_queue.c, v4.10.11
 
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

int do_mmap_info(struct rxe_dev *rxe,
		 struct ib_udata *udata,
		 bool is_req,
		 struct ib_ucontext *context,
		 struct rxe_queue_buf *buf,
		 size_t buf_size,
		 struct rxe_mmap_info **ip_p)
{
	int err;
	u32 len, offset;
	struct rxe_mmap_info *ip = NULL;

	if (udata) {
		if (is_req) {
			len = udata->outlen - sizeof(struct mminfo);
			offset = sizeof(struct mminfo);
		} else {
			len = udata->outlen;
			offset = 0;
		}

		if (len < sizeof(ip->info))
			goto err1;

		ip = rxe_create_mmap_info(rxe, buf_size, context, buf);
		if (!ip)
			goto err1;

		err = copy_to_user(udata->outbuf + offset, &ip->info,
				   sizeof(ip->info));
		if (err)
			goto err2;

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return -EINVAL;
}

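/* Note on the function above: do_mmap_info() is what lets user space later
 * mmap() the vmalloc_user()'d queue buffer. rxe_create_mmap_info() allocates
 * an rxe_mmap_info whose ip->info (offset and size) is copied back through
 * udata, and putting ip on rxe->pending_mmaps is what the driver's mmap
 * handler (see rxe_mmap.c) matches against when the process maps the queue.
 * If the copy to user space fails, only the mmap info is freed here; the
 * queue buffer itself remains the caller's responsibility.
 */
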
inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue consists of a management header followed by the memory
	 * for the actual queue elements; see "struct rxe_queue_buf" in
	 * rxe_queue.h. Reset only the queue contents, not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
				 int *num_elem,
				 unsigned int elem_size)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kmalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

/* copies elements from original q to new q and then swaps the contents of the
 * two q headers. This is so that if anyone is holding a pointer to q it will
 * still work
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	if (!queue_empty(q) && (num_elem < queue_count(q)))
		return -EINVAL;

	while (!queue_empty(q)) {
		memcpy(producer_addr(new_q), consumer_addr(q),
		       new_q->elem_size);
		advance_producer(new_q);
		advance_consumer(q);
	}

	swap(*q, *new_q);

	return 0;
}

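/* Note on the resize path below: rxe_queue_resize() calls resize_finish()
 * with the consumer lock held (and the producer lock too, when one is
 * supplied), so the ring cannot change under the copy. Because swap(*q,
 * *new_q) exchanges the struct contents rather than the pointers, callers
 * that cached the old q pointer keep working, and new_q ends up owning
 * whichever buffer must be freed: the old one on success, the new one on
 * error. That is what the "new/old dep on err" comment refers to.
 */
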
int rxe_queue_resize(struct rxe_queue *q,
		     unsigned int *num_elem_p,
		     unsigned int elem_size,
		     struct ib_ucontext *context,
		     struct ib_udata *udata,
		     spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, udata, false, context, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	rxe_queue_cleanup(new_q);	/* new/old dep on err */
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}
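
A note on the sizing done in rxe_queue_init() above: the element size is padded to at least a cache line and rounded up to a power of two, one extra slot is added before rounding the slot count up to a power of two (the spare slot is the usual ring-buffer trick for telling "full" apart from "empty"), and index_mask = num_slots - 1 lets the producer and consumer indices wrap with a single AND. The stand-alone sketch below reproduces that arithmetic in user space; next_pow2() and the fixed 64-byte cache-line value are illustrative stand-ins for the kernel's roundup_pow_of_two() and cache_line_size(), not part of the driver.

#include <stdio.h>

#define CACHE_LINE 64u		/* assumed cache-line size for this example */

/* illustrative replacement for roundup_pow_of_two() */
static unsigned int next_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int num_elem = 100;	/* requested element count */
	unsigned int elem_size = 24;	/* requested element size  */
	unsigned int num_slots, index_mask;

	if (elem_size < CACHE_LINE)
		elem_size = CACHE_LINE;
	elem_size = next_pow2(elem_size);

	num_slots = next_pow2(num_elem + 1);	/* +1 slot: full vs. empty */
	index_mask = num_slots - 1;

	/* indices wrap with a single AND because num_slots is a power of 2 */
	printf("elem_size=%u num_slots=%u index_mask=0x%x usable=%u\n",
	       elem_size, num_slots, index_mask, num_slots - 1);
	return 0;
}

With the example inputs this prints elem_size=64 num_slots=128 index_mask=0x7f usable=127, i.e. a request for 100 elements yields a ring that can actually hold 127.
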
drivers/infiniband/sw/rxe/rxe_queue.c, v5.14.15
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
		 struct ib_udata *udata, struct rxe_queue_buf *buf,
		 size_t buf_size, struct rxe_mmap_info **ip_p)
{
	int err;
	struct rxe_mmap_info *ip = NULL;

	if (outbuf) {
		ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
		if (IS_ERR(ip)) {
			err = PTR_ERR(ip);
			goto err1;
		}

		if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
			err = -EFAULT;
			goto err2;
		}

		spin_lock_bh(&rxe->pending_lock);
		list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
		spin_unlock_bh(&rxe->pending_lock);
	}

	*ip_p = ip;

	return 0;

err2:
	kfree(ip);
err1:
	return err;
}

inline void rxe_queue_reset(struct rxe_queue *q)
{
	/* the queue consists of a management header followed by the memory
	 * for the actual queue elements; see "struct rxe_queue_buf" in
	 * rxe_queue.h. Reset only the queue contents, not the header.
	 */
	memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf));
}

struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
			unsigned int elem_size, enum queue_type type)
{
	struct rxe_queue *q;
	size_t buf_size;
	unsigned int num_slots;

	/* num_elem == 0 is allowed, but uninteresting */
	if (*num_elem < 0)
		goto err1;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		goto err1;

	q->rxe = rxe;
	q->type = type;

	/* used in resize, only need to copy used part of queue */
	q->elem_size = elem_size;

	/* pad element up to at least a cacheline and always a power of 2 */
	if (elem_size < cache_line_size())
		elem_size = cache_line_size();
	elem_size = roundup_pow_of_two(elem_size);

	q->log2_elem_size = order_base_2(elem_size);

	num_slots = *num_elem + 1;
	num_slots = roundup_pow_of_two(num_slots);
	q->index_mask = num_slots - 1;

	buf_size = sizeof(struct rxe_queue_buf) + num_slots * elem_size;

	q->buf = vmalloc_user(buf_size);
	if (!q->buf)
		goto err2;

	q->buf->log2_elem_size = q->log2_elem_size;
	q->buf->index_mask = q->index_mask;

	q->buf_size = buf_size;

	*num_elem = num_slots - 1;
	return q;

err2:
	kfree(q);
err1:
	return NULL;
}

/* copies elements from original q to new q and then swaps the contents of the
 * two q headers. This is so that if anyone is holding a pointer to q it will
 * still work
 */
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
			 unsigned int num_elem)
{
	if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
		return -EINVAL;

	while (!queue_empty(q, q->type)) {
		memcpy(producer_addr(new_q, new_q->type),
					consumer_addr(q, q->type),
					new_q->elem_size);
		advance_producer(new_q, new_q->type);
		advance_consumer(q, q->type);
	}

	swap(*q, *new_q);

	return 0;
}

int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
		     unsigned int elem_size, struct ib_udata *udata,
		     struct mminfo __user *outbuf, spinlock_t *producer_lock,
		     spinlock_t *consumer_lock)
{
	struct rxe_queue *new_q;
	unsigned int num_elem = *num_elem_p;
	int err;
	unsigned long flags = 0, flags1;

	new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
	if (!new_q)
		return -ENOMEM;

	err = do_mmap_info(new_q->rxe, outbuf, udata, new_q->buf,
			   new_q->buf_size, &new_q->ip);
	if (err) {
		vfree(new_q->buf);
		kfree(new_q);
		goto err1;
	}

	spin_lock_irqsave(consumer_lock, flags1);

	if (producer_lock) {
		spin_lock_irqsave(producer_lock, flags);
		err = resize_finish(q, new_q, num_elem);
		spin_unlock_irqrestore(producer_lock, flags);
	} else {
		err = resize_finish(q, new_q, num_elem);
	}

	spin_unlock_irqrestore(consumer_lock, flags1);

	rxe_queue_cleanup(new_q);	/* new/old dep on err */
	if (err)
		goto err1;

	*num_elem_p = num_elem;
	return 0;

err1:
	return err;
}

void rxe_queue_cleanup(struct rxe_queue *q)
{
	if (q->ip)
		kref_put(&q->ip->ref, rxe_mmap_release);
	else
		vfree(q->buf);

	kfree(q);
}
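
The drain loop in resize_finish() is a plain producer/consumer walk over a masked ring: copy from the consumer slot into the producer slot of the new queue, then advance each index by one with the wrap handled by index_mask. The following self-contained sketch shows that access pattern in isolation; the ring struct and helpers below are illustrative only and do not reproduce the driver's rxe_queue_buf layout or its user-space mapping.

#include <stdio.h>

/* Illustrative ring with a power-of-two slot count; producer == consumer
 * means empty, and one slot is kept free so "full" stays distinguishable.
 */
struct ring {
	unsigned int prod;
	unsigned int cons;
	unsigned int index_mask;	/* num_slots - 1, num_slots a power of 2 */
	int slot[8];
};

static int ring_empty(const struct ring *r)
{
	return ((r->prod - r->cons) & r->index_mask) == 0;
}

static int ring_full(const struct ring *r)
{
	return ((r->prod + 1 - r->cons) & r->index_mask) == 0;
}

static void ring_produce(struct ring *r, int v)
{
	r->slot[r->prod & r->index_mask] = v;
	r->prod = (r->prod + 1) & r->index_mask;
}

static int ring_consume(struct ring *r)
{
	int v = r->slot[r->cons & r->index_mask];

	r->cons = (r->cons + 1) & r->index_mask;
	return v;
}

int main(void)
{
	struct ring r = { .index_mask = 7 };	/* 8 slots, 7 usable */
	int i;

	for (i = 0; i < 5 && !ring_full(&r); i++)
		ring_produce(&r, i);

	/* drain, the way resize_finish() drains the old queue */
	while (!ring_empty(&r))
		printf("%d ", ring_consume(&r));
	printf("\n");
	return 0;
}

The invariants match what the driver relies on: with a power-of-two slot count, (prod - cons) & index_mask counts the occupied slots, equal indices mean empty, and keeping one slot free makes "full" unambiguous.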