/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

#include "mlx4.h"

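/*
 * Round-robin bitmap allocator.  Allocation scans for a free bit
 * starting at bitmap->last so successive calls hand out increasing
 * object numbers; when the scan runs past bitmap->max, bitmap->top is
 * advanced (and masked with bitmap->mask) and OR'ed into the value
 * returned to the caller, which appears intended to keep recently
 * freed object numbers from being reused with an identical value
 * right after a wrap.
 */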
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
{
	u32 obj;

	spin_lock(&bitmap->lock);

	obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (obj < bitmap->max) {
		set_bit(obj, bitmap->table);
		bitmap->last = (obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		--bitmap->avail;

	spin_unlock(&bitmap->lock);

	return obj;
}

void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
	mlx4_bitmap_free_range(bitmap, obj, 1);
}

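/*
 * Allocate a run of @cnt consecutive objects aligned to @align.  The
 * single-object, unaligned case falls back to mlx4_bitmap_alloc();
 * otherwise bitmap_find_next_zero_area() looks for a free run starting
 * at bitmap->last, retrying once from the start of the table if the
 * first scan runs past bitmap->max.
 */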
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
{
	u32 obj;

	if (likely(cnt == 1 && align == 1))
		return mlx4_bitmap_alloc(bitmap);

	spin_lock(&bitmap->lock);

	obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
				bitmap->last, cnt, align - 1);
	if (obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
				& bitmap->mask;
		obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
						0, cnt, align - 1);
	}

	if (obj < bitmap->max) {
		bitmap_set(bitmap->table, obj, cnt);
		if (obj == bitmap->last) {
			bitmap->last = (obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		obj |= bitmap->top;
	} else
		obj = -1;

	if (obj != -1)
		bitmap->avail -= cnt;

	spin_unlock(&bitmap->lock);

	return obj;
}

u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
{
	return bitmap->avail;
}

void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
{
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	bitmap_clear(bitmap->table, obj, cnt);
	bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			& bitmap->mask;
	bitmap->avail += cnt;
	spin_unlock(&bitmap->lock);
}

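/*
 * Initialize a bitmap of @num objects (@num must be a power of two).
 * The lowest @reserved_bot and highest @reserved_top entries are kept
 * out of the allocatable range: the bottom reservation is pre-marked
 * as used in the table, while the top reservation is excluded by
 * trimming bitmap->max.
 */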
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top)
{
	/* num must be a power of 2 */
	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top  = 0;
	bitmap->max  = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	bitmap->avail = num - reserved_top - reserved_bot;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
				sizeof (long), GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	bitmap_set(bitmap->table, 0, reserved_bot);

	return 0;
}

void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap)
{
	kfree(bitmap->table);
}

/*
 * Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.  If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

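/*
 * Worked example (illustrative only): with PAGE_SIZE = 4096, a request
 * for size = 16384 with max_direct = 4096 takes the indirect path
 * below, giving nbufs = npages = 4 separately DMA-mapped pages with
 * page_shift = PAGE_SHIFT; on 64-bit kernels the pages are also
 * vmap()ed so direct.buf still provides one contiguous kernel mapping.
 */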
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
		   struct mlx4_buf *buf)
{
	dma_addr_t t;

	if (size <= max_direct) {
		buf->nbufs        = 1;
		buf->npages       = 1;
		buf->page_shift   = get_order(size) + PAGE_SHIFT;
		buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
						       size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		int i;

		buf->direct.buf  = NULL;
		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages      = buf->nbufs;
		buf->page_shift  = PAGE_SHIFT;
		buf->page_list   = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					   GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		if (BITS_PER_LONG == 64) {
			struct page **pages;
			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx4_buf_free(dev, size, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx4_buf_alloc);

void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);

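/*
 * Doorbell records are small entries carved out of a shared
 * DMA-coherent page, addressed in 32-bit units (db->dma is computed as
 * db_dma + index * 4 below).  Each mlx4_db_pgdir tracks one such page
 * with two bitmaps: order0 for single records and order1 for aligned
 * pairs, in a tiny two-level buddy scheme where a set bit means free.
 * A fresh pgdir therefore starts with every order-1 pair free.
 */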
static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx4_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
				    struct mlx4_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
		if (i < MLX4_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}

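/*
 * mlx4_db_alloc() walks the per-device pgdir list under pgdir_mutex
 * and uses the first page that can satisfy the request; only when
 * every existing page is exhausted is a new pgdir page allocated and
 * added to the list.
 */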
int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&priv->pgdir_mutex);

	list_for_each_entry(pgdir, &priv->pgdir_list, list)
		if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &priv->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&priv->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_db_alloc);

void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int o;
	int i;

	mutex_lock(&priv->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&priv->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_db_free);

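/*
 * Convenience wrapper used by queue creation paths: allocates a
 * doorbell record, the queue buffer itself and an MTT describing that
 * buffer, then writes the buffer's pages into the MTT.  On failure,
 * whatever has already been allocated is unwound in reverse order.
 */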
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size, int max_direct)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	*wqres->db.db = 0;

	err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
	if (err)
		goto err_db;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev, &wqres->mtt);
err_buf:
	mlx4_buf_free(dev, size, &wqres->buf);
err_db:
	mlx4_db_free(dev, &wqres->db);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);

void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
		       int size)
{
	mlx4_mtt_cleanup(dev, &wqres->mtt);
	mlx4_buf_free(dev, size, &wqres->buf);
	mlx4_db_free(dev, &wqres->db);
}
EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
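
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * how a driver-internal path might allocate and release the resources
 * backing a hardware queue using the helpers above.  The function
 * names and sizes here are arbitrary illustrations.
 */
#if 0	/* illustration only */
static int example_create_queue(struct mlx4_dev *dev,
				struct mlx4_hwq_resources *wqres)
{
	int err;

	/* 16 KB of queue memory; anything larger than max_direct
	 * (2 * PAGE_SIZE here) is split into page-sized chunks. */
	err = mlx4_alloc_hwq_res(dev, wqres, 16384, 2 * PAGE_SIZE);
	if (err)
		return err;

	/* ... program the HCA with wqres->mtt and the doorbell at
	 * wqres->db.dma, then post work to the buffer ... */

	return 0;
}

static void example_destroy_queue(struct mlx4_dev *dev,
				  struct mlx4_hwq_resources *wqres)
{
	mlx4_free_hwq_res(dev, wqres, 16384);
}
#endif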