Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4 * Takashi Iwai <tiwai@suse.de>
5 *
6 * Generic memory allocators
7 */
8
9#include <linux/slab.h>
10#include <linux/mm.h>
11#include <linux/dma-mapping.h>
12#include <linux/dma-map-ops.h>
13#include <linux/genalloc.h>
14#include <linux/highmem.h>
15#include <linux/vmalloc.h>
16#ifdef CONFIG_X86
17#include <asm/set_memory.h>
18#endif
19#include <sound/memalloc.h>
20
21struct snd_malloc_ops {
22 void *(*alloc)(struct snd_dma_buffer *dmab, size_t size);
23 void (*free)(struct snd_dma_buffer *dmab);
24 dma_addr_t (*get_addr)(struct snd_dma_buffer *dmab, size_t offset);
25 struct page *(*get_page)(struct snd_dma_buffer *dmab, size_t offset);
26 unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
27 unsigned int ofs, unsigned int size);
28 int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
29 void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
30};
31
32#define DEFAULT_GFP \
33 (GFP_KERNEL | \
34 __GFP_RETRY_MAYFAIL | /* don't trigger OOM-killer */ \
35 __GFP_NOWARN) /* no stack trace print - this call is non-critical */
36
37static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
38
39static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
40{
41 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
42
43 if (WARN_ON_ONCE(!ops || !ops->alloc))
44 return NULL;
45 return ops->alloc(dmab, size);
46}
47
48/**
49 * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
50 * type and direction
51 * @type: the DMA buffer type
52 * @device: the device pointer
53 * @dir: DMA direction
54 * @size: the buffer size to allocate
55 * @dmab: buffer allocation record to store the allocated data
56 *
57 * Calls the memory-allocator function for the corresponding
58 * buffer type.
59 *
60 * Return: Zero if the buffer with the given size is allocated successfully,
61 * otherwise a negative value on error.
62 */
63int snd_dma_alloc_dir_pages(int type, struct device *device,
64 enum dma_data_direction dir, size_t size,
65 struct snd_dma_buffer *dmab)
66{
67 if (WARN_ON(!size))
68 return -ENXIO;
69 if (WARN_ON(!dmab))
70 return -ENXIO;
71
72 size = PAGE_ALIGN(size);
73 dmab->dev.type = type;
74 dmab->dev.dev = device;
75 dmab->dev.dir = dir;
76 dmab->bytes = 0;
77 dmab->addr = 0;
78 dmab->private_data = NULL;
79 dmab->area = __snd_dma_alloc_pages(dmab, size);
80 if (!dmab->area)
81 return -ENOMEM;
82 dmab->bytes = size;
83 return 0;
84}
85EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
86
87/**
88 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
89 * @type: the DMA buffer type
90 * @device: the device pointer
91 * @size: the buffer size to allocate
92 * @dmab: buffer allocation record to store the allocated data
93 *
94 * Calls the memory-allocator function for the corresponding
95 * buffer type. When no space is left, this function reduces the size and
96 * tries to allocate again. The size actually allocated is stored in
97 * res_size argument.
98 *
99 * Return: Zero if the buffer with the given size is allocated successfully,
100 * otherwise a negative value on error.
101 */
102int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
103 struct snd_dma_buffer *dmab)
104{
105 int err;
106
107 while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
108 if (err != -ENOMEM)
109 return err;
110 if (size <= PAGE_SIZE)
111 return -ENOMEM;
112 size >>= 1;
113 size = PAGE_SIZE << get_order(size);
114 }
115 if (! dmab->area)
116 return -ENOMEM;
117 return 0;
118}
119EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
120
121/**
122 * snd_dma_free_pages - release the allocated buffer
123 * @dmab: the buffer allocation record to release
124 *
125 * Releases the allocated buffer via snd_dma_alloc_pages().
126 */
127void snd_dma_free_pages(struct snd_dma_buffer *dmab)
128{
129 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
130
131 if (ops && ops->free)
132 ops->free(dmab);
133}
134EXPORT_SYMBOL(snd_dma_free_pages);
135
136/* called by devres */
137static void __snd_release_pages(struct device *dev, void *res)
138{
139 snd_dma_free_pages(res);
140}
141
142/**
143 * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
144 * @dev: the device pointer
145 * @type: the DMA buffer type
146 * @dir: DMA direction
147 * @size: the buffer size to allocate
148 *
149 * Allocate buffer pages depending on the given type and manage using devres.
150 * The pages will be released automatically at the device removal.
151 *
152 * Unlike snd_dma_alloc_pages(), this function requires the real device pointer,
153 * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or
154 * SNDRV_DMA_TYPE_VMALLOC type.
155 *
156 * Return: the snd_dma_buffer object at success, or NULL if failed
157 */
158struct snd_dma_buffer *
159snd_devm_alloc_dir_pages(struct device *dev, int type,
160 enum dma_data_direction dir, size_t size)
161{
162 struct snd_dma_buffer *dmab;
163 int err;
164
165 if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
166 type == SNDRV_DMA_TYPE_VMALLOC))
167 return NULL;
168
169 dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL);
170 if (!dmab)
171 return NULL;
172
173 err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
174 if (err < 0) {
175 devres_free(dmab);
176 return NULL;
177 }
178
179 devres_add(dev, dmab);
180 return dmab;
181}
182EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
183
184/**
185 * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
186 * @dmab: buffer allocation information
187 * @area: VM area information
188 *
189 * Return: zero if successful, or a negative error code
190 */
191int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
192 struct vm_area_struct *area)
193{
194 const struct snd_malloc_ops *ops;
195
196 if (!dmab)
197 return -ENOENT;
198 ops = snd_dma_get_ops(dmab);
199 if (ops && ops->mmap)
200 return ops->mmap(dmab, area);
201 else
202 return -ENOENT;
203}
204EXPORT_SYMBOL(snd_dma_buffer_mmap);
205
206#ifdef CONFIG_HAS_DMA
207/**
208 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
209 * @dmab: buffer allocation information
210 * @mode: sync mode
211 */
212void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
213 enum snd_dma_sync_mode mode)
214{
215 const struct snd_malloc_ops *ops;
216
217 if (!dmab || !dmab->dev.need_sync)
218 return;
219 ops = snd_dma_get_ops(dmab);
220 if (ops && ops->sync)
221 ops->sync(dmab, mode);
222}
223EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
224#endif /* CONFIG_HAS_DMA */
225
226/**
227 * snd_sgbuf_get_addr - return the physical address at the corresponding offset
228 * @dmab: buffer allocation information
229 * @offset: offset in the ring buffer
230 *
231 * Return: the physical address
232 */
233dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
234{
235 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
236
237 if (ops && ops->get_addr)
238 return ops->get_addr(dmab, offset);
239 else
240 return dmab->addr + offset;
241}
242EXPORT_SYMBOL(snd_sgbuf_get_addr);
243
244/**
245 * snd_sgbuf_get_page - return the physical page at the corresponding offset
246 * @dmab: buffer allocation information
247 * @offset: offset in the ring buffer
248 *
249 * Return: the page pointer
250 */
251struct page *snd_sgbuf_get_page(struct snd_dma_buffer *dmab, size_t offset)
252{
253 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
254
255 if (ops && ops->get_page)
256 return ops->get_page(dmab, offset);
257 else
258 return virt_to_page(dmab->area + offset);
259}
260EXPORT_SYMBOL(snd_sgbuf_get_page);
261
262/**
263 * snd_sgbuf_get_chunk_size - compute the max chunk size with continuous pages
264 * on sg-buffer
265 * @dmab: buffer allocation information
266 * @ofs: offset in the ring buffer
267 * @size: the requested size
268 *
269 * Return: the chunk size
270 */
271unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
272 unsigned int ofs, unsigned int size)
273{
274 const struct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
275
276 if (ops && ops->get_chunk_size)
277 return ops->get_chunk_size(dmab, ofs, size);
278 else
279 return size;
280}
281EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
282
283/*
284 * Continuous pages allocator
285 */
286static void *do_alloc_pages(struct device *dev, size_t size, dma_addr_t *addr,
287 bool wc)
288{
289 void *p;
290 gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
291
292 again:
293 p = alloc_pages_exact(size, gfp);
294 if (!p)
295 return NULL;
296 *addr = page_to_phys(virt_to_page(p));
297 if (!dev)
298 return p;
299 if ((*addr + size - 1) & ~dev->coherent_dma_mask) {
300 if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
301 gfp |= GFP_DMA32;
302 goto again;
303 }
304 if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
305 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
306 goto again;
307 }
308 }
309#ifdef CONFIG_X86
310 if (wc)
311 set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
312#endif
313 return p;
314}
315
316static void do_free_pages(void *p, size_t size, bool wc)
317{
318#ifdef CONFIG_X86
319 if (wc)
320 set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
321#endif
322 free_pages_exact(p, size);
323}
324
325
326static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
327{
328 return do_alloc_pages(dmab->dev.dev, size, &dmab->addr, false);
329}
330
331static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
332{
333 do_free_pages(dmab->area, dmab->bytes, false);
334}
335
336static int snd_dma_continuous_mmap(struct snd_dma_buffer *dmab,
337 struct vm_area_struct *area)
338{
339 return remap_pfn_range(area, area->vm_start,
340 dmab->addr >> PAGE_SHIFT,
341 area->vm_end - area->vm_start,
342 area->vm_page_prot);
343}
344
345static const struct snd_malloc_ops snd_dma_continuous_ops = {
346 .alloc = snd_dma_continuous_alloc,
347 .free = snd_dma_continuous_free,
348 .mmap = snd_dma_continuous_mmap,
349};
350
351/*
352 * VMALLOC allocator
353 */
354static void *snd_dma_vmalloc_alloc(struct snd_dma_buffer *dmab, size_t size)
355{
356 return vmalloc(size);
357}
358
359static void snd_dma_vmalloc_free(struct snd_dma_buffer *dmab)
360{
361 vfree(dmab->area);
362}
363
364static int snd_dma_vmalloc_mmap(struct snd_dma_buffer *dmab,
365 struct vm_area_struct *area)
366{
367 return remap_vmalloc_range(area, dmab->area, 0);
368}
369
370#define get_vmalloc_page_addr(dmab, offset) \
371 page_to_phys(vmalloc_to_page((dmab)->area + (offset)))
372
373static dma_addr_t snd_dma_vmalloc_get_addr(struct snd_dma_buffer *dmab,
374 size_t offset)
375{
376 return get_vmalloc_page_addr(dmab, offset) + offset % PAGE_SIZE;
377}
378
379static struct page *snd_dma_vmalloc_get_page(struct snd_dma_buffer *dmab,
380 size_t offset)
381{
382 return vmalloc_to_page(dmab->area + offset);
383}
384
385static unsigned int
386snd_dma_vmalloc_get_chunk_size(struct snd_dma_buffer *dmab,
387 unsigned int ofs, unsigned int size)
388{
389 unsigned int start, end;
390 unsigned long addr;
391
392 start = ALIGN_DOWN(ofs, PAGE_SIZE);
393 end = ofs + size - 1; /* the last byte address */
394 /* check page continuity */
395 addr = get_vmalloc_page_addr(dmab, start);
396 for (;;) {
397 start += PAGE_SIZE;
398 if (start > end)
399 break;
400 addr += PAGE_SIZE;
401 if (get_vmalloc_page_addr(dmab, start) != addr)
402 return start - ofs;
403 }
404 /* ok, all on continuous pages */
405 return size;
406}
407
408static const struct snd_malloc_ops snd_dma_vmalloc_ops = {
409 .alloc = snd_dma_vmalloc_alloc,
410 .free = snd_dma_vmalloc_free,
411 .mmap = snd_dma_vmalloc_mmap,
412 .get_addr = snd_dma_vmalloc_get_addr,
413 .get_page = snd_dma_vmalloc_get_page,
414 .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
415};
416
417#ifdef CONFIG_HAS_DMA
418/*
419 * IRAM allocator
420 */
421#ifdef CONFIG_GENERIC_ALLOCATOR
422static void *snd_dma_iram_alloc(struct snd_dma_buffer *dmab, size_t size)
423{
424 struct device *dev = dmab->dev.dev;
425 struct gen_pool *pool;
426 void *p;
427
428 if (dev->of_node) {
429 pool = of_gen_pool_get(dev->of_node, "iram", 0);
430 /* Assign the pool into private_data field */
431 dmab->private_data = pool;
432
433 p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE);
434 if (p)
435 return p;
436 }
437
438 /* Internal memory might have limited size and no enough space,
439 * so if we fail to malloc, try to fetch memory traditionally.
440 */
441 dmab->dev.type = SNDRV_DMA_TYPE_DEV;
442 return __snd_dma_alloc_pages(dmab, size);
443}
444
445static void snd_dma_iram_free(struct snd_dma_buffer *dmab)
446{
447 struct gen_pool *pool = dmab->private_data;
448
449 if (pool && dmab->area)
450 gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
451}
452
453static int snd_dma_iram_mmap(struct snd_dma_buffer *dmab,
454 struct vm_area_struct *area)
455{
456 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
457 return remap_pfn_range(area, area->vm_start,
458 dmab->addr >> PAGE_SHIFT,
459 area->vm_end - area->vm_start,
460 area->vm_page_prot);
461}
462
463static const struct snd_malloc_ops snd_dma_iram_ops = {
464 .alloc = snd_dma_iram_alloc,
465 .free = snd_dma_iram_free,
466 .mmap = snd_dma_iram_mmap,
467};
468#endif /* CONFIG_GENERIC_ALLOCATOR */
469
470/*
471 * Coherent device pages allocator
472 */
473static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
474{
475 return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
476}
477
478static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
479{
480 dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
481}
482
483static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
484 struct vm_area_struct *area)
485{
486 return dma_mmap_coherent(dmab->dev.dev, area,
487 dmab->area, dmab->addr, dmab->bytes);
488}
489
490static const struct snd_malloc_ops snd_dma_dev_ops = {
491 .alloc = snd_dma_dev_alloc,
492 .free = snd_dma_dev_free,
493 .mmap = snd_dma_dev_mmap,
494};
495
496/*
497 * Write-combined pages
498 */
499#ifdef CONFIG_SND_DMA_SGBUF
500/* x86-specific allocations */
501static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
502{
503 void *p = do_alloc_pages(dmab->dev.dev, size, &dmab->addr, true);
504
505 if (!p)
506 return NULL;
507 dmab->addr = dma_map_single(dmab->dev.dev, p, size, DMA_BIDIRECTIONAL);
508 if (dma_mapping_error(dmab->dev.dev, dmab->addr)) {
509 do_free_pages(dmab->area, size, true);
510 return NULL;
511 }
512 return p;
513}
514
515static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
516{
517 dma_unmap_single(dmab->dev.dev, dmab->addr, dmab->bytes,
518 DMA_BIDIRECTIONAL);
519 do_free_pages(dmab->area, dmab->bytes, true);
520}
521
522static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
523 struct vm_area_struct *area)
524{
525 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
526 return dma_mmap_coherent(dmab->dev.dev, area,
527 dmab->area, dmab->addr, dmab->bytes);
528}
529#else
530static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
531{
532 return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
533}
534
535static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
536{
537 dma_free_wc(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
538}
539
540static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
541 struct vm_area_struct *area)
542{
543 return dma_mmap_wc(dmab->dev.dev, area,
544 dmab->area, dmab->addr, dmab->bytes);
545}
546#endif
547
548static const struct snd_malloc_ops snd_dma_wc_ops = {
549 .alloc = snd_dma_wc_alloc,
550 .free = snd_dma_wc_free,
551 .mmap = snd_dma_wc_mmap,
552};
553
554/*
555 * Non-contiguous pages allocator
556 */
557static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
558{
559 struct sg_table *sgt;
560 void *p;
561
562 sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
563 DEFAULT_GFP, 0);
564 if (!sgt)
565 return NULL;
566
567 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
568 sg_dma_address(sgt->sgl));
569 p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
570 if (p) {
571 dmab->private_data = sgt;
572 /* store the first page address for convenience */
573 dmab->addr = snd_sgbuf_get_addr(dmab, 0);
574 } else {
575 dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
576 }
577 return p;
578}
579
580static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
581{
582 dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
583 dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
584 dmab->dev.dir);
585}
586
587static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
588 struct vm_area_struct *area)
589{
590 return dma_mmap_noncontiguous(dmab->dev.dev, area,
591 dmab->bytes, dmab->private_data);
592}
593
594static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
595 enum snd_dma_sync_mode mode)
596{
597 if (mode == SNDRV_DMA_SYNC_CPU) {
598 if (dmab->dev.dir == DMA_TO_DEVICE)
599 return;
600 invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
601 dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
602 dmab->dev.dir);
603 } else {
604 if (dmab->dev.dir == DMA_FROM_DEVICE)
605 return;
606 flush_kernel_vmap_range(dmab->area, dmab->bytes);
607 dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
608 dmab->dev.dir);
609 }
610}
611
612static inline void snd_dma_noncontig_iter_set(struct snd_dma_buffer *dmab,
613 struct sg_page_iter *piter,
614 size_t offset)
615{
616 struct sg_table *sgt = dmab->private_data;
617
618 __sg_page_iter_start(piter, sgt->sgl, sgt->orig_nents,
619 offset >> PAGE_SHIFT);
620}
621
622static dma_addr_t snd_dma_noncontig_get_addr(struct snd_dma_buffer *dmab,
623 size_t offset)
624{
625 struct sg_dma_page_iter iter;
626
627 snd_dma_noncontig_iter_set(dmab, &iter.base, offset);
628 __sg_page_iter_dma_next(&iter);
629 return sg_page_iter_dma_address(&iter) + offset % PAGE_SIZE;
630}
631
632static struct page *snd_dma_noncontig_get_page(struct snd_dma_buffer *dmab,
633 size_t offset)
634{
635 struct sg_page_iter iter;
636
637 snd_dma_noncontig_iter_set(dmab, &iter, offset);
638 __sg_page_iter_next(&iter);
639 return sg_page_iter_page(&iter);
640}
641
642static unsigned int
643snd_dma_noncontig_get_chunk_size(struct snd_dma_buffer *dmab,
644 unsigned int ofs, unsigned int size)
645{
646 struct sg_dma_page_iter iter;
647 unsigned int start, end;
648 unsigned long addr;
649
650 start = ALIGN_DOWN(ofs, PAGE_SIZE);
651 end = ofs + size - 1; /* the last byte address */
652 snd_dma_noncontig_iter_set(dmab, &iter.base, start);
653 if (!__sg_page_iter_dma_next(&iter))
654 return 0;
655 /* check page continuity */
656 addr = sg_page_iter_dma_address(&iter);
657 for (;;) {
658 start += PAGE_SIZE;
659 if (start > end)
660 break;
661 addr += PAGE_SIZE;
662 if (!__sg_page_iter_dma_next(&iter) ||
663 sg_page_iter_dma_address(&iter) != addr)
664 return start - ofs;
665 }
666 /* ok, all on continuous pages */
667 return size;
668}
669
670static const struct snd_malloc_ops snd_dma_noncontig_ops = {
671 .alloc = snd_dma_noncontig_alloc,
672 .free = snd_dma_noncontig_free,
673 .mmap = snd_dma_noncontig_mmap,
674 .sync = snd_dma_noncontig_sync,
675 .get_addr = snd_dma_noncontig_get_addr,
676 .get_page = snd_dma_noncontig_get_page,
677 .get_chunk_size = snd_dma_noncontig_get_chunk_size,
678};
679
680#ifdef CONFIG_SND_DMA_SGBUF
681/* Fallback SG-buffer allocations for x86 */
682struct snd_dma_sg_fallback {
683 struct sg_table sgt; /* used by get_addr - must be the first item */
684 size_t count;
685 struct page **pages;
686 unsigned int *npages;
687};
688
689static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
690 struct snd_dma_sg_fallback *sgbuf)
691{
692 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
693 size_t i, size;
694
695 if (sgbuf->pages && sgbuf->npages) {
696 i = 0;
697 while (i < sgbuf->count) {
698 size = sgbuf->npages[i];
699 if (!size)
700 break;
701 do_free_pages(page_address(sgbuf->pages[i]),
702 size << PAGE_SHIFT, wc);
703 i += size;
704 }
705 }
706 kvfree(sgbuf->pages);
707 kvfree(sgbuf->npages);
708 kfree(sgbuf);
709}
710
711/* fallback manual S/G buffer allocations */
712static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
713{
714 bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG;
715 struct snd_dma_sg_fallback *sgbuf;
716 struct page **pagep, *curp;
717 size_t chunk;
718 dma_addr_t addr;
719 unsigned int idx, npages;
720 void *p;
721
722 sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
723 if (!sgbuf)
724 return NULL;
725 size = PAGE_ALIGN(size);
726 sgbuf->count = size >> PAGE_SHIFT;
727 sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
728 sgbuf->npages = kvcalloc(sgbuf->count, sizeof(*sgbuf->npages), GFP_KERNEL);
729 if (!sgbuf->pages || !sgbuf->npages)
730 goto error;
731
732 pagep = sgbuf->pages;
733 chunk = size;
734 idx = 0;
735 while (size > 0) {
736 chunk = min(size, chunk);
737 p = do_alloc_pages(dmab->dev.dev, chunk, &addr, wc);
738 if (!p) {
739 if (chunk <= PAGE_SIZE)
740 goto error;
741 chunk >>= 1;
742 chunk = PAGE_SIZE << get_order(chunk);
743 continue;
744 }
745
746 size -= chunk;
747 /* fill pages */
748 npages = chunk >> PAGE_SHIFT;
749 sgbuf->npages[idx] = npages;
750 idx += npages;
751 curp = virt_to_page(p);
752 while (npages--)
753 *pagep++ = curp++;
754 }
755
756 if (sg_alloc_table_from_pages(&sgbuf->sgt, sgbuf->pages, sgbuf->count,
757 0, sgbuf->count << PAGE_SHIFT, GFP_KERNEL))
758 goto error;
759
760 if (dma_map_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0))
761 goto error_dma_map;
762
763 p = vmap(sgbuf->pages, sgbuf->count, VM_MAP, PAGE_KERNEL);
764 if (!p)
765 goto error_vmap;
766
767 dmab->private_data = sgbuf;
768 /* store the first page address for convenience */
769 dmab->addr = snd_sgbuf_get_addr(dmab, 0);
770 return p;
771
772 error_vmap:
773 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
774 error_dma_map:
775 sg_free_table(&sgbuf->sgt);
776 error:
777 __snd_dma_sg_fallback_free(dmab, sgbuf);
778 return NULL;
779}
780
781static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
782{
783 struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
784
785 vunmap(dmab->area);
786 dma_unmap_sgtable(dmab->dev.dev, &sgbuf->sgt, DMA_BIDIRECTIONAL, 0);
787 sg_free_table(&sgbuf->sgt);
788 __snd_dma_sg_fallback_free(dmab, dmab->private_data);
789}
790
791static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
792 struct vm_area_struct *area)
793{
794 struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
795
796 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
797 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
798 return vm_map_pages(area, sgbuf->pages, sgbuf->count);
799}
800
801static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
802{
803 int type = dmab->dev.type;
804 void *p;
805
806 /* try the standard DMA API allocation at first */
807 if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
808 dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
809 else
810 dmab->dev.type = SNDRV_DMA_TYPE_DEV;
811 p = __snd_dma_alloc_pages(dmab, size);
812 if (p)
813 return p;
814
815 dmab->dev.type = type; /* restore the type */
816 return snd_dma_sg_fallback_alloc(dmab, size);
817}
818
819static const struct snd_malloc_ops snd_dma_sg_ops = {
820 .alloc = snd_dma_sg_alloc,
821 .free = snd_dma_sg_fallback_free,
822 .mmap = snd_dma_sg_fallback_mmap,
823 /* reuse noncontig helper */
824 .get_addr = snd_dma_noncontig_get_addr,
825 /* reuse vmalloc helpers */
826 .get_page = snd_dma_vmalloc_get_page,
827 .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
828};
829#endif /* CONFIG_SND_DMA_SGBUF */
830
831/*
832 * Non-coherent pages allocator
833 */
834static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
835{
836 void *p;
837
838 p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
839 dmab->dev.dir, DEFAULT_GFP);
840 if (p)
841 dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
842 return p;
843}
844
845static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
846{
847 dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
848 dmab->addr, dmab->dev.dir);
849}
850
851static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
852 struct vm_area_struct *area)
853{
854 area->vm_page_prot = vm_get_page_prot(area->vm_flags);
855 return dma_mmap_pages(dmab->dev.dev, area,
856 area->vm_end - area->vm_start,
857 virt_to_page(dmab->area));
858}
859
860static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
861 enum snd_dma_sync_mode mode)
862{
863 if (mode == SNDRV_DMA_SYNC_CPU) {
864 if (dmab->dev.dir != DMA_TO_DEVICE)
865 dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
866 dmab->bytes, dmab->dev.dir);
867 } else {
868 if (dmab->dev.dir != DMA_FROM_DEVICE)
869 dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
870 dmab->bytes, dmab->dev.dir);
871 }
872}
873
874static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
875 .alloc = snd_dma_noncoherent_alloc,
876 .free = snd_dma_noncoherent_free,
877 .mmap = snd_dma_noncoherent_mmap,
878 .sync = snd_dma_noncoherent_sync,
879};
880
881#endif /* CONFIG_HAS_DMA */
882
883/*
884 * Entry points
885 */
886static const struct snd_malloc_ops *snd_dma_ops[] = {
887 [SNDRV_DMA_TYPE_CONTINUOUS] = &snd_dma_continuous_ops,
888 [SNDRV_DMA_TYPE_VMALLOC] = &snd_dma_vmalloc_ops,
889#ifdef CONFIG_HAS_DMA
890 [SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
891 [SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
892 [SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
893 [SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
894#ifdef CONFIG_SND_DMA_SGBUF
895 [SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
896 [SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
897#endif
898#ifdef CONFIG_GENERIC_ALLOCATOR
899 [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
900#endif /* CONFIG_GENERIC_ALLOCATOR */
901#endif /* CONFIG_HAS_DMA */
902};
903
904static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
905{
906 if (WARN_ON_ONCE(!dmab))
907 return NULL;
908 if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
909 dmab->dev.type >= ARRAY_SIZE(snd_dma_ops)))
910 return NULL;
911 return snd_dma_ops[dmab->dev.type];
912}
1/*
2 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
3 * Takashi Iwai <tiwai@suse.de>
4 *
5 * Generic memory allocators
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/proc_fs.h>
26#include <linux/init.h>
27#include <linux/pci.h>
28#include <linux/slab.h>
29#include <linux/mm.h>
30#include <linux/seq_file.h>
31#include <asm/uaccess.h>
32#include <linux/dma-mapping.h>
33#include <linux/moduleparam.h>
34#include <linux/mutex.h>
35#include <sound/memalloc.h>
36
37
38MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
39MODULE_DESCRIPTION("Memory allocator for ALSA system.");
40MODULE_LICENSE("GPL");
41
42
43/*
44 */
45
46static DEFINE_MUTEX(list_mutex);
47static LIST_HEAD(mem_list_head);
48
49/* buffer preservation list */
50struct snd_mem_list {
51 struct snd_dma_buffer buffer;
52 unsigned int id;
53 struct list_head list;
54};
55
56/* id for pre-allocated buffers */
57#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
58
59/*
60 *
61 * Generic memory allocators
62 *
63 */
64
65static long snd_allocated_pages; /* holding the number of allocated pages */
66
67static inline void inc_snd_pages(int order)
68{
69 snd_allocated_pages += 1 << order;
70}
71
72static inline void dec_snd_pages(int order)
73{
74 snd_allocated_pages -= 1 << order;
75}
76
77/**
78 * snd_malloc_pages - allocate pages with the given size
79 * @size: the size to allocate in bytes
80 * @gfp_flags: the allocation conditions, GFP_XXX
81 *
82 * Allocates the physically contiguous pages with the given size.
83 *
84 * Returns the pointer of the buffer, or NULL if no enoguh memory.
85 */
86void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
87{
88 int pg;
89 void *res;
90
91 if (WARN_ON(!size))
92 return NULL;
93 if (WARN_ON(!gfp_flags))
94 return NULL;
95 gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
96 pg = get_order(size);
97 if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
98 inc_snd_pages(pg);
99 return res;
100}
101
102/**
103 * snd_free_pages - release the pages
104 * @ptr: the buffer pointer to release
105 * @size: the allocated buffer size
106 *
107 * Releases the buffer allocated via snd_malloc_pages().
108 */
109void snd_free_pages(void *ptr, size_t size)
110{
111 int pg;
112
113 if (ptr == NULL)
114 return;
115 pg = get_order(size);
116 dec_snd_pages(pg);
117 free_pages((unsigned long) ptr, pg);
118}
119
120/*
121 *
122 * Bus-specific memory allocators
123 *
124 */
125
126#ifdef CONFIG_HAS_DMA
127/* allocate the coherent DMA pages */
128static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
129{
130 int pg;
131 void *res;
132 gfp_t gfp_flags;
133
134 if (WARN_ON(!dma))
135 return NULL;
136 pg = get_order(size);
137 gfp_flags = GFP_KERNEL
138 | __GFP_COMP /* compound page lets parts be mapped */
139 | __GFP_NORETRY /* don't trigger OOM-killer */
140 | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
141 res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
142 if (res != NULL)
143 inc_snd_pages(pg);
144
145 return res;
146}
147
148/* free the coherent DMA pages */
149static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
150 dma_addr_t dma)
151{
152 int pg;
153
154 if (ptr == NULL)
155 return;
156 pg = get_order(size);
157 dec_snd_pages(pg);
158 dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
159}
160#endif /* CONFIG_HAS_DMA */
161
162/*
163 *
164 * ALSA generic memory management
165 *
166 */
167
168
169/**
170 * snd_dma_alloc_pages - allocate the buffer area according to the given type
171 * @type: the DMA buffer type
172 * @device: the device pointer
173 * @size: the buffer size to allocate
174 * @dmab: buffer allocation record to store the allocated data
175 *
176 * Calls the memory-allocator function for the corresponding
177 * buffer type.
178 *
179 * Returns zero if the buffer with the given size is allocated successfully,
180 * other a negative value at error.
181 */
182int snd_dma_alloc_pages(int type, struct device *device, size_t size,
183 struct snd_dma_buffer *dmab)
184{
185 if (WARN_ON(!size))
186 return -ENXIO;
187 if (WARN_ON(!dmab))
188 return -ENXIO;
189
190 dmab->dev.type = type;
191 dmab->dev.dev = device;
192 dmab->bytes = 0;
193 switch (type) {
194 case SNDRV_DMA_TYPE_CONTINUOUS:
195 dmab->area = snd_malloc_pages(size,
196 (__force gfp_t)(unsigned long)device);
197 dmab->addr = 0;
198 break;
199#ifdef CONFIG_HAS_DMA
200 case SNDRV_DMA_TYPE_DEV:
201 dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
202 break;
203#endif
204#ifdef CONFIG_SND_DMA_SGBUF
205 case SNDRV_DMA_TYPE_DEV_SG:
206 snd_malloc_sgbuf_pages(device, size, dmab, NULL);
207 break;
208#endif
209 default:
210 printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
211 dmab->area = NULL;
212 dmab->addr = 0;
213 return -ENXIO;
214 }
215 if (! dmab->area)
216 return -ENOMEM;
217 dmab->bytes = size;
218 return 0;
219}
220
221/**
222 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
223 * @type: the DMA buffer type
224 * @device: the device pointer
225 * @size: the buffer size to allocate
226 * @dmab: buffer allocation record to store the allocated data
227 *
228 * Calls the memory-allocator function for the corresponding
229 * buffer type. When no space is left, this function reduces the size and
230 * tries to allocate again. The size actually allocated is stored in
231 * res_size argument.
232 *
233 * Returns zero if the buffer with the given size is allocated successfully,
234 * other a negative value at error.
235 */
236int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
237 struct snd_dma_buffer *dmab)
238{
239 int err;
240
241 while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
242 size_t aligned_size;
243 if (err != -ENOMEM)
244 return err;
245 if (size <= PAGE_SIZE)
246 return -ENOMEM;
247 aligned_size = PAGE_SIZE << get_order(size);
248 if (size != aligned_size)
249 size = aligned_size;
250 else
251 size >>= 1;
252 }
253 if (! dmab->area)
254 return -ENOMEM;
255 return 0;
256}
257
258
259/**
260 * snd_dma_free_pages - release the allocated buffer
261 * @dmab: the buffer allocation record to release
262 *
263 * Releases the allocated buffer via snd_dma_alloc_pages().
264 */
265void snd_dma_free_pages(struct snd_dma_buffer *dmab)
266{
267 switch (dmab->dev.type) {
268 case SNDRV_DMA_TYPE_CONTINUOUS:
269 snd_free_pages(dmab->area, dmab->bytes);
270 break;
271#ifdef CONFIG_HAS_DMA
272 case SNDRV_DMA_TYPE_DEV:
273 snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
274 break;
275#endif
276#ifdef CONFIG_SND_DMA_SGBUF
277 case SNDRV_DMA_TYPE_DEV_SG:
278 snd_free_sgbuf_pages(dmab);
279 break;
280#endif
281 default:
282 printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
283 }
284}
285
286
287/**
288 * snd_dma_get_reserved - get the reserved buffer for the given device
289 * @dmab: the buffer allocation record to store
290 * @id: the buffer id
291 *
292 * Looks for the reserved-buffer list and re-uses if the same buffer
293 * is found in the list. When the buffer is found, it's removed from the free list.
294 *
295 * Returns the size of buffer if the buffer is found, or zero if not found.
296 */
297size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
298{
299 struct snd_mem_list *mem;
300
301 if (WARN_ON(!dmab))
302 return 0;
303
304 mutex_lock(&list_mutex);
305 list_for_each_entry(mem, &mem_list_head, list) {
306 if (mem->id == id &&
307 (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
308 ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
309 struct device *dev = dmab->dev.dev;
310 list_del(&mem->list);
311 *dmab = mem->buffer;
312 if (dmab->dev.dev == NULL)
313 dmab->dev.dev = dev;
314 kfree(mem);
315 mutex_unlock(&list_mutex);
316 return dmab->bytes;
317 }
318 }
319 mutex_unlock(&list_mutex);
320 return 0;
321}
322
323/**
324 * snd_dma_reserve_buf - reserve the buffer
325 * @dmab: the buffer to reserve
326 * @id: the buffer id
327 *
328 * Reserves the given buffer as a reserved buffer.
329 *
330 * Returns zero if successful, or a negative code at error.
331 */
332int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
333{
334 struct snd_mem_list *mem;
335
336 if (WARN_ON(!dmab))
337 return -EINVAL;
338 mem = kmalloc(sizeof(*mem), GFP_KERNEL);
339 if (! mem)
340 return -ENOMEM;
341 mutex_lock(&list_mutex);
342 mem->buffer = *dmab;
343 mem->id = id;
344 list_add_tail(&mem->list, &mem_list_head);
345 mutex_unlock(&list_mutex);
346 return 0;
347}
348
349/*
350 * purge all reserved buffers
351 */
352static void free_all_reserved_pages(void)
353{
354 struct list_head *p;
355 struct snd_mem_list *mem;
356
357 mutex_lock(&list_mutex);
358 while (! list_empty(&mem_list_head)) {
359 p = mem_list_head.next;
360 mem = list_entry(p, struct snd_mem_list, list);
361 list_del(p);
362 snd_dma_free_pages(&mem->buffer);
363 kfree(mem);
364 }
365 mutex_unlock(&list_mutex);
366}
367
368
369#ifdef CONFIG_PROC_FS
370/*
371 * proc file interface
372 */
373#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
374static struct proc_dir_entry *snd_mem_proc;
375
376static int snd_mem_proc_read(struct seq_file *seq, void *offset)
377{
378 long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
379 struct snd_mem_list *mem;
380 int devno;
381 static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
382
383 mutex_lock(&list_mutex);
384 seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
385 pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
386 devno = 0;
387 list_for_each_entry(mem, &mem_list_head, list) {
388 devno++;
389 seq_printf(seq, "buffer %d : ID %08x : type %s\n",
390 devno, mem->id, types[mem->buffer.dev.type]);
391 seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
392 (unsigned long)mem->buffer.addr,
393 (int)mem->buffer.bytes);
394 }
395 mutex_unlock(&list_mutex);
396 return 0;
397}
398
399static int snd_mem_proc_open(struct inode *inode, struct file *file)
400{
401 return single_open(file, snd_mem_proc_read, NULL);
402}
403
404/* FIXME: for pci only - other bus? */
405#ifdef CONFIG_PCI
406#define gettoken(bufp) strsep(bufp, " \t\n")
407
408static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
409 size_t count, loff_t * ppos)
410{
411 char buf[128];
412 char *token, *p;
413
414 if (count > sizeof(buf) - 1)
415 return -EINVAL;
416 if (copy_from_user(buf, buffer, count))
417 return -EFAULT;
418 buf[count] = '\0';
419
420 p = buf;
421 token = gettoken(&p);
422 if (! token || *token == '#')
423 return count;
424 if (strcmp(token, "add") == 0) {
425 char *endp;
426 int vendor, device, size, buffers;
427 long mask;
428 int i, alloced;
429 struct pci_dev *pci;
430
431 if ((token = gettoken(&p)) == NULL ||
432 (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
433 (token = gettoken(&p)) == NULL ||
434 (device = simple_strtol(token, NULL, 0)) <= 0 ||
435 (token = gettoken(&p)) == NULL ||
436 (mask = simple_strtol(token, NULL, 0)) < 0 ||
437 (token = gettoken(&p)) == NULL ||
438 (size = memparse(token, &endp)) < 64*1024 ||
439 size > 16*1024*1024 /* too big */ ||
440 (token = gettoken(&p)) == NULL ||
441 (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
442 buffers > 4) {
443 printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
444 return count;
445 }
446 vendor &= 0xffff;
447 device &= 0xffff;
448
449 alloced = 0;
450 pci = NULL;
451 while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
452 if (mask > 0 && mask < 0xffffffff) {
453 if (pci_set_dma_mask(pci, mask) < 0 ||
454 pci_set_consistent_dma_mask(pci, mask) < 0) {
455 printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
456 pci_dev_put(pci);
457 return count;
458 }
459 }
460 for (i = 0; i < buffers; i++) {
461 struct snd_dma_buffer dmab;
462 memset(&dmab, 0, sizeof(dmab));
463 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
464 size, &dmab) < 0) {
465 printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
466 pci_dev_put(pci);
467 return count;
468 }
469 snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
470 }
471 alloced++;
472 }
473 if (! alloced) {
474 for (i = 0; i < buffers; i++) {
475 struct snd_dma_buffer dmab;
476 memset(&dmab, 0, sizeof(dmab));
477 /* FIXME: We can allocate only in ZONE_DMA
478 * without a device pointer!
479 */
480 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
481 size, &dmab) < 0) {
482 printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
483 break;
484 }
485 snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
486 }
487 }
488 } else if (strcmp(token, "erase") == 0)
489 /* FIXME: need for releasing each buffer chunk? */
490 free_all_reserved_pages();
491 else
492 printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
493 return count;
494}
495#endif /* CONFIG_PCI */
496
497static const struct file_operations snd_mem_proc_fops = {
498 .owner = THIS_MODULE,
499 .open = snd_mem_proc_open,
500 .read = seq_read,
501#ifdef CONFIG_PCI
502 .write = snd_mem_proc_write,
503#endif
504 .llseek = seq_lseek,
505 .release = single_release,
506};
507
508#endif /* CONFIG_PROC_FS */
509
510/*
511 * module entry
512 */
513
514static int __init snd_mem_init(void)
515{
516#ifdef CONFIG_PROC_FS
517 snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
518 &snd_mem_proc_fops);
519#endif
520 return 0;
521}
522
523static void __exit snd_mem_exit(void)
524{
525 remove_proc_entry(SND_MEM_PROC_FILE, NULL);
526 free_all_reserved_pages();
527 if (snd_allocated_pages > 0)
528 printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
529}
530
531
532module_init(snd_mem_init)
533module_exit(snd_mem_exit)
534
535
536/*
537 * exports
538 */
539EXPORT_SYMBOL(snd_dma_alloc_pages);
540EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
541EXPORT_SYMBOL(snd_dma_free_pages);
542
543EXPORT_SYMBOL(snd_dma_get_reserved_buf);
544EXPORT_SYMBOL(snd_dma_reserve_buf);
545
546EXPORT_SYMBOL(snd_malloc_pages);
547EXPORT_SYMBOL(snd_free_pages);