Linux Audio: sound/core/sgbuf.c — scatter-gather buffer helper.
Two versions of the file are listed below for comparison.

First listing: kernel v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Scatter-Gather buffer
  4 *
  5 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  6 */
  7
  8#include <linux/slab.h>
  9#include <linux/mm.h>
 10#include <linux/vmalloc.h>
 11#include <linux/export.h>
 12#include <asm/pgtable.h>
 13#include <sound/memalloc.h>
 14
 15
 16/* table entries are align to 32 */
 17#define SGBUF_TBL_ALIGN		32
 18#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
 19
 20int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 21{
 22	struct snd_sg_buf *sgbuf = dmab->private_data;
 23	struct snd_dma_buffer tmpb;
 24	int i;
 25
 26	if (! sgbuf)
 27		return -EINVAL;
 28
 29	vunmap(dmab->area);
 30	dmab->area = NULL;
 31
 32	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
 33	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
 34		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
 35	tmpb.dev.dev = sgbuf->dev;
 36	for (i = 0; i < sgbuf->pages; i++) {
 37		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
 38			continue; /* continuous pages */
 39		tmpb.area = sgbuf->table[i].buf;
 40		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
 41		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
 42		snd_dma_free_pages(&tmpb);
 43	}
 44
 45	kfree(sgbuf->table);
 46	kfree(sgbuf->page_table);
 47	kfree(sgbuf);
 48	dmab->private_data = NULL;
 49	
 50	return 0;
 51}
 52
 53#define MAX_ALLOC_PAGES		32
 54
 55void *snd_malloc_sgbuf_pages(struct device *device,
 56			     size_t size, struct snd_dma_buffer *dmab,
 57			     size_t *res_size)
 58{
 59	struct snd_sg_buf *sgbuf;
 60	unsigned int i, pages, chunk, maxpages;
 61	struct snd_dma_buffer tmpb;
 62	struct snd_sg_page *table;
 63	struct page **pgtable;
 64	int type = SNDRV_DMA_TYPE_DEV;
 65	pgprot_t prot = PAGE_KERNEL;
 66
 67	dmab->area = NULL;
 68	dmab->addr = 0;
 69	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 70	if (! sgbuf)
 71		return NULL;
 72	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
 73		type = SNDRV_DMA_TYPE_DEV_UC;
 74#ifdef pgprot_noncached
 75		prot = pgprot_noncached(PAGE_KERNEL);
 76#endif
 77	}
 78	sgbuf->dev = device;
 79	pages = snd_sgbuf_aligned_pages(size);
 80	sgbuf->tblsize = sgbuf_align_table(pages);
 81	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
 82	if (!table)
 83		goto _failed;
 84	sgbuf->table = table;
 85	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
 86	if (!pgtable)
 87		goto _failed;
 88	sgbuf->page_table = pgtable;
 89
 90	/* allocate pages */
 91	maxpages = MAX_ALLOC_PAGES;
 92	while (pages > 0) {
 93		chunk = pages;
 94		/* don't be too eager to take a huge chunk */
 95		if (chunk > maxpages)
 96			chunk = maxpages;
 97		chunk <<= PAGE_SHIFT;
 98		if (snd_dma_alloc_pages_fallback(type, device,
 99						 chunk, &tmpb) < 0) {
100			if (!sgbuf->pages)
101				goto _failed;
102			if (!res_size)
103				goto _failed;
104			size = sgbuf->pages * PAGE_SIZE;
105			break;
106		}
107		chunk = tmpb.bytes >> PAGE_SHIFT;
108		for (i = 0; i < chunk; i++) {
109			table->buf = tmpb.area;
110			table->addr = tmpb.addr;
111			if (!i)
112				table->addr |= chunk; /* mark head */
113			table++;
114			*pgtable++ = virt_to_page(tmpb.area);
115			tmpb.area += PAGE_SIZE;
116			tmpb.addr += PAGE_SIZE;
117		}
118		sgbuf->pages += chunk;
119		pages -= chunk;
120		if (chunk < maxpages)
121			maxpages = chunk;
122	}
123
124	sgbuf->size = size;
125	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
126	if (! dmab->area)
127		goto _failed;
128	if (res_size)
129		*res_size = sgbuf->size;
130	return dmab->area;
131
132 _failed:
133	snd_free_sgbuf_pages(dmab); /* free the table */
134	return NULL;
135}
136
137/*
138 * compute the max chunk size with continuous pages on sg-buffer
139 */
140unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
141				      unsigned int ofs, unsigned int size)
142{
143	struct snd_sg_buf *sg = dmab->private_data;
144	unsigned int start, end, pg;
145
146	start = ofs >> PAGE_SHIFT;
147	end = (ofs + size - 1) >> PAGE_SHIFT;
148	/* check page continuity */
149	pg = sg->table[start].addr >> PAGE_SHIFT;
150	for (;;) {
151		start++;
152		if (start > end)
153			break;
154		pg++;
155		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
156			return (start << PAGE_SHIFT) - ofs;
157	}
158	/* ok, all on continuous pages */
159	return size;
160}
161EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
v4.6
 
  1/*
  2 * Scatter-Gather buffer
  3 *
  4 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
  5 *
  6 *   This program is free software; you can redistribute it and/or modify
  7 *   it under the terms of the GNU General Public License as published by
  8 *   the Free Software Foundation; either version 2 of the License, or
  9 *   (at your option) any later version.
 10 *
 11 *   This program is distributed in the hope that it will be useful,
 12 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 *   GNU General Public License for more details.
 15 *
 16 *   You should have received a copy of the GNU General Public License
 17 *   along with this program; if not, write to the Free Software
 18 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 19 *
 20 */
 21
 22#include <linux/slab.h>
 23#include <linux/mm.h>
 24#include <linux/vmalloc.h>
 25#include <linux/export.h>
 
 26#include <sound/memalloc.h>
 27
 28
 29/* table entries are align to 32 */
 30#define SGBUF_TBL_ALIGN		32
 31#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
 32
 33int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
 34{
 35	struct snd_sg_buf *sgbuf = dmab->private_data;
 36	struct snd_dma_buffer tmpb;
 37	int i;
 38
 39	if (! sgbuf)
 40		return -EINVAL;
 41
 42	vunmap(dmab->area);
 43	dmab->area = NULL;
 44
 45	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
 
 
 46	tmpb.dev.dev = sgbuf->dev;
 47	for (i = 0; i < sgbuf->pages; i++) {
 48		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
 49			continue; /* continuous pages */
 50		tmpb.area = sgbuf->table[i].buf;
 51		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
 52		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
 53		snd_dma_free_pages(&tmpb);
 54	}
 55
 56	kfree(sgbuf->table);
 57	kfree(sgbuf->page_table);
 58	kfree(sgbuf);
 59	dmab->private_data = NULL;
 60	
 61	return 0;
 62}
 63
 64#define MAX_ALLOC_PAGES		32
 65
 66void *snd_malloc_sgbuf_pages(struct device *device,
 67			     size_t size, struct snd_dma_buffer *dmab,
 68			     size_t *res_size)
 69{
 70	struct snd_sg_buf *sgbuf;
 71	unsigned int i, pages, chunk, maxpages;
 72	struct snd_dma_buffer tmpb;
 73	struct snd_sg_page *table;
 74	struct page **pgtable;
 
 
 75
 76	dmab->area = NULL;
 77	dmab->addr = 0;
 78	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
 79	if (! sgbuf)
 80		return NULL;
 
 
 
 
 
 
 81	sgbuf->dev = device;
 82	pages = snd_sgbuf_aligned_pages(size);
 83	sgbuf->tblsize = sgbuf_align_table(pages);
 84	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
 85	if (!table)
 86		goto _failed;
 87	sgbuf->table = table;
 88	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
 89	if (!pgtable)
 90		goto _failed;
 91	sgbuf->page_table = pgtable;
 92
 93	/* allocate pages */
 94	maxpages = MAX_ALLOC_PAGES;
 95	while (pages > 0) {
 96		chunk = pages;
 97		/* don't be too eager to take a huge chunk */
 98		if (chunk > maxpages)
 99			chunk = maxpages;
100		chunk <<= PAGE_SHIFT;
101		if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
102						 chunk, &tmpb) < 0) {
103			if (!sgbuf->pages)
104				goto _failed;
105			if (!res_size)
106				goto _failed;
107			size = sgbuf->pages * PAGE_SIZE;
108			break;
109		}
110		chunk = tmpb.bytes >> PAGE_SHIFT;
111		for (i = 0; i < chunk; i++) {
112			table->buf = tmpb.area;
113			table->addr = tmpb.addr;
114			if (!i)
115				table->addr |= chunk; /* mark head */
116			table++;
117			*pgtable++ = virt_to_page(tmpb.area);
118			tmpb.area += PAGE_SIZE;
119			tmpb.addr += PAGE_SIZE;
120		}
121		sgbuf->pages += chunk;
122		pages -= chunk;
123		if (chunk < maxpages)
124			maxpages = chunk;
125	}
126
127	sgbuf->size = size;
128	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
129	if (! dmab->area)
130		goto _failed;
131	if (res_size)
132		*res_size = sgbuf->size;
133	return dmab->area;
134
135 _failed:
136	snd_free_sgbuf_pages(dmab); /* free the table */
137	return NULL;
138}
139
140/*
141 * compute the max chunk size with continuous pages on sg-buffer
142 */
143unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
144				      unsigned int ofs, unsigned int size)
145{
146	struct snd_sg_buf *sg = dmab->private_data;
147	unsigned int start, end, pg;
148
149	start = ofs >> PAGE_SHIFT;
150	end = (ofs + size - 1) >> PAGE_SHIFT;
151	/* check page continuity */
152	pg = sg->table[start].addr >> PAGE_SHIFT;
153	for (;;) {
154		start++;
155		if (start > end)
156			break;
157		pg++;
158		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
159			return (start << PAGE_SHIFT) - ofs;
160	}
161	/* ok, all on continuous pages */
162	return size;
163}
164EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);