Linux Audio

v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  ALSA sequencer Memory Manager
  4 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
  5 *                        Jaroslav Kysela <perex@perex.cz>
  6 *                2000 by Takashi Iwai <tiwai@suse.de>
  7 */
  8
  9#include <linux/init.h>
 10#include <linux/export.h>
 11#include <linux/slab.h>
 12#include <linux/sched/signal.h>
 13#include <linux/mm.h>
 14#include <sound/core.h>
 15
 16#include <sound/seq_kernel.h>
 17#include "seq_memory.h"
 18#include "seq_queue.h"
 19#include "seq_info.h"
 20#include "seq_lock.h"
 21
 22static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
 23{
 24	return pool->total_elements - atomic_read(&pool->counter);
 25}
 26
 27static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
 28{
 29	return snd_seq_pool_available(pool) >= pool->room;
 30}
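/*
 * Note on the two helpers above: pool->room is the low-water mark used for
 * output throttling; snd_seq_pool_init() below sets it to half the pool
 * size, so snd_seq_output_ok() reports a writable pool only while at least
 * half of the cells are still free.
 */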
 31
 32/*
 33 * Variable length event:
  34 * Events such as sysex use the variable-length type.
 35 * The external data may be stored in three different formats.
 36 * 1) kernel space
 37 *    This is the normal case.
 38 *      ext.data.len = length
 39 *      ext.data.ptr = buffer pointer
 40 * 2) user space
 41 *    When an event is generated via read(), the external data is
 42 *    kept in user space until expanded.
 43 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 44 *      ext.data.ptr = userspace pointer
 45 * 3) chained cells
 46 *    When the variable length event is enqueued (in prioq or fifo),
 47 *    the external data is decomposed to several cells.
 48 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
  49 *      ext.data.ptr = the additional cell head
 50 *         -> cell.next -> cell.next -> ..
 51 */
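/*
 * For example (illustrative values only), a 600-byte sysex dump held in a
 * user-space buffer would be described as:
 *
 *	ev.data.ext.len = 600 | SNDRV_SEQ_EXT_USRPTR;
 *	ev.data.ext.ptr = user_buffer;   (a hypothetical user pointer)
 *
 * and the plain length is recovered with
 * "ev.data.ext.len & ~SNDRV_SEQ_EXT_MASK", as get_var_len() does below.
 */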
 52
 53/*
 54 * exported:
 55 * call dump function to expand external data.
 56 */
 57
 58static int get_var_len(const struct snd_seq_event *event)
 59{
 60	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
 61		return -EINVAL;
 62
 63	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
 64}
 65
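/*
 * Walk the external data of 'event' in whichever of the three formats it is
 * stored and invoke 'func' once per contiguous chunk: a single call for
 * plain kernel-space data, 32-byte chunks copied in for user-space data,
 * and one chunk per cell for chained data.  A negative return from 'func'
 * aborts the walk and is returned to the caller.
 */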
 66int snd_seq_dump_var_event(const struct snd_seq_event *event,
 67			   snd_seq_dump_func_t func, void *private_data)
 68{
 69	int len, err;
 70	struct snd_seq_event_cell *cell;
 71
 72	if ((len = get_var_len(event)) <= 0)
 73		return len;
 74
 75	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
 76		char buf[32];
 77		char __user *curptr = (char __force __user *)event->data.ext.ptr;
 78		while (len > 0) {
 79			int size = sizeof(buf);
 80			if (len < size)
 81				size = len;
 82			if (copy_from_user(buf, curptr, size))
 83				return -EFAULT;
 84			err = func(private_data, buf, size);
 85			if (err < 0)
 86				return err;
 87			curptr += size;
 88			len -= size;
 89		}
 90		return 0;
 91	}
 92	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
 93		return func(private_data, event->data.ext.ptr, len);
 94
 95	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
 96	for (; len > 0 && cell; cell = cell->next) {
 97		int size = sizeof(struct snd_seq_event);
 98		if (len < size)
 99			size = len;
100		err = func(private_data, &cell->event, size);
101		if (err < 0)
102			return err;
103		len -= size;
104	}
105	return 0;
106}
107EXPORT_SYMBOL(snd_seq_dump_var_event);
108
109
110/*
111 * exported:
112 * expand the variable length event to linear buffer space.
113 */
114
115static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
116{
117	memcpy(*bufptr, src, size);
118	*bufptr += size;
119	return 0;
120}
121
122static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
123{
124	if (copy_to_user(*bufptr, src, size))
125		return -EFAULT;
126	*bufptr += size;
127	return 0;
128}
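/*
 * The two helpers above are passed to snd_seq_dump_var_event() as
 * snd_seq_dump_func_t callbacks by snd_seq_expand_var_event() below:
 * one copies each chunk into a kernel buffer, the other into a user-space
 * buffer, advancing the destination pointer as it goes.
 */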
129
130int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
131			     int in_kernel, int size_aligned)
132{
133	int len, newlen;
134	int err;
135
136	if ((len = get_var_len(event)) < 0)
137		return len;
138	newlen = len;
139	if (size_aligned > 0)
140		newlen = roundup(len, size_aligned);
141	if (count < newlen)
142		return -EAGAIN;
143
144	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
145		if (! in_kernel)
146			return -EINVAL;
147		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
148			return -EFAULT;
149		return newlen;
150	}
151	err = snd_seq_dump_var_event(event,
152				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
153				     (snd_seq_dump_func_t)seq_copy_in_user,
154				     &buf);
155	return err < 0 ? err : newlen;
156}
157EXPORT_SYMBOL(snd_seq_expand_var_event);
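/*
 * Illustrative in-kernel use of the export above (a sketch, not taken from
 * any particular driver): linearize a received variable-length event into a
 * local buffer before parsing it.
 *
 *	char buf[256];	(hypothetical scratch buffer)
 *	int n = snd_seq_expand_var_event(ev, sizeof(buf), buf, 1, 0);
 *	if (n < 0)
 *		return n;	(-EINVAL/-EFAULT/-EAGAIN as above)
 *	buf[0..n-1] now holds the expanded data.
 */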
158
159/*
160 * release this cell, free extended data if available
161 */
162
163static inline void free_cell(struct snd_seq_pool *pool,
164			     struct snd_seq_event_cell *cell)
165{
166	cell->next = pool->free;
167	pool->free = cell;
168	atomic_dec(&pool->counter);
169}
170
171void snd_seq_cell_free(struct snd_seq_event_cell * cell)
172{
173	unsigned long flags;
174	struct snd_seq_pool *pool;
175
176	if (snd_BUG_ON(!cell))
177		return;
178	pool = cell->pool;
179	if (snd_BUG_ON(!pool))
180		return;
181
182	spin_lock_irqsave(&pool->lock, flags);
183	free_cell(pool, cell);
184	if (snd_seq_ev_is_variable(&cell->event)) {
185		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
186			struct snd_seq_event_cell *curp, *nextptr;
187			curp = cell->event.data.ext.ptr;
188			for (; curp; curp = nextptr) {
189				nextptr = curp->next;
190				curp->next = pool->free;
191				free_cell(pool, curp);
192			}
193		}
194	}
195	if (waitqueue_active(&pool->output_sleep)) {
196		/* has enough space now? */
197		if (snd_seq_output_ok(pool))
198			wake_up(&pool->output_sleep);
199	}
200	spin_unlock_irqrestore(&pool->lock, flags);
201}
202
203
204/*
205 * allocate an event cell.
206 */
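/*
 * If the pool is exhausted and 'nonblock' is zero, the caller sleeps on
 * pool->output_sleep until cells are freed; the pool spinlock (and
 * 'mutexp', when given) is dropped across the sleep and re-taken
 * afterwards.  In nonblocking mode the function returns -EAGAIN instead of
 * sleeping; -ERESTARTSYS is returned if the sleep is interrupted by a
 * signal, and -ENOMEM once the pool is closing.
 */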
207static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
208			      struct snd_seq_event_cell **cellp,
209			      int nonblock, struct file *file,
210			      struct mutex *mutexp)
211{
212	struct snd_seq_event_cell *cell;
213	unsigned long flags;
214	int err = -EAGAIN;
215	wait_queue_entry_t wait;
216
217	if (pool == NULL)
218		return -EINVAL;
219
220	*cellp = NULL;
221
222	init_waitqueue_entry(&wait, current);
223	spin_lock_irqsave(&pool->lock, flags);
224	if (pool->ptr == NULL) {	/* not initialized */
225		pr_debug("ALSA: seq: pool is not initialized\n");
226		err = -EINVAL;
227		goto __error;
228	}
229	while (pool->free == NULL && ! nonblock && ! pool->closing) {
230
231		set_current_state(TASK_INTERRUPTIBLE);
232		add_wait_queue(&pool->output_sleep, &wait);
233		spin_unlock_irqrestore(&pool->lock, flags);
234		if (mutexp)
235			mutex_unlock(mutexp);
236		schedule();
237		if (mutexp)
238			mutex_lock(mutexp);
239		spin_lock_irqsave(&pool->lock, flags);
240		remove_wait_queue(&pool->output_sleep, &wait);
241		/* interrupted? */
242		if (signal_pending(current)) {
243			err = -ERESTARTSYS;
244			goto __error;
245		}
246	}
247	if (pool->closing) { /* closing.. */
248		err = -ENOMEM;
249		goto __error;
250	}
251
252	cell = pool->free;
253	if (cell) {
254		int used;
255		pool->free = cell->next;
256		atomic_inc(&pool->counter);
257		used = atomic_read(&pool->counter);
258		if (pool->max_used < used)
259			pool->max_used = used;
260		pool->event_alloc_success++;
261		/* clear cell pointers */
262		cell->next = NULL;
263		err = 0;
264	} else
265		pool->event_alloc_failures++;
266	*cellp = cell;
267
268__error:
269	spin_unlock_irqrestore(&pool->lock, flags);
270	return err;
271}
272
273
274/*
275 * duplicate the event to a cell.
276 * if the event has external data, the data is decomposed to additional
277 * cells.
278 */
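/*
 * The number of extra cells is ncells = DIV_ROUND_UP(extlen,
 * sizeof(struct snd_seq_event)): each chained cell carries up to one
 * event-sized chunk of payload in its 'event' field, and the head cell's
 * ext.ptr is rewritten to point at the chain (SNDRV_SEQ_EXT_CHAINED).
 */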
279int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
280		      struct snd_seq_event_cell **cellp, int nonblock,
281		      struct file *file, struct mutex *mutexp)
282{
283	int ncells, err;
284	unsigned int extlen;
285	struct snd_seq_event_cell *cell;
286
287	*cellp = NULL;
288
289	ncells = 0;
290	extlen = 0;
291	if (snd_seq_ev_is_variable(event)) {
292		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
293		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
294	}
295	if (ncells >= pool->total_elements)
296		return -ENOMEM;
297
298	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
299	if (err < 0)
300		return err;
301
302	/* copy the event */
303	cell->event = *event;
304
305	/* decompose */
306	if (snd_seq_ev_is_variable(event)) {
307		int len = extlen;
308		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
309		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
310		struct snd_seq_event_cell *src, *tmp, *tail;
311		char *buf;
312
313		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
314		cell->event.data.ext.ptr = NULL;
315
316		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
317		buf = (char *)event->data.ext.ptr;
318		tail = NULL;
319
320		while (ncells-- > 0) {
321			int size = sizeof(struct snd_seq_event);
322			if (len < size)
323				size = len;
324			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
325						 mutexp);
326			if (err < 0)
327				goto __error;
328			if (cell->event.data.ext.ptr == NULL)
329				cell->event.data.ext.ptr = tmp;
330			if (tail)
331				tail->next = tmp;
332			tail = tmp;
333			/* copy chunk */
334			if (is_chained && src) {
335				tmp->event = src->event;
336				src = src->next;
337			} else if (is_usrptr) {
338				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
339					err = -EFAULT;
340					goto __error;
341				}
342			} else {
343				memcpy(&tmp->event, buf, size);
344			}
345			buf += size;
346			len -= size;
347		}
348	}
349
350	*cellp = cell;
351	return 0;
352
353__error:
354	snd_seq_cell_free(cell);
355	return err;
356}
357  
358
359/* poll wait */
360int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
361			   poll_table *wait)
362{
363	poll_wait(file, &pool->output_sleep, wait);
364	return snd_seq_output_ok(pool);
365}
366
367
368/* allocate room for the specified number of events */
369int snd_seq_pool_init(struct snd_seq_pool *pool)
370{
371	int cell;
372	struct snd_seq_event_cell *cellptr;
373
374	if (snd_BUG_ON(!pool))
375		return -EINVAL;
376
377	cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
378				 GFP_KERNEL);
379	if (!cellptr)
380		return -ENOMEM;
381
382	/* add new cells to the free cell list */
383	spin_lock_irq(&pool->lock);
384	if (pool->ptr) {
385		spin_unlock_irq(&pool->lock);
386		kvfree(cellptr);
387		return 0;
388	}
389
390	pool->ptr = cellptr;
391	pool->free = NULL;
392
393	for (cell = 0; cell < pool->size; cell++) {
394		cellptr = pool->ptr + cell;
395		cellptr->pool = pool;
396		cellptr->next = pool->free;
397		pool->free = cellptr;
398	}
399	pool->room = (pool->size + 1) / 2;
400
401	/* init statistics */
402	pool->max_used = 0;
403	pool->total_elements = pool->size;
404	spin_unlock_irq(&pool->lock);
405	return 0;
406}
407
408/* refuse further insertion into the pool */
409void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
410{
411	unsigned long flags;
412
413	if (snd_BUG_ON(!pool))
414		return;
415	spin_lock_irqsave(&pool->lock, flags);
416	pool->closing = 1;
417	spin_unlock_irqrestore(&pool->lock, flags);
418}
419
420/* remove events */
421int snd_seq_pool_done(struct snd_seq_pool *pool)
422{
423	struct snd_seq_event_cell *ptr;
424
425	if (snd_BUG_ON(!pool))
426		return -EINVAL;
427
428	/* wait for all threads to stop using the pool */
429	if (waitqueue_active(&pool->output_sleep))
430		wake_up(&pool->output_sleep);
431
432	while (atomic_read(&pool->counter) > 0)
433		schedule_timeout_uninterruptible(1);
434	
435	/* release all resources */
436	spin_lock_irq(&pool->lock);
437	ptr = pool->ptr;
438	pool->ptr = NULL;
439	pool->free = NULL;
440	pool->total_elements = 0;
441	spin_unlock_irq(&pool->lock);
442
443	kvfree(ptr);
444
445	spin_lock_irq(&pool->lock);
446	pool->closing = 0;
447	spin_unlock_irq(&pool->lock);
448
449	return 0;
450}
451
452
453/* init new memory pool */
454struct snd_seq_pool *snd_seq_pool_new(int poolsize)
455{
456	struct snd_seq_pool *pool;
457
458	/* create pool block */
459	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
460	if (!pool)
461		return NULL;
462	spin_lock_init(&pool->lock);
463	pool->ptr = NULL;
464	pool->free = NULL;
465	pool->total_elements = 0;
466	atomic_set(&pool->counter, 0);
467	pool->closing = 0;
468	init_waitqueue_head(&pool->output_sleep);
469	
470	pool->size = poolsize;
471
472	/* init statistics */
473	pool->max_used = 0;
474	return pool;
475}
476
477/* remove memory pool */
478int snd_seq_pool_delete(struct snd_seq_pool **ppool)
479{
480	struct snd_seq_pool *pool = *ppool;
481
482	*ppool = NULL;
483	if (pool == NULL)
484		return 0;
485	snd_seq_pool_mark_closing(pool);
486	snd_seq_pool_done(pool);
487	kfree(pool);
488	return 0;
489}
490
491/* exported to seq_clientmgr.c */
492void snd_seq_info_pool(struct snd_info_buffer *buffer,
493		       struct snd_seq_pool *pool, char *space)
494{
495	if (pool == NULL)
496		return;
497	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
498	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
499	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
500	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
501	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
502}
v3.5.6
 
  1/*
  2 *  ALSA sequencer Memory Manager
  3 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
  4 *                        Jaroslav Kysela <perex@perex.cz>
  5 *                2000 by Takashi Iwai <tiwai@suse.de>
  6 *
  7 *   This program is free software; you can redistribute it and/or modify
  8 *   it under the terms of the GNU General Public License as published by
  9 *   the Free Software Foundation; either version 2 of the License, or
 10 *   (at your option) any later version.
 11 *
 12 *   This program is distributed in the hope that it will be useful,
 13 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 14 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 15 *   GNU General Public License for more details.
 16 *
 17 *   You should have received a copy of the GNU General Public License
 18 *   along with this program; if not, write to the Free Software
 19 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 20 *
 21 */
 22
 23#include <linux/init.h>
 24#include <linux/export.h>
 25#include <linux/slab.h>
 26#include <linux/vmalloc.h>
 27#include <sound/core.h>
 28
 29#include <sound/seq_kernel.h>
 30#include "seq_memory.h"
 31#include "seq_queue.h"
 32#include "seq_info.h"
 33#include "seq_lock.h"
 34
 35static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
 36{
 37	return pool->total_elements - atomic_read(&pool->counter);
 38}
 39
 40static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
 41{
 42	return snd_seq_pool_available(pool) >= pool->room;
 43}
 44
 45/*
 46 * Variable length event:
  47 * Events such as sysex use the variable-length type.
 48 * The external data may be stored in three different formats.
 49 * 1) kernel space
 50 *    This is the normal case.
 51 *      ext.data.len = length
 52 *      ext.data.ptr = buffer pointer
 53 * 2) user space
 54 *    When an event is generated via read(), the external data is
 55 *    kept in user space until expanded.
 56 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 57 *      ext.data.ptr = userspace pointer
 58 * 3) chained cells
 59 *    When the variable length event is enqueued (in prioq or fifo),
 60 *    the external data is decomposed to several cells.
 61 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
  62 *      ext.data.ptr = the additional cell head
 63 *         -> cell.next -> cell.next -> ..
 64 */
 65
 66/*
 67 * exported:
 68 * call dump function to expand external data.
 69 */
 70
 71static int get_var_len(const struct snd_seq_event *event)
 72{
 73	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
 74		return -EINVAL;
 75
 76	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
 77}
 78
 79int snd_seq_dump_var_event(const struct snd_seq_event *event,
 80			   snd_seq_dump_func_t func, void *private_data)
 81{
 82	int len, err;
 83	struct snd_seq_event_cell *cell;
 84
 85	if ((len = get_var_len(event)) <= 0)
 86		return len;
 87
 88	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
 89		char buf[32];
 90		char __user *curptr = (char __force __user *)event->data.ext.ptr;
 91		while (len > 0) {
 92			int size = sizeof(buf);
 93			if (len < size)
 94				size = len;
 95			if (copy_from_user(buf, curptr, size))
 96				return -EFAULT;
 97			err = func(private_data, buf, size);
 98			if (err < 0)
 99				return err;
100			curptr += size;
101			len -= size;
102		}
103		return 0;
104	} if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
105		return func(private_data, event->data.ext.ptr, len);
106	}
107
108	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
109	for (; len > 0 && cell; cell = cell->next) {
110		int size = sizeof(struct snd_seq_event);
111		if (len < size)
112			size = len;
113		err = func(private_data, &cell->event, size);
114		if (err < 0)
115			return err;
116		len -= size;
117	}
118	return 0;
119}
120
121EXPORT_SYMBOL(snd_seq_dump_var_event);
122
123
124/*
125 * exported:
126 * expand the variable length event to linear buffer space.
127 */
128
129static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
130{
131	memcpy(*bufptr, src, size);
132	*bufptr += size;
133	return 0;
134}
135
136static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
137{
138	if (copy_to_user(*bufptr, src, size))
139		return -EFAULT;
140	*bufptr += size;
141	return 0;
142}
143
144int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
145			     int in_kernel, int size_aligned)
146{
147	int len, newlen;
148	int err;
149
150	if ((len = get_var_len(event)) < 0)
151		return len;
152	newlen = len;
153	if (size_aligned > 0)
154		newlen = roundup(len, size_aligned);
155	if (count < newlen)
156		return -EAGAIN;
157
158	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
159		if (! in_kernel)
160			return -EINVAL;
161		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
162			return -EFAULT;
163		return newlen;
164	}
165	err = snd_seq_dump_var_event(event,
166				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
167				     (snd_seq_dump_func_t)seq_copy_in_user,
168				     &buf);
169	return err < 0 ? err : newlen;
170}
171
172EXPORT_SYMBOL(snd_seq_expand_var_event);
173
174/*
175 * release this cell, free extended data if available
176 */
177
178static inline void free_cell(struct snd_seq_pool *pool,
179			     struct snd_seq_event_cell *cell)
180{
181	cell->next = pool->free;
182	pool->free = cell;
183	atomic_dec(&pool->counter);
184}
185
186void snd_seq_cell_free(struct snd_seq_event_cell * cell)
187{
188	unsigned long flags;
189	struct snd_seq_pool *pool;
190
191	if (snd_BUG_ON(!cell))
192		return;
193	pool = cell->pool;
194	if (snd_BUG_ON(!pool))
195		return;
196
197	spin_lock_irqsave(&pool->lock, flags);
198	free_cell(pool, cell);
199	if (snd_seq_ev_is_variable(&cell->event)) {
200		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
201			struct snd_seq_event_cell *curp, *nextptr;
202			curp = cell->event.data.ext.ptr;
203			for (; curp; curp = nextptr) {
204				nextptr = curp->next;
205				curp->next = pool->free;
206				free_cell(pool, curp);
207			}
208		}
209	}
210	if (waitqueue_active(&pool->output_sleep)) {
211		/* has enough space now? */
212		if (snd_seq_output_ok(pool))
213			wake_up(&pool->output_sleep);
214	}
215	spin_unlock_irqrestore(&pool->lock, flags);
216}
217
218
219/*
220 * allocate an event cell.
221 */
222static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
223			      struct snd_seq_event_cell **cellp,
224			      int nonblock, struct file *file)
225{
226	struct snd_seq_event_cell *cell;
227	unsigned long flags;
228	int err = -EAGAIN;
229	wait_queue_t wait;
230
231	if (pool == NULL)
232		return -EINVAL;
233
234	*cellp = NULL;
235
236	init_waitqueue_entry(&wait, current);
237	spin_lock_irqsave(&pool->lock, flags);
238	if (pool->ptr == NULL) {	/* not initialized */
239		snd_printd("seq: pool is not initialized\n");
240		err = -EINVAL;
241		goto __error;
242	}
243	while (pool->free == NULL && ! nonblock && ! pool->closing) {
244
245		set_current_state(TASK_INTERRUPTIBLE);
246		add_wait_queue(&pool->output_sleep, &wait);
247		spin_unlock_irq(&pool->lock);
248		schedule();
249		spin_lock_irq(&pool->lock);
250		remove_wait_queue(&pool->output_sleep, &wait);
251		/* interrupted? */
252		if (signal_pending(current)) {
253			err = -ERESTARTSYS;
254			goto __error;
255		}
256	}
257	if (pool->closing) { /* closing.. */
258		err = -ENOMEM;
259		goto __error;
260	}
261
262	cell = pool->free;
263	if (cell) {
264		int used;
265		pool->free = cell->next;
266		atomic_inc(&pool->counter);
267		used = atomic_read(&pool->counter);
268		if (pool->max_used < used)
269			pool->max_used = used;
270		pool->event_alloc_success++;
271		/* clear cell pointers */
272		cell->next = NULL;
273		err = 0;
274	} else
275		pool->event_alloc_failures++;
276	*cellp = cell;
277
278__error:
279	spin_unlock_irqrestore(&pool->lock, flags);
280	return err;
281}
282
283
284/*
285 * duplicate the event to a cell.
286 * if the event has external data, the data is decomposed to additional
287 * cells.
288 */
289int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
290		      struct snd_seq_event_cell **cellp, int nonblock,
291		      struct file *file)
292{
293	int ncells, err;
294	unsigned int extlen;
295	struct snd_seq_event_cell *cell;
296
297	*cellp = NULL;
298
299	ncells = 0;
300	extlen = 0;
301	if (snd_seq_ev_is_variable(event)) {
302		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
303		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
304	}
305	if (ncells >= pool->total_elements)
306		return -ENOMEM;
307
308	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
309	if (err < 0)
310		return err;
311
312	/* copy the event */
313	cell->event = *event;
314
315	/* decompose */
316	if (snd_seq_ev_is_variable(event)) {
317		int len = extlen;
318		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
319		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
320		struct snd_seq_event_cell *src, *tmp, *tail;
321		char *buf;
322
323		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
324		cell->event.data.ext.ptr = NULL;
325
326		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
327		buf = (char *)event->data.ext.ptr;
328		tail = NULL;
329
330		while (ncells-- > 0) {
331			int size = sizeof(struct snd_seq_event);
332			if (len < size)
333				size = len;
334			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
335			if (err < 0)
336				goto __error;
337			if (cell->event.data.ext.ptr == NULL)
338				cell->event.data.ext.ptr = tmp;
339			if (tail)
340				tail->next = tmp;
341			tail = tmp;
342			/* copy chunk */
343			if (is_chained && src) {
344				tmp->event = src->event;
345				src = src->next;
346			} else if (is_usrptr) {
347				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
348					err = -EFAULT;
349					goto __error;
350				}
351			} else {
352				memcpy(&tmp->event, buf, size);
353			}
354			buf += size;
355			len -= size;
356		}
357	}
358
359	*cellp = cell;
360	return 0;
361
362__error:
363	snd_seq_cell_free(cell);
364	return err;
365}
366  
367
368/* poll wait */
369int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
370			   poll_table *wait)
371{
372	poll_wait(file, &pool->output_sleep, wait);
373	return snd_seq_output_ok(pool);
374}
375
376
377/* allocate room for the specified number of events */
378int snd_seq_pool_init(struct snd_seq_pool *pool)
379{
380	int cell;
381	struct snd_seq_event_cell *cellptr;
382	unsigned long flags;
383
384	if (snd_BUG_ON(!pool))
385		return -EINVAL;
386	if (pool->ptr)			/* should be atomic? */
387		return 0;
388
389	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
390	if (pool->ptr == NULL) {
391		snd_printd("seq: malloc for sequencer events failed\n");
392		return -ENOMEM;
393	}
394
395	/* add new cells to the free cell list */
396	spin_lock_irqsave(&pool->lock, flags);
397	pool->free = NULL;
398
399	for (cell = 0; cell < pool->size; cell++) {
400		cellptr = pool->ptr + cell;
401		cellptr->pool = pool;
402		cellptr->next = pool->free;
403		pool->free = cellptr;
404	}
405	pool->room = (pool->size + 1) / 2;
406
407	/* init statistics */
408	pool->max_used = 0;
409	pool->total_elements = pool->size;
410	spin_unlock_irqrestore(&pool->lock, flags);
411	return 0;
412}
413
414/* remove events */
415int snd_seq_pool_done(struct snd_seq_pool *pool)
416{
417	unsigned long flags;
418	struct snd_seq_event_cell *ptr;
419	int max_count = 5 * HZ;
420
421	if (snd_BUG_ON(!pool))
422		return -EINVAL;
423
424	/* mark closing and wait for all threads to stop using the pool */
425	spin_lock_irqsave(&pool->lock, flags);
426	pool->closing = 1;
427	spin_unlock_irqrestore(&pool->lock, flags);
428
429	if (waitqueue_active(&pool->output_sleep))
430		wake_up(&pool->output_sleep);
431
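	/*
	 * Drain the pool: sleep one jiffy at a time until every cell has been
	 * returned, giving up after max_count (5 * HZ) iterations, i.e. roughly
	 * five seconds, with a warning if cells are still outstanding.
	 */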
432	while (atomic_read(&pool->counter) > 0) {
433		if (max_count == 0) {
434			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
435			break;
436		}
437		schedule_timeout_uninterruptible(1);
438		max_count--;
439	}
440	
441	/* release all resources */
442	spin_lock_irqsave(&pool->lock, flags);
443	ptr = pool->ptr;
444	pool->ptr = NULL;
445	pool->free = NULL;
446	pool->total_elements = 0;
447	spin_unlock_irqrestore(&pool->lock, flags);
448
449	vfree(ptr);
450
451	spin_lock_irqsave(&pool->lock, flags);
452	pool->closing = 0;
453	spin_unlock_irqrestore(&pool->lock, flags);
454
455	return 0;
456}
457
458
459/* init new memory pool */
460struct snd_seq_pool *snd_seq_pool_new(int poolsize)
461{
462	struct snd_seq_pool *pool;
463
464	/* create pool block */
465	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
466	if (pool == NULL) {
467		snd_printd("seq: malloc failed for pool\n");
468		return NULL;
469	}
470	spin_lock_init(&pool->lock);
471	pool->ptr = NULL;
472	pool->free = NULL;
473	pool->total_elements = 0;
474	atomic_set(&pool->counter, 0);
475	pool->closing = 0;
476	init_waitqueue_head(&pool->output_sleep);
477	
478	pool->size = poolsize;
479
480	/* init statistics */
481	pool->max_used = 0;
482	return pool;
483}
484
485/* remove memory pool */
486int snd_seq_pool_delete(struct snd_seq_pool **ppool)
487{
488	struct snd_seq_pool *pool = *ppool;
489
490	*ppool = NULL;
491	if (pool == NULL)
492		return 0;
493	snd_seq_pool_done(pool);
494	kfree(pool);
495	return 0;
496}
497
498/* initialize sequencer memory */
499int __init snd_sequencer_memory_init(void)
500{
501	return 0;
502}
503
504/* release sequencer memory */
505void __exit snd_sequencer_memory_done(void)
506{
507}
508
509
510/* exported to seq_clientmgr.c */
511void snd_seq_info_pool(struct snd_info_buffer *buffer,
512		       struct snd_seq_pool *pool, char *space)
513{
514	if (pool == NULL)
515		return;
516	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
517	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
518	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
519	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
520	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
521}