ALSA sequencer FIFO: sound/core/seq/seq_fifo.c, v5.4 vs. v6.9.4

v5.4
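This is the ALSA sequencer's event FIFO, shown first as of v5.4. Incoming events are duplicated into cells drawn from a snd_seq_pool, chained into a singly linked head/tail list under the f->lock spinlock, and handed to readers, which may sleep on the f->input_sleep wait queue until a writer enqueues something. f->use_lock is a use-count that lets teardown and resize wait until all in-flight users are finished.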
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irq(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
	spin_unlock_irq(&f->lock);
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	cell->next = NULL;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */

}

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	if ((cell = f->head) != NULL) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo and copy on user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}


void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}

/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irq(&f->lock);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irq(&f->lock);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	unsigned long flags;
	int cells;

	if (!f)
		return 0;

	snd_use_lock_use(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	cells = snd_seq_unused_cells(f->pool);
	spin_unlock_irqrestore(&f->lock, flags);
	snd_use_lock_free(&f->use_lock);
	return cells;
}
v6.9.4
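The v6.9.4 revision below is behaviorally the same FIFO. The visible change is mechanical: the open-coded spin_lock_irq()/spin_unlock_irq() and spin_lock_irqsave()/spin_unlock_irqrestore() pairs in snd_seq_fifo_clear(), snd_seq_fifo_event_in(), snd_seq_fifo_cell_putback(), snd_seq_fifo_resize() and snd_seq_fifo_unused_cells() are converted to the guard()/scoped_guard() auto-release helpers from <linux/cleanup.h>, and fifo_cell_out() no longer assigns inside its if condition. A minimal sketch of the mapping, using snd_seq_fifo_clear() as the example (both forms are taken verbatim from the listings):

	/* v5.4: the unlock must be written out on every exit path */
	spin_lock_irq(&f->lock);
	while ((cell = fifo_cell_out(f)) != NULL)
		snd_seq_cell_free(cell);
	spin_unlock_irq(&f->lock);

	/* v6.9.4: guard() acquires the lock and releases it automatically
	 * when the enclosing scope ends, whatever the exit path */
	guard(spinlock_irq)(&f->lock);
	while ((cell = fifo_cell_out(f)) != NULL)
		snd_seq_cell_free(cell);

scoped_guard(spinlock_irqsave, &f->lock) { ... } is the same idea when the critical section has to end before the function does, as in snd_seq_fifo_event_in(), where the wakeup is deliberately issued after the lock is dropped. Note that snd_seq_fifo_cell_out() keeps its manual locking in both versions: its sleep/retry loop releases and reacquires the lock mid-function, which does not fit a scope-based guard.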
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}

void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release resources...*/
	/*....................*/

	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	guard(spinlock_irq)(&f->lock);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL) {
		snd_seq_cell_free(cell);
	}
}


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append new cells to fifo */
	scoped_guard(spinlock_irqsave, &f->lock) {
		if (f->tail != NULL)
			f->tail->next = cell;
		f->tail = cell;
		if (f->head == NULL)
			f->head = cell;
		cell->next = NULL;
		f->cells++;
	}

	/* wakeup client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */

}

/* dequeue cell from fifo */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue cell from fifo and copy on user space */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		spin_unlock_irqrestore(&f->lock, flags);
		schedule();
		spin_lock_irqsave(&f->lock, flags);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}


void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	if (cell) {
		guard(spinlock_irqsave)(&f->lock);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
	}
}


/* polling; return non-zero if queue is available */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}

/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	scoped_guard(spinlock_irq, &f->lock) {
		/* remember old pool */
		oldpool = f->pool;
		oldhead = f->head;
		/* exchange pools */
		f->pool = newpool;
		f->head = NULL;
		f->tail = NULL;
		f->cells = 0;
		/* NOTE: overflow flag is not cleared */
	}

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}

/* get the number of unused cells safely */
int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
{
	int cells;

	if (!f)
		return 0;

	snd_use_lock_use(&f->use_lock);
	scoped_guard(spinlock_irqsave, &f->lock)
		cells = snd_seq_unused_cells(f->pool);
	snd_use_lock_free(&f->use_lock);
	return cells;
}
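For orientation, here is a hypothetical caller-side sketch of the dequeue path, distilled from the return conventions visible above. deliver_to_user() and nonblock are placeholders rather than kernel symbols; the real consumer of this API is the sequencer client read path, which is not shown here.

	/* hypothetical sketch, not kernel code */
	struct snd_seq_event_cell *cell;
	int err;

	err = snd_seq_fifo_cell_out(f, &cell, nonblock);
	if (err < 0)
		return err;	/* -EAGAIN: empty and nonblock; -ERESTARTSYS: signal */
	if (deliver_to_user(cell) < 0) {
		/* consumer could not take it after all: requeue at the head */
		snd_seq_fifo_cell_putback(f, cell);
		return -EFAULT;
	}
	snd_seq_cell_free(cell);	/* return the cell to the pool */
	return 0;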