1/*
2 * compress_core.c - compress offload core
3 *
4 * Copyright (C) 2011 Intel Corporation
5 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
6 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 */
25#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
26#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
27
28#include <linux/file.h>
29#include <linux/fs.h>
30#include <linux/list.h>
31#include <linux/math64.h>
32#include <linux/mm.h>
33#include <linux/mutex.h>
34#include <linux/poll.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/types.h>
38#include <linux/uio.h>
39#include <linux/uaccess.h>
40#include <linux/module.h>
41#include <sound/core.h>
42#include <sound/initval.h>
43#include <sound/compress_params.h>
44#include <sound/compress_offload.h>
45#include <sound/compress_driver.h>
46
47/* TODO:
 48 * - add substream support for multiple devices in case
49 * SND_DYNAMIC_MINORS is not used
50 * - Multiple node representation
51 * driver should be able to register multiple nodes
52 */
53
54static DEFINE_MUTEX(device_mutex);
55
56struct snd_compr_file {
57 unsigned long caps;
58 struct snd_compr_stream stream;
59};
60
61/*
62 * a note on stream states used:
 63 * we use following states in the compressed core
64 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
65 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
66 * calling SNDRV_COMPRESS_SET_PARAMS. running streams will come to this
67 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
68 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
69 * decoding/encoding and rendering/capturing data.
70 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
71 * by calling SNDRV_COMPRESS_DRAIN.
72 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
73 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
74 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
75 */
76static int snd_compr_open(struct inode *inode, struct file *f)
77{
78 struct snd_compr *compr;
79 struct snd_compr_file *data;
80 struct snd_compr_runtime *runtime;
81 enum snd_compr_direction dirn;
82 int maj = imajor(inode);
83 int ret;
84
85 if ((f->f_flags & O_ACCMODE) == O_WRONLY)
86 dirn = SND_COMPRESS_PLAYBACK;
87 else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
88 dirn = SND_COMPRESS_CAPTURE;
89 else
90 return -EINVAL;
91
92 if (maj == snd_major)
93 compr = snd_lookup_minor_data(iminor(inode),
94 SNDRV_DEVICE_TYPE_COMPRESS);
95 else
96 return -EBADFD;
97
98 if (compr == NULL) {
99 pr_err("no device data!!!\n");
100 return -ENODEV;
101 }
102
103 if (dirn != compr->direction) {
104 pr_err("this device doesn't support this direction\n");
105 snd_card_unref(compr->card);
106 return -EINVAL;
107 }
108
109 data = kzalloc(sizeof(*data), GFP_KERNEL);
110 if (!data) {
111 snd_card_unref(compr->card);
112 return -ENOMEM;
113 }
114 data->stream.ops = compr->ops;
115 data->stream.direction = dirn;
116 data->stream.private_data = compr->private_data;
117 data->stream.device = compr;
118 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
119 if (!runtime) {
120 kfree(data);
121 snd_card_unref(compr->card);
122 return -ENOMEM;
123 }
124 runtime->state = SNDRV_PCM_STATE_OPEN;
125 init_waitqueue_head(&runtime->sleep);
126 data->stream.runtime = runtime;
127 f->private_data = (void *)data;
128 mutex_lock(&compr->lock);
129 ret = compr->ops->open(&data->stream);
130 mutex_unlock(&compr->lock);
131 if (ret) {
132 kfree(runtime);
133 kfree(data);
134 }
135 snd_card_unref(compr->card);
136 return ret;
137}
138
139static int snd_compr_free(struct inode *inode, struct file *f)
140{
141 struct snd_compr_file *data = f->private_data;
142 struct snd_compr_runtime *runtime = data->stream.runtime;
143
144 switch (runtime->state) {
145 case SNDRV_PCM_STATE_RUNNING:
146 case SNDRV_PCM_STATE_DRAINING:
147 case SNDRV_PCM_STATE_PAUSED:
148 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
149 break;
150 default:
151 break;
152 }
153
154 data->stream.ops->free(&data->stream);
155 kfree(data->stream.runtime->buffer);
156 kfree(data->stream.runtime);
157 kfree(data);
158 return 0;
159}
160
161static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
162 struct snd_compr_tstamp *tstamp)
163{
164 if (!stream->ops->pointer)
165 return -ENOTSUPP;
166 stream->ops->pointer(stream, tstamp);
167 pr_debug("dsp consumed till %d total %d bytes\n",
168 tstamp->byte_offset, tstamp->copied_total);
169 if (stream->direction == SND_COMPRESS_PLAYBACK)
170 stream->runtime->total_bytes_transferred = tstamp->copied_total;
171 else
172 stream->runtime->total_bytes_available = tstamp->copied_total;
173 return 0;
174}
175
176static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
177 struct snd_compr_avail *avail)
178{
179 memset(avail, 0, sizeof(*avail));
180 snd_compr_update_tstamp(stream, &avail->tstamp);
181 /* Still need to return avail even if tstamp can't be filled in */
182
183 if (stream->runtime->total_bytes_available == 0 &&
184 stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
185 stream->direction == SND_COMPRESS_PLAYBACK) {
186 pr_debug("detected init and someone forgot to do a write\n");
187 return stream->runtime->buffer_size;
188 }
189 pr_debug("app wrote %lld, DSP consumed %lld\n",
190 stream->runtime->total_bytes_available,
191 stream->runtime->total_bytes_transferred);
192 if (stream->runtime->total_bytes_available ==
193 stream->runtime->total_bytes_transferred) {
194 if (stream->direction == SND_COMPRESS_PLAYBACK) {
195 pr_debug("both pointers are same, returning full avail\n");
196 return stream->runtime->buffer_size;
197 } else {
198 pr_debug("both pointers are same, returning no avail\n");
199 return 0;
200 }
201 }
202
203 avail->avail = stream->runtime->total_bytes_available -
204 stream->runtime->total_bytes_transferred;
205 if (stream->direction == SND_COMPRESS_PLAYBACK)
206 avail->avail = stream->runtime->buffer_size - avail->avail;
207
208 pr_debug("ret avail as %lld\n", avail->avail);
209 return avail->avail;
210}
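/*
 * Worked example for snd_compr_calc_avail() above (all numbers are assumed,
 * for illustration only): with buffer_size = 4 fragments * 1024 bytes = 4096,
 * an app that has written 6144 bytes while the DSP consumed 4096 still has
 * 6144 - 4096 = 2048 bytes queued, so playback avail is
 * 4096 - 2048 = 2048 bytes of free space. For capture, avail is simply the
 * captured-but-unread byte count.
 */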
211
212static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
213{
214 struct snd_compr_avail avail;
215
216 return snd_compr_calc_avail(stream, &avail);
217}
218
219static int
220snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
221{
222 struct snd_compr_avail ioctl_avail;
223 size_t avail;
224
225 avail = snd_compr_calc_avail(stream, &ioctl_avail);
226 ioctl_avail.avail = avail;
227
228 if (copy_to_user((__u64 __user *)arg,
229 &ioctl_avail, sizeof(ioctl_avail)))
230 return -EFAULT;
231 return 0;
232}
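/*
 * Illustrative caller-side sketch (user-space code, not part of this file;
 * error handling omitted): the application passes a struct snd_compr_avail
 * and reads back the free byte count filled in above:
 *
 *	struct snd_compr_avail a;
 *	if (ioctl(fd, SNDRV_COMPRESS_AVAIL, &a) == 0)
 *		printf("%llu bytes avail\n", (unsigned long long)a.avail);
 */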
233
234static int snd_compr_write_data(struct snd_compr_stream *stream,
235 const char __user *buf, size_t count)
236{
237 void *dstn;
238 size_t copy;
239 struct snd_compr_runtime *runtime = stream->runtime;
240 /* 64-bit Modulus */
241 u64 app_pointer = div64_u64(runtime->total_bytes_available,
242 runtime->buffer_size);
243 app_pointer = runtime->total_bytes_available -
244 (app_pointer * runtime->buffer_size);
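	/*
	 * Worked example (assumed values): with buffer_size = 4096 and
	 * total_bytes_available = 10000, div64_u64() yields 2, so
	 * app_pointer = 10000 - 2 * 4096 = 1808, i.e. the write offset
	 * wraps inside the ring buffer without a 64-bit '%' operation.
	 */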
245
246 dstn = runtime->buffer + app_pointer;
247 pr_debug("copying %ld at %lld\n",
248 (unsigned long)count, app_pointer);
249 if (count < runtime->buffer_size - app_pointer) {
250 if (copy_from_user(dstn, buf, count))
251 return -EFAULT;
252 } else {
253 copy = runtime->buffer_size - app_pointer;
254 if (copy_from_user(dstn, buf, copy))
255 return -EFAULT;
256 if (copy_from_user(runtime->buffer, buf + copy, count - copy))
257 return -EFAULT;
258 }
259 /* if DSP cares, let it know data has been written */
260 if (stream->ops->ack)
261 stream->ops->ack(stream, count);
262 return count;
263}
264
265static ssize_t snd_compr_write(struct file *f, const char __user *buf,
266 size_t count, loff_t *offset)
267{
268 struct snd_compr_file *data = f->private_data;
269 struct snd_compr_stream *stream;
270 size_t avail;
271 int retval;
272
273 if (snd_BUG_ON(!data))
274 return -EFAULT;
275
276 stream = &data->stream;
277 mutex_lock(&stream->device->lock);
 278 /* write is allowed when stream is running or has been setup */
279 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
280 stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
281 mutex_unlock(&stream->device->lock);
282 return -EBADFD;
283 }
284
285 avail = snd_compr_get_avail(stream);
286 pr_debug("avail returned %ld\n", (unsigned long)avail);
287 /* calculate how much we can write to buffer */
288 if (avail > count)
289 avail = count;
290
291 if (stream->ops->copy) {
292 char __user* cbuf = (char __user*)buf;
293 retval = stream->ops->copy(stream, cbuf, avail);
294 } else {
295 retval = snd_compr_write_data(stream, buf, avail);
296 }
297 if (retval > 0)
298 stream->runtime->total_bytes_available += retval;
299
 300 /* while initiating the stream, write should be called before the START
 301 * trigger, so if we are still in SETUP move the state to PREPARED */
302 if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
303 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
304 pr_debug("stream prepared, Houston we are good to go\n");
305 }
306
307 mutex_unlock(&stream->device->lock);
308 return retval;
309}
310
311
312static ssize_t snd_compr_read(struct file *f, char __user *buf,
313 size_t count, loff_t *offset)
314{
315 struct snd_compr_file *data = f->private_data;
316 struct snd_compr_stream *stream;
317 size_t avail;
318 int retval;
319
320 if (snd_BUG_ON(!data))
321 return -EFAULT;
322
323 stream = &data->stream;
324 mutex_lock(&stream->device->lock);
325
 326 /* read is allowed when stream is running, paused, draining or setup
 327 * (yes, setup is the state we transition to after stop, so if the user
 328 * wants to read data after stop we allow that)
329 */
330 switch (stream->runtime->state) {
331 case SNDRV_PCM_STATE_OPEN:
332 case SNDRV_PCM_STATE_PREPARED:
333 case SNDRV_PCM_STATE_XRUN:
334 case SNDRV_PCM_STATE_SUSPENDED:
335 case SNDRV_PCM_STATE_DISCONNECTED:
336 retval = -EBADFD;
337 goto out;
338 }
339
340 avail = snd_compr_get_avail(stream);
341 pr_debug("avail returned %ld\n", (unsigned long)avail);
342 /* calculate how much we can read from buffer */
343 if (avail > count)
344 avail = count;
345
346 if (stream->ops->copy) {
347 retval = stream->ops->copy(stream, buf, avail);
348 } else {
349 retval = -ENXIO;
350 goto out;
351 }
352 if (retval > 0)
353 stream->runtime->total_bytes_transferred += retval;
354
355out:
356 mutex_unlock(&stream->device->lock);
357 return retval;
358}
359
360static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
361{
362 return -ENXIO;
363}
364
365static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
366{
367 if (stream->direction == SND_COMPRESS_PLAYBACK)
368 return POLLOUT | POLLWRNORM;
369 else
370 return POLLIN | POLLRDNORM;
371}
372
373static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
374{
375 struct snd_compr_file *data = f->private_data;
376 struct snd_compr_stream *stream;
377 size_t avail;
378 int retval = 0;
379
380 if (snd_BUG_ON(!data))
381 return -EFAULT;
382 stream = &data->stream;
383 if (snd_BUG_ON(!stream))
384 return -EFAULT;
385
386 mutex_lock(&stream->device->lock);
387 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
388 retval = -EBADFD;
389 goto out;
390 }
391 poll_wait(f, &stream->runtime->sleep, wait);
392
393 avail = snd_compr_get_avail(stream);
394 pr_debug("avail is %ld\n", (unsigned long)avail);
395 /* check if we have at least one fragment to fill */
396 switch (stream->runtime->state) {
397 case SNDRV_PCM_STATE_DRAINING:
 398 /* stream has been woken up after drain is complete,
 399 * so set the stream state back to SETUP (stopped)
400 */
401 retval = snd_compr_get_poll(stream);
402 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
403 break;
404 case SNDRV_PCM_STATE_RUNNING:
405 case SNDRV_PCM_STATE_PREPARED:
406 case SNDRV_PCM_STATE_PAUSED:
407 if (avail >= stream->runtime->fragment_size)
408 retval = snd_compr_get_poll(stream);
409 break;
410 default:
411 if (stream->direction == SND_COMPRESS_PLAYBACK)
412 retval = POLLOUT | POLLWRNORM | POLLERR;
413 else
414 retval = POLLIN | POLLRDNORM | POLLERR;
415 break;
416 }
417out:
418 mutex_unlock(&stream->device->lock);
419 return retval;
420}
421
422static int
423snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
424{
425 int retval;
426 struct snd_compr_caps caps;
427
428 if (!stream->ops->get_caps)
429 return -ENXIO;
430
431 memset(&caps, 0, sizeof(caps));
432 retval = stream->ops->get_caps(stream, &caps);
433 if (retval)
434 goto out;
435 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
436 retval = -EFAULT;
437out:
438 return retval;
439}
440
441static int
442snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
443{
444 int retval;
445 struct snd_compr_codec_caps *caps;
446
447 if (!stream->ops->get_codec_caps)
448 return -ENXIO;
449
450 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
451 if (!caps)
452 return -ENOMEM;
453
454 retval = stream->ops->get_codec_caps(stream, caps);
455 if (retval)
456 goto out;
457 if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
458 retval = -EFAULT;
459
460out:
461 kfree(caps);
462 return retval;
463}
464
465/* revisit this with snd_pcm_preallocate_xxx */
466static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
467 struct snd_compr_params *params)
468{
469 unsigned int buffer_size;
470 void *buffer;
471
472 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
473 if (stream->ops->copy) {
474 buffer = NULL;
475 /* if copy is defined the driver will be required to copy
476 * the data from core
477 */
478 } else {
479 buffer = kmalloc(buffer_size, GFP_KERNEL);
480 if (!buffer)
481 return -ENOMEM;
482 }
483 stream->runtime->fragment_size = params->buffer.fragment_size;
484 stream->runtime->fragments = params->buffer.fragments;
485 stream->runtime->buffer = buffer;
486 stream->runtime->buffer_size = buffer_size;
487 return 0;
488}
489
490static int snd_compress_check_input(struct snd_compr_params *params)
491{
 492 /* first let's check the buffer parameters */
493 if (params->buffer.fragment_size == 0 ||
494 params->buffer.fragments > SIZE_MAX / params->buffer.fragment_size)
495 return -EINVAL;
496
497 /* now codec parameters */
498 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
499 return -EINVAL;
500
501 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
502 return -EINVAL;
503
504 return 0;
505}
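/*
 * Example of parameters that pass the checks above (a sketch; the concrete
 * values are assumptions picked for illustration, not requirements):
 *
 *	struct snd_compr_params params = {
 *		.buffer = { .fragment_size = 4096, .fragments = 4 },
 *		.codec = { .id = SND_AUDIOCODEC_MP3, .ch_in = 2, .ch_out = 2,
 *			   .sample_rate = 44100 },
 *	};
 */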
506
507static int
508snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
509{
510 struct snd_compr_params *params;
511 int retval;
512
513 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
514 /*
515 * we should allow parameter change only when stream has been
516 * opened not in other cases
517 */
518 params = kmalloc(sizeof(*params), GFP_KERNEL);
519 if (!params)
520 return -ENOMEM;
521 if (copy_from_user(params, (void __user *)arg, sizeof(*params))) {
522 retval = -EFAULT;
523 goto out;
524 }
525
526 retval = snd_compress_check_input(params);
527 if (retval)
528 goto out;
529
530 retval = snd_compr_allocate_buffer(stream, params);
531 if (retval) {
532 retval = -ENOMEM;
533 goto out;
534 }
535
536 retval = stream->ops->set_params(stream, params);
537 if (retval)
538 goto out;
539
540 stream->metadata_set = false;
541 stream->next_track = false;
542
543 if (stream->direction == SND_COMPRESS_PLAYBACK)
544 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
545 else
546 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
547 } else {
548 return -EPERM;
549 }
550out:
551 kfree(params);
552 return retval;
553}
554
555static int
556snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
557{
558 struct snd_codec *params;
559 int retval;
560
561 if (!stream->ops->get_params)
562 return -EBADFD;
563
564 params = kzalloc(sizeof(*params), GFP_KERNEL);
565 if (!params)
566 return -ENOMEM;
567 retval = stream->ops->get_params(stream, params);
568 if (retval)
569 goto out;
570 if (copy_to_user((char __user *)arg, params, sizeof(*params)))
571 retval = -EFAULT;
572
573out:
574 kfree(params);
575 return retval;
576}
577
578static int
579snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
580{
581 struct snd_compr_metadata metadata;
582 int retval;
583
584 if (!stream->ops->get_metadata)
585 return -ENXIO;
586
587 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
588 return -EFAULT;
589
590 retval = stream->ops->get_metadata(stream, &metadata);
591 if (retval != 0)
592 return retval;
593
594 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
595 return -EFAULT;
596
597 return 0;
598}
599
600static int
601snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
602{
603 struct snd_compr_metadata metadata;
604 int retval;
605
606 if (!stream->ops->set_metadata)
607 return -ENXIO;
608 /*
609 * we should allow parameter change only when stream has been
610 * opened not in other cases
611 */
612 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
613 return -EFAULT;
614
615 retval = stream->ops->set_metadata(stream, &metadata);
616 stream->metadata_set = true;
617
618 return retval;
619}
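/*
 * Illustrative use of the metadata ioctls (user-space sketch; the padding
 * value is an assumption): for gapless playback the encoder delay/padding of
 * the current track is typically set before signalling the next track:
 *
 *	struct snd_compr_metadata m = {
 *		.key = SNDRV_COMPRESS_ENCODER_PADDING,
 *		.value = { 1024 },
 *	};
 *	ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &m);
 */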
620
621static inline int
622snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
623{
624 struct snd_compr_tstamp tstamp = {0};
625 int ret;
626
627 ret = snd_compr_update_tstamp(stream, &tstamp);
628 if (ret == 0)
629 ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
630 &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
631 return ret;
632}
633
634static int snd_compr_pause(struct snd_compr_stream *stream)
635{
636 int retval;
637
638 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
639 return -EPERM;
640 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
641 if (!retval)
642 stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
643 return retval;
644}
645
646static int snd_compr_resume(struct snd_compr_stream *stream)
647{
648 int retval;
649
650 if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
651 return -EPERM;
652 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
653 if (!retval)
654 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
655 return retval;
656}
657
658static int snd_compr_start(struct snd_compr_stream *stream)
659{
660 int retval;
661
662 if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
663 return -EPERM;
664 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
665 if (!retval)
666 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
667 return retval;
668}
669
670static int snd_compr_stop(struct snd_compr_stream *stream)
671{
672 int retval;
673
674 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
675 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
676 return -EPERM;
677 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
678 if (!retval) {
679 snd_compr_drain_notify(stream);
680 stream->runtime->total_bytes_available = 0;
681 stream->runtime->total_bytes_transferred = 0;
682 }
683 return retval;
684}
685
686static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
687{
688 int ret;
689
690 /*
691 * We are called with lock held. So drop the lock while we wait for
 692 * drain complete notification from the driver
693 *
694 * It is expected that driver will notify the drain completion and then
695 * stream will be moved to SETUP state, even if draining resulted in an
696 * error. We can trigger next track after this.
697 */
698 stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
699 mutex_unlock(&stream->device->lock);
700
 701 /* we wait for drain to complete here; the wait can return when an
 702 * interruption occurred, or when it returned an error or success.
703 * For the first two cases we don't do anything different here and
704 * return after waking up
705 */
706
707 ret = wait_event_interruptible(stream->runtime->sleep,
708 (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
709 if (ret == -ERESTARTSYS)
 710 pr_debug("wait aborted by a signal\n");
711 else if (ret)
712 pr_debug("wait for drain failed with %d\n", ret);
713
714
715 wake_up(&stream->runtime->sleep);
716 mutex_lock(&stream->device->lock);
717
718 return ret;
719}
720
721static int snd_compr_drain(struct snd_compr_stream *stream)
722{
723 int retval;
724
725 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
726 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
727 return -EPERM;
728
729 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
730 if (retval) {
731 pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
732 wake_up(&stream->runtime->sleep);
733 return retval;
734 }
735
736 return snd_compress_wait_for_drain(stream);
737}
738
739static int snd_compr_next_track(struct snd_compr_stream *stream)
740{
741 int retval;
742
743 /* only a running stream can transition to next track */
744 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
745 return -EPERM;
746
 747 /* you can signal next track if this is intended to be a gapless stream
748 * and current track metadata is set
749 */
750 if (stream->metadata_set == false)
751 return -EPERM;
752
753 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
754 if (retval != 0)
755 return retval;
756 stream->metadata_set = false;
757 stream->next_track = true;
758 return 0;
759}
760
761static int snd_compr_partial_drain(struct snd_compr_stream *stream)
762{
763 int retval;
764 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
765 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
766 return -EPERM;
767 /* stream can be drained only when next track has been signalled */
768 if (stream->next_track == false)
769 return -EPERM;
770
771 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
772 if (retval) {
773 pr_debug("Partial drain returned failure\n");
774 wake_up(&stream->runtime->sleep);
775 return retval;
776 }
777
778 stream->next_track = false;
779 return snd_compress_wait_for_drain(stream);
780}
781
782static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
783{
784 struct snd_compr_file *data = f->private_data;
785 struct snd_compr_stream *stream;
786 int retval = -ENOTTY;
787
788 if (snd_BUG_ON(!data))
789 return -EFAULT;
790 stream = &data->stream;
791 if (snd_BUG_ON(!stream))
792 return -EFAULT;
793 mutex_lock(&stream->device->lock);
794 switch (_IOC_NR(cmd)) {
795 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
796 retval = put_user(SNDRV_COMPRESS_VERSION,
797 (int __user *)arg) ? -EFAULT : 0;
798 break;
799 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
800 retval = snd_compr_get_caps(stream, arg);
801 break;
802 case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
803 retval = snd_compr_get_codec_caps(stream, arg);
804 break;
805 case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
806 retval = snd_compr_set_params(stream, arg);
807 break;
808 case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
809 retval = snd_compr_get_params(stream, arg);
810 break;
811 case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
812 retval = snd_compr_set_metadata(stream, arg);
813 break;
814 case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
815 retval = snd_compr_get_metadata(stream, arg);
816 break;
817 case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
818 retval = snd_compr_tstamp(stream, arg);
819 break;
820 case _IOC_NR(SNDRV_COMPRESS_AVAIL):
821 retval = snd_compr_ioctl_avail(stream, arg);
822 break;
823 case _IOC_NR(SNDRV_COMPRESS_PAUSE):
824 retval = snd_compr_pause(stream);
825 break;
826 case _IOC_NR(SNDRV_COMPRESS_RESUME):
827 retval = snd_compr_resume(stream);
828 break;
829 case _IOC_NR(SNDRV_COMPRESS_START):
830 retval = snd_compr_start(stream);
831 break;
832 case _IOC_NR(SNDRV_COMPRESS_STOP):
833 retval = snd_compr_stop(stream);
834 break;
835 case _IOC_NR(SNDRV_COMPRESS_DRAIN):
836 retval = snd_compr_drain(stream);
837 break;
838 case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
839 retval = snd_compr_partial_drain(stream);
840 break;
841 case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
842 retval = snd_compr_next_track(stream);
843 break;
844
845 }
846 mutex_unlock(&stream->device->lock);
847 return retval;
848}
849
850static const struct file_operations snd_compr_file_ops = {
851 .owner = THIS_MODULE,
852 .open = snd_compr_open,
853 .release = snd_compr_free,
854 .write = snd_compr_write,
855 .read = snd_compr_read,
856 .unlocked_ioctl = snd_compr_ioctl,
857 .mmap = snd_compr_mmap,
858 .poll = snd_compr_poll,
859};
860
861static int snd_compress_dev_register(struct snd_device *device)
862{
863 int ret = -EINVAL;
864 char str[16];
865 struct snd_compr *compr;
866
867 if (snd_BUG_ON(!device || !device->device_data))
868 return -EBADFD;
869 compr = device->device_data;
870
871 sprintf(str, "comprC%iD%i", compr->card->number, compr->device);
872 pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
873 compr->direction);
874 /* register compressed device */
875 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
876 compr->device, &snd_compr_file_ops, compr, str);
877 if (ret < 0) {
 878 pr_err("snd_register_device failed %d\n", ret);
879 return ret;
880 }
881 return ret;
882
883}
884
885static int snd_compress_dev_disconnect(struct snd_device *device)
886{
887 struct snd_compr *compr;
888
889 compr = device->device_data;
890 snd_unregister_device(SNDRV_DEVICE_TYPE_COMPRESS, compr->card,
891 compr->device);
892 return 0;
893}
894
895/*
896 * snd_compress_new: create new compress device
897 * @card: sound card pointer
898 * @device: device number
899 * @dirn: device direction, should be of type enum snd_compr_direction
900 * @compr: compress device pointer
901 */
902int snd_compress_new(struct snd_card *card, int device,
903 int dirn, struct snd_compr *compr)
904{
905 static struct snd_device_ops ops = {
906 .dev_free = NULL,
907 .dev_register = snd_compress_dev_register,
908 .dev_disconnect = snd_compress_dev_disconnect,
909 };
910
911 compr->card = card;
912 compr->device = device;
913 compr->direction = dirn;
914 return snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
915}
916EXPORT_SYMBOL_GPL(snd_compress_new);
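/*
 * Minimal driver-side sketch (hedged; the probe context and "my_compr_ops"
 * are hypothetical, only snd_compress_new() is the real entry point here):
 *
 *	compr = devm_kzalloc(dev, sizeof(*compr), GFP_KERNEL);
 *	if (!compr)
 *		return -ENOMEM;
 *	compr->ops = &my_compr_ops;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, compr);
 *	if (ret < 0)
 *		return ret;
 */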
917
918static int snd_compress_add_device(struct snd_compr *device)
919{
920 int ret;
921
922 if (!device->card)
923 return -EINVAL;
924
925 /* register the card */
926 ret = snd_card_register(device->card);
927 if (ret)
928 goto out;
929 return 0;
930
931out:
932 pr_err("failed with %d\n", ret);
933 return ret;
934
935}
936
937static int snd_compress_remove_device(struct snd_compr *device)
938{
939 return snd_card_free(device->card);
940}
941
942/**
943 * snd_compress_register - register compressed device
944 *
945 * @device: compressed device to register
946 */
947int snd_compress_register(struct snd_compr *device)
948{
949 int retval;
950
951 if (device->name == NULL || device->dev == NULL || device->ops == NULL)
952 return -EINVAL;
953
954 pr_debug("Registering compressed device %s\n", device->name);
955 if (snd_BUG_ON(!device->ops->open))
956 return -EINVAL;
957 if (snd_BUG_ON(!device->ops->free))
958 return -EINVAL;
959 if (snd_BUG_ON(!device->ops->set_params))
960 return -EINVAL;
961 if (snd_BUG_ON(!device->ops->trigger))
962 return -EINVAL;
963
964 mutex_init(&device->lock);
965
966 /* register a compressed card */
967 mutex_lock(&device_mutex);
968 retval = snd_compress_add_device(device);
969 mutex_unlock(&device_mutex);
970 return retval;
971}
972EXPORT_SYMBOL_GPL(snd_compress_register);
973
974int snd_compress_deregister(struct snd_compr *device)
975{
976 pr_debug("Removing compressed device %s\n", device->name);
977 mutex_lock(&device_mutex);
978 snd_compress_remove_device(device);
979 mutex_unlock(&device_mutex);
980 return 0;
981}
982EXPORT_SYMBOL_GPL(snd_compress_deregister);
983
984static int __init snd_compress_init(void)
985{
986 return 0;
987}
988
989static void __exit snd_compress_exit(void)
990{
991}
992
993module_init(snd_compress_init);
994module_exit(snd_compress_exit);
995
996MODULE_DESCRIPTION("ALSA Compressed offload framework");
997MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
998MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * compress_core.c - compress offload core
4 *
5 * Copyright (C) 2011 Intel Corporation
6 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 */
12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
14
15#include <linux/file.h>
16#include <linux/fs.h>
17#include <linux/list.h>
18#include <linux/math64.h>
19#include <linux/mm.h>
20#include <linux/mutex.h>
21#include <linux/poll.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/uio.h>
26#include <linux/uaccess.h>
27#include <linux/dma-buf.h>
28#include <linux/module.h>
29#include <linux/compat.h>
30#include <sound/core.h>
31#include <sound/initval.h>
32#include <sound/info.h>
33#include <sound/compress_params.h>
34#include <sound/compress_offload.h>
35#include <sound/compress_driver.h>
36
37/* struct snd_compr_codec_caps overflows the ioctl bit size for some
38 * architectures, so we need to disable the relevant ioctls.
39 */
40#if _IOC_SIZEBITS < 14
41#define COMPR_CODEC_CAPS_OVERFLOW
42#endif
43
44/* TODO:
 45 * - add substream support for multiple devices in case
46 * SND_DYNAMIC_MINORS is not used
47 * - Multiple node representation
48 * driver should be able to register multiple nodes
49 */
50
51struct snd_compr_file {
52 unsigned long caps;
53 struct snd_compr_stream stream;
54};
55
56static void error_delayed_work(struct work_struct *work);
57
58#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
59static void snd_compr_task_free_all(struct snd_compr_stream *stream);
60#else
61static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
62#endif
63
64/*
65 * a note on stream states used:
66 * we use following states in the compressed core
67 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
68 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
69 * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
70 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
 71 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
 72 * playback only). After setting up the stream, the user writes to the
 73 * data buffer before starting the stream.
74 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
75 * decoding/encoding and rendering/capturing data.
76 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
77 * by calling SNDRV_COMPRESS_DRAIN.
78 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
79 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
80 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
81 */
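/*
 * Illustrative user-space sequence through the states above (a sketch for a
 * playback node such as /dev/snd/comprC0D0; error handling omitted):
 *
 *	fd = open("/dev/snd/comprC0D0", O_WRONLY);	    OPEN
 *	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	    -> SETUP
 *	write(fd, data, fragment_size);			    -> PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);		    -> RUNNING
 *	... more write()s, using poll() to wait for space ...
 *	ioctl(fd, SNDRV_COMPRESS_DRAIN);		    -> DRAINING -> SETUP
 *	close(fd);
 */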
82static int snd_compr_open(struct inode *inode, struct file *f)
83{
84 struct snd_compr *compr;
85 struct snd_compr_file *data;
86 struct snd_compr_runtime *runtime;
87 enum snd_compr_direction dirn;
88 int maj = imajor(inode);
89 int ret;
90
91 if ((f->f_flags & O_ACCMODE) == O_WRONLY)
92 dirn = SND_COMPRESS_PLAYBACK;
93 else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
94 dirn = SND_COMPRESS_CAPTURE;
95 else if ((f->f_flags & O_ACCMODE) == O_RDWR)
96 dirn = SND_COMPRESS_ACCEL;
97 else
98 return -EINVAL;
99
100 if (maj == snd_major)
101 compr = snd_lookup_minor_data(iminor(inode),
102 SNDRV_DEVICE_TYPE_COMPRESS);
103 else
104 return -EBADFD;
105
106 if (compr == NULL) {
107 pr_err("no device data!!!\n");
108 return -ENODEV;
109 }
110
111 if (dirn != compr->direction) {
112 pr_err("this device doesn't support this direction\n");
113 snd_card_unref(compr->card);
114 return -EINVAL;
115 }
116
117 data = kzalloc(sizeof(*data), GFP_KERNEL);
118 if (!data) {
119 snd_card_unref(compr->card);
120 return -ENOMEM;
121 }
122
123 INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
124
125 data->stream.ops = compr->ops;
126 data->stream.direction = dirn;
127 data->stream.private_data = compr->private_data;
128 data->stream.device = compr;
129 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
130 if (!runtime) {
131 kfree(data);
132 snd_card_unref(compr->card);
133 return -ENOMEM;
134 }
135 runtime->state = SNDRV_PCM_STATE_OPEN;
136 init_waitqueue_head(&runtime->sleep);
137#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
138 INIT_LIST_HEAD(&runtime->tasks);
139#endif
140 data->stream.runtime = runtime;
141 f->private_data = (void *)data;
142 scoped_guard(mutex, &compr->lock)
143 ret = compr->ops->open(&data->stream);
144 if (ret) {
145 kfree(runtime);
146 kfree(data);
147 }
148 snd_card_unref(compr->card);
149 return ret;
150}
151
152static int snd_compr_free(struct inode *inode, struct file *f)
153{
154 struct snd_compr_file *data = f->private_data;
155 struct snd_compr_runtime *runtime = data->stream.runtime;
156
157 cancel_delayed_work_sync(&data->stream.error_work);
158
159 switch (runtime->state) {
160 case SNDRV_PCM_STATE_RUNNING:
161 case SNDRV_PCM_STATE_DRAINING:
162 case SNDRV_PCM_STATE_PAUSED:
163 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
164 break;
165 default:
166 break;
167 }
168
169 snd_compr_task_free_all(&data->stream);
170
171 data->stream.ops->free(&data->stream);
172 if (!data->stream.runtime->dma_buffer_p)
173 kfree(data->stream.runtime->buffer);
174 kfree(data->stream.runtime);
175 kfree(data);
176 return 0;
177}
178
179static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
180 struct snd_compr_tstamp *tstamp)
181{
182 if (!stream->ops->pointer)
183 return -ENOTSUPP;
184 stream->ops->pointer(stream, tstamp);
185 pr_debug("dsp consumed till %d total %d bytes\n",
186 tstamp->byte_offset, tstamp->copied_total);
187 if (stream->direction == SND_COMPRESS_PLAYBACK)
188 stream->runtime->total_bytes_transferred = tstamp->copied_total;
189 else
190 stream->runtime->total_bytes_available = tstamp->copied_total;
191 return 0;
192}
193
194static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
195 struct snd_compr_avail *avail)
196{
197 memset(avail, 0, sizeof(*avail));
198 snd_compr_update_tstamp(stream, &avail->tstamp);
199 /* Still need to return avail even if tstamp can't be filled in */
200
201 if (stream->runtime->total_bytes_available == 0 &&
202 stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
203 stream->direction == SND_COMPRESS_PLAYBACK) {
204 pr_debug("detected init and someone forgot to do a write\n");
205 return stream->runtime->buffer_size;
206 }
207 pr_debug("app wrote %lld, DSP consumed %lld\n",
208 stream->runtime->total_bytes_available,
209 stream->runtime->total_bytes_transferred);
210 if (stream->runtime->total_bytes_available ==
211 stream->runtime->total_bytes_transferred) {
212 if (stream->direction == SND_COMPRESS_PLAYBACK) {
213 pr_debug("both pointers are same, returning full avail\n");
214 return stream->runtime->buffer_size;
215 } else {
216 pr_debug("both pointers are same, returning no avail\n");
217 return 0;
218 }
219 }
220
221 avail->avail = stream->runtime->total_bytes_available -
222 stream->runtime->total_bytes_transferred;
223 if (stream->direction == SND_COMPRESS_PLAYBACK)
224 avail->avail = stream->runtime->buffer_size - avail->avail;
225
226 pr_debug("ret avail as %lld\n", avail->avail);
227 return avail->avail;
228}
229
230static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
231{
232 struct snd_compr_avail avail;
233
234 return snd_compr_calc_avail(stream, &avail);
235}
236
237static int
238snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
239{
240 struct snd_compr_avail ioctl_avail;
241 size_t avail;
242
243 if (stream->direction == SND_COMPRESS_ACCEL)
244 return -EBADFD;
245
246 avail = snd_compr_calc_avail(stream, &ioctl_avail);
247 ioctl_avail.avail = avail;
248
249 switch (stream->runtime->state) {
250 case SNDRV_PCM_STATE_OPEN:
251 return -EBADFD;
252 case SNDRV_PCM_STATE_XRUN:
253 return -EPIPE;
254 default:
255 break;
256 }
257
258 if (copy_to_user((__u64 __user *)arg,
259 &ioctl_avail, sizeof(ioctl_avail)))
260 return -EFAULT;
261 return 0;
262}
263
264static int snd_compr_write_data(struct snd_compr_stream *stream,
265 const char __user *buf, size_t count)
266{
267 void *dstn;
268 size_t copy;
269 struct snd_compr_runtime *runtime = stream->runtime;
270 /* 64-bit Modulus */
271 u64 app_pointer = div64_u64(runtime->total_bytes_available,
272 runtime->buffer_size);
273 app_pointer = runtime->total_bytes_available -
274 (app_pointer * runtime->buffer_size);
275
276 dstn = runtime->buffer + app_pointer;
277 pr_debug("copying %ld at %lld\n",
278 (unsigned long)count, app_pointer);
279 if (count < runtime->buffer_size - app_pointer) {
280 if (copy_from_user(dstn, buf, count))
281 return -EFAULT;
282 } else {
283 copy = runtime->buffer_size - app_pointer;
284 if (copy_from_user(dstn, buf, copy))
285 return -EFAULT;
286 if (copy_from_user(runtime->buffer, buf + copy, count - copy))
287 return -EFAULT;
288 }
289 /* if DSP cares, let it know data has been written */
290 if (stream->ops->ack)
291 stream->ops->ack(stream, count);
292 return count;
293}
294
295static ssize_t snd_compr_write(struct file *f, const char __user *buf,
296 size_t count, loff_t *offset)
297{
298 struct snd_compr_file *data = f->private_data;
299 struct snd_compr_stream *stream;
300 size_t avail;
301 int retval;
302
303 if (snd_BUG_ON(!data))
304 return -EFAULT;
305
306 stream = &data->stream;
307 if (stream->direction == SND_COMPRESS_ACCEL)
308 return -EBADFD;
309 guard(mutex)(&stream->device->lock);
310 /* write is allowed when stream is running or has been setup */
311 switch (stream->runtime->state) {
312 case SNDRV_PCM_STATE_SETUP:
313 case SNDRV_PCM_STATE_PREPARED:
314 case SNDRV_PCM_STATE_RUNNING:
315 break;
316 default:
317 return -EBADFD;
318 }
319
320 avail = snd_compr_get_avail(stream);
321 pr_debug("avail returned %ld\n", (unsigned long)avail);
322 /* calculate how much we can write to buffer */
323 if (avail > count)
324 avail = count;
325
326 if (stream->ops->copy) {
327 char __user* cbuf = (char __user*)buf;
328 retval = stream->ops->copy(stream, cbuf, avail);
329 } else {
330 retval = snd_compr_write_data(stream, buf, avail);
331 }
332 if (retval > 0)
333 stream->runtime->total_bytes_available += retval;
334
 335 /* while initiating the stream, write should be called before the START
 336 * trigger, so if we are still in SETUP move the state to PREPARED */
337 if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
338 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
339 pr_debug("stream prepared, Houston we are good to go\n");
340 }
341
342 return retval;
343}
344
345
346static ssize_t snd_compr_read(struct file *f, char __user *buf,
347 size_t count, loff_t *offset)
348{
349 struct snd_compr_file *data = f->private_data;
350 struct snd_compr_stream *stream;
351 size_t avail;
352 int retval;
353
354 if (snd_BUG_ON(!data))
355 return -EFAULT;
356
357 stream = &data->stream;
358 if (stream->direction == SND_COMPRESS_ACCEL)
359 return -EBADFD;
360 guard(mutex)(&stream->device->lock);
361
 362 /* read is allowed when stream is running, paused, draining or setup
 363 * (yes, setup is the state we transition to after stop, so if the user
 364 * wants to read data after stop we allow that)
365 */
366 switch (stream->runtime->state) {
367 case SNDRV_PCM_STATE_OPEN:
368 case SNDRV_PCM_STATE_PREPARED:
369 case SNDRV_PCM_STATE_SUSPENDED:
370 case SNDRV_PCM_STATE_DISCONNECTED:
371 return -EBADFD;
372 case SNDRV_PCM_STATE_XRUN:
373 return -EPIPE;
374 }
375
376 avail = snd_compr_get_avail(stream);
377 pr_debug("avail returned %ld\n", (unsigned long)avail);
378 /* calculate how much we can read from buffer */
379 if (avail > count)
380 avail = count;
381
382 if (stream->ops->copy)
383 retval = stream->ops->copy(stream, buf, avail);
384 else
385 return -ENXIO;
386 if (retval > 0)
387 stream->runtime->total_bytes_transferred += retval;
388
389 return retval;
390}
391
392static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
393{
394 return -ENXIO;
395}
396
397static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
398{
399 if (stream->direction == SND_COMPRESS_PLAYBACK)
400 return EPOLLOUT | EPOLLWRNORM;
401 else
402 return EPOLLIN | EPOLLRDNORM;
403}
404
405static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
406{
407 struct snd_compr_file *data = f->private_data;
408 struct snd_compr_stream *stream;
409 struct snd_compr_runtime *runtime;
410 size_t avail;
411 __poll_t retval = 0;
412
413 if (snd_BUG_ON(!data))
414 return EPOLLERR;
415
416 stream = &data->stream;
417 runtime = stream->runtime;
418
419 guard(mutex)(&stream->device->lock);
420
421 switch (runtime->state) {
422 case SNDRV_PCM_STATE_OPEN:
423 case SNDRV_PCM_STATE_XRUN:
424 return snd_compr_get_poll(stream) | EPOLLERR;
425 default:
426 break;
427 }
428
429 poll_wait(f, &runtime->sleep, wait);
430
431#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
432 if (stream->direction == SND_COMPRESS_ACCEL) {
433 struct snd_compr_task_runtime *task;
434 if (runtime->fragments > runtime->active_tasks)
435 retval |= EPOLLOUT | EPOLLWRNORM;
436 task = list_first_entry_or_null(&runtime->tasks,
437 struct snd_compr_task_runtime,
438 list);
439 if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
440 retval |= EPOLLIN | EPOLLRDNORM;
441 return retval;
442 }
443#endif
444
445 avail = snd_compr_get_avail(stream);
446 pr_debug("avail is %ld\n", (unsigned long)avail);
447 /* check if we have at least one fragment to fill */
448 switch (runtime->state) {
449 case SNDRV_PCM_STATE_DRAINING:
 450 /* stream has been woken up after drain is complete,
 451 * so set the stream state back to SETUP (stopped)
452 */
453 retval = snd_compr_get_poll(stream);
454 runtime->state = SNDRV_PCM_STATE_SETUP;
455 break;
456 case SNDRV_PCM_STATE_RUNNING:
457 case SNDRV_PCM_STATE_PREPARED:
458 case SNDRV_PCM_STATE_PAUSED:
459 if (avail >= runtime->fragment_size)
460 retval = snd_compr_get_poll(stream);
461 break;
462 default:
463 return snd_compr_get_poll(stream) | EPOLLERR;
464 }
465
466 return retval;
467}
468
469static int
470snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
471{
472 int retval;
473 struct snd_compr_caps caps;
474
475 if (!stream->ops->get_caps)
476 return -ENXIO;
477
478 memset(&caps, 0, sizeof(caps));
479 retval = stream->ops->get_caps(stream, &caps);
480 if (retval)
481 goto out;
482 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
483 retval = -EFAULT;
484out:
485 return retval;
486}
487
488#ifndef COMPR_CODEC_CAPS_OVERFLOW
489static int
490snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
491{
492 int retval;
493 struct snd_compr_codec_caps *caps __free(kfree) = NULL;
494
495 if (!stream->ops->get_codec_caps)
496 return -ENXIO;
497
498 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
499 if (!caps)
500 return -ENOMEM;
501
502 retval = stream->ops->get_codec_caps(stream, caps);
503 if (retval)
504 return retval;
505 if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
506 return -EFAULT;
507 return retval;
508}
509#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
510
511int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
512{
513 struct snd_dma_buffer *dmab;
514 int ret;
515
516 if (snd_BUG_ON(!(stream) || !(stream)->runtime))
517 return -EINVAL;
518 dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
519 if (!dmab)
520 return -ENOMEM;
521 dmab->dev = stream->dma_buffer.dev;
522 ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
523 if (ret < 0) {
524 kfree(dmab);
525 return ret;
526 }
527
528 snd_compr_set_runtime_buffer(stream, dmab);
529 stream->runtime->dma_bytes = size;
530 return 1;
531}
532EXPORT_SYMBOL(snd_compr_malloc_pages);
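/*
 * Hedged usage sketch: a driver that wants the core to reuse a DMA buffer
 * presets stream->dma_buffer (type and size here are assumed values) before
 * calling snd_compr_malloc_pages(), typically from its set_params callback:
 *
 *	stream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
 *	stream->dma_buffer.dev.dev = dev;
 *	ret = snd_compr_malloc_pages(stream, 64 * 1024);
 *	if (ret < 0)
 *		return ret;
 */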
533
534int snd_compr_free_pages(struct snd_compr_stream *stream)
535{
536 struct snd_compr_runtime *runtime;
537
538 if (snd_BUG_ON(!(stream) || !(stream)->runtime))
539 return -EINVAL;
540 runtime = stream->runtime;
541 if (runtime->dma_area == NULL)
542 return 0;
543 if (runtime->dma_buffer_p != &stream->dma_buffer) {
544 /* It's a newly allocated buffer. Release it now. */
545 snd_dma_free_pages(runtime->dma_buffer_p);
546 kfree(runtime->dma_buffer_p);
547 }
548
549 snd_compr_set_runtime_buffer(stream, NULL);
550 return 0;
551}
552EXPORT_SYMBOL(snd_compr_free_pages);
553
554/* revisit this with snd_pcm_preallocate_xxx */
555static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
556 struct snd_compr_params *params)
557{
558 unsigned int buffer_size;
559 void *buffer = NULL;
560
561 if (stream->direction == SND_COMPRESS_ACCEL)
562 goto params;
563
564 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
565 if (stream->ops->copy) {
566 buffer = NULL;
567 /* if copy is defined the driver will be required to copy
568 * the data from core
569 */
570 } else {
571 if (stream->runtime->dma_buffer_p) {
572
573 if (buffer_size > stream->runtime->dma_buffer_p->bytes)
574 dev_err(stream->device->dev,
575 "Not enough DMA buffer");
576 else
577 buffer = stream->runtime->dma_buffer_p->area;
578
579 } else {
580 buffer = kmalloc(buffer_size, GFP_KERNEL);
581 }
582
583 if (!buffer)
584 return -ENOMEM;
585 }
586
587 stream->runtime->buffer = buffer;
588 stream->runtime->buffer_size = buffer_size;
589params:
590 stream->runtime->fragment_size = params->buffer.fragment_size;
591 stream->runtime->fragments = params->buffer.fragments;
592 return 0;
593}
594
595static int
596snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
597{
598 u32 max_fragments;
599
 600 /* first let's check the buffer parameters */
601 if (params->buffer.fragment_size == 0)
602 return -EINVAL;
603
604 if (stream->direction == SND_COMPRESS_ACCEL)
605 max_fragments = 64; /* safe value */
606 else
607 max_fragments = U32_MAX / params->buffer.fragment_size;
608
609 if (params->buffer.fragments > max_fragments ||
610 params->buffer.fragments == 0)
611 return -EINVAL;
612
613 /* now codec parameters */
614 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
615 return -EINVAL;
616
617 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
618 return -EINVAL;
619
620 return 0;
621}
622
623static int
624snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
625{
626 struct snd_compr_params *params __free(kfree) = NULL;
627 int retval;
628
629 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
630 /*
631 * we should allow parameter change only when stream has been
632 * opened not in other cases
633 */
634 params = memdup_user((void __user *)arg, sizeof(*params));
635 if (IS_ERR(params))
636 return PTR_ERR(params);
637
638 retval = snd_compress_check_input(stream, params);
639 if (retval)
640 return retval;
641
642 retval = snd_compr_allocate_buffer(stream, params);
643 if (retval)
644 return -ENOMEM;
645
646 retval = stream->ops->set_params(stream, params);
647 if (retval)
648 return retval;
649
650 if (stream->next_track)
651 return retval;
652
653 stream->metadata_set = false;
654 stream->next_track = false;
655
656 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
657 } else {
658 return -EPERM;
659 }
660 return retval;
661}
662
663static int
664snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
665{
666 struct snd_codec *params __free(kfree) = NULL;
667 int retval;
668
669 if (!stream->ops->get_params)
670 return -EBADFD;
671
672 params = kzalloc(sizeof(*params), GFP_KERNEL);
673 if (!params)
674 return -ENOMEM;
675 retval = stream->ops->get_params(stream, params);
676 if (retval)
677 return retval;
678 if (copy_to_user((char __user *)arg, params, sizeof(*params)))
679 return -EFAULT;
680 return retval;
681}
682
683static int
684snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
685{
686 struct snd_compr_metadata metadata;
687 int retval;
688
689 if (!stream->ops->get_metadata)
690 return -ENXIO;
691
692 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
693 return -EFAULT;
694
695 retval = stream->ops->get_metadata(stream, &metadata);
696 if (retval != 0)
697 return retval;
698
699 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
700 return -EFAULT;
701
702 return 0;
703}
704
705static int
706snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
707{
708 struct snd_compr_metadata metadata;
709 int retval;
710
711 if (!stream->ops->set_metadata)
712 return -ENXIO;
713 /*
714 * we should allow parameter change only when stream has been
715 * opened not in other cases
716 */
717 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
718 return -EFAULT;
719
720 retval = stream->ops->set_metadata(stream, &metadata);
721 stream->metadata_set = true;
722
723 return retval;
724}
725
726static inline int
727snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
728{
729 struct snd_compr_tstamp tstamp = {0};
730 int ret;
731
732 ret = snd_compr_update_tstamp(stream, &tstamp);
733 if (ret == 0)
734 ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
735 &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
736 return ret;
737}
738
739static int snd_compr_pause(struct snd_compr_stream *stream)
740{
741 int retval;
742
743 switch (stream->runtime->state) {
744 case SNDRV_PCM_STATE_RUNNING:
745 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
746 if (!retval)
747 stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
748 break;
749 case SNDRV_PCM_STATE_DRAINING:
750 if (!stream->device->use_pause_in_draining)
751 return -EPERM;
752 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
753 if (!retval)
754 stream->pause_in_draining = true;
755 break;
756 default:
757 return -EPERM;
758 }
759 return retval;
760}
761
762static int snd_compr_resume(struct snd_compr_stream *stream)
763{
764 int retval;
765
766 switch (stream->runtime->state) {
767 case SNDRV_PCM_STATE_PAUSED:
768 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
769 if (!retval)
770 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
771 break;
772 case SNDRV_PCM_STATE_DRAINING:
773 if (!stream->pause_in_draining)
774 return -EPERM;
775 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
776 if (!retval)
777 stream->pause_in_draining = false;
778 break;
779 default:
780 return -EPERM;
781 }
782 return retval;
783}
784
785static int snd_compr_start(struct snd_compr_stream *stream)
786{
787 int retval;
788
789 switch (stream->runtime->state) {
790 case SNDRV_PCM_STATE_SETUP:
791 if (stream->direction != SND_COMPRESS_CAPTURE)
792 return -EPERM;
793 break;
794 case SNDRV_PCM_STATE_PREPARED:
795 break;
796 default:
797 return -EPERM;
798 }
799
800 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
801 if (!retval)
802 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
803 return retval;
804}
805
806static int snd_compr_stop(struct snd_compr_stream *stream)
807{
808 int retval;
809
810 switch (stream->runtime->state) {
811 case SNDRV_PCM_STATE_OPEN:
812 case SNDRV_PCM_STATE_SETUP:
813 case SNDRV_PCM_STATE_PREPARED:
814 return -EPERM;
815 default:
816 break;
817 }
818
819 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
820 if (!retval) {
821 /* clear flags and stop any drain wait */
822 stream->partial_drain = false;
823 stream->metadata_set = false;
824 stream->pause_in_draining = false;
825 snd_compr_drain_notify(stream);
826 stream->runtime->total_bytes_available = 0;
827 stream->runtime->total_bytes_transferred = 0;
828 }
829 return retval;
830}
831
832static void error_delayed_work(struct work_struct *work)
833{
834 struct snd_compr_stream *stream;
835
836 stream = container_of(work, struct snd_compr_stream, error_work.work);
837
838 guard(mutex)(&stream->device->lock);
839
840 stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
841 wake_up(&stream->runtime->sleep);
842}
843
844/**
845 * snd_compr_stop_error: Report a fatal error on a stream
846 * @stream: pointer to stream
847 * @state: state to transition the stream to
848 *
849 * Stop the stream and set its state.
850 *
851 * Should be called with compressed device lock held.
852 *
853 * Return: zero if successful, or a negative error code
854 */
855int snd_compr_stop_error(struct snd_compr_stream *stream,
856 snd_pcm_state_t state)
857{
858 if (stream->runtime->state == state)
859 return 0;
860
861 stream->runtime->state = state;
862
863 pr_debug("Changing state to: %d\n", state);
864
865 queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
866
867 return 0;
868}
869EXPORT_SYMBOL_GPL(snd_compr_stop_error);
870
871static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
872{
873 int ret;
874
875 /*
876 * We are called with lock held. So drop the lock while we wait for
877 * drain complete notification from the driver
878 *
879 * It is expected that driver will notify the drain completion and then
880 * stream will be moved to SETUP state, even if draining resulted in an
881 * error. We can trigger next track after this.
882 */
883 stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
884 mutex_unlock(&stream->device->lock);
885
 886 /* we wait for drain to complete here; the wait can return when an
 887 * interruption occurred, or when it returned an error or success.
888 * For the first two cases we don't do anything different here and
889 * return after waking up
890 */
891
892 ret = wait_event_interruptible(stream->runtime->sleep,
893 (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
894 if (ret == -ERESTARTSYS)
895 pr_debug("wait aborted by a signal\n");
896 else if (ret)
897 pr_debug("wait for drain failed with %d\n", ret);
898
899
900 wake_up(&stream->runtime->sleep);
901 mutex_lock(&stream->device->lock);
902
903 return ret;
904}
905
906static int snd_compr_drain(struct snd_compr_stream *stream)
907{
908 int retval;
909
910 switch (stream->runtime->state) {
911 case SNDRV_PCM_STATE_OPEN:
912 case SNDRV_PCM_STATE_SETUP:
913 case SNDRV_PCM_STATE_PREPARED:
914 case SNDRV_PCM_STATE_PAUSED:
915 return -EPERM;
916 case SNDRV_PCM_STATE_XRUN:
917 return -EPIPE;
918 default:
919 break;
920 }
921
922 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
923 if (retval) {
924 pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
925 wake_up(&stream->runtime->sleep);
926 return retval;
927 }
928
929 return snd_compress_wait_for_drain(stream);
930}
931
932static int snd_compr_next_track(struct snd_compr_stream *stream)
933{
934 int retval;
935
936 /* only a running stream can transition to next track */
937 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
938 return -EPERM;
939
940 /* next track doesn't have any meaning for capture streams */
941 if (stream->direction == SND_COMPRESS_CAPTURE)
942 return -EPERM;
943
944 /* you can signal next track if this is intended to be a gapless stream
945 * and current track metadata is set
946 */
947 if (stream->metadata_set == false)
948 return -EPERM;
949
950 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
951 if (retval != 0)
952 return retval;
953 stream->metadata_set = false;
954 stream->next_track = true;
955 return 0;
956}
957
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
	int retval;

	switch (stream->runtime->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
		return -EPERM;
	case SNDRV_PCM_STATE_XRUN:
		return -EPIPE;
	default:
		break;
	}

	/* partial drain doesn't have any meaning for capture streams */
	if (stream->direction == SND_COMPRESS_CAPTURE)
		return -EPERM;

	/* stream can be drained only when next track has been signalled */
	if (stream->next_track == false)
		return -EPERM;

	stream->partial_drain = true;
	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
	if (retval) {
		pr_debug("Partial drain returned failure\n");
		wake_up(&stream->runtime->sleep);
		return retval;
	}

	stream->next_track = false;
	return snd_compress_wait_for_drain(stream);
}

#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)

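/* look up a runtime task by its sequence number; returns NULL when not found */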
static struct snd_compr_task_runtime *
snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
{
	struct snd_compr_task_runtime *task;

	list_for_each_entry(task, &stream->runtime->tasks, list) {
		if (task->seqno == seqno)
			return task;
	}
	return NULL;
}

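/* drop the dma-buf references held by the task and free the runtime struct */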
static void snd_compr_task_free(struct snd_compr_task_runtime *task)
{
	if (task->output)
		dma_buf_put(task->output);
	if (task->input)
		dma_buf_put(task->input);
	kfree(task);
}

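/*
 * Allocate the next 64-bit task sequence number; zero is reserved as
 * "no task", so skip it on wrap-around.
 */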
static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
{
	u64 seqno = ++stream->runtime->task_seqno;

	if (seqno == 0)
		seqno = ++stream->runtime->task_seqno;
	return seqno;
}

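/*
 * Create a new offload task: allocate the runtime structure, let the driver
 * create the input/output dma-bufs via ops->task_create(), and install one
 * file descriptor per buffer for the caller. The dma-buf references are kept
 * until the task is freed with the TASK_FREE ioctl.
 */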
static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
{
	struct snd_compr_task_runtime *task;
	int retval, fd_i, fd_o;

	if (stream->runtime->total_tasks >= stream->runtime->fragments)
		return -EBUSY;
	if (utask->origin_seqno != 0 || utask->input_size != 0)
		return -EINVAL;
	task = kzalloc(sizeof(*task), GFP_KERNEL);
	if (task == NULL)
		return -ENOMEM;
	task->seqno = utask->seqno = snd_compr_seqno_next(stream);
	task->input_size = utask->input_size;
	retval = stream->ops->task_create(stream, task);
	if (retval < 0)
		goto cleanup;
	/* similar functionality as in dma_buf_fd(), but ensure that both
	   file descriptors are allocated before fd_install() */
	if (!task->input || !task->input->file || !task->output || !task->output->file) {
		retval = -EINVAL;
		goto cleanup;
	}
	fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
	if (fd_i < 0) {
		retval = fd_i;
		goto cleanup;
	}
	fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
	if (fd_o < 0) {
		retval = fd_o;
		put_unused_fd(fd_i);
		goto cleanup;
	}
	/* keep dmabuf reference until freed with task free ioctl */
	get_dma_buf(task->input);
	get_dma_buf(task->output);
	fd_install(fd_i, task->input->file);
	fd_install(fd_o, task->output->file);
	utask->input_fd = fd_i;
	utask->output_fd = fd_o;
	list_add_tail(&task->list, &stream->runtime->tasks);
	stream->runtime->total_tasks++;
	return 0;
cleanup:
	snd_compr_task_free(task);
	return retval;
}

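/*
 * TASK_CREATE ioctl handler: tasks can only be created once the stream has
 * been set up; copy the request from userspace, create the task and return
 * the assigned seqno and buffer file descriptors to the caller.
 */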
static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		return PTR_ERR(no_free_ptr(task));
	retval = snd_compr_task_new(stream, task);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}

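/*
 * Validate and (re)arm a task before it is handed to the driver: the task
 * must exist, must not already be finished, and the requested input size
 * must fit into the input dma-buf.
 */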
static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
					struct snd_compr_task *utask)
{
	if (task == NULL)
		return -EINVAL;
	if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
		return -EBUSY;
	if (utask->input_size > task->input->size)
		return -EINVAL;
	task->flags = utask->flags;
	task->input_size = utask->input_size;
	task->state = SND_COMPRESS_TASK_STATE_IDLE;
	return 0;
}

static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
{
	struct snd_compr_task_runtime *task;
	int retval;

	if (utask->origin_seqno > 0) {
		task = snd_compr_find_task(stream, utask->origin_seqno);
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
		task->seqno = utask->seqno = snd_compr_seqno_next(stream);
		utask->origin_seqno = 0;
		list_move_tail(&task->list, &stream->runtime->tasks);
	} else {
		task = snd_compr_find_task(stream, utask->seqno);
		if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
			return -EBUSY;
		retval = snd_compr_task_start_prepare(task, utask);
		if (retval < 0)
			return retval;
	}
	retval = stream->ops->task_start(stream, task);
	if (retval >= 0) {
		task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
		stream->runtime->active_tasks++;
	}
	return retval;
}

static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task *task __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	task = memdup_user((void __user *)arg, sizeof(*task));
	if (IS_ERR(task))
		return PTR_ERR(no_free_ptr(task));
	retval = snd_compr_task_start(stream, task);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
			retval = -EFAULT;
	return retval;
}

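/*
 * Stop or free a single task. A stopped task is moved back to the IDLE state
 * and to the tail of the task list; freeing additionally releases the driver
 * resources and the dma-buf references.
 */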
static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
		return;
	stream->ops->task_stop(stream, task);
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	list_move_tail(&task->list, &stream->runtime->tasks);
	task->state = SND_COMPRESS_TASK_STATE_IDLE;
}

static void snd_compr_task_free_one(struct snd_compr_stream *stream,
				    struct snd_compr_task_runtime *task)
{
	snd_compr_task_stop_one(stream, task);
	stream->ops->task_free(stream, task);
	list_del(&task->list);
	snd_compr_task_free(task);
	stream->runtime->total_tasks--;
}

static void snd_compr_task_free_all(struct snd_compr_stream *stream)
{
	struct snd_compr_task_runtime *task, *temp;

	list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
		snd_compr_task_free_one(stream, task);
}

typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
				     struct snd_compr_task_runtime *task);

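/*
 * Run @fcn on a single task identified by the seqno passed from userspace,
 * or on every task (newest first) when the seqno is zero. Used by the
 * TASK_STOP and TASK_FREE ioctls.
 */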
static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
			      snd_compr_seq_func_t fcn)
{
	struct snd_compr_task_runtime *task, *temp;
	__u64 seqno;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
	if (retval)
		return -EFAULT;
	retval = 0;
	if (seqno == 0) {
		list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
			fcn(stream, task);
	} else {
		task = snd_compr_find_task(stream, seqno);
		if (task == NULL)
			retval = -EINVAL;
		else
			fcn(stream, task);
	}
	return retval;
}

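/*
 * Report the current input/output sizes and state of a task back to
 * userspace; the task is looked up by the seqno supplied in the status
 * structure.
 */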
static int snd_compr_task_status(struct snd_compr_stream *stream,
				 struct snd_compr_task_status *status)
{
	struct snd_compr_task_runtime *task;

	task = snd_compr_find_task(stream, status->seqno);
	if (task == NULL)
		return -EINVAL;
	status->input_size = task->input_size;
	status->output_size = task->output_size;
	status->state = task->state;
	return 0;
}

static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
{
	struct snd_compr_task_status *status __free(kfree) = NULL;
	int retval;

	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
		return -EPERM;
	status = memdup_user((void __user *)arg, sizeof(*status));
	if (IS_ERR(status))
		return PTR_ERR(no_free_ptr(status));
	retval = snd_compr_task_status(stream, status);
	if (retval >= 0)
		if (copy_to_user((void __user *)arg, status, sizeof(*status)))
			retval = -EFAULT;
	return retval;
}

/**
 * snd_compr_task_finished: Notify that the task was finished
 * @stream: pointer to stream
 * @task: runtime task structure
 *
 * Set the finished task state and notify waiters.
 */
void snd_compr_task_finished(struct snd_compr_stream *stream,
			     struct snd_compr_task_runtime *task)
{
	guard(mutex)(&stream->device->lock);
	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
		stream->runtime->active_tasks--;
	task->state = SND_COMPRESS_TASK_STATE_FINISHED;
	wake_up(&stream->runtime->sleep);
}
EXPORT_SYMBOL_GPL(snd_compr_task_finished);

MODULE_IMPORT_NS("DMA_BUF");
#endif /* CONFIG_SND_COMPRESS_ACCEL */

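/*
 * Main ioctl dispatcher. Ioctls common to all stream types are handled
 * first; accel streams then only accept the TASK_* ioctls, while playback
 * and capture streams accept the usual start/stop/pause/drain controls.
 */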
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	struct snd_compr_file *data = f->private_data;
	struct snd_compr_stream *stream;

	if (snd_BUG_ON(!data))
		return -EFAULT;

	stream = &data->stream;

	guard(mutex)(&stream->device->lock);
	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
		return put_user(SNDRV_COMPRESS_VERSION,
				(int __user *)arg) ? -EFAULT : 0;
	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
		return snd_compr_get_caps(stream, arg);
#ifndef COMPR_CODEC_CAPS_OVERFLOW
	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
		return snd_compr_get_codec_caps(stream, arg);
#endif
	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
		return snd_compr_set_params(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
		return snd_compr_get_params(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
		return snd_compr_set_metadata(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
		return snd_compr_get_metadata(stream, arg);
	}

	if (stream->direction == SND_COMPRESS_ACCEL) {
#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
		switch (_IOC_NR(cmd)) {
		case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
			return snd_compr_task_create(stream, arg);
		case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
		case _IOC_NR(SNDRV_COMPRESS_TASK_START):
			return snd_compr_task_start_ioctl(stream, arg);
		case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
		case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
			return snd_compr_task_status_ioctl(stream, arg);
		}
#endif
		return -ENOTTY;
	}

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
		return snd_compr_tstamp(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
		return snd_compr_ioctl_avail(stream, arg);
	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
		return snd_compr_pause(stream);
	case _IOC_NR(SNDRV_COMPRESS_RESUME):
		return snd_compr_resume(stream);
	case _IOC_NR(SNDRV_COMPRESS_START):
		return snd_compr_start(stream);
	case _IOC_NR(SNDRV_COMPRESS_STOP):
		return snd_compr_stop(stream);
	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
		return snd_compr_drain(stream);
	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
		return snd_compr_partial_drain(stream);
	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
		return snd_compr_next_track(stream);
	}

	return -ENOTTY;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations snd_compr_file_ops = {
	.owner =	THIS_MODULE,
	.open =		snd_compr_open,
	.release =	snd_compr_free,
	.write =	snd_compr_write,
	.read =		snd_compr_read,
	.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = snd_compr_ioctl_compat,
#endif
	.mmap =		snd_compr_mmap,
	.poll =		snd_compr_poll,
};

static int snd_compress_dev_register(struct snd_device *device)
{
	int ret;
	struct snd_compr *compr;

	if (snd_BUG_ON(!device || !device->device_data))
		return -EBADFD;
	compr = device->device_data;

	pr_debug("reg device %s, direction %d\n", compr->name,
		 compr->direction);
	/* register compressed device */
	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
				  compr->card, compr->device,
				  &snd_compr_file_ops, compr, compr->dev);
	if (ret < 0) {
		pr_err("snd_register_device failed %d\n", ret);
		return ret;
	}
	return ret;
}

static int snd_compress_dev_disconnect(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_unregister_device(compr->dev);
	return 0;
}

#ifdef CONFIG_SND_VERBOSE_PROCFS
static void snd_compress_proc_info_read(struct snd_info_entry *entry,
					struct snd_info_buffer *buffer)
{
	struct snd_compr *compr = (struct snd_compr *)entry->private_data;

	snd_iprintf(buffer, "card: %d\n", compr->card->number);
	snd_iprintf(buffer, "device: %d\n", compr->device);
	snd_iprintf(buffer, "stream: %s\n",
		    compr->direction == SND_COMPRESS_PLAYBACK
			? "PLAYBACK" : "CAPTURE");
	snd_iprintf(buffer, "id: %s\n", compr->id);
}

static int snd_compress_proc_init(struct snd_compr *compr)
{
	struct snd_info_entry *entry;
	char name[16];

	sprintf(name, "compr%i", compr->device);
	entry = snd_info_create_card_entry(compr->card, name,
					   compr->card->proc_root);
	if (!entry)
		return -ENOMEM;
	entry->mode = S_IFDIR | 0555;
	compr->proc_root = entry;

	entry = snd_info_create_card_entry(compr->card, "info",
					   compr->proc_root);
	if (entry)
		snd_info_set_text_ops(entry, compr,
				      snd_compress_proc_info_read);
	compr->proc_info_entry = entry;

	return 0;
}

static void snd_compress_proc_done(struct snd_compr *compr)
{
	snd_info_free_entry(compr->proc_info_entry);
	compr->proc_info_entry = NULL;
	snd_info_free_entry(compr->proc_root);
	compr->proc_root = NULL;
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
	strscpy(compr->id, id, sizeof(compr->id));
}
#else
static inline int snd_compress_proc_init(struct snd_compr *compr)
{
	return 0;
}

static inline void snd_compress_proc_done(struct snd_compr *compr)
{
}

static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
{
}
#endif

static int snd_compress_dev_free(struct snd_device *device)
{
	struct snd_compr *compr;

	compr = device->device_data;
	snd_compress_proc_done(compr);
	put_device(compr->dev);
	return 0;
}

/**
 * snd_compress_new: create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @id: ID string
 * @compr: compress device pointer
 *
 * Return: zero if successful, or a negative error code
 */
int snd_compress_new(struct snd_card *card, int device,
		     int dirn, const char *id, struct snd_compr *compr)
{
	static const struct snd_device_ops ops = {
		.dev_free = snd_compress_dev_free,
		.dev_register = snd_compress_dev_register,
		.dev_disconnect = snd_compress_dev_disconnect,
	};
	int ret;

#if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
	if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
		return -EINVAL;
#endif

	compr->card = card;
	compr->device = device;
	compr->direction = dirn;
	mutex_init(&compr->lock);

	snd_compress_set_id(compr, id);

	ret = snd_device_alloc(&compr->dev, card);
	if (ret)
		return ret;
	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);

	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
	if (ret == 0)
		snd_compress_proc_init(compr);
	else
		put_device(compr->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(snd_compress_new);
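
/*
 * A minimal usage sketch (illustrative only, not taken from an in-tree
 * driver): a driver typically fills in its struct snd_compr_ops, points
 * compr->ops at it and registers the node during card creation, e.g.:
 *
 *	static const struct snd_compr_ops foo_compr_ops = {
 *		.open		= foo_compr_open,
 *		.free		= foo_compr_free,
 *		.set_params	= foo_compr_set_params,
 *		.trigger	= foo_compr_trigger,
 *		.pointer	= foo_compr_pointer,
 *	};
 *
 *	compr->ops = &foo_compr_ops;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, "foo", compr);
 *
 * The foo_* callbacks and the "foo" id are hypothetical names used only for
 * illustration.
 */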

MODULE_DESCRIPTION("ALSA Compressed offload framework");
MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
MODULE_LICENSE("GPL v2");