1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * compress_core.c - compress offload core
4 *
5 * Copyright (C) 2011 Intel Corporation
6 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
7 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
9 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 */
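/* prefix all pr_*() messages with the module name, function and line number */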
12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
14
15#include <linux/file.h>
16#include <linux/fs.h>
17#include <linux/list.h>
18#include <linux/math64.h>
19#include <linux/mm.h>
20#include <linux/mutex.h>
21#include <linux/poll.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/uio.h>
26#include <linux/uaccess.h>
27#include <linux/dma-buf.h>
28#include <linux/module.h>
29#include <linux/compat.h>
30#include <sound/core.h>
31#include <sound/initval.h>
32#include <sound/info.h>
33#include <sound/compress_params.h>
34#include <sound/compress_offload.h>
35#include <sound/compress_driver.h>
36
37/* struct snd_compr_codec_caps overflows the ioctl bit size for some
38 * architectures, so we need to disable the relevant ioctls.
39 */
40#if _IOC_SIZEBITS < 14
41#define COMPR_CODEC_CAPS_OVERFLOW
42#endif
43
44/* TODO:
45 * - add substream support for multiple devices when
46 *	SND_DYNAMIC_MINORS is not used
47 * - multiple node representation:
48 *	a driver should be able to register multiple nodes
49 */
50
51struct snd_compr_file {
52 unsigned long caps;
53 struct snd_compr_stream stream;
54};
55
56static void error_delayed_work(struct work_struct *work);
57
58#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
59static void snd_compr_task_free_all(struct snd_compr_stream *stream);
60#else
61static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
62#endif
63
64/*
65 * a note on stream states used:
66 * we use the following states in the compress core
67 * SNDRV_PCM_STATE_OPEN: when the stream has been opened.
68 * SNDRV_PCM_STATE_SETUP: when the stream has been initialized. This is done
69 *	by calling SNDRV_COMPRESS_SET_PARAMS. Running streams return to this
70 *	state on stop, by calling SNDRV_COMPRESS_STOP, or at the end of a drain.
71 * SNDRV_PCM_STATE_PREPARED: when a stream has been written to (playback
72 *	only). After setting up the stream, the user writes data to the buffer
73 *	before starting the stream.
74 * SNDRV_PCM_STATE_RUNNING: when the stream has been started and is
75 *	decoding/encoding and rendering/capturing data.
76 * SNDRV_PCM_STATE_DRAINING: when the stream is draining the current data.
77 *	This is done by calling SNDRV_COMPRESS_DRAIN.
78 * SNDRV_PCM_STATE_PAUSED: when the stream is paused. This is done by calling
79 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
80 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
81 */
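/*
 * A minimal userspace usage sketch for a playback stream, illustrating the
 * state transitions above. Illustrative only: the device node name, codec
 * choice and buffer geometry are assumptions, and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sound/compress_params.h>
 *	#include <sound/compress_offload.h>
 *
 *	int fd = open("/dev/snd/comprC0D0", O_WRONLY);	// OPEN
 *	struct snd_compr_params params = {
 *		.buffer = { .fragment_size = 4096, .fragments = 8 },
 *		.codec = { .id = SND_AUDIOCODEC_MP3, .ch_in = 2, .ch_out = 2 },
 *	};
 *	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	// -> SETUP
 *	// data/num_bytes: the application's encoded bitstream
 *	write(fd, data, num_bytes);			// -> PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);		// -> RUNNING
 *	// ... keep writing fragments while poll() reports writable ...
 *	ioctl(fd, SNDRV_COMPRESS_DRAIN);		// -> DRAINING, then SETUP
 *	close(fd);
 */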
82static int snd_compr_open(struct inode *inode, struct file *f)
83{
84 struct snd_compr *compr;
85 struct snd_compr_file *data;
86 struct snd_compr_runtime *runtime;
87 enum snd_compr_direction dirn;
88 int maj = imajor(inode);
89 int ret;
90
91 if ((f->f_flags & O_ACCMODE) == O_WRONLY)
92 dirn = SND_COMPRESS_PLAYBACK;
93 else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
94 dirn = SND_COMPRESS_CAPTURE;
95 else if ((f->f_flags & O_ACCMODE) == O_RDWR)
96 dirn = SND_COMPRESS_ACCEL;
97 else
98 return -EINVAL;
99
100 if (maj == snd_major)
101 compr = snd_lookup_minor_data(iminor(inode),
102 SNDRV_DEVICE_TYPE_COMPRESS);
103 else
104 return -EBADFD;
105
106 if (compr == NULL) {
107 pr_err("no device data!!!\n");
108 return -ENODEV;
109 }
110
111 if (dirn != compr->direction) {
112 pr_err("this device doesn't support this direction\n");
113 snd_card_unref(compr->card);
114 return -EINVAL;
115 }
116
117 data = kzalloc(sizeof(*data), GFP_KERNEL);
118 if (!data) {
119 snd_card_unref(compr->card);
120 return -ENOMEM;
121 }
122
123 INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
124
125 data->stream.ops = compr->ops;
126 data->stream.direction = dirn;
127 data->stream.private_data = compr->private_data;
128 data->stream.device = compr;
129 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
130 if (!runtime) {
131 kfree(data);
132 snd_card_unref(compr->card);
133 return -ENOMEM;
134 }
135 runtime->state = SNDRV_PCM_STATE_OPEN;
136 init_waitqueue_head(&runtime->sleep);
137#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
138 INIT_LIST_HEAD(&runtime->tasks);
139#endif
140 data->stream.runtime = runtime;
141 f->private_data = (void *)data;
142 scoped_guard(mutex, &compr->lock)
143 ret = compr->ops->open(&data->stream);
144 if (ret) {
145 kfree(runtime);
146 kfree(data);
147 }
148 snd_card_unref(compr->card);
149 return ret;
150}
151
152static int snd_compr_free(struct inode *inode, struct file *f)
153{
154 struct snd_compr_file *data = f->private_data;
155 struct snd_compr_runtime *runtime = data->stream.runtime;
156
157 cancel_delayed_work_sync(&data->stream.error_work);
158
159 switch (runtime->state) {
160 case SNDRV_PCM_STATE_RUNNING:
161 case SNDRV_PCM_STATE_DRAINING:
162 case SNDRV_PCM_STATE_PAUSED:
163 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
164 break;
165 default:
166 break;
167 }
168
169 snd_compr_task_free_all(&data->stream);
170
171 data->stream.ops->free(&data->stream);
172 if (!data->stream.runtime->dma_buffer_p)
173 kfree(data->stream.runtime->buffer);
174 kfree(data->stream.runtime);
175 kfree(data);
176 return 0;
177}
178
179static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
180 struct snd_compr_tstamp *tstamp)
181{
182 if (!stream->ops->pointer)
183 return -ENOTSUPP;
184 stream->ops->pointer(stream, tstamp);
185 pr_debug("dsp consumed till %d total %d bytes\n",
186 tstamp->byte_offset, tstamp->copied_total);
187 if (stream->direction == SND_COMPRESS_PLAYBACK)
188 stream->runtime->total_bytes_transferred = tstamp->copied_total;
189 else
190 stream->runtime->total_bytes_available = tstamp->copied_total;
191 return 0;
192}
193
194static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
195 struct snd_compr_avail *avail)
196{
197 memset(avail, 0, sizeof(*avail));
198 snd_compr_update_tstamp(stream, &avail->tstamp);
199 /* Still need to return avail even if tstamp can't be filled in */
200
201 if (stream->runtime->total_bytes_available == 0 &&
202 stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
203 stream->direction == SND_COMPRESS_PLAYBACK) {
204 pr_debug("detected init and someone forgot to do a write\n");
205 return stream->runtime->buffer_size;
206 }
207 pr_debug("app wrote %lld, DSP consumed %lld\n",
208 stream->runtime->total_bytes_available,
209 stream->runtime->total_bytes_transferred);
210 if (stream->runtime->total_bytes_available ==
211 stream->runtime->total_bytes_transferred) {
212 if (stream->direction == SND_COMPRESS_PLAYBACK) {
213 pr_debug("both pointers are same, returning full avail\n");
214 return stream->runtime->buffer_size;
215 } else {
216 pr_debug("both pointers are same, returning no avail\n");
217 return 0;
218 }
219 }
220
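	/* for playback, "avail" is the free space left in the ring buffer;
	 * for capture, it is the amount of data ready to be read
	 */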
221 avail->avail = stream->runtime->total_bytes_available -
222 stream->runtime->total_bytes_transferred;
223 if (stream->direction == SND_COMPRESS_PLAYBACK)
224 avail->avail = stream->runtime->buffer_size - avail->avail;
225
226 pr_debug("ret avail as %lld\n", avail->avail);
227 return avail->avail;
228}
229
230static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
231{
232 struct snd_compr_avail avail;
233
234 return snd_compr_calc_avail(stream, &avail);
235}
236
237static int
238snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
239{
240 struct snd_compr_avail ioctl_avail;
241 size_t avail;
242
243 if (stream->direction == SND_COMPRESS_ACCEL)
244 return -EBADFD;
245
246 avail = snd_compr_calc_avail(stream, &ioctl_avail);
247 ioctl_avail.avail = avail;
248
249 switch (stream->runtime->state) {
250 case SNDRV_PCM_STATE_OPEN:
251 return -EBADFD;
252 case SNDRV_PCM_STATE_XRUN:
253 return -EPIPE;
254 default:
255 break;
256 }
257
258 if (copy_to_user((__u64 __user *)arg,
259 &ioctl_avail, sizeof(ioctl_avail)))
260 return -EFAULT;
261 return 0;
262}
263
264static int snd_compr_write_data(struct snd_compr_stream *stream,
265 const char __user *buf, size_t count)
266{
267 void *dstn;
268 size_t copy;
269 struct snd_compr_runtime *runtime = stream->runtime;
270	/* 64-bit modulus: app_pointer = total_bytes_available % buffer_size */
271 u64 app_pointer = div64_u64(runtime->total_bytes_available,
272 runtime->buffer_size);
273 app_pointer = runtime->total_bytes_available -
274 (app_pointer * runtime->buffer_size);
275
276 dstn = runtime->buffer + app_pointer;
277 pr_debug("copying %ld at %lld\n",
278 (unsigned long)count, app_pointer);
279 if (count < runtime->buffer_size - app_pointer) {
280 if (copy_from_user(dstn, buf, count))
281 return -EFAULT;
282 } else {
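		/* the write wraps past the end of the ring buffer: copy up to
		 * the buffer end first, then the remainder to the start
		 */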
283 copy = runtime->buffer_size - app_pointer;
284 if (copy_from_user(dstn, buf, copy))
285 return -EFAULT;
286 if (copy_from_user(runtime->buffer, buf + copy, count - copy))
287 return -EFAULT;
288 }
289 /* if DSP cares, let it know data has been written */
290 if (stream->ops->ack)
291 stream->ops->ack(stream, count);
292 return count;
293}
294
295static ssize_t snd_compr_write(struct file *f, const char __user *buf,
296 size_t count, loff_t *offset)
297{
298 struct snd_compr_file *data = f->private_data;
299 struct snd_compr_stream *stream;
300 size_t avail;
301 int retval;
302
303 if (snd_BUG_ON(!data))
304 return -EFAULT;
305
306 stream = &data->stream;
307 if (stream->direction == SND_COMPRESS_ACCEL)
308 return -EBADFD;
309 guard(mutex)(&stream->device->lock);
310	/* write is allowed when the stream is running or has been set up */
311 switch (stream->runtime->state) {
312 case SNDRV_PCM_STATE_SETUP:
313 case SNDRV_PCM_STATE_PREPARED:
314 case SNDRV_PCM_STATE_RUNNING:
315 break;
316 default:
317 return -EBADFD;
318 }
319
320 avail = snd_compr_get_avail(stream);
321 pr_debug("avail returned %ld\n", (unsigned long)avail);
322 /* calculate how much we can write to buffer */
323 if (avail > count)
324 avail = count;
325
326 if (stream->ops->copy) {
327		char __user *cbuf = (char __user *)buf;
328		retval = stream->ops->copy(stream, cbuf, avail);
329 } else {
330 retval = snd_compr_write_data(stream, buf, avail);
331 }
332 if (retval > 0)
333 stream->runtime->total_bytes_available += retval;
334
335	/* while initiating the stream, write is expected before the START
336	 * call, so move the state from SETUP to PREPARED here */
337 if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
338 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
339 pr_debug("stream prepared, Houston we are good to go\n");
340 }
341
342 return retval;
343}
344
345
346static ssize_t snd_compr_read(struct file *f, char __user *buf,
347 size_t count, loff_t *offset)
348{
349 struct snd_compr_file *data = f->private_data;
350 struct snd_compr_stream *stream;
351 size_t avail;
352 int retval;
353
354 if (snd_BUG_ON(!data))
355 return -EFAULT;
356
357 stream = &data->stream;
358 if (stream->direction == SND_COMPRESS_ACCEL)
359 return -EBADFD;
360 guard(mutex)(&stream->device->lock);
361
362	/* read is allowed when the stream is running, paused, draining or in
363	 * setup (setup is the state we transition to after stop, so the user
364	 * may still read data after a stop)
365 */
366 switch (stream->runtime->state) {
367 case SNDRV_PCM_STATE_OPEN:
368 case SNDRV_PCM_STATE_PREPARED:
369 case SNDRV_PCM_STATE_SUSPENDED:
370 case SNDRV_PCM_STATE_DISCONNECTED:
371 return -EBADFD;
372 case SNDRV_PCM_STATE_XRUN:
373 return -EPIPE;
374 }
375
376 avail = snd_compr_get_avail(stream);
377 pr_debug("avail returned %ld\n", (unsigned long)avail);
378 /* calculate how much we can read from buffer */
379 if (avail > count)
380 avail = count;
381
382 if (stream->ops->copy)
383 retval = stream->ops->copy(stream, buf, avail);
384 else
385 return -ENXIO;
386 if (retval > 0)
387 stream->runtime->total_bytes_transferred += retval;
388
389 return retval;
390}
391
392static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
393{
394 return -ENXIO;
395}
396
397static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
398{
399 if (stream->direction == SND_COMPRESS_PLAYBACK)
400 return EPOLLOUT | EPOLLWRNORM;
401 else
402 return EPOLLIN | EPOLLRDNORM;
403}
404
405static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
406{
407 struct snd_compr_file *data = f->private_data;
408 struct snd_compr_stream *stream;
409 struct snd_compr_runtime *runtime;
410 size_t avail;
411 __poll_t retval = 0;
412
413 if (snd_BUG_ON(!data))
414 return EPOLLERR;
415
416 stream = &data->stream;
417 runtime = stream->runtime;
418
419 guard(mutex)(&stream->device->lock);
420
421 switch (runtime->state) {
422 case SNDRV_PCM_STATE_OPEN:
423 case SNDRV_PCM_STATE_XRUN:
424 return snd_compr_get_poll(stream) | EPOLLERR;
425 default:
426 break;
427 }
428
429 poll_wait(f, &runtime->sleep, wait);
430
431#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
432 if (stream->direction == SND_COMPRESS_ACCEL) {
433 struct snd_compr_task_runtime *task;
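		/* for accel streams: writable means a free task slot exists,
		 * readable means the oldest task on the list has finished
		 */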
434 if (runtime->fragments > runtime->active_tasks)
435 retval |= EPOLLOUT | EPOLLWRNORM;
436 task = list_first_entry_or_null(&runtime->tasks,
437 struct snd_compr_task_runtime,
438 list);
439 if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
440 retval |= EPOLLIN | EPOLLRDNORM;
441 return retval;
442 }
443#endif
444
445 avail = snd_compr_get_avail(stream);
446 pr_debug("avail is %ld\n", (unsigned long)avail);
447 /* check if we have at least one fragment to fill */
448 switch (runtime->state) {
449 case SNDRV_PCM_STATE_DRAINING:
450		/* the stream was woken up after the drain completed;
451		 * draining is done, so move the stream back to SETUP
452 */
453 retval = snd_compr_get_poll(stream);
454 runtime->state = SNDRV_PCM_STATE_SETUP;
455 break;
456 case SNDRV_PCM_STATE_RUNNING:
457 case SNDRV_PCM_STATE_PREPARED:
458 case SNDRV_PCM_STATE_PAUSED:
459 if (avail >= runtime->fragment_size)
460 retval = snd_compr_get_poll(stream);
461 break;
462 default:
463 return snd_compr_get_poll(stream) | EPOLLERR;
464 }
465
466 return retval;
467}
468
469static int
470snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
471{
472 int retval;
473 struct snd_compr_caps caps;
474
475 if (!stream->ops->get_caps)
476 return -ENXIO;
477
478 memset(&caps, 0, sizeof(caps));
479 retval = stream->ops->get_caps(stream, &caps);
480 if (retval)
481 goto out;
482 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
483 retval = -EFAULT;
484out:
485 return retval;
486}
487
488#ifndef COMPR_CODEC_CAPS_OVERFLOW
489static int
490snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
491{
492 int retval;
493 struct snd_compr_codec_caps *caps __free(kfree) = NULL;
494
495 if (!stream->ops->get_codec_caps)
496 return -ENXIO;
497
498 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
499 if (!caps)
500 return -ENOMEM;
501
502 retval = stream->ops->get_codec_caps(stream, caps);
503 if (retval)
504 return retval;
505 if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
506 return -EFAULT;
507 return retval;
508}
509#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
510
511int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
512{
513 struct snd_dma_buffer *dmab;
514 int ret;
515
516 if (snd_BUG_ON(!(stream) || !(stream)->runtime))
517 return -EINVAL;
518 dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
519 if (!dmab)
520 return -ENOMEM;
521 dmab->dev = stream->dma_buffer.dev;
522 ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
523 if (ret < 0) {
524 kfree(dmab);
525 return ret;
526 }
527
528 snd_compr_set_runtime_buffer(stream, dmab);
529 stream->runtime->dma_bytes = size;
530 return 1;
531}
532EXPORT_SYMBOL(snd_compr_malloc_pages);
533
534int snd_compr_free_pages(struct snd_compr_stream *stream)
535{
536 struct snd_compr_runtime *runtime;
537
538 if (snd_BUG_ON(!(stream) || !(stream)->runtime))
539 return -EINVAL;
540 runtime = stream->runtime;
541 if (runtime->dma_area == NULL)
542 return 0;
543 if (runtime->dma_buffer_p != &stream->dma_buffer) {
544 /* It's a newly allocated buffer. Release it now. */
545 snd_dma_free_pages(runtime->dma_buffer_p);
546 kfree(runtime->dma_buffer_p);
547 }
548
549 snd_compr_set_runtime_buffer(stream, NULL);
550 return 0;
551}
552EXPORT_SYMBOL(snd_compr_free_pages);
553
554/* revisit this with snd_pcm_preallocate_xxx */
555static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
556 struct snd_compr_params *params)
557{
558 unsigned int buffer_size;
559 void *buffer = NULL;
560
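	/* accel streams use per-task dma-buf buffers, not a ring buffer */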
561 if (stream->direction == SND_COMPRESS_ACCEL)
562 goto params;
563
564 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
565 if (stream->ops->copy) {
566 buffer = NULL;
567		/* if the copy op is defined, the driver is required to fetch
568		 * the data itself, so the core does not allocate a buffer
569 */
570 } else {
571 if (stream->runtime->dma_buffer_p) {
572
573 if (buffer_size > stream->runtime->dma_buffer_p->bytes)
574 dev_err(stream->device->dev,
575 "Not enough DMA buffer");
576 else
577 buffer = stream->runtime->dma_buffer_p->area;
578
579 } else {
580 buffer = kmalloc(buffer_size, GFP_KERNEL);
581 }
582
583 if (!buffer)
584 return -ENOMEM;
585 }
586
587 stream->runtime->buffer = buffer;
588 stream->runtime->buffer_size = buffer_size;
589params:
590 stream->runtime->fragment_size = params->buffer.fragment_size;
591 stream->runtime->fragments = params->buffer.fragments;
592 return 0;
593}
594
595static int
596snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
597{
598 u32 max_fragments;
599
600	/* first let's check the buffer parameters */
601 if (params->buffer.fragment_size == 0)
602 return -EINVAL;
603
604 if (stream->direction == SND_COMPRESS_ACCEL)
605 max_fragments = 64; /* safe value */
606 else
607 max_fragments = U32_MAX / params->buffer.fragment_size;
608
609 if (params->buffer.fragments > max_fragments ||
610 params->buffer.fragments == 0)
611 return -EINVAL;
612
613 /* now codec parameters */
614 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
615 return -EINVAL;
616
617 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
618 return -EINVAL;
619
620 return 0;
621}
622
623static int
624snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
625{
626 struct snd_compr_params *params __free(kfree) = NULL;
627 int retval;
628
629 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
630 /*
631		 * parameter changes are only allowed once the stream has been
632		 * opened, or when setting up the next track of a gapless stream
633 */
634 params = memdup_user((void __user *)arg, sizeof(*params));
635 if (IS_ERR(params))
636 return PTR_ERR(params);
637
638 retval = snd_compress_check_input(stream, params);
639 if (retval)
640 return retval;
641
642 retval = snd_compr_allocate_buffer(stream, params);
643 if (retval)
644 return -ENOMEM;
645
646 retval = stream->ops->set_params(stream, params);
647 if (retval)
648 return retval;
649
650 if (stream->next_track)
651 return retval;
652
653 stream->metadata_set = false;
654 stream->next_track = false;
655
656 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
657 } else {
658 return -EPERM;
659 }
660 return retval;
661}
662
663static int
664snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
665{
666 struct snd_codec *params __free(kfree) = NULL;
667 int retval;
668
669 if (!stream->ops->get_params)
670 return -EBADFD;
671
672 params = kzalloc(sizeof(*params), GFP_KERNEL);
673 if (!params)
674 return -ENOMEM;
675 retval = stream->ops->get_params(stream, params);
676 if (retval)
677 return retval;
678 if (copy_to_user((char __user *)arg, params, sizeof(*params)))
679 return -EFAULT;
680 return retval;
681}
682
683static int
684snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
685{
686 struct snd_compr_metadata metadata;
687 int retval;
688
689 if (!stream->ops->get_metadata)
690 return -ENXIO;
691
692 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
693 return -EFAULT;
694
695 retval = stream->ops->get_metadata(stream, &metadata);
696 if (retval != 0)
697 return retval;
698
699 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
700 return -EFAULT;
701
702 return 0;
703}
704
705static int
706snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
707{
708 struct snd_compr_metadata metadata;
709 int retval;
710
711 if (!stream->ops->set_metadata)
712 return -ENXIO;
713 /*
714	 * we should allow metadata changes only once the stream has been
715	 * opened, not in other cases
716 */
717 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
718 return -EFAULT;
719
720 retval = stream->ops->set_metadata(stream, &metadata);
721 stream->metadata_set = true;
722
723 return retval;
724}
725
726static inline int
727snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
728{
729 struct snd_compr_tstamp tstamp = {0};
730 int ret;
731
732 ret = snd_compr_update_tstamp(stream, &tstamp);
733 if (ret == 0)
734 ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
735 &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
736 return ret;
737}
738
739static int snd_compr_pause(struct snd_compr_stream *stream)
740{
741 int retval;
742
743 switch (stream->runtime->state) {
744 case SNDRV_PCM_STATE_RUNNING:
745 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
746 if (!retval)
747 stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
748 break;
749 case SNDRV_PCM_STATE_DRAINING:
750 if (!stream->device->use_pause_in_draining)
751 return -EPERM;
752 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
753 if (!retval)
754 stream->pause_in_draining = true;
755 break;
756 default:
757 return -EPERM;
758 }
759 return retval;
760}
761
762static int snd_compr_resume(struct snd_compr_stream *stream)
763{
764 int retval;
765
766 switch (stream->runtime->state) {
767 case SNDRV_PCM_STATE_PAUSED:
768 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
769 if (!retval)
770 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
771 break;
772 case SNDRV_PCM_STATE_DRAINING:
773 if (!stream->pause_in_draining)
774 return -EPERM;
775 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
776 if (!retval)
777 stream->pause_in_draining = false;
778 break;
779 default:
780 return -EPERM;
781 }
782 return retval;
783}
784
785static int snd_compr_start(struct snd_compr_stream *stream)
786{
787 int retval;
788
789 switch (stream->runtime->state) {
790 case SNDRV_PCM_STATE_SETUP:
791 if (stream->direction != SND_COMPRESS_CAPTURE)
792 return -EPERM;
793 break;
794 case SNDRV_PCM_STATE_PREPARED:
795 break;
796 default:
797 return -EPERM;
798 }
799
800 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
801 if (!retval)
802 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
803 return retval;
804}
805
806static int snd_compr_stop(struct snd_compr_stream *stream)
807{
808 int retval;
809
810 switch (stream->runtime->state) {
811 case SNDRV_PCM_STATE_OPEN:
812 case SNDRV_PCM_STATE_SETUP:
813 case SNDRV_PCM_STATE_PREPARED:
814 return -EPERM;
815 default:
816 break;
817 }
818
819 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
820 if (!retval) {
821 /* clear flags and stop any drain wait */
822 stream->partial_drain = false;
823 stream->metadata_set = false;
824 stream->pause_in_draining = false;
825 snd_compr_drain_notify(stream);
826 stream->runtime->total_bytes_available = 0;
827 stream->runtime->total_bytes_transferred = 0;
828 }
829 return retval;
830}
831
832static void error_delayed_work(struct work_struct *work)
833{
834 struct snd_compr_stream *stream;
835
836 stream = container_of(work, struct snd_compr_stream, error_work.work);
837
838 guard(mutex)(&stream->device->lock);
839
840 stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
841 wake_up(&stream->runtime->sleep);
842}
843
844/**
845 * snd_compr_stop_error: Report a fatal error on a stream
846 * @stream: pointer to stream
847 * @state: state to transition the stream to
848 *
849 * Stop the stream and set its state.
850 *
851 * Should be called with compressed device lock held.
852 *
853 * Return: zero if successful, or a negative error code
854 */
855int snd_compr_stop_error(struct snd_compr_stream *stream,
856 snd_pcm_state_t state)
857{
858 if (stream->runtime->state == state)
859 return 0;
860
861 stream->runtime->state = state;
862
863 pr_debug("Changing state to: %d\n", state);
864
865 queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
866
867 return 0;
868}
869EXPORT_SYMBOL_GPL(snd_compr_stop_error);
870
871static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
872{
873 int ret;
874
875 /*
876	 * We are called with the lock held, so drop it while we wait for the
877	 * drain-complete notification from the driver.
878	 *
879	 * It is expected that the driver will notify drain completion and the
880	 * stream will then be moved to the SETUP state, even if draining
881	 * resulted in an error. The next track can be triggered after this.
882 */
883 stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
884 mutex_unlock(&stream->device->lock);
885
886	/* we wait here for the drain to complete; the wait can return because
887	 * of an interruption, a wait error, or success.
888	 * For the first two cases we do nothing special and simply return
889	 * after waking up
890 */
891
892 ret = wait_event_interruptible(stream->runtime->sleep,
893 (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
894 if (ret == -ERESTARTSYS)
895 pr_debug("wait aborted by a signal\n");
896 else if (ret)
897 pr_debug("wait for drain failed with %d\n", ret);
898
899
900 wake_up(&stream->runtime->sleep);
901 mutex_lock(&stream->device->lock);
902
903 return ret;
904}
905
906static int snd_compr_drain(struct snd_compr_stream *stream)
907{
908 int retval;
909
910 switch (stream->runtime->state) {
911 case SNDRV_PCM_STATE_OPEN:
912 case SNDRV_PCM_STATE_SETUP:
913 case SNDRV_PCM_STATE_PREPARED:
914 case SNDRV_PCM_STATE_PAUSED:
915 return -EPERM;
916 case SNDRV_PCM_STATE_XRUN:
917 return -EPIPE;
918 default:
919 break;
920 }
921
922 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
923 if (retval) {
924 pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
925 wake_up(&stream->runtime->sleep);
926 return retval;
927 }
928
929 return snd_compress_wait_for_drain(stream);
930}
931
932static int snd_compr_next_track(struct snd_compr_stream *stream)
933{
934 int retval;
935
936 /* only a running stream can transition to next track */
937 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
938 return -EPERM;
939
940 /* next track doesn't have any meaning for capture streams */
941 if (stream->direction == SND_COMPRESS_CAPTURE)
942 return -EPERM;
943
944	/* next track can only be signalled for a stream intended to be gapless,
945	 * and only after the current track's metadata has been set
946 */
947 if (stream->metadata_set == false)
948 return -EPERM;
949
950 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
951 if (retval != 0)
952 return retval;
953 stream->metadata_set = false;
954 stream->next_track = true;
955 return 0;
956}
957
958static int snd_compr_partial_drain(struct snd_compr_stream *stream)
959{
960 int retval;
961
962 switch (stream->runtime->state) {
963 case SNDRV_PCM_STATE_OPEN:
964 case SNDRV_PCM_STATE_SETUP:
965 case SNDRV_PCM_STATE_PREPARED:
966 case SNDRV_PCM_STATE_PAUSED:
967 return -EPERM;
968 case SNDRV_PCM_STATE_XRUN:
969 return -EPIPE;
970 default:
971 break;
972 }
973
974 /* partial drain doesn't have any meaning for capture streams */
975 if (stream->direction == SND_COMPRESS_CAPTURE)
976 return -EPERM;
977
978 /* stream can be drained only when next track has been signalled */
979 if (stream->next_track == false)
980 return -EPERM;
981
982 stream->partial_drain = true;
983 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
984 if (retval) {
985 pr_debug("Partial drain returned failure\n");
986 wake_up(&stream->runtime->sleep);
987 return retval;
988 }
989
990 stream->next_track = false;
991 return snd_compress_wait_for_drain(stream);
992}
993
994#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
995
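/*
 * Accel ("task") API usage sketch, illustrative only: the mmap length, poll
 * setup and error handling are assumptions, not taken from this file. A task
 * is created (which yields input/output dma-buf fds), filled, started, polled
 * for completion, queried for its result size and finally freed:
 *
 *	struct snd_compr_task task = { 0 };
 *	ioctl(fd, SNDRV_COMPRESS_TASK_CREATE, &task);
 *	void *in = mmap(NULL, len, PROT_WRITE, MAP_SHARED, task.input_fd, 0);
 *	// ... fill "in" with encoded input, then set task.input_size ...
 *	ioctl(fd, SNDRV_COMPRESS_TASK_START, &task);
 *	poll(&pfd, 1, -1);			// wait for EPOLLIN
 *	struct snd_compr_task_status status = { .seqno = task.seqno };
 *	ioctl(fd, SNDRV_COMPRESS_TASK_STATUS, &status);
 *	// read status.output_size bytes via the mmap'ed task.output_fd
 *	ioctl(fd, SNDRV_COMPRESS_TASK_FREE, &task.seqno);
 */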
996static struct snd_compr_task_runtime *
997snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
998{
999 struct snd_compr_task_runtime *task;
1000
1001 list_for_each_entry(task, &stream->runtime->tasks, list) {
1002 if (task->seqno == seqno)
1003 return task;
1004 }
1005 return NULL;
1006}
1007
1008static void snd_compr_task_free(struct snd_compr_task_runtime *task)
1009{
1010 if (task->output)
1011 dma_buf_put(task->output);
1012 if (task->input)
1013 dma_buf_put(task->input);
1014 kfree(task);
1015}
1016
1017static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1018{
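	/* a sequence number of 0 is reserved (it means "unset" / "all tasks"
	 * in the ioctl interface), so skip it on wraparound
	 */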
1019 u64 seqno = ++stream->runtime->task_seqno;
1020 if (seqno == 0)
1021 seqno = ++stream->runtime->task_seqno;
1022 return seqno;
1023}
1024
1025static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1026{
1027 struct snd_compr_task_runtime *task;
1028 int retval, fd_i, fd_o;
1029
1030 if (stream->runtime->total_tasks >= stream->runtime->fragments)
1031 return -EBUSY;
1032 if (utask->origin_seqno != 0 || utask->input_size != 0)
1033 return -EINVAL;
1034 task = kzalloc(sizeof(*task), GFP_KERNEL);
1035 if (task == NULL)
1036 return -ENOMEM;
1037 task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1038 task->input_size = utask->input_size;
1039 retval = stream->ops->task_create(stream, task);
1040 if (retval < 0)
1041 goto cleanup;
1042	/* similar functionality to dma_buf_fd(), but ensure that both
1043	   file descriptors are allocated before fd_install() */
1044 if (!task->input || !task->input->file || !task->output || !task->output->file) {
1045 retval = -EINVAL;
1046 goto cleanup;
1047 }
1048 fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
1049 if (fd_i < 0)
1050 goto cleanup;
1051 fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
1052 if (fd_o < 0) {
1053 put_unused_fd(fd_i);
1054 goto cleanup;
1055 }
1056 /* keep dmabuf reference until freed with task free ioctl */
1057 get_dma_buf(task->input);
1058 get_dma_buf(task->output);
1059 fd_install(fd_i, task->input->file);
1060 fd_install(fd_o, task->output->file);
1061 utask->input_fd = fd_i;
1062 utask->output_fd = fd_o;
1063 list_add_tail(&task->list, &stream->runtime->tasks);
1064 stream->runtime->total_tasks++;
1065 return 0;
1066cleanup:
1067 snd_compr_task_free(task);
1068 return retval;
1069}
1070
1071static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
1072{
1073 struct snd_compr_task *task __free(kfree) = NULL;
1074 int retval;
1075
1076 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1077 return -EPERM;
1078 task = memdup_user((void __user *)arg, sizeof(*task));
1079 if (IS_ERR(task))
1080 return PTR_ERR(task);
1081 retval = snd_compr_task_new(stream, task);
1082 if (retval >= 0)
1083 if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1084 retval = -EFAULT;
1085 return retval;
1086}
1087
1088static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
1089 struct snd_compr_task *utask)
1090{
1091 if (task == NULL)
1092 return -EINVAL;
1093 if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
1094 return -EBUSY;
1095 if (utask->input_size > task->input->size)
1096 return -EINVAL;
1097 task->flags = utask->flags;
1098 task->input_size = utask->input_size;
1099 task->state = SND_COMPRESS_TASK_STATE_IDLE;
1100 return 0;
1101}
1102
1103static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1104{
1105 struct snd_compr_task_runtime *task;
1106 int retval;
1107
1108 if (utask->origin_seqno > 0) {
1109 task = snd_compr_find_task(stream, utask->origin_seqno);
1110 retval = snd_compr_task_start_prepare(task, utask);
1111 if (retval < 0)
1112 return retval;
1113 task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1114 utask->origin_seqno = 0;
1115 list_move_tail(&task->list, &stream->runtime->tasks);
1116 } else {
1117 task = snd_compr_find_task(stream, utask->seqno);
1118 if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
1119 return -EBUSY;
1120 retval = snd_compr_task_start_prepare(task, utask);
1121 if (retval < 0)
1122 return retval;
1123 }
1124 retval = stream->ops->task_start(stream, task);
1125 if (retval >= 0) {
1126 task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
1127 stream->runtime->active_tasks++;
1128 }
1129 return retval;
1130}
1131
1132static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1133{
1134 struct snd_compr_task *task __free(kfree) = NULL;
1135 int retval;
1136
1137 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1138 return -EPERM;
1139 task = memdup_user((void __user *)arg, sizeof(*task));
1140 if (IS_ERR(task))
1141 return PTR_ERR(task);
1142 retval = snd_compr_task_start(stream, task);
1143 if (retval >= 0)
1144 if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1145 retval = -EFAULT;
1146 return retval;
1147}
1148
1149static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
1150 struct snd_compr_task_runtime *task)
1151{
1152 if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
1153 return;
1154 stream->ops->task_stop(stream, task);
1155 if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1156 stream->runtime->active_tasks--;
1157 list_move_tail(&task->list, &stream->runtime->tasks);
1158 task->state = SND_COMPRESS_TASK_STATE_IDLE;
1159}
1160
1161static void snd_compr_task_free_one(struct snd_compr_stream *stream,
1162 struct snd_compr_task_runtime *task)
1163{
1164 snd_compr_task_stop_one(stream, task);
1165 stream->ops->task_free(stream, task);
1166 list_del(&task->list);
1167 snd_compr_task_free(task);
1168 stream->runtime->total_tasks--;
1169}
1170
1171static void snd_compr_task_free_all(struct snd_compr_stream *stream)
1172{
1173 struct snd_compr_task_runtime *task, *temp;
1174
1175 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1176 snd_compr_task_free_one(stream, task);
1177}
1178
1179typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
1180 struct snd_compr_task_runtime *task);
1181
1182static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
1183 snd_compr_seq_func_t fcn)
1184{
1185 struct snd_compr_task_runtime *task, *temp;
1186 __u64 seqno;
1187 int retval;
1188
1189 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1190 return -EPERM;
1191 retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
1192 if (retval)
1193 return -EFAULT;
1194 retval = 0;
1195 if (seqno == 0) {
1196 list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1197 fcn(stream, task);
1198 } else {
1199 task = snd_compr_find_task(stream, seqno);
1200 if (task == NULL) {
1201 retval = -EINVAL;
1202 } else {
1203 fcn(stream, task);
1204 }
1205 }
1206 return retval;
1207}
1208
1209static int snd_compr_task_status(struct snd_compr_stream *stream,
1210 struct snd_compr_task_status *status)
1211{
1212 struct snd_compr_task_runtime *task;
1213
1214 task = snd_compr_find_task(stream, status->seqno);
1215 if (task == NULL)
1216 return -EINVAL;
1217 status->input_size = task->input_size;
1218 status->output_size = task->output_size;
1219 status->state = task->state;
1220 return 0;
1221}
1222
1223static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1224{
1225 struct snd_compr_task_status *status __free(kfree) = NULL;
1226 int retval;
1227
1228 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1229 return -EPERM;
1230 status = memdup_user((void __user *)arg, sizeof(*status));
1231 if (IS_ERR(status))
1232 return PTR_ERR(status);
1233 retval = snd_compr_task_status(stream, status);
1234 if (retval >= 0)
1235 if (copy_to_user((void __user *)arg, status, sizeof(*status)))
1236 retval = -EFAULT;
1237 return retval;
1238}
1239
1240/**
1241 * snd_compr_task_finished: Notify that the task was finished
1242 * @stream: pointer to stream
1243 * @task: runtime task structure
1244 *
1245 * Set the finished task state and notify waiters.
1246 */
1247void snd_compr_task_finished(struct snd_compr_stream *stream,
1248 struct snd_compr_task_runtime *task)
1249{
1250 guard(mutex)(&stream->device->lock);
1251 if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1252 stream->runtime->active_tasks--;
1253 task->state = SND_COMPRESS_TASK_STATE_FINISHED;
1254 wake_up(&stream->runtime->sleep);
1255}
1256EXPORT_SYMBOL_GPL(snd_compr_task_finished);
1257
1258MODULE_IMPORT_NS("DMA_BUF");
1259#endif /* CONFIG_SND_COMPRESS_ACCEL */
1260
1261static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1262{
1263 struct snd_compr_file *data = f->private_data;
1264 struct snd_compr_stream *stream;
1265
1266 if (snd_BUG_ON(!data))
1267 return -EFAULT;
1268
1269 stream = &data->stream;
1270
1271 guard(mutex)(&stream->device->lock);
1272 switch (_IOC_NR(cmd)) {
1273 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
1274 return put_user(SNDRV_COMPRESS_VERSION,
1275 (int __user *)arg) ? -EFAULT : 0;
1276 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
1277 return snd_compr_get_caps(stream, arg);
1278#ifndef COMPR_CODEC_CAPS_OVERFLOW
1279 case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
1280 return snd_compr_get_codec_caps(stream, arg);
1281#endif
1282 case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
1283 return snd_compr_set_params(stream, arg);
1284 case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
1285 return snd_compr_get_params(stream, arg);
1286 case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
1287 return snd_compr_set_metadata(stream, arg);
1288 case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
1289 return snd_compr_get_metadata(stream, arg);
1290 }
1291
1292 if (stream->direction == SND_COMPRESS_ACCEL) {
1293#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1294 switch (_IOC_NR(cmd)) {
1295 case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
1296 return snd_compr_task_create(stream, arg);
1297 case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
1298 return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
1299 case _IOC_NR(SNDRV_COMPRESS_TASK_START):
1300 return snd_compr_task_start_ioctl(stream, arg);
1301 case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
1302 return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
1303 case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
1304 return snd_compr_task_status_ioctl(stream, arg);
1305 }
1306#endif
1307 return -ENOTTY;
1308 }
1309
1310 switch (_IOC_NR(cmd)) {
1311 case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
1312 return snd_compr_tstamp(stream, arg);
1313 case _IOC_NR(SNDRV_COMPRESS_AVAIL):
1314 return snd_compr_ioctl_avail(stream, arg);
1315 case _IOC_NR(SNDRV_COMPRESS_PAUSE):
1316 return snd_compr_pause(stream);
1317 case _IOC_NR(SNDRV_COMPRESS_RESUME):
1318 return snd_compr_resume(stream);
1319 case _IOC_NR(SNDRV_COMPRESS_START):
1320 return snd_compr_start(stream);
1321 case _IOC_NR(SNDRV_COMPRESS_STOP):
1322 return snd_compr_stop(stream);
1323 case _IOC_NR(SNDRV_COMPRESS_DRAIN):
1324 return snd_compr_drain(stream);
1325 case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
1326 return snd_compr_partial_drain(stream);
1327 case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
1328 return snd_compr_next_track(stream);
1329 }
1330
1331 return -ENOTTY;
1332}
1333
1334/* support for 32-bit userspace on 64-bit platforms */
1335#ifdef CONFIG_COMPAT
1336static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1337 unsigned long arg)
1338{
1339 return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1340}
1341#endif
1342
1343static const struct file_operations snd_compr_file_ops = {
1344 .owner = THIS_MODULE,
1345 .open = snd_compr_open,
1346 .release = snd_compr_free,
1347 .write = snd_compr_write,
1348 .read = snd_compr_read,
1349 .unlocked_ioctl = snd_compr_ioctl,
1350#ifdef CONFIG_COMPAT
1351 .compat_ioctl = snd_compr_ioctl_compat,
1352#endif
1353 .mmap = snd_compr_mmap,
1354 .poll = snd_compr_poll,
1355};
1356
1357static int snd_compress_dev_register(struct snd_device *device)
1358{
1359 int ret;
1360 struct snd_compr *compr;
1361
1362 if (snd_BUG_ON(!device || !device->device_data))
1363 return -EBADFD;
1364 compr = device->device_data;
1365
1366 pr_debug("reg device %s, direction %d\n", compr->name,
1367 compr->direction);
1368 /* register compressed device */
1369 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1370 compr->card, compr->device,
1371 &snd_compr_file_ops, compr, compr->dev);
1372 if (ret < 0) {
1373 pr_err("snd_register_device failed %d\n", ret);
1374 return ret;
1375 }
1376 return ret;
1377
1378}
1379
1380static int snd_compress_dev_disconnect(struct snd_device *device)
1381{
1382 struct snd_compr *compr;
1383
1384 compr = device->device_data;
1385 snd_unregister_device(compr->dev);
1386 return 0;
1387}
1388
1389#ifdef CONFIG_SND_VERBOSE_PROCFS
1390static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1391 struct snd_info_buffer *buffer)
1392{
1393 struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1394
1395 snd_iprintf(buffer, "card: %d\n", compr->card->number);
1396 snd_iprintf(buffer, "device: %d\n", compr->device);
1397 snd_iprintf(buffer, "stream: %s\n",
1398 compr->direction == SND_COMPRESS_PLAYBACK
1399 ? "PLAYBACK" : "CAPTURE");
1400 snd_iprintf(buffer, "id: %s\n", compr->id);
1401}
1402
1403static int snd_compress_proc_init(struct snd_compr *compr)
1404{
1405 struct snd_info_entry *entry;
1406 char name[16];
1407
1408 sprintf(name, "compr%i", compr->device);
1409 entry = snd_info_create_card_entry(compr->card, name,
1410 compr->card->proc_root);
1411 if (!entry)
1412 return -ENOMEM;
1413 entry->mode = S_IFDIR | 0555;
1414 compr->proc_root = entry;
1415
1416 entry = snd_info_create_card_entry(compr->card, "info",
1417 compr->proc_root);
1418 if (entry)
1419 snd_info_set_text_ops(entry, compr,
1420 snd_compress_proc_info_read);
1421 compr->proc_info_entry = entry;
1422
1423 return 0;
1424}
1425
1426static void snd_compress_proc_done(struct snd_compr *compr)
1427{
1428 snd_info_free_entry(compr->proc_info_entry);
1429 compr->proc_info_entry = NULL;
1430 snd_info_free_entry(compr->proc_root);
1431 compr->proc_root = NULL;
1432}
1433
1434static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1435{
1436 strscpy(compr->id, id, sizeof(compr->id));
1437}
1438#else
1439static inline int snd_compress_proc_init(struct snd_compr *compr)
1440{
1441 return 0;
1442}
1443
1444static inline void snd_compress_proc_done(struct snd_compr *compr)
1445{
1446}
1447
1448static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1449{
1450}
1451#endif
1452
1453static int snd_compress_dev_free(struct snd_device *device)
1454{
1455 struct snd_compr *compr;
1456
1457 compr = device->device_data;
1458 snd_compress_proc_done(compr);
1459 put_device(compr->dev);
1460 return 0;
1461}
1462
1463/**
1464 * snd_compress_new: create new compress device
1465 * @card: sound card pointer
1466 * @device: device number
1467 * @dirn: device direction, should be of type enum snd_compr_direction
1468 * @id: ID string
1469 * @compr: compress device pointer
1470 *
1471 * Return: zero if successful, or a negative error code
1472 */
1473int snd_compress_new(struct snd_card *card, int device,
1474 int dirn, const char *id, struct snd_compr *compr)
1475{
1476 static const struct snd_device_ops ops = {
1477 .dev_free = snd_compress_dev_free,
1478 .dev_register = snd_compress_dev_register,
1479 .dev_disconnect = snd_compress_dev_disconnect,
1480 };
1481 int ret;
1482
1483#if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1484 if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
1485 return -EINVAL;
1486#endif
1487
1488 compr->card = card;
1489 compr->device = device;
1490 compr->direction = dirn;
1491 mutex_init(&compr->lock);
1492
1493 snd_compress_set_id(compr, id);
1494
1495 ret = snd_device_alloc(&compr->dev, card);
1496 if (ret)
1497 return ret;
1498 dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1499
1500 ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1501 if (ret == 0)
1502 snd_compress_proc_init(compr);
1503 else
1504 put_device(compr->dev);
1505
1506 return ret;
1507}
1508EXPORT_SYMBOL_GPL(snd_compress_new);
1509
1510MODULE_DESCRIPTION("ALSA Compressed offload framework");
1511MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1512MODULE_LICENSE("GPL v2");
1/*
2 * compress_core.c - compress offload core
3 *
4 * Copyright (C) 2011 Intel Corporation
5 * Authors: Vinod Koul <vinod.koul@linux.intel.com>
6 * Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
21 *
22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
23 *
24 */
25#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
26#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
27
28#include <linux/file.h>
29#include <linux/fs.h>
30#include <linux/list.h>
31#include <linux/math64.h>
32#include <linux/mm.h>
33#include <linux/mutex.h>
34#include <linux/poll.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <linux/types.h>
38#include <linux/uio.h>
39#include <linux/uaccess.h>
40#include <linux/module.h>
41#include <linux/compat.h>
42#include <sound/core.h>
43#include <sound/initval.h>
44#include <sound/info.h>
45#include <sound/compress_params.h>
46#include <sound/compress_offload.h>
47#include <sound/compress_driver.h>
48
49/* struct snd_compr_codec_caps overflows the ioctl bit size for some
50 * architectures, so we need to disable the relevant ioctls.
51 */
52#if _IOC_SIZEBITS < 14
53#define COMPR_CODEC_CAPS_OVERFLOW
54#endif
55
56/* TODO:
57 * - add substream support for multiple devices in case of
58 * SND_DYNAMIC_MINORS is not used
59 * - Multiple node representation
60 * driver should be able to register multiple nodes
61 */
62
63static DEFINE_MUTEX(device_mutex);
64
65struct snd_compr_file {
66 unsigned long caps;
67 struct snd_compr_stream stream;
68};
69
70/*
71 * a note on stream states used:
72 * we use following states in the compressed core
73 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
74 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
75 * calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
76 * state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
77 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
78 * playback only). User after setting up stream writes the data buffer
79 * before starting the stream.
80 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
81 * decoding/encoding and rendering/capturing data.
82 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
83 * by calling SNDRV_COMPRESS_DRAIN.
84 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
85 * SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
86 * SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
87 */
88static int snd_compr_open(struct inode *inode, struct file *f)
89{
90 struct snd_compr *compr;
91 struct snd_compr_file *data;
92 struct snd_compr_runtime *runtime;
93 enum snd_compr_direction dirn;
94 int maj = imajor(inode);
95 int ret;
96
97 if ((f->f_flags & O_ACCMODE) == O_WRONLY)
98 dirn = SND_COMPRESS_PLAYBACK;
99 else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
100 dirn = SND_COMPRESS_CAPTURE;
101 else
102 return -EINVAL;
103
104 if (maj == snd_major)
105 compr = snd_lookup_minor_data(iminor(inode),
106 SNDRV_DEVICE_TYPE_COMPRESS);
107 else
108 return -EBADFD;
109
110 if (compr == NULL) {
111 pr_err("no device data!!!\n");
112 return -ENODEV;
113 }
114
115 if (dirn != compr->direction) {
116 pr_err("this device doesn't support this direction\n");
117 snd_card_unref(compr->card);
118 return -EINVAL;
119 }
120
121 data = kzalloc(sizeof(*data), GFP_KERNEL);
122 if (!data) {
123 snd_card_unref(compr->card);
124 return -ENOMEM;
125 }
126 data->stream.ops = compr->ops;
127 data->stream.direction = dirn;
128 data->stream.private_data = compr->private_data;
129 data->stream.device = compr;
130 runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
131 if (!runtime) {
132 kfree(data);
133 snd_card_unref(compr->card);
134 return -ENOMEM;
135 }
136 runtime->state = SNDRV_PCM_STATE_OPEN;
137 init_waitqueue_head(&runtime->sleep);
138 data->stream.runtime = runtime;
139 f->private_data = (void *)data;
140 mutex_lock(&compr->lock);
141 ret = compr->ops->open(&data->stream);
142 mutex_unlock(&compr->lock);
143 if (ret) {
144 kfree(runtime);
145 kfree(data);
146 }
147 snd_card_unref(compr->card);
148 return ret;
149}
150
151static int snd_compr_free(struct inode *inode, struct file *f)
152{
153 struct snd_compr_file *data = f->private_data;
154 struct snd_compr_runtime *runtime = data->stream.runtime;
155
156 switch (runtime->state) {
157 case SNDRV_PCM_STATE_RUNNING:
158 case SNDRV_PCM_STATE_DRAINING:
159 case SNDRV_PCM_STATE_PAUSED:
160 data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
161 break;
162 default:
163 break;
164 }
165
166 data->stream.ops->free(&data->stream);
167 kfree(data->stream.runtime->buffer);
168 kfree(data->stream.runtime);
169 kfree(data);
170 return 0;
171}
172
173static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
174 struct snd_compr_tstamp *tstamp)
175{
176 if (!stream->ops->pointer)
177 return -ENOTSUPP;
178 stream->ops->pointer(stream, tstamp);
179 pr_debug("dsp consumed till %d total %d bytes\n",
180 tstamp->byte_offset, tstamp->copied_total);
181 if (stream->direction == SND_COMPRESS_PLAYBACK)
182 stream->runtime->total_bytes_transferred = tstamp->copied_total;
183 else
184 stream->runtime->total_bytes_available = tstamp->copied_total;
185 return 0;
186}
187
188static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
189 struct snd_compr_avail *avail)
190{
191 memset(avail, 0, sizeof(*avail));
192 snd_compr_update_tstamp(stream, &avail->tstamp);
193 /* Still need to return avail even if tstamp can't be filled in */
194
195 if (stream->runtime->total_bytes_available == 0 &&
196 stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
197 stream->direction == SND_COMPRESS_PLAYBACK) {
198 pr_debug("detected init and someone forgot to do a write\n");
199 return stream->runtime->buffer_size;
200 }
201 pr_debug("app wrote %lld, DSP consumed %lld\n",
202 stream->runtime->total_bytes_available,
203 stream->runtime->total_bytes_transferred);
204 if (stream->runtime->total_bytes_available ==
205 stream->runtime->total_bytes_transferred) {
206 if (stream->direction == SND_COMPRESS_PLAYBACK) {
207 pr_debug("both pointers are same, returning full avail\n");
208 return stream->runtime->buffer_size;
209 } else {
210 pr_debug("both pointers are same, returning no avail\n");
211 return 0;
212 }
213 }
214
215 avail->avail = stream->runtime->total_bytes_available -
216 stream->runtime->total_bytes_transferred;
217 if (stream->direction == SND_COMPRESS_PLAYBACK)
218 avail->avail = stream->runtime->buffer_size - avail->avail;
219
220 pr_debug("ret avail as %lld\n", avail->avail);
221 return avail->avail;
222}
223
224static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
225{
226 struct snd_compr_avail avail;
227
228 return snd_compr_calc_avail(stream, &avail);
229}
230
231static int
232snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
233{
234 struct snd_compr_avail ioctl_avail;
235 size_t avail;
236
237 avail = snd_compr_calc_avail(stream, &ioctl_avail);
238 ioctl_avail.avail = avail;
239
240 if (copy_to_user((__u64 __user *)arg,
241 &ioctl_avail, sizeof(ioctl_avail)))
242 return -EFAULT;
243 return 0;
244}
245
246static int snd_compr_write_data(struct snd_compr_stream *stream,
247 const char __user *buf, size_t count)
248{
249 void *dstn;
250 size_t copy;
251 struct snd_compr_runtime *runtime = stream->runtime;
252 /* 64-bit Modulus */
253 u64 app_pointer = div64_u64(runtime->total_bytes_available,
254 runtime->buffer_size);
255 app_pointer = runtime->total_bytes_available -
256 (app_pointer * runtime->buffer_size);
257
258 dstn = runtime->buffer + app_pointer;
259 pr_debug("copying %ld at %lld\n",
260 (unsigned long)count, app_pointer);
261 if (count < runtime->buffer_size - app_pointer) {
262 if (copy_from_user(dstn, buf, count))
263 return -EFAULT;
264 } else {
265 copy = runtime->buffer_size - app_pointer;
266 if (copy_from_user(dstn, buf, copy))
267 return -EFAULT;
268 if (copy_from_user(runtime->buffer, buf + copy, count - copy))
269 return -EFAULT;
270 }
271 /* if DSP cares, let it know data has been written */
272 if (stream->ops->ack)
273 stream->ops->ack(stream, count);
274 return count;
275}
276
277static ssize_t snd_compr_write(struct file *f, const char __user *buf,
278 size_t count, loff_t *offset)
279{
280 struct snd_compr_file *data = f->private_data;
281 struct snd_compr_stream *stream;
282 size_t avail;
283 int retval;
284
285 if (snd_BUG_ON(!data))
286 return -EFAULT;
287
288 stream = &data->stream;
289 mutex_lock(&stream->device->lock);
290 /* write is allowed when stream is running or has been steup */
291 if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
292 stream->runtime->state != SNDRV_PCM_STATE_PREPARED &&
293 stream->runtime->state != SNDRV_PCM_STATE_RUNNING) {
294 mutex_unlock(&stream->device->lock);
295 return -EBADFD;
296 }
297
298 avail = snd_compr_get_avail(stream);
299 pr_debug("avail returned %ld\n", (unsigned long)avail);
300 /* calculate how much we can write to buffer */
301 if (avail > count)
302 avail = count;
303
304 if (stream->ops->copy) {
305 char __user* cbuf = (char __user*)buf;
306 retval = stream->ops->copy(stream, cbuf, avail);
307 } else {
308 retval = snd_compr_write_data(stream, buf, avail);
309 }
310 if (retval > 0)
311 stream->runtime->total_bytes_available += retval;
312
313 /* while initiating the stream, write should be called before START
314 * call, so in setup move state */
315 if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
316 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
317 pr_debug("stream prepared, Houston we are good to go\n");
318 }
319
320 mutex_unlock(&stream->device->lock);
321 return retval;
322}
323
324
325static ssize_t snd_compr_read(struct file *f, char __user *buf,
326 size_t count, loff_t *offset)
327{
328 struct snd_compr_file *data = f->private_data;
329 struct snd_compr_stream *stream;
330 size_t avail;
331 int retval;
332
333 if (snd_BUG_ON(!data))
334 return -EFAULT;
335
336 stream = &data->stream;
337 mutex_lock(&stream->device->lock);
338
339 /* read is allowed when stream is running, paused, draining and setup
340 * (yes setup is state which we transition to after stop, so if user
341 * wants to read data after stop we allow that)
342 */
343 switch (stream->runtime->state) {
344 case SNDRV_PCM_STATE_OPEN:
345 case SNDRV_PCM_STATE_PREPARED:
346 case SNDRV_PCM_STATE_XRUN:
347 case SNDRV_PCM_STATE_SUSPENDED:
348 case SNDRV_PCM_STATE_DISCONNECTED:
349 retval = -EBADFD;
350 goto out;
351 }
352
353 avail = snd_compr_get_avail(stream);
354 pr_debug("avail returned %ld\n", (unsigned long)avail);
355 /* calculate how much we can read from buffer */
356 if (avail > count)
357 avail = count;
358
359 if (stream->ops->copy) {
360 retval = stream->ops->copy(stream, buf, avail);
361 } else {
362 retval = -ENXIO;
363 goto out;
364 }
365 if (retval > 0)
366 stream->runtime->total_bytes_transferred += retval;
367
368out:
369 mutex_unlock(&stream->device->lock);
370 return retval;
371}
372
373static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
374{
375 return -ENXIO;
376}
377
378static inline int snd_compr_get_poll(struct snd_compr_stream *stream)
379{
380 if (stream->direction == SND_COMPRESS_PLAYBACK)
381 return POLLOUT | POLLWRNORM;
382 else
383 return POLLIN | POLLRDNORM;
384}
385
386static unsigned int snd_compr_poll(struct file *f, poll_table *wait)
387{
388 struct snd_compr_file *data = f->private_data;
389 struct snd_compr_stream *stream;
390 size_t avail;
391 int retval = 0;
392
393 if (snd_BUG_ON(!data))
394 return -EFAULT;
395 stream = &data->stream;
396 if (snd_BUG_ON(!stream))
397 return -EFAULT;
398
399 mutex_lock(&stream->device->lock);
400 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
401 retval = -EBADFD;
402 goto out;
403 }
404 poll_wait(f, &stream->runtime->sleep, wait);
405
406 avail = snd_compr_get_avail(stream);
407 pr_debug("avail is %ld\n", (unsigned long)avail);
408 /* check if we have at least one fragment to fill */
409 switch (stream->runtime->state) {
410 case SNDRV_PCM_STATE_DRAINING:
		/* the stream was woken up because the drain completed, so
		 * move it back to the SETUP state
		 */
414 retval = snd_compr_get_poll(stream);
415 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
416 break;
417 case SNDRV_PCM_STATE_RUNNING:
418 case SNDRV_PCM_STATE_PREPARED:
419 case SNDRV_PCM_STATE_PAUSED:
420 if (avail >= stream->runtime->fragment_size)
421 retval = snd_compr_get_poll(stream);
422 break;
423 default:
424 if (stream->direction == SND_COMPRESS_PLAYBACK)
425 retval = POLLOUT | POLLWRNORM | POLLERR;
426 else
427 retval = POLLIN | POLLRDNORM | POLLERR;
428 break;
429 }
430out:
431 mutex_unlock(&stream->device->lock);
432 return retval;
433}
434
435static int
436snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
437{
438 int retval;
439 struct snd_compr_caps caps;
440
441 if (!stream->ops->get_caps)
442 return -ENXIO;
443
444 memset(&caps, 0, sizeof(caps));
445 retval = stream->ops->get_caps(stream, &caps);
446 if (retval)
447 goto out;
448 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
449 retval = -EFAULT;
450out:
451 return retval;
452}
453
454#ifndef COMPR_CODEC_CAPS_OVERFLOW
455static int
456snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
457{
458 int retval;
459 struct snd_compr_codec_caps *caps;
460
461 if (!stream->ops->get_codec_caps)
462 return -ENXIO;
463
464 caps = kzalloc(sizeof(*caps), GFP_KERNEL);
465 if (!caps)
466 return -ENOMEM;
467
468 retval = stream->ops->get_codec_caps(stream, caps);
469 if (retval)
470 goto out;
471 if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
472 retval = -EFAULT;
473
474out:
475 kfree(caps);
476 return retval;
477}
478#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
479
480/* revisit this with snd_pcm_preallocate_xxx */
481static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
482 struct snd_compr_params *params)
483{
484 unsigned int buffer_size;
485 void *buffer;
486
487 buffer_size = params->buffer.fragment_size * params->buffer.fragments;
488 if (stream->ops->copy) {
489 buffer = NULL;
		/* if copy is defined the driver copies the data itself, so
		 * the core does not allocate an intermediate buffer
		 */
493 } else {
494 buffer = kmalloc(buffer_size, GFP_KERNEL);
495 if (!buffer)
496 return -ENOMEM;
497 }
498 stream->runtime->fragment_size = params->buffer.fragment_size;
499 stream->runtime->fragments = params->buffer.fragments;
500 stream->runtime->buffer = buffer;
501 stream->runtime->buffer_size = buffer_size;
502 return 0;
503}
504
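/*
 * Validate user-supplied parameters before they reach the driver: the total
 * buffer (fragment_size * fragments) must not overflow an int, hence the
 * INT_MAX division (e.g. 4 fragments of 32 KiB give a 128 KiB buffer), the
 * codec id must be a known one and the channel counts must be non-zero.
 */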
505static int snd_compress_check_input(struct snd_compr_params *params)
506{
	/* first check the buffer parameters */
508 if (params->buffer.fragment_size == 0 ||
509 params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
510 return -EINVAL;
511
512 /* now codec parameters */
513 if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
514 return -EINVAL;
515
516 if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
517 return -EINVAL;
518
519 return 0;
520}
521
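/*
 * SNDRV_COMPRESS_SET_PARAMS: only legal in the OPEN state. The parameters
 * are copied from user space, validated, the fragment buffer is allocated
 * and the driver's set_params() is called. On success a playback stream
 * moves to SETUP (it still needs data before START), while a capture stream
 * goes straight to PREPARED.
 */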
522static int
523snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
524{
525 struct snd_compr_params *params;
526 int retval;
527
528 if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
		/*
		 * parameters can only be set while the stream is still in
		 * the OPEN state, i.e. once right after opening
		 */
533 params = kmalloc(sizeof(*params), GFP_KERNEL);
534 if (!params)
535 return -ENOMEM;
536 if (copy_from_user(params, (void __user *)arg, sizeof(*params))) {
537 retval = -EFAULT;
538 goto out;
539 }
540
541 retval = snd_compress_check_input(params);
542 if (retval)
543 goto out;
544
545 retval = snd_compr_allocate_buffer(stream, params);
546 if (retval) {
547 retval = -ENOMEM;
548 goto out;
549 }
550
551 retval = stream->ops->set_params(stream, params);
552 if (retval)
553 goto out;
554
555 stream->metadata_set = false;
556 stream->next_track = false;
557
558 if (stream->direction == SND_COMPRESS_PLAYBACK)
559 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
560 else
561 stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
562 } else {
563 return -EPERM;
564 }
565out:
566 kfree(params);
567 return retval;
568}
569
570static int
571snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
572{
573 struct snd_codec *params;
574 int retval;
575
576 if (!stream->ops->get_params)
577 return -EBADFD;
578
579 params = kzalloc(sizeof(*params), GFP_KERNEL);
580 if (!params)
581 return -ENOMEM;
582 retval = stream->ops->get_params(stream, params);
583 if (retval)
584 goto out;
585 if (copy_to_user((char __user *)arg, params, sizeof(*params)))
586 retval = -EFAULT;
587
588out:
589 kfree(params);
590 return retval;
591}
592
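/*
 * Metadata get/set pass a struct snd_compr_metadata (a key plus values)
 * straight through to the driver; the core only records that metadata has
 * been set, which later gates SNDRV_COMPRESS_NEXT_TRACK.
 */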
593static int
594snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
595{
596 struct snd_compr_metadata metadata;
597 int retval;
598
599 if (!stream->ops->get_metadata)
600 return -ENXIO;
601
602 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
603 return -EFAULT;
604
605 retval = stream->ops->get_metadata(stream, &metadata);
606 if (retval != 0)
607 return retval;
608
609 if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
610 return -EFAULT;
611
612 return 0;
613}
614
615static int
616snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
617{
618 struct snd_compr_metadata metadata;
619 int retval;
620
621 if (!stream->ops->set_metadata)
622 return -ENXIO;
	/*
	 * unlike SET_PARAMS there is no state restriction here; metadata
	 * can be updated at any time after open, e.g. per-track gapless
	 * data before NEXT_TRACK
	 */
627 if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
628 return -EFAULT;
629
630 retval = stream->ops->set_metadata(stream, &metadata);
631 stream->metadata_set = true;
632
633 return retval;
634}
635
636static inline int
637snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
638{
639 struct snd_compr_tstamp tstamp = {0};
640 int ret;
641
642 ret = snd_compr_update_tstamp(stream, &tstamp);
643 if (ret == 0)
644 ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
645 &tstamp, sizeof(tstamp)) ? -EFAULT : 0;
646 return ret;
647}
648
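/*
 * Trigger helpers: each one checks the current state, forwards the matching
 * trigger command to the driver and, on success, updates the core state
 * machine. STOP additionally wakes any drain waiter and resets the byte
 * counters.
 */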
649static int snd_compr_pause(struct snd_compr_stream *stream)
650{
651 int retval;
652
653 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
654 return -EPERM;
655 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
656 if (!retval)
657 stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
658 return retval;
659}
660
661static int snd_compr_resume(struct snd_compr_stream *stream)
662{
663 int retval;
664
665 if (stream->runtime->state != SNDRV_PCM_STATE_PAUSED)
666 return -EPERM;
667 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
668 if (!retval)
669 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
670 return retval;
671}
672
673static int snd_compr_start(struct snd_compr_stream *stream)
674{
675 int retval;
676
677 if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
678 return -EPERM;
679 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
680 if (!retval)
681 stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
682 return retval;
683}
684
685static int snd_compr_stop(struct snd_compr_stream *stream)
686{
687 int retval;
688
689 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
690 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
691 return -EPERM;
692 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
693 if (!retval) {
694 snd_compr_drain_notify(stream);
695 stream->runtime->total_bytes_available = 0;
696 stream->runtime->total_bytes_transferred = 0;
697 }
698 return retval;
699}
700
701static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
702{
703 int ret;
704
705 /*
706 * We are called with lock held. So drop the lock while we wait for
707 * drain complete notification from the driver
708 *
709 * It is expected that driver will notify the drain completion and then
710 * stream will be moved to SETUP state, even if draining resulted in an
711 * error. We can trigger next track after this.
712 */
713 stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
714 mutex_unlock(&stream->device->lock);
715
	/* wait here for the drain to complete; the wait may end because the
	 * drain finished, because a signal interrupted it, or because the
	 * wait itself failed. In the last two cases we only log the reason
	 * and return after waking up
	 */
721
722 ret = wait_event_interruptible(stream->runtime->sleep,
723 (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
724 if (ret == -ERESTARTSYS)
		pr_debug("wait aborted by a signal\n");
726 else if (ret)
727 pr_debug("wait for drain failed with %d\n", ret);
728
730 wake_up(&stream->runtime->sleep);
731 mutex_lock(&stream->device->lock);
732
733 return ret;
734}
735
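/*
 * SNDRV_COMPRESS_DRAIN: ask the driver to render everything that has been
 * queued and then block (interruptibly, with the device lock dropped) until
 * the driver reports completion via snd_compr_drain_notify().
 */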
736static int snd_compr_drain(struct snd_compr_stream *stream)
737{
738 int retval;
739
740 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
741 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
742 return -EPERM;
743
744 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
745 if (retval) {
746 pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
747 wake_up(&stream->runtime->sleep);
748 return retval;
749 }
750
751 return snd_compress_wait_for_drain(stream);
752}
753
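/*
 * SNDRV_COMPRESS_NEXT_TRACK: mark the boundary between two tracks of a
 * gapless stream. Only valid while RUNNING and after fresh metadata for the
 * upcoming track has been supplied.
 */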
754static int snd_compr_next_track(struct snd_compr_stream *stream)
755{
756 int retval;
757
758 /* only a running stream can transition to next track */
759 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
760 return -EPERM;
761
762 /* you can signal next track if this is intended to be a gapless stream
763 * and current track metadata is set
764 */
765 if (stream->metadata_set == false)
766 return -EPERM;
767
768 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
769 if (retval != 0)
770 return retval;
771 stream->metadata_set = false;
772 stream->next_track = true;
773 return 0;
774}
775
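/*
 * SNDRV_COMPRESS_PARTIAL_DRAIN: drain only up to the previously signalled
 * track boundary so the next track of a gapless stream can start without
 * closing the stream.
 */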
776static int snd_compr_partial_drain(struct snd_compr_stream *stream)
777{
778 int retval;
779 if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
780 stream->runtime->state == SNDRV_PCM_STATE_SETUP)
781 return -EPERM;
782 /* stream can be drained only when next track has been signalled */
783 if (stream->next_track == false)
784 return -EPERM;
785
786 retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
787 if (retval) {
788 pr_debug("Partial drain returned failure\n");
789 wake_up(&stream->runtime->sleep);
790 return retval;
791 }
792
793 stream->next_track = false;
794 return snd_compress_wait_for_drain(stream);
795}
796
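/*
 * Dispatch the compress offload ioctls. An illustrative playback sequence
 * from user space, assuming an already opened /dev/snd/comprCxDy:
 *
 *	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	OPEN -> SETUP
 *	write(fd, buf, bytes);				SETUP -> PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);		PREPARED -> RUNNING
 *	... write()/poll() while RUNNING ...
 *	ioctl(fd, SNDRV_COMPRESS_DRAIN);		or STOP to abort
 */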
797static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
798{
799 struct snd_compr_file *data = f->private_data;
800 struct snd_compr_stream *stream;
801 int retval = -ENOTTY;
802
803 if (snd_BUG_ON(!data))
804 return -EFAULT;
805 stream = &data->stream;
806 if (snd_BUG_ON(!stream))
807 return -EFAULT;
808 mutex_lock(&stream->device->lock);
809 switch (_IOC_NR(cmd)) {
810 case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
811 retval = put_user(SNDRV_COMPRESS_VERSION,
812 (int __user *)arg) ? -EFAULT : 0;
813 break;
814 case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
815 retval = snd_compr_get_caps(stream, arg);
816 break;
817#ifndef COMPR_CODEC_CAPS_OVERFLOW
818 case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
819 retval = snd_compr_get_codec_caps(stream, arg);
820 break;
821#endif
822 case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
823 retval = snd_compr_set_params(stream, arg);
824 break;
825 case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
826 retval = snd_compr_get_params(stream, arg);
827 break;
828 case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
829 retval = snd_compr_set_metadata(stream, arg);
830 break;
831 case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
832 retval = snd_compr_get_metadata(stream, arg);
833 break;
834 case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
835 retval = snd_compr_tstamp(stream, arg);
836 break;
837 case _IOC_NR(SNDRV_COMPRESS_AVAIL):
838 retval = snd_compr_ioctl_avail(stream, arg);
839 break;
840 case _IOC_NR(SNDRV_COMPRESS_PAUSE):
841 retval = snd_compr_pause(stream);
842 break;
843 case _IOC_NR(SNDRV_COMPRESS_RESUME):
844 retval = snd_compr_resume(stream);
845 break;
846 case _IOC_NR(SNDRV_COMPRESS_START):
847 retval = snd_compr_start(stream);
848 break;
849 case _IOC_NR(SNDRV_COMPRESS_STOP):
850 retval = snd_compr_stop(stream);
851 break;
852 case _IOC_NR(SNDRV_COMPRESS_DRAIN):
853 retval = snd_compr_drain(stream);
854 break;
855 case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
856 retval = snd_compr_partial_drain(stream);
857 break;
858 case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
859 retval = snd_compr_next_track(stream);
860 break;
862 }
863 mutex_unlock(&stream->device->lock);
864 return retval;
865}
866
867/* support of 32bit userspace on 64bit platforms */
868#ifdef CONFIG_COMPAT
869static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
870 unsigned long arg)
871{
872 return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
873}
874#endif
875
876static const struct file_operations snd_compr_file_ops = {
877 .owner = THIS_MODULE,
878 .open = snd_compr_open,
879 .release = snd_compr_free,
880 .write = snd_compr_write,
881 .read = snd_compr_read,
882 .unlocked_ioctl = snd_compr_ioctl,
883#ifdef CONFIG_COMPAT
884 .compat_ioctl = snd_compr_ioctl_compat,
885#endif
886 .mmap = snd_compr_mmap,
887 .poll = snd_compr_poll,
888};
889
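/*
 * Called when the owning card is registered: expose the stream as an ALSA
 * character device (/dev/snd/comprCxDy) backed by snd_compr_file_ops.
 */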
890static int snd_compress_dev_register(struct snd_device *device)
891{
	int ret;
	struct snd_compr *compr;

	if (snd_BUG_ON(!device || !device->device_data))
		return -EBADFD;
	compr = device->device_data;

	pr_debug("reg device %s, direction %d\n", compr->name,
		 compr->direction);
902 /* register compressed device */
903 ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
904 compr->card, compr->device,
905 &snd_compr_file_ops, compr, &compr->dev);
906 if (ret < 0) {
		pr_err("snd_register_device failed %d\n", ret);
908 return ret;
909 }
910 return ret;
912}
913
914static int snd_compress_dev_disconnect(struct snd_device *device)
915{
916 struct snd_compr *compr;
917
918 compr = device->device_data;
919 snd_unregister_device(&compr->dev);
920 return 0;
921}
922
923#ifdef CONFIG_SND_VERBOSE_PROCFS
924static void snd_compress_proc_info_read(struct snd_info_entry *entry,
925 struct snd_info_buffer *buffer)
926{
927 struct snd_compr *compr = (struct snd_compr *)entry->private_data;
928
929 snd_iprintf(buffer, "card: %d\n", compr->card->number);
930 snd_iprintf(buffer, "device: %d\n", compr->device);
931 snd_iprintf(buffer, "stream: %s\n",
932 compr->direction == SND_COMPRESS_PLAYBACK
933 ? "PLAYBACK" : "CAPTURE");
934 snd_iprintf(buffer, "id: %s\n", compr->id);
935}
936
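/*
 * Create the /proc/asound/cardX/comprN directory with an "info" entry that
 * reports the card, device, direction and id of the compress device.
 */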
937static int snd_compress_proc_init(struct snd_compr *compr)
938{
939 struct snd_info_entry *entry;
940 char name[16];
941
942 sprintf(name, "compr%i", compr->device);
943 entry = snd_info_create_card_entry(compr->card, name,
944 compr->card->proc_root);
945 if (!entry)
946 return -ENOMEM;
947 entry->mode = S_IFDIR | S_IRUGO | S_IXUGO;
948 if (snd_info_register(entry) < 0) {
949 snd_info_free_entry(entry);
950 return -ENOMEM;
951 }
952 compr->proc_root = entry;
953
954 entry = snd_info_create_card_entry(compr->card, "info",
955 compr->proc_root);
956 if (entry) {
957 snd_info_set_text_ops(entry, compr,
958 snd_compress_proc_info_read);
959 if (snd_info_register(entry) < 0) {
960 snd_info_free_entry(entry);
961 entry = NULL;
962 }
963 }
964 compr->proc_info_entry = entry;
965
966 return 0;
967}
968
969static void snd_compress_proc_done(struct snd_compr *compr)
970{
971 snd_info_free_entry(compr->proc_info_entry);
972 compr->proc_info_entry = NULL;
973 snd_info_free_entry(compr->proc_root);
974 compr->proc_root = NULL;
975}
976
977static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
978{
979 strlcpy(compr->id, id, sizeof(compr->id));
980}
981#else
982static inline int snd_compress_proc_init(struct snd_compr *compr)
983{
984 return 0;
985}
986
987static inline void snd_compress_proc_done(struct snd_compr *compr)
988{
989}
990
991static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
992{
993}
994#endif
995
996static int snd_compress_dev_free(struct snd_device *device)
997{
998 struct snd_compr *compr;
999
1000 compr = device->device_data;
1001 snd_compress_proc_done(compr);
1002 put_device(&compr->dev);
1003 return 0;
1004}
1005
/**
 * snd_compress_new - create new compress device
 * @card: sound card pointer
 * @device: device number
 * @dirn: device direction, should be of type enum snd_compr_direction
 * @id: ID string for the device
 * @compr: compress device pointer
 */
1013int snd_compress_new(struct snd_card *card, int device,
1014 int dirn, const char *id, struct snd_compr *compr)
1015{
1016 static struct snd_device_ops ops = {
1017 .dev_free = snd_compress_dev_free,
1018 .dev_register = snd_compress_dev_register,
1019 .dev_disconnect = snd_compress_dev_disconnect,
1020 };
1021 int ret;
1022
1023 compr->card = card;
1024 compr->device = device;
1025 compr->direction = dirn;
1026
1027 snd_compress_set_id(compr, id);
1028
1029 snd_device_initialize(&compr->dev, card);
1030 dev_set_name(&compr->dev, "comprC%iD%i", card->number, device);
1031
1032 ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1033 if (ret == 0)
1034 snd_compress_proc_init(compr);
1035
1036 return ret;
1037}
1038EXPORT_SYMBOL_GPL(snd_compress_new);
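
/*
 * Illustrative use of snd_compress_new() (a minimal sketch, not taken from
 * a real driver): allocate a struct snd_compr, point it at the driver's
 * (hypothetical) ops table and attach it to an existing card:
 *
 *	compr->ops = &my_compr_ops;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, "my-compr",
 *			       compr);
 *
 * The device node itself is created later, when the card is registered.
 */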
1039
1040static int snd_compress_add_device(struct snd_compr *device)
1041{
1042 int ret;
1043
1044 if (!device->card)
1045 return -EINVAL;
1046
1047 /* register the card */
1048 ret = snd_card_register(device->card);
1049 if (ret)
1050 goto out;
1051 return 0;
1052
1053out:
1054 pr_err("failed with %d\n", ret);
1055 return ret;
1057}
1058
1059static int snd_compress_remove_device(struct snd_compr *device)
1060{
1061 return snd_card_free(device->card);
1062}
1063
1064/**
1065 * snd_compress_register - register compressed device
1066 *
1067 * @device: compressed device to register
1068 */
1069int snd_compress_register(struct snd_compr *device)
1070{
1071 int retval;
1072
1073 if (device->name == NULL || device->ops == NULL)
1074 return -EINVAL;
1075
1076 pr_debug("Registering compressed device %s\n", device->name);
1077 if (snd_BUG_ON(!device->ops->open))
1078 return -EINVAL;
1079 if (snd_BUG_ON(!device->ops->free))
1080 return -EINVAL;
1081 if (snd_BUG_ON(!device->ops->set_params))
1082 return -EINVAL;
1083 if (snd_BUG_ON(!device->ops->trigger))
1084 return -EINVAL;
1085
1086 mutex_init(&device->lock);
1087
1088 /* register a compressed card */
1089 mutex_lock(&device_mutex);
1090 retval = snd_compress_add_device(device);
1091 mutex_unlock(&device_mutex);
1092 return retval;
1093}
1094EXPORT_SYMBOL_GPL(snd_compress_register);
1095
1096int snd_compress_deregister(struct snd_compr *device)
1097{
1098 pr_debug("Removing compressed device %s\n", device->name);
1099 mutex_lock(&device_mutex);
1100 snd_compress_remove_device(device);
1101 mutex_unlock(&device_mutex);
1102 return 0;
1103}
1104EXPORT_SYMBOL_GPL(snd_compress_deregister);
1105
1106static int __init snd_compress_init(void)
1107{
1108 return 0;
1109}
1110
1111static void __exit snd_compress_exit(void)
1112{
1113}
1114
1115module_init(snd_compress_init);
1116module_exit(snd_compress_exit);
1117
1118MODULE_DESCRIPTION("ALSA Compressed offload framework");
1119MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1120MODULE_LICENSE("GPL v2");