v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  compress_core.c - compress offload core
   4 *
   5 *  Copyright (C) 2011 Intel Corporation
   6 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
   7 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
   8 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   9 *
  10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11 */
  12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
  14
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/list.h>
  18#include <linux/math64.h>
  19#include <linux/mm.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/slab.h>
  23#include <linux/sched.h>
  24#include <linux/types.h>
  25#include <linux/uio.h>
  26#include <linux/uaccess.h>
  27#include <linux/dma-buf.h>
  28#include <linux/module.h>
  29#include <linux/compat.h>
  30#include <sound/core.h>
  31#include <sound/initval.h>
  32#include <sound/info.h>
  33#include <sound/compress_params.h>
  34#include <sound/compress_offload.h>
  35#include <sound/compress_driver.h>
  36
  37/* struct snd_compr_codec_caps overflows the ioctl bit size for some
  38 * architectures, so we need to disable the relevant ioctls.
  39 */
  40#if _IOC_SIZEBITS < 14
  41#define COMPR_CODEC_CAPS_OVERFLOW
  42#endif
  43
  44/* TODO:
  45 * - add substream support for multiple devices in case of
  46 *	SND_DYNAMIC_MINORS is not used
  47 * - Multiple node representation
  48 *	driver should be able to register multiple nodes
  49 */
  50
  51struct snd_compr_file {
  52	unsigned long caps;
  53	struct snd_compr_stream stream;
  54};
  55
  56static void error_delayed_work(struct work_struct *work);
  57
  58#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
  59static void snd_compr_task_free_all(struct snd_compr_stream *stream);
  60#else
  61static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
  62#endif
  63
  64/*
  65 * a note on stream states used:
  66 * we use following states in the compressed core
  67 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
  68 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
  69 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
  70 *	state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
  71 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
  72 *	playback only). User after setting up stream writes the data buffer
  73 *	before starting the stream.
  74 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
  75 *	decoding/encoding and rendering/capturing data.
  76 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
  77 *	by calling SNDRV_COMPRESS_DRAIN.
  78 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
  79 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
  80 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
  81 */
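As a rough illustration of how these states are driven from userspace (this sketch is not part of the kernel sources; the device path, codec choice and buffer geometry are assumptions, and error handling is omitted), a minimal playback client might do:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>

/* OPEN -> SETUP -> PREPARED -> RUNNING -> (drain) -> SETUP */
static int play_mp3_buffer(const void *mp3, size_t len)
{
	struct snd_compr_params params;
	int fd;

	fd = open("/dev/snd/comprC0D0", O_WRONLY);	/* playback nodes are write-only */
	if (fd < 0)
		return -1;

	memset(&params, 0, sizeof(params));
	params.buffer.fragment_size = 4096;
	params.buffer.fragments = 8;
	params.codec.id = SND_AUDIOCODEC_MP3;
	params.codec.ch_in = 2;
	params.codec.ch_out = 2;
	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	/* OPEN -> SETUP */

	write(fd, mp3, len);				/* SETUP -> PREPARED */
	ioctl(fd, SNDRV_COMPRESS_START);		/* PREPARED -> RUNNING */
	ioctl(fd, SNDRV_COMPRESS_DRAIN);		/* back to SETUP once drained */
	close(fd);
	return 0;
}

A real client would feed fragment-sized chunks in a loop and pace itself with poll() or SNDRV_COMPRESS_AVAIL instead of writing the whole track at once.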
  82static int snd_compr_open(struct inode *inode, struct file *f)
  83{
  84	struct snd_compr *compr;
  85	struct snd_compr_file *data;
  86	struct snd_compr_runtime *runtime;
  87	enum snd_compr_direction dirn;
  88	int maj = imajor(inode);
  89	int ret;
  90
  91	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
  92		dirn = SND_COMPRESS_PLAYBACK;
  93	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
  94		dirn = SND_COMPRESS_CAPTURE;
  95	else if ((f->f_flags & O_ACCMODE) == O_RDWR)
  96		dirn = SND_COMPRESS_ACCEL;
  97	else
  98		return -EINVAL;
  99
 100	if (maj == snd_major)
 101		compr = snd_lookup_minor_data(iminor(inode),
 102					SNDRV_DEVICE_TYPE_COMPRESS);
 103	else
 104		return -EBADFD;
 105
 106	if (compr == NULL) {
 107		pr_err("no device data!!!\n");
 108		return -ENODEV;
 109	}
 110
 111	if (dirn != compr->direction) {
 112		pr_err("this device doesn't support this direction\n");
 113		snd_card_unref(compr->card);
 114		return -EINVAL;
 115	}
 116
 117	data = kzalloc(sizeof(*data), GFP_KERNEL);
 118	if (!data) {
 119		snd_card_unref(compr->card);
 120		return -ENOMEM;
 121	}
 122
 123	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
 124
 125	data->stream.ops = compr->ops;
 126	data->stream.direction = dirn;
 127	data->stream.private_data = compr->private_data;
 128	data->stream.device = compr;
 129	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
 130	if (!runtime) {
 131		kfree(data);
 132		snd_card_unref(compr->card);
 133		return -ENOMEM;
 134	}
 135	runtime->state = SNDRV_PCM_STATE_OPEN;
 136	init_waitqueue_head(&runtime->sleep);
 137#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 138	INIT_LIST_HEAD(&runtime->tasks);
 139#endif
 140	data->stream.runtime = runtime;
 141	f->private_data = (void *)data;
 142	scoped_guard(mutex, &compr->lock)
 143		ret = compr->ops->open(&data->stream);
 144	if (ret) {
 145		kfree(runtime);
 146		kfree(data);
 147	}
 148	snd_card_unref(compr->card);
 149	return ret;
 150}
 151
 152static int snd_compr_free(struct inode *inode, struct file *f)
 153{
 154	struct snd_compr_file *data = f->private_data;
 155	struct snd_compr_runtime *runtime = data->stream.runtime;
 156
 157	cancel_delayed_work_sync(&data->stream.error_work);
 158
 159	switch (runtime->state) {
 160	case SNDRV_PCM_STATE_RUNNING:
 161	case SNDRV_PCM_STATE_DRAINING:
 162	case SNDRV_PCM_STATE_PAUSED:
 163		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
 164		break;
 165	default:
 166		break;
 167	}
 168
 169	snd_compr_task_free_all(&data->stream);
 170
 171	data->stream.ops->free(&data->stream);
 172	if (!data->stream.runtime->dma_buffer_p)
 173		kfree(data->stream.runtime->buffer);
 174	kfree(data->stream.runtime);
 175	kfree(data);
 176	return 0;
 177}
 178
 179static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
 180		struct snd_compr_tstamp *tstamp)
 181{
 182	if (!stream->ops->pointer)
 183		return -ENOTSUPP;
 184	stream->ops->pointer(stream, tstamp);
 185	pr_debug("dsp consumed till %d total %d bytes\n",
 186		tstamp->byte_offset, tstamp->copied_total);
 187	if (stream->direction == SND_COMPRESS_PLAYBACK)
 188		stream->runtime->total_bytes_transferred = tstamp->copied_total;
 189	else
 190		stream->runtime->total_bytes_available = tstamp->copied_total;
 191	return 0;
 192}
 193
 194static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
 195		struct snd_compr_avail *avail)
 196{
 197	memset(avail, 0, sizeof(*avail));
 198	snd_compr_update_tstamp(stream, &avail->tstamp);
 199	/* Still need to return avail even if tstamp can't be filled in */
 200
 201	if (stream->runtime->total_bytes_available == 0 &&
 202			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
 203			stream->direction == SND_COMPRESS_PLAYBACK) {
 204		pr_debug("detected init and someone forgot to do a write\n");
 205		return stream->runtime->buffer_size;
 206	}
 207	pr_debug("app wrote %lld, DSP consumed %lld\n",
 208			stream->runtime->total_bytes_available,
 209			stream->runtime->total_bytes_transferred);
 210	if (stream->runtime->total_bytes_available ==
 211				stream->runtime->total_bytes_transferred) {
 212		if (stream->direction == SND_COMPRESS_PLAYBACK) {
 213			pr_debug("both pointers are same, returning full avail\n");
 214			return stream->runtime->buffer_size;
 215		} else {
 216			pr_debug("both pointers are same, returning no avail\n");
 217			return 0;
 218		}
 219	}
 220
 221	avail->avail = stream->runtime->total_bytes_available -
 222			stream->runtime->total_bytes_transferred;
 223	if (stream->direction == SND_COMPRESS_PLAYBACK)
 224		avail->avail = stream->runtime->buffer_size - avail->avail;
 225
 226	pr_debug("ret avail as %lld\n", avail->avail);
 227	return avail->avail;
 228}
 229
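To make the accounting above concrete (illustrative numbers only): with a 32 KiB buffer, an application that has written 300 KiB in total while the DSP reports 280 KiB consumed has a 20 KiB backlog, so a playback stream returns avail = 32 KiB - 20 KiB = 12 KiB of writable space; a capture stream with the same counters would instead report 20 KiB of data ready to read.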
 230static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
 231{
 232	struct snd_compr_avail avail;
 233
 234	return snd_compr_calc_avail(stream, &avail);
 235}
 236
 237static int
 238snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
 239{
 240	struct snd_compr_avail ioctl_avail;
 241	size_t avail;
 242
 243	if (stream->direction == SND_COMPRESS_ACCEL)
 244		return -EBADFD;
 245
 246	avail = snd_compr_calc_avail(stream, &ioctl_avail);
 247	ioctl_avail.avail = avail;
 248
 249	switch (stream->runtime->state) {
 250	case SNDRV_PCM_STATE_OPEN:
 251		return -EBADFD;
 252	case SNDRV_PCM_STATE_XRUN:
 253		return -EPIPE;
 254	default:
 255		break;
 256	}
 257
 258	if (copy_to_user((__u64 __user *)arg,
 259				&ioctl_avail, sizeof(ioctl_avail)))
 260		return -EFAULT;
 261	return 0;
 262}
 263
 264static int snd_compr_write_data(struct snd_compr_stream *stream,
 265	       const char __user *buf, size_t count)
 266{
 267	void *dstn;
 268	size_t copy;
 269	struct snd_compr_runtime *runtime = stream->runtime;
 270	/* 64-bit Modulus */
 271	u64 app_pointer = div64_u64(runtime->total_bytes_available,
 272				    runtime->buffer_size);
 273	app_pointer = runtime->total_bytes_available -
 274		      (app_pointer * runtime->buffer_size);
 275
 276	dstn = runtime->buffer + app_pointer;
 277	pr_debug("copying %ld at %lld\n",
 278			(unsigned long)count, app_pointer);
 279	if (count < runtime->buffer_size - app_pointer) {
 280		if (copy_from_user(dstn, buf, count))
 281			return -EFAULT;
 282	} else {
 283		copy = runtime->buffer_size - app_pointer;
 284		if (copy_from_user(dstn, buf, copy))
 285			return -EFAULT;
 286		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
 287			return -EFAULT;
 288	}
 289	/* if DSP cares, let it know data has been written */
 290	if (stream->ops->ack)
 291		stream->ops->ack(stream, count);
 292	return count;
 293}
 294
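As an illustration of the wrap handling above (made-up numbers): with an 8 KiB buffer and app_pointer at 6 KiB, a 4 KiB write does not fit before the end of the buffer, so it is split into a 2 KiB copy up to the buffer end followed by a 2 KiB copy back at the start of the buffer.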
 295static ssize_t snd_compr_write(struct file *f, const char __user *buf,
 296		size_t count, loff_t *offset)
 297{
 298	struct snd_compr_file *data = f->private_data;
 299	struct snd_compr_stream *stream;
 300	size_t avail;
 301	int retval;
 302
 303	if (snd_BUG_ON(!data))
 304		return -EFAULT;
 305
 306	stream = &data->stream;
 307	if (stream->direction == SND_COMPRESS_ACCEL)
 308		return -EBADFD;
 309	guard(mutex)(&stream->device->lock);
 310	/* write is allowed when stream is running or has been setup */
 311	switch (stream->runtime->state) {
 312	case SNDRV_PCM_STATE_SETUP:
 313	case SNDRV_PCM_STATE_PREPARED:
 314	case SNDRV_PCM_STATE_RUNNING:
 315		break;
 316	default:
 317		return -EBADFD;
 318	}
 319
 320	avail = snd_compr_get_avail(stream);
 321	pr_debug("avail returned %ld\n", (unsigned long)avail);
 322	/* calculate how much we can write to buffer */
 323	if (avail > count)
 324		avail = count;
 325
 326	if (stream->ops->copy) {
 327		char __user* cbuf = (char __user*)buf;
 328		retval = stream->ops->copy(stream, cbuf, avail);
 329	} else {
 330		retval = snd_compr_write_data(stream, buf, avail);
 331	}
 332	if (retval > 0)
 333		stream->runtime->total_bytes_available += retval;
 334
 335	/* while initiating the stream, write should be called before START
 336	 * call, so in setup move state */
 337	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
 338		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
 339		pr_debug("stream prepared, Houston we are good to go\n");
 340	}
 341
 342	return retval;
 343}
 344
 345
 346static ssize_t snd_compr_read(struct file *f, char __user *buf,
 347		size_t count, loff_t *offset)
 348{
 349	struct snd_compr_file *data = f->private_data;
 350	struct snd_compr_stream *stream;
 351	size_t avail;
 352	int retval;
 353
 354	if (snd_BUG_ON(!data))
 355		return -EFAULT;
 356
 357	stream = &data->stream;
 358	if (stream->direction == SND_COMPRESS_ACCEL)
 359		return -EBADFD;
 360	guard(mutex)(&stream->device->lock);
 361
 362	/* read is allowed when stream is running, paused, draining and setup
 363	 * (yes setup is state which we transition to after stop, so if user
 364	 * wants to read data after stop we allow that)
 365	 */
 366	switch (stream->runtime->state) {
 367	case SNDRV_PCM_STATE_OPEN:
 368	case SNDRV_PCM_STATE_PREPARED:
 369	case SNDRV_PCM_STATE_SUSPENDED:
 370	case SNDRV_PCM_STATE_DISCONNECTED:
 371		return -EBADFD;
 372	case SNDRV_PCM_STATE_XRUN:
 373		return -EPIPE;
 374	}
 375
 376	avail = snd_compr_get_avail(stream);
 377	pr_debug("avail returned %ld\n", (unsigned long)avail);
 378	/* calculate how much we can read from buffer */
 379	if (avail > count)
 380		avail = count;
 381
 382	if (stream->ops->copy)
 383		retval = stream->ops->copy(stream, buf, avail);
 384	else
 385		return -ENXIO;
 386	if (retval > 0)
 387		stream->runtime->total_bytes_transferred += retval;
 388
 389	return retval;
 390}
 391
 392static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
 393{
 394	return -ENXIO;
 395}
 396
 397static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
 398{
 399	if (stream->direction == SND_COMPRESS_PLAYBACK)
 400		return EPOLLOUT | EPOLLWRNORM;
 401	else
 402		return EPOLLIN | EPOLLRDNORM;
 403}
 404
 405static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
 406{
 407	struct snd_compr_file *data = f->private_data;
 408	struct snd_compr_stream *stream;
 409	struct snd_compr_runtime *runtime;
 410	size_t avail;
 411	__poll_t retval = 0;
 412
 413	if (snd_BUG_ON(!data))
 414		return EPOLLERR;
 415
 416	stream = &data->stream;
 417	runtime = stream->runtime;
 418
 419	guard(mutex)(&stream->device->lock);
 420
 421	switch (runtime->state) {
 422	case SNDRV_PCM_STATE_OPEN:
 423	case SNDRV_PCM_STATE_XRUN:
 424		return snd_compr_get_poll(stream) | EPOLLERR;
 425	default:
 426		break;
 427	}
 428
 429	poll_wait(f, &runtime->sleep, wait);
 430
 431#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 432	if (stream->direction == SND_COMPRESS_ACCEL) {
 433		struct snd_compr_task_runtime *task;
 434		if (runtime->fragments > runtime->active_tasks)
 435			retval |= EPOLLOUT | EPOLLWRNORM;
 436		task = list_first_entry_or_null(&runtime->tasks,
 437						struct snd_compr_task_runtime,
 438						list);
 439		if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
 440			retval |= EPOLLIN | EPOLLRDNORM;
 441		return retval;
 442	}
 443#endif
 444
 445	avail = snd_compr_get_avail(stream);
 446	pr_debug("avail is %ld\n", (unsigned long)avail);
 447	/* check if we have at least one fragment to fill */
 448	switch (runtime->state) {
 449	case SNDRV_PCM_STATE_DRAINING:
 450		/* stream has been woken up after drain is complete
 451		 * draining done so set stream state to stopped
 452		 */
 453		retval = snd_compr_get_poll(stream);
 454		runtime->state = SNDRV_PCM_STATE_SETUP;
 455		break;
 456	case SNDRV_PCM_STATE_RUNNING:
 457	case SNDRV_PCM_STATE_PREPARED:
 458	case SNDRV_PCM_STATE_PAUSED:
 459		if (avail >= runtime->fragment_size)
 460			retval = snd_compr_get_poll(stream);
 461		break;
 462	default:
 463		return snd_compr_get_poll(stream) | EPOLLERR;
 464	}
 465
 466	return retval;
 467}
 468
 469static int
 470snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
 471{
 472	int retval;
 473	struct snd_compr_caps caps;
 474
 475	if (!stream->ops->get_caps)
 476		return -ENXIO;
 477
 478	memset(&caps, 0, sizeof(caps));
 479	retval = stream->ops->get_caps(stream, &caps);
 480	if (retval)
 481		goto out;
 482	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 483		retval = -EFAULT;
 484out:
 485	return retval;
 486}
 487
 488#ifndef COMPR_CODEC_CAPS_OVERFLOW
 489static int
 490snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 491{
 492	int retval;
 493	struct snd_compr_codec_caps *caps __free(kfree) = NULL;
 494
 495	if (!stream->ops->get_codec_caps)
 496		return -ENXIO;
 497
 498	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 499	if (!caps)
 500		return -ENOMEM;
 501
 502	retval = stream->ops->get_codec_caps(stream, caps);
 503	if (retval)
 504		return retval;
 505	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
 506		return -EFAULT;
 507	return retval;
 508}
 509#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 510
 511int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
 512{
 513	struct snd_dma_buffer *dmab;
 514	int ret;
 515
 516	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 517		return -EINVAL;
 518	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
 519	if (!dmab)
 520		return -ENOMEM;
 521	dmab->dev = stream->dma_buffer.dev;
 522	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
 523	if (ret < 0) {
 524		kfree(dmab);
 525		return ret;
 526	}
 527
 528	snd_compr_set_runtime_buffer(stream, dmab);
 529	stream->runtime->dma_bytes = size;
 530	return 1;
 531}
 532EXPORT_SYMBOL(snd_compr_malloc_pages);
 533
 534int snd_compr_free_pages(struct snd_compr_stream *stream)
 535{
 536	struct snd_compr_runtime *runtime;
 537
 538	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 539		return -EINVAL;
 540	runtime = stream->runtime;
 541	if (runtime->dma_area == NULL)
 542		return 0;
 543	if (runtime->dma_buffer_p != &stream->dma_buffer) {
 544		/* It's a newly allocated buffer. Release it now. */
 545		snd_dma_free_pages(runtime->dma_buffer_p);
 546		kfree(runtime->dma_buffer_p);
 547	}
 548
 549	snd_compr_set_runtime_buffer(stream, NULL);
 550	return 0;
 551}
 552EXPORT_SYMBOL(snd_compr_free_pages);
 553
 554/* revisit this with snd_pcm_preallocate_xxx */
 555static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
 556		struct snd_compr_params *params)
 557{
 558	unsigned int buffer_size;
 559	void *buffer = NULL;
 560
 561	if (stream->direction == SND_COMPRESS_ACCEL)
 562		goto params;
 563
 564	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
 565	if (stream->ops->copy) {
 566		buffer = NULL;
 567		/* if copy is defined the driver will be required to copy
 568		 * the data from core
 569		 */
 570	} else {
 571		if (stream->runtime->dma_buffer_p) {
 572
 573			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
 574				dev_err(stream->device->dev,
 575						"Not enough DMA buffer");
 576			else
 577				buffer = stream->runtime->dma_buffer_p->area;
 578
 579		} else {
 580			buffer = kmalloc(buffer_size, GFP_KERNEL);
 581		}
 582
 583		if (!buffer)
 584			return -ENOMEM;
 585	}
 586
 587	stream->runtime->buffer = buffer;
 588	stream->runtime->buffer_size = buffer_size;
 589params:
 590	stream->runtime->fragment_size = params->buffer.fragment_size;
 591	stream->runtime->fragments = params->buffer.fragments;
 592	return 0;
 593}
 594
 595static int
 596snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
 597{
 598	u32 max_fragments;
 599
 600	/* first let's check the buffer parameter's */
 601	if (params->buffer.fragment_size == 0)
 602		return -EINVAL;
 603
 604	if (stream->direction == SND_COMPRESS_ACCEL)
 605		max_fragments = 64;			/* safe value */
 606	else
 607		max_fragments = U32_MAX / params->buffer.fragment_size;
 608
 609	if (params->buffer.fragments > max_fragments ||
 610	    params->buffer.fragments == 0)
 611		return -EINVAL;
 612
 613	/* now codec parameters */
 614	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
 615		return -EINVAL;
 616
 617	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
 618		return -EINVAL;
 619
 620	return 0;
 621}
 622
 623static int
 624snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 625{
 626	struct snd_compr_params *params __free(kfree) = NULL;
 627	int retval;
 628
 629	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
 630		/*
 631		 * we should allow parameter change only when stream has been
 632		 * opened not in other cases
 633		 */
 634		params = memdup_user((void __user *)arg, sizeof(*params));
 635		if (IS_ERR(params))
 636			return PTR_ERR(params);
 637
 638		retval = snd_compress_check_input(stream, params);
 639		if (retval)
 640			return retval;
 641
 642		retval = snd_compr_allocate_buffer(stream, params);
 643		if (retval)
 644			return -ENOMEM;
 645
 646		retval = stream->ops->set_params(stream, params);
 647		if (retval)
 648			return retval;
 649
 650		if (stream->next_track)
 651			return retval;
 652
 653		stream->metadata_set = false;
 654		stream->next_track = false;
 655
 656		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 657	} else {
 658		return -EPERM;
 659	}
 660	return retval;
 661}
 662
 663static int
 664snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
 665{
 666	struct snd_codec *params __free(kfree) = NULL;
 667	int retval;
 668
 669	if (!stream->ops->get_params)
 670		return -EBADFD;
 671
 672	params = kzalloc(sizeof(*params), GFP_KERNEL);
 673	if (!params)
 674		return -ENOMEM;
 675	retval = stream->ops->get_params(stream, params);
 676	if (retval)
 677		return retval;
 678	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
 679		return -EFAULT;
 680	return retval;
 681}
 682
 683static int
 684snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
 685{
 686	struct snd_compr_metadata metadata;
 687	int retval;
 688
 689	if (!stream->ops->get_metadata)
 690		return -ENXIO;
 691
 692	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 693		return -EFAULT;
 694
 695	retval = stream->ops->get_metadata(stream, &metadata);
 696	if (retval != 0)
 697		return retval;
 698
 699	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
 700		return -EFAULT;
 701
 702	return 0;
 703}
 704
 705static int
 706snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
 707{
 708	struct snd_compr_metadata metadata;
 709	int retval;
 710
 711	if (!stream->ops->set_metadata)
 712		return -ENXIO;
 713	/*
 714	* we should allow parameter change only when stream has been
 715	* opened not in other cases
 716	*/
 717	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 718		return -EFAULT;
 719
 720	retval = stream->ops->set_metadata(stream, &metadata);
 721	stream->metadata_set = true;
 722
 723	return retval;
 724}
 725
 726static inline int
 727snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
 728{
 729	struct snd_compr_tstamp tstamp = {0};
 730	int ret;
 731
 732	ret = snd_compr_update_tstamp(stream, &tstamp);
 733	if (ret == 0)
 734		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
 735			&tstamp, sizeof(tstamp)) ? -EFAULT : 0;
 736	return ret;
 737}
 738
 739static int snd_compr_pause(struct snd_compr_stream *stream)
 740{
 741	int retval;
 742
 743	switch (stream->runtime->state) {
 744	case SNDRV_PCM_STATE_RUNNING:
 745		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 746		if (!retval)
 747			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
 748		break;
 749	case SNDRV_PCM_STATE_DRAINING:
 750		if (!stream->device->use_pause_in_draining)
 751			return -EPERM;
 752		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 753		if (!retval)
 754			stream->pause_in_draining = true;
 755		break;
 756	default:
 757		return -EPERM;
 758	}
 759	return retval;
 760}
 761
 762static int snd_compr_resume(struct snd_compr_stream *stream)
 763{
 764	int retval;
 765
 766	switch (stream->runtime->state) {
 767	case SNDRV_PCM_STATE_PAUSED:
 768		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 769		if (!retval)
 770			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 771		break;
 772	case SNDRV_PCM_STATE_DRAINING:
 773		if (!stream->pause_in_draining)
 774			return -EPERM;
 775		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 776		if (!retval)
 777			stream->pause_in_draining = false;
 778		break;
 779	default:
 780		return -EPERM;
 781	}
 782	return retval;
 783}
 784
 785static int snd_compr_start(struct snd_compr_stream *stream)
 786{
 787	int retval;
 788
 789	switch (stream->runtime->state) {
 790	case SNDRV_PCM_STATE_SETUP:
 791		if (stream->direction != SND_COMPRESS_CAPTURE)
 792			return -EPERM;
 793		break;
 794	case SNDRV_PCM_STATE_PREPARED:
 795		break;
 796	default:
 797		return -EPERM;
 798	}
 799
 800	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 801	if (!retval)
 802		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 803	return retval;
 804}
 805
 806static int snd_compr_stop(struct snd_compr_stream *stream)
 807{
 808	int retval;
 809
 810	switch (stream->runtime->state) {
 811	case SNDRV_PCM_STATE_OPEN:
 812	case SNDRV_PCM_STATE_SETUP:
 813	case SNDRV_PCM_STATE_PREPARED:
 814		return -EPERM;
 815	default:
 816		break;
 817	}
 818
 819	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 820	if (!retval) {
 821		/* clear flags and stop any drain wait */
 822		stream->partial_drain = false;
 823		stream->metadata_set = false;
 824		stream->pause_in_draining = false;
 825		snd_compr_drain_notify(stream);
 826		stream->runtime->total_bytes_available = 0;
 827		stream->runtime->total_bytes_transferred = 0;
 828	}
 829	return retval;
 830}
 831
 832static void error_delayed_work(struct work_struct *work)
 833{
 834	struct snd_compr_stream *stream;
 835
 836	stream = container_of(work, struct snd_compr_stream, error_work.work);
 837
 838	guard(mutex)(&stream->device->lock);
 839
 840	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 841	wake_up(&stream->runtime->sleep);
 842}
 843
 844/**
 845 * snd_compr_stop_error: Report a fatal error on a stream
 846 * @stream: pointer to stream
 847 * @state: state to transition the stream to
 848 *
 849 * Stop the stream and set its state.
 850 *
 851 * Should be called with compressed device lock held.
 852 *
 853 * Return: zero if successful, or a negative error code
 854 */
 855int snd_compr_stop_error(struct snd_compr_stream *stream,
 856			 snd_pcm_state_t state)
 857{
 858	if (stream->runtime->state == state)
 859		return 0;
 860
 861	stream->runtime->state = state;
 862
 863	pr_debug("Changing state to: %d\n", state);
 864
 865	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
 866
 867	return 0;
 868}
 869EXPORT_SYMBOL_GPL(snd_compr_stop_error);
 870
 871static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
 872{
 873	int ret;
 874
 875	/*
 876	 * We are called with lock held. So drop the lock while we wait for
 877	 * drain complete notification from the driver
 878	 *
 879	 * It is expected that driver will notify the drain completion and then
 880	 * stream will be moved to SETUP state, even if draining resulted in an
 881	 * error. We can trigger next track after this.
 882	 */
 883	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 884	mutex_unlock(&stream->device->lock);
 885
 886	/* we wait for drain to complete here, drain can return when
 887	 * interruption occurred, wait returned error or success.
 888	 * For the first two cases we don't do anything different here and
 889	 * return after waking up
 890	 */
 891
 892	ret = wait_event_interruptible(stream->runtime->sleep,
 893			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
 894	if (ret == -ERESTARTSYS)
 895		pr_debug("wait aborted by a signal\n");
 896	else if (ret)
 897		pr_debug("wait for drain failed with %d\n", ret);
 898
 899
 900	wake_up(&stream->runtime->sleep);
 901	mutex_lock(&stream->device->lock);
 902
 903	return ret;
 904}
 905
 906static int snd_compr_drain(struct snd_compr_stream *stream)
 907{
 908	int retval;
 909
 910	switch (stream->runtime->state) {
 911	case SNDRV_PCM_STATE_OPEN:
 912	case SNDRV_PCM_STATE_SETUP:
 913	case SNDRV_PCM_STATE_PREPARED:
 914	case SNDRV_PCM_STATE_PAUSED:
 915		return -EPERM;
 916	case SNDRV_PCM_STATE_XRUN:
 917		return -EPIPE;
 918	default:
 919		break;
 920	}
 921
 922	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
 923	if (retval) {
 924		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 925		wake_up(&stream->runtime->sleep);
 926		return retval;
 927	}
 928
 929	return snd_compress_wait_for_drain(stream);
 930}
 931
 932static int snd_compr_next_track(struct snd_compr_stream *stream)
 933{
 934	int retval;
 935
 936	/* only a running stream can transition to next track */
 937	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 938		return -EPERM;
 939
 940	/* next track doesn't have any meaning for capture streams */
 941	if (stream->direction == SND_COMPRESS_CAPTURE)
 942		return -EPERM;
 943
 944	/* you can signal next track if this is intended to be a gapless stream
 945	 * and current track metadata is set
 946	 */
 947	if (stream->metadata_set == false)
 948		return -EPERM;
 949
 950	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
 951	if (retval != 0)
 952		return retval;
 953	stream->metadata_set = false;
 954	stream->next_track = true;
 955	return 0;
 956}
 957
 958static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 959{
 960	int retval;
 961
 962	switch (stream->runtime->state) {
 963	case SNDRV_PCM_STATE_OPEN:
 964	case SNDRV_PCM_STATE_SETUP:
 965	case SNDRV_PCM_STATE_PREPARED:
 966	case SNDRV_PCM_STATE_PAUSED:
 967		return -EPERM;
 968	case SNDRV_PCM_STATE_XRUN:
 969		return -EPIPE;
 970	default:
 971		break;
 972	}
 973
 974	/* partial drain doesn't have any meaning for capture streams */
 975	if (stream->direction == SND_COMPRESS_CAPTURE)
 976		return -EPERM;
 977
 978	/* stream can be drained only when next track has been signalled */
 979	if (stream->next_track == false)
 980		return -EPERM;
 981
 982	stream->partial_drain = true;
 983	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
 984	if (retval) {
 985		pr_debug("Partial drain returned failure\n");
 986		wake_up(&stream->runtime->sleep);
 987		return retval;
 988	}
 989
 990	stream->next_track = false;
 991	return snd_compress_wait_for_drain(stream);
 992}
 993
 994#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 995
 996static struct snd_compr_task_runtime *
 997snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
 998{
 999	struct snd_compr_task_runtime *task;
1000
1001	list_for_each_entry(task, &stream->runtime->tasks, list) {
1002		if (task->seqno == seqno)
1003			return task;
1004	}
1005	return NULL;
1006}
1007
1008static void snd_compr_task_free(struct snd_compr_task_runtime *task)
1009{
1010	if (task->output)
1011		dma_buf_put(task->output);
1012	if (task->input)
1013		dma_buf_put(task->input);
1014	kfree(task);
1015}
1016
1017static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1018{
1019	u64 seqno = ++stream->runtime->task_seqno;
1020	if (seqno == 0)
1021		seqno = ++stream->runtime->task_seqno;
1022	return seqno;
1023}
1024
1025static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1026{
1027	struct snd_compr_task_runtime *task;
1028	int retval, fd_i, fd_o;
1029
1030	if (stream->runtime->total_tasks >= stream->runtime->fragments)
1031		return -EBUSY;
1032	if (utask->origin_seqno != 0 || utask->input_size != 0)
1033		return -EINVAL;
1034	task = kzalloc(sizeof(*task), GFP_KERNEL);
1035	if (task == NULL)
1036		return -ENOMEM;
1037	task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1038	task->input_size = utask->input_size;
1039	retval = stream->ops->task_create(stream, task);
1040	if (retval < 0)
1041		goto cleanup;
1042	/* similar functionality as in dma_buf_fd(), but ensure that both
1043	   file descriptors are allocated before fd_install() */
1044	if (!task->input || !task->input->file || !task->output || !task->output->file) {
1045		retval = -EINVAL;
1046		goto cleanup;
1047	}
1048	fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
1049	if (fd_i < 0)
1050		goto cleanup;
1051	fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
1052	if (fd_o < 0) {
1053		put_unused_fd(fd_i);
1054		goto cleanup;
1055	}
1056	/* keep dmabuf reference until freed with task free ioctl */
1057	get_dma_buf(task->input);
1058	get_dma_buf(task->output);
1059	fd_install(fd_i, task->input->file);
1060	fd_install(fd_o, task->output->file);
1061	utask->input_fd = fd_i;
1062	utask->output_fd = fd_o;
1063	list_add_tail(&task->list, &stream->runtime->tasks);
1064	stream->runtime->total_tasks++;
1065	return 0;
1066cleanup:
1067	snd_compr_task_free(task);
1068	return retval;
1069}
1070
1071static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
1072{
1073	struct snd_compr_task *task __free(kfree) = NULL;
1074	int retval;
1075
1076	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1077		return -EPERM;
1078	task = memdup_user((void __user *)arg, sizeof(*task));
1079	if (IS_ERR(task))
1080		return PTR_ERR(task);
1081	retval = snd_compr_task_new(stream, task);
1082	if (retval >= 0)
1083		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1084			retval = -EFAULT;
1085	return retval;
1086}
1087
1088static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
1089					struct snd_compr_task *utask)
1090{
1091	if (task == NULL)
1092		return -EINVAL;
1093	if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
1094		return -EBUSY;
1095	if (utask->input_size > task->input->size)
1096		return -EINVAL;
1097	task->flags = utask->flags;
1098	task->input_size = utask->input_size;
1099	task->state = SND_COMPRESS_TASK_STATE_IDLE;
1100	return 0;
1101}
1102
1103static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1104{
1105	struct snd_compr_task_runtime *task;
1106	int retval;
1107
1108	if (utask->origin_seqno > 0) {
1109		task = snd_compr_find_task(stream, utask->origin_seqno);
1110		retval = snd_compr_task_start_prepare(task, utask);
1111		if (retval < 0)
1112			return retval;
1113		task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1114		utask->origin_seqno = 0;
1115		list_move_tail(&task->list, &stream->runtime->tasks);
1116	} else {
1117		task = snd_compr_find_task(stream, utask->seqno);
1118		if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
1119			return -EBUSY;
1120		retval = snd_compr_task_start_prepare(task, utask);
1121		if (retval < 0)
1122			return retval;
1123	}
1124	retval = stream->ops->task_start(stream, task);
1125	if (retval >= 0) {
1126		task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
1127		stream->runtime->active_tasks++;
1128	}
1129	return retval;
1130}
1131
1132static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1133{
1134	struct snd_compr_task *task __free(kfree) = NULL;
1135	int retval;
1136
1137	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1138		return -EPERM;
1139	task = memdup_user((void __user *)arg, sizeof(*task));
1140	if (IS_ERR(task))
1141		return PTR_ERR(task);
1142	retval = snd_compr_task_start(stream, task);
1143	if (retval >= 0)
1144		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1145			retval = -EFAULT;
1146	return retval;
1147}
1148
1149static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
1150					struct snd_compr_task_runtime *task)
1151{
1152	if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
1153		return;
1154	stream->ops->task_stop(stream, task);
1155	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1156		stream->runtime->active_tasks--;
1157	list_move_tail(&task->list, &stream->runtime->tasks);
1158	task->state = SND_COMPRESS_TASK_STATE_IDLE;
1159}
1160
1161static void snd_compr_task_free_one(struct snd_compr_stream *stream,
1162					struct snd_compr_task_runtime *task)
1163{
1164	snd_compr_task_stop_one(stream, task);
1165	stream->ops->task_free(stream, task);
1166	list_del(&task->list);
1167	snd_compr_task_free(task);
1168	stream->runtime->total_tasks--;
1169}
1170
1171static void snd_compr_task_free_all(struct snd_compr_stream *stream)
1172{
1173	struct snd_compr_task_runtime *task, *temp;
1174
1175	list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1176		snd_compr_task_free_one(stream, task);
1177}
1178
1179typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
1180					struct snd_compr_task_runtime *task);
1181
1182static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
1183					snd_compr_seq_func_t fcn)
1184{
1185	struct snd_compr_task_runtime *task, *temp;
1186	__u64 seqno;
1187	int retval;
1188
1189	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1190		return -EPERM;
1191	retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
1192	if (retval)
1193		return -EFAULT;
1194	retval = 0;
1195	if (seqno == 0) {
1196		list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1197			fcn(stream, task);
1198	} else {
1199		task = snd_compr_find_task(stream, seqno);
1200		if (task == NULL) {
1201			retval = -EINVAL;
1202		} else {
1203			fcn(stream, task);
1204		}
1205	}
1206	return retval;
1207}
1208
1209static int snd_compr_task_status(struct snd_compr_stream *stream,
1210					struct snd_compr_task_status *status)
1211{
1212	struct snd_compr_task_runtime *task;
1213
1214	task = snd_compr_find_task(stream, status->seqno);
1215	if (task == NULL)
1216		return -EINVAL;
1217	status->input_size = task->input_size;
1218	status->output_size = task->output_size;
1219	status->state = task->state;
1220	return 0;
1221}
1222
1223static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1224{
1225	struct snd_compr_task_status *status __free(kfree) = NULL;
1226	int retval;
1227
1228	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1229		return -EPERM;
1230	status = memdup_user((void __user *)arg, sizeof(*status));
1231	if (IS_ERR(status))
1232		return PTR_ERR(status);
1233	retval = snd_compr_task_status(stream, status);
1234	if (retval >= 0)
1235		if (copy_to_user((void __user *)arg, status, sizeof(*status)))
1236			retval = -EFAULT;
1237	return retval;
1238}
1239
1240/**
1241 * snd_compr_task_finished: Notify that the task was finished
1242 * @stream: pointer to stream
1243 * @task: runtime task structure
1244 *
1245 * Set the finished task state and notify waiters.
1246 */
1247void snd_compr_task_finished(struct snd_compr_stream *stream,
1248			    struct snd_compr_task_runtime *task)
1249{
1250	guard(mutex)(&stream->device->lock);
1251	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1252		stream->runtime->active_tasks--;
1253	task->state = SND_COMPRESS_TASK_STATE_FINISHED;
1254	wake_up(&stream->runtime->sleep);
1255}
1256EXPORT_SYMBOL_GPL(snd_compr_task_finished);
1257
1258MODULE_IMPORT_NS("DMA_BUF");
1259#endif /* CONFIG_SND_COMPRESS_ACCEL */
1260
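The task ioctls above form the userspace half of the accel interface. A rough sketch of how a client might drive them follows; it is not part of this file, the device path, codec id and sizes are assumptions, error handling is omitted, and mapping the returned dma-buf file descriptors with mmap() only works when the exporting driver supports it:

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/types.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>

static void run_one_task(const void *src, __u64 src_size)
{
	struct snd_compr_params params;
	struct snd_compr_task task;
	struct snd_compr_task_status status;
	struct pollfd pfd;
	__u64 seqno;
	void *in;
	int fd;

	fd = open("/dev/snd/comprC0D0", O_RDWR);	/* ACCEL nodes are opened read/write */

	memset(&params, 0, sizeof(params));
	params.buffer.fragment_size = 65536;		/* per-task buffer size */
	params.buffer.fragments = 4;			/* bounds tasks in flight */
	params.codec.id = SND_AUDIOCODEC_PCM;
	params.codec.ch_in = 2;
	params.codec.ch_out = 2;
	ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params);	/* task ioctls need SETUP state */

	memset(&task, 0, sizeof(task));
	ioctl(fd, SNDRV_COMPRESS_TASK_CREATE, &task);	/* returns seqno + dma-buf fds */

	/* assumes src_size fits the buffer the driver allocated for the task */
	in = mmap(NULL, src_size, PROT_WRITE, MAP_SHARED, task.input_fd, 0);
	memcpy(in, src, src_size);
	munmap(in, src_size);

	task.input_size = src_size;
	ioctl(fd, SNDRV_COMPRESS_TASK_START, &task);

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);				/* POLLIN once a task has finished */

	memset(&status, 0, sizeof(status));
	status.seqno = task.seqno;
	ioctl(fd, SNDRV_COMPRESS_TASK_STATUS, &status);	/* status.output_size now valid */

	seqno = task.seqno;
	ioctl(fd, SNDRV_COMPRESS_TASK_FREE, &seqno);	/* a seqno of 0 frees all tasks */
	close(fd);
}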
1261static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1262{
1263	struct snd_compr_file *data = f->private_data;
1264	struct snd_compr_stream *stream;
1265
1266	if (snd_BUG_ON(!data))
1267		return -EFAULT;
1268
1269	stream = &data->stream;
1270
1271	guard(mutex)(&stream->device->lock);
1272	switch (_IOC_NR(cmd)) {
1273	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
1274		return put_user(SNDRV_COMPRESS_VERSION,
1275				(int __user *)arg) ? -EFAULT : 0;
1276	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
1277		return snd_compr_get_caps(stream, arg);
1278#ifndef COMPR_CODEC_CAPS_OVERFLOW
1279	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
1280		return snd_compr_get_codec_caps(stream, arg);
1281#endif
1282	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
1283		return snd_compr_set_params(stream, arg);
1284	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
1285		return snd_compr_get_params(stream, arg);
1286	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
1287		return snd_compr_set_metadata(stream, arg);
1288	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
1289		return snd_compr_get_metadata(stream, arg);
1290	}
1291
1292	if (stream->direction == SND_COMPRESS_ACCEL) {
1293#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1294		switch (_IOC_NR(cmd)) {
1295		case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
1296			return snd_compr_task_create(stream, arg);
1297		case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
1298			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
1299		case _IOC_NR(SNDRV_COMPRESS_TASK_START):
1300			return snd_compr_task_start_ioctl(stream, arg);
1301		case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
1302			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
1303		case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
1304			return snd_compr_task_status_ioctl(stream, arg);
1305		}
1306#endif
1307		return -ENOTTY;
1308	}
1309
1310	switch (_IOC_NR(cmd)) {
1311	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
1312		return snd_compr_tstamp(stream, arg);
1313	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
1314		return snd_compr_ioctl_avail(stream, arg);
1315	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
1316		return snd_compr_pause(stream);
1317	case _IOC_NR(SNDRV_COMPRESS_RESUME):
1318		return snd_compr_resume(stream);
1319	case _IOC_NR(SNDRV_COMPRESS_START):
1320		return snd_compr_start(stream);
1321	case _IOC_NR(SNDRV_COMPRESS_STOP):
1322		return snd_compr_stop(stream);
1323	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
1324		return snd_compr_drain(stream);
1325	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
1326		return snd_compr_partial_drain(stream);
1327	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
1328		return snd_compr_next_track(stream);
1329	}
1330
1331	return -ENOTTY;
1332}
1333
1334/* support of 32bit userspace on 64bit platforms */
1335#ifdef CONFIG_COMPAT
1336static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1337						unsigned long arg)
1338{
1339	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1340}
1341#endif
1342
1343static const struct file_operations snd_compr_file_ops = {
1344		.owner =	THIS_MODULE,
1345		.open =		snd_compr_open,
1346		.release =	snd_compr_free,
1347		.write =	snd_compr_write,
1348		.read =		snd_compr_read,
1349		.unlocked_ioctl = snd_compr_ioctl,
1350#ifdef CONFIG_COMPAT
1351		.compat_ioctl = snd_compr_ioctl_compat,
1352#endif
1353		.mmap =		snd_compr_mmap,
1354		.poll =		snd_compr_poll,
1355};
1356
1357static int snd_compress_dev_register(struct snd_device *device)
1358{
1359	int ret;
1360	struct snd_compr *compr;
1361
1362	if (snd_BUG_ON(!device || !device->device_data))
1363		return -EBADFD;
1364	compr = device->device_data;
1365
1366	pr_debug("reg device %s, direction %d\n", compr->name,
1367			compr->direction);
1368	/* register compressed device */
1369	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1370				  compr->card, compr->device,
1371				  &snd_compr_file_ops, compr, compr->dev);
1372	if (ret < 0) {
1373		pr_err("snd_register_device failed %d\n", ret);
1374		return ret;
1375	}
1376	return ret;
1377
1378}
1379
1380static int snd_compress_dev_disconnect(struct snd_device *device)
1381{
1382	struct snd_compr *compr;
1383
1384	compr = device->device_data;
1385	snd_unregister_device(compr->dev);
1386	return 0;
1387}
1388
1389#ifdef CONFIG_SND_VERBOSE_PROCFS
1390static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1391					struct snd_info_buffer *buffer)
1392{
1393	struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1394
1395	snd_iprintf(buffer, "card: %d\n", compr->card->number);
1396	snd_iprintf(buffer, "device: %d\n", compr->device);
1397	snd_iprintf(buffer, "stream: %s\n",
1398			compr->direction == SND_COMPRESS_PLAYBACK
1399				? "PLAYBACK" : "CAPTURE");
1400	snd_iprintf(buffer, "id: %s\n", compr->id);
1401}
1402
1403static int snd_compress_proc_init(struct snd_compr *compr)
1404{
1405	struct snd_info_entry *entry;
1406	char name[16];
1407
1408	sprintf(name, "compr%i", compr->device);
1409	entry = snd_info_create_card_entry(compr->card, name,
1410					   compr->card->proc_root);
1411	if (!entry)
1412		return -ENOMEM;
1413	entry->mode = S_IFDIR | 0555;
1414	compr->proc_root = entry;
1415
1416	entry = snd_info_create_card_entry(compr->card, "info",
1417					   compr->proc_root);
1418	if (entry)
1419		snd_info_set_text_ops(entry, compr,
1420				      snd_compress_proc_info_read);
1421	compr->proc_info_entry = entry;
1422
1423	return 0;
1424}
1425
1426static void snd_compress_proc_done(struct snd_compr *compr)
1427{
1428	snd_info_free_entry(compr->proc_info_entry);
1429	compr->proc_info_entry = NULL;
1430	snd_info_free_entry(compr->proc_root);
1431	compr->proc_root = NULL;
1432}
1433
1434static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1435{
1436	strscpy(compr->id, id, sizeof(compr->id));
1437}
1438#else
1439static inline int snd_compress_proc_init(struct snd_compr *compr)
1440{
1441	return 0;
1442}
1443
1444static inline void snd_compress_proc_done(struct snd_compr *compr)
1445{
1446}
1447
1448static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1449{
1450}
1451#endif
1452
1453static int snd_compress_dev_free(struct snd_device *device)
1454{
1455	struct snd_compr *compr;
1456
1457	compr = device->device_data;
1458	snd_compress_proc_done(compr);
1459	put_device(compr->dev);
1460	return 0;
1461}
1462
1463/**
1464 * snd_compress_new: create new compress device
1465 * @card: sound card pointer
1466 * @device: device number
1467 * @dirn: device direction, should be of type enum snd_compr_direction
1468 * @id: ID string
1469 * @compr: compress device pointer
1470 *
1471 * Return: zero if successful, or a negative error code
1472 */
1473int snd_compress_new(struct snd_card *card, int device,
1474			int dirn, const char *id, struct snd_compr *compr)
1475{
1476	static const struct snd_device_ops ops = {
1477		.dev_free = snd_compress_dev_free,
1478		.dev_register = snd_compress_dev_register,
1479		.dev_disconnect = snd_compress_dev_disconnect,
1480	};
1481	int ret;
1482
1483#if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1484	if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
1485		return -EINVAL;
1486#endif
1487
1488	compr->card = card;
1489	compr->device = device;
1490	compr->direction = dirn;
1491	mutex_init(&compr->lock);
1492
1493	snd_compress_set_id(compr, id);
1494
1495	ret = snd_device_alloc(&compr->dev, card);
1496	if (ret)
1497		return ret;
1498	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1499
1500	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1501	if (ret == 0)
1502		snd_compress_proc_init(compr);
1503	else
1504		put_device(compr->dev);
1505
1506	return ret;
1507}
1508EXPORT_SYMBOL_GPL(snd_compress_new);
1509
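For completeness, a hedged sketch of the driver side follows; it is not part of this file, and my_compr_ops / my_drv stand in for a real driver's snd_compr_ops implementation and private data:

#include <linux/device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/compress_driver.h>

/* A real driver must fill in .open, .free, .set_params, .trigger, .pointer
 * and friends; left empty here as a placeholder. */
static struct snd_compr_ops my_compr_ops;

static int my_driver_add_compress(struct snd_card *card, void *my_drv)
{
	struct snd_compr *compr;

	compr = devm_kzalloc(card->dev, sizeof(*compr), GFP_KERNEL);
	if (!compr)
		return -ENOMEM;

	compr->ops = &my_compr_ops;	/* callbacks invoked by snd_compr_open() etc. */
	compr->private_data = my_drv;	/* handed back via stream->private_data */

	/* creates the comprC<card>D<dev> node when the card is registered */
	return snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK, "my-offload", compr);
}

ASoC drivers normally reach snd_compress_new() indirectly through the soc-compress layer rather than calling it themselves.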
1510MODULE_DESCRIPTION("ALSA Compressed offload framework");
1511MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1512MODULE_LICENSE("GPL v2");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  compress_core.c - compress offload core
   4 *
   5 *  Copyright (C) 2011 Intel Corporation
   6 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
   7 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
   8 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   9 *
  10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11 */
  12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
  14
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/list.h>
  18#include <linux/math64.h>
  19#include <linux/mm.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/slab.h>
  23#include <linux/sched.h>
  24#include <linux/types.h>
  25#include <linux/uio.h>
  26#include <linux/uaccess.h>
 
  27#include <linux/module.h>
  28#include <linux/compat.h>
  29#include <sound/core.h>
  30#include <sound/initval.h>
  31#include <sound/info.h>
  32#include <sound/compress_params.h>
  33#include <sound/compress_offload.h>
  34#include <sound/compress_driver.h>
  35
  36/* struct snd_compr_codec_caps overflows the ioctl bit size for some
  37 * architectures, so we need to disable the relevant ioctls.
  38 */
  39#if _IOC_SIZEBITS < 14
  40#define COMPR_CODEC_CAPS_OVERFLOW
  41#endif
  42
  43/* TODO:
  44 * - add substream support for multiple devices in case of
  45 *	SND_DYNAMIC_MINORS is not used
  46 * - Multiple node representation
  47 *	driver should be able to register multiple nodes
  48 */
  49
  50struct snd_compr_file {
  51	unsigned long caps;
  52	struct snd_compr_stream stream;
  53};
  54
  55static void error_delayed_work(struct work_struct *work);
  56
 
 
 
 
 
 
  57/*
  58 * a note on stream states used:
  59 * we use following states in the compressed core
  60 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
  61 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
  62 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
  63 *	state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
  64 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
  65 *	playback only). User after setting up stream writes the data buffer
  66 *	before starting the stream.
  67 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
  68 *	decoding/encoding and rendering/capturing data.
  69 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
  70 *	by calling SNDRV_COMPRESS_DRAIN.
  71 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
  72 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
  73 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
  74 */
  75static int snd_compr_open(struct inode *inode, struct file *f)
  76{
  77	struct snd_compr *compr;
  78	struct snd_compr_file *data;
  79	struct snd_compr_runtime *runtime;
  80	enum snd_compr_direction dirn;
  81	int maj = imajor(inode);
  82	int ret;
  83
  84	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
  85		dirn = SND_COMPRESS_PLAYBACK;
  86	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
  87		dirn = SND_COMPRESS_CAPTURE;
 
 
  88	else
  89		return -EINVAL;
  90
  91	if (maj == snd_major)
  92		compr = snd_lookup_minor_data(iminor(inode),
  93					SNDRV_DEVICE_TYPE_COMPRESS);
  94	else
  95		return -EBADFD;
  96
  97	if (compr == NULL) {
  98		pr_err("no device data!!!\n");
  99		return -ENODEV;
 100	}
 101
 102	if (dirn != compr->direction) {
 103		pr_err("this device doesn't support this direction\n");
 104		snd_card_unref(compr->card);
 105		return -EINVAL;
 106	}
 107
 108	data = kzalloc(sizeof(*data), GFP_KERNEL);
 109	if (!data) {
 110		snd_card_unref(compr->card);
 111		return -ENOMEM;
 112	}
 113
 114	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
 115
 116	data->stream.ops = compr->ops;
 117	data->stream.direction = dirn;
 118	data->stream.private_data = compr->private_data;
 119	data->stream.device = compr;
 120	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
 121	if (!runtime) {
 122		kfree(data);
 123		snd_card_unref(compr->card);
 124		return -ENOMEM;
 125	}
 126	runtime->state = SNDRV_PCM_STATE_OPEN;
 127	init_waitqueue_head(&runtime->sleep);
 
 
 
 128	data->stream.runtime = runtime;
 129	f->private_data = (void *)data;
 130	mutex_lock(&compr->lock);
 131	ret = compr->ops->open(&data->stream);
 132	mutex_unlock(&compr->lock);
 133	if (ret) {
 134		kfree(runtime);
 135		kfree(data);
 136	}
 137	snd_card_unref(compr->card);
 138	return ret;
 139}
 140
 141static int snd_compr_free(struct inode *inode, struct file *f)
 142{
 143	struct snd_compr_file *data = f->private_data;
 144	struct snd_compr_runtime *runtime = data->stream.runtime;
 145
 146	cancel_delayed_work_sync(&data->stream.error_work);
 147
 148	switch (runtime->state) {
 149	case SNDRV_PCM_STATE_RUNNING:
 150	case SNDRV_PCM_STATE_DRAINING:
 151	case SNDRV_PCM_STATE_PAUSED:
 152		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
 153		break;
 154	default:
 155		break;
 156	}
 157
 
 
 158	data->stream.ops->free(&data->stream);
 159	if (!data->stream.runtime->dma_buffer_p)
 160		kfree(data->stream.runtime->buffer);
 161	kfree(data->stream.runtime);
 162	kfree(data);
 163	return 0;
 164}
 165
 166static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
 167		struct snd_compr_tstamp *tstamp)
 168{
 169	if (!stream->ops->pointer)
 170		return -ENOTSUPP;
 171	stream->ops->pointer(stream, tstamp);
 172	pr_debug("dsp consumed till %d total %d bytes\n",
 173		tstamp->byte_offset, tstamp->copied_total);
 174	if (stream->direction == SND_COMPRESS_PLAYBACK)
 175		stream->runtime->total_bytes_transferred = tstamp->copied_total;
 176	else
 177		stream->runtime->total_bytes_available = tstamp->copied_total;
 178	return 0;
 179}
 180
 181static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
 182		struct snd_compr_avail *avail)
 183{
 184	memset(avail, 0, sizeof(*avail));
 185	snd_compr_update_tstamp(stream, &avail->tstamp);
 186	/* Still need to return avail even if tstamp can't be filled in */
 187
 188	if (stream->runtime->total_bytes_available == 0 &&
 189			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
 190			stream->direction == SND_COMPRESS_PLAYBACK) {
 191		pr_debug("detected init and someone forgot to do a write\n");
 192		return stream->runtime->buffer_size;
 193	}
 194	pr_debug("app wrote %lld, DSP consumed %lld\n",
 195			stream->runtime->total_bytes_available,
 196			stream->runtime->total_bytes_transferred);
 197	if (stream->runtime->total_bytes_available ==
 198				stream->runtime->total_bytes_transferred) {
 199		if (stream->direction == SND_COMPRESS_PLAYBACK) {
 200			pr_debug("both pointers are same, returning full avail\n");
 201			return stream->runtime->buffer_size;
 202		} else {
 203			pr_debug("both pointers are same, returning no avail\n");
 204			return 0;
 205		}
 206	}
 207
 208	avail->avail = stream->runtime->total_bytes_available -
 209			stream->runtime->total_bytes_transferred;
 210	if (stream->direction == SND_COMPRESS_PLAYBACK)
 211		avail->avail = stream->runtime->buffer_size - avail->avail;
 212
 213	pr_debug("ret avail as %lld\n", avail->avail);
 214	return avail->avail;
 215}
 216
 217static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
 218{
 219	struct snd_compr_avail avail;
 220
 221	return snd_compr_calc_avail(stream, &avail);
 222}
 223
 224static int
 225snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
 226{
 227	struct snd_compr_avail ioctl_avail;
 228	size_t avail;
 229
 
 
 
 230	avail = snd_compr_calc_avail(stream, &ioctl_avail);
 231	ioctl_avail.avail = avail;
 232
 233	switch (stream->runtime->state) {
 234	case SNDRV_PCM_STATE_OPEN:
 235		return -EBADFD;
 236	case SNDRV_PCM_STATE_XRUN:
 237		return -EPIPE;
 238	default:
 239		break;
 240	}
 241
 242	if (copy_to_user((__u64 __user *)arg,
 243				&ioctl_avail, sizeof(ioctl_avail)))
 244		return -EFAULT;
 245	return 0;
 246}
 247
 248static int snd_compr_write_data(struct snd_compr_stream *stream,
 249	       const char __user *buf, size_t count)
 250{
 251	void *dstn;
 252	size_t copy;
 253	struct snd_compr_runtime *runtime = stream->runtime;
 254	/* 64-bit Modulus */
 255	u64 app_pointer = div64_u64(runtime->total_bytes_available,
 256				    runtime->buffer_size);
 257	app_pointer = runtime->total_bytes_available -
 258		      (app_pointer * runtime->buffer_size);
 259
 260	dstn = runtime->buffer + app_pointer;
 261	pr_debug("copying %ld at %lld\n",
 262			(unsigned long)count, app_pointer);
 263	if (count < runtime->buffer_size - app_pointer) {
 264		if (copy_from_user(dstn, buf, count))
 265			return -EFAULT;
 266	} else {
 267		copy = runtime->buffer_size - app_pointer;
 268		if (copy_from_user(dstn, buf, copy))
 269			return -EFAULT;
 270		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
 271			return -EFAULT;
 272	}
 273	/* if DSP cares, let it know data has been written */
 274	if (stream->ops->ack)
 275		stream->ops->ack(stream, count);
 276	return count;
 277}
 278
 279static ssize_t snd_compr_write(struct file *f, const char __user *buf,
 280		size_t count, loff_t *offset)
 281{
 282	struct snd_compr_file *data = f->private_data;
 283	struct snd_compr_stream *stream;
 284	size_t avail;
 285	int retval;
 286
 287	if (snd_BUG_ON(!data))
 288		return -EFAULT;
 289
 290	stream = &data->stream;
 291	mutex_lock(&stream->device->lock);
 292	/* write is allowed when stream is running or has been steup */
 
 
 293	switch (stream->runtime->state) {
 294	case SNDRV_PCM_STATE_SETUP:
 295	case SNDRV_PCM_STATE_PREPARED:
 296	case SNDRV_PCM_STATE_RUNNING:
 297		break;
 298	default:
 299		mutex_unlock(&stream->device->lock);
 300		return -EBADFD;
 301	}
 302
 303	avail = snd_compr_get_avail(stream);
 304	pr_debug("avail returned %ld\n", (unsigned long)avail);
 305	/* calculate how much we can write to buffer */
 306	if (avail > count)
 307		avail = count;
 308
 309	if (stream->ops->copy) {
 310		char __user* cbuf = (char __user*)buf;
 311		retval = stream->ops->copy(stream, cbuf, avail);
 312	} else {
 313		retval = snd_compr_write_data(stream, buf, avail);
 314	}
 315	if (retval > 0)
 316		stream->runtime->total_bytes_available += retval;
 317
 318	/* while initiating the stream, write should be called before START
 319	 * call, so in setup move state */
 320	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
 321		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
 322		pr_debug("stream prepared, Houston we are good to go\n");
 323	}
 324
 325	mutex_unlock(&stream->device->lock);
 326	return retval;
 327}
 328
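/*
 * Editor's note -- an illustrative userspace sketch, not a definitive usage
 * contract: for playback the first write() is expected before the START
 * trigger, which is why snd_compr_write() moves a SETUP stream to PREPARED.
 * Assuming "fd", "buf" and "len" come from the application:
 *
 *	// after SNDRV_COMPRESS_SET_PARAMS has succeeded
 *	write(fd, buf, len);			// SETUP -> PREPARED
 *	ioctl(fd, SNDRV_COMPRESS_START);	// PREPARED -> RUNNING
 */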
 329
 330static ssize_t snd_compr_read(struct file *f, char __user *buf,
 331		size_t count, loff_t *offset)
 332{
 333	struct snd_compr_file *data = f->private_data;
 334	struct snd_compr_stream *stream;
 335	size_t avail;
 336	int retval;
 337
 338	if (snd_BUG_ON(!data))
 339		return -EFAULT;
 340
 341	stream = &data->stream;
 342	mutex_lock(&stream->device->lock);
 343
 344	/* read is allowed when the stream is running, paused, draining or in
 345	 * setup (yes, setup is the state we transition to after stop, so if the
 346	 * user wants to read data after stop we allow that)
 347	 */
 348	switch (stream->runtime->state) {
 349	case SNDRV_PCM_STATE_OPEN:
 350	case SNDRV_PCM_STATE_PREPARED:
 351	case SNDRV_PCM_STATE_SUSPENDED:
 352	case SNDRV_PCM_STATE_DISCONNECTED:
 353		retval = -EBADFD;
 354		goto out;
 355	case SNDRV_PCM_STATE_XRUN:
 356		retval = -EPIPE;
 357		goto out;
 358	}
 359
 360	avail = snd_compr_get_avail(stream);
 361	pr_debug("avail returned %ld\n", (unsigned long)avail);
 362	/* calculate how much we can read from buffer */
 363	if (avail > count)
 364		avail = count;
 365
 366	if (stream->ops->copy) {
 367		retval = stream->ops->copy(stream, buf, avail);
 368	} else {
 369		retval = -ENXIO;
 370		goto out;
 371	}
 372	if (retval > 0)
 373		stream->runtime->total_bytes_transferred += retval;
 374
 375out:
 376	mutex_unlock(&stream->device->lock);
 377	return retval;
 378}
 379
 380static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
 381{
 382	return -ENXIO;
 383}
 384
 385static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
 386{
 387	if (stream->direction == SND_COMPRESS_PLAYBACK)
 388		return EPOLLOUT | EPOLLWRNORM;
 389	else
 390		return EPOLLIN | EPOLLRDNORM;
 391}
 392
 393static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
 394{
 395	struct snd_compr_file *data = f->private_data;
 396	struct snd_compr_stream *stream;
 397	size_t avail;
 398	__poll_t retval = 0;
 399
 400	if (snd_BUG_ON(!data))
 401		return EPOLLERR;
 402
 403	stream = &data->stream;
 404
 405	mutex_lock(&stream->device->lock);
 406
 407	switch (stream->runtime->state) {
 408	case SNDRV_PCM_STATE_OPEN:
 409	case SNDRV_PCM_STATE_XRUN:
 410		retval = snd_compr_get_poll(stream) | EPOLLERR;
 411		goto out;
 412	default:
 413		break;
 414	}
 415
 416	poll_wait(f, &stream->runtime->sleep, wait);
 417
 418	avail = snd_compr_get_avail(stream);
 419	pr_debug("avail is %ld\n", (unsigned long)avail);
 420	/* check if we have at least one fragment to fill */
 421	switch (stream->runtime->state) {
 422	case SNDRV_PCM_STATE_DRAINING:
 423		/* stream has been woken up because the drain is complete,
 424		 * so move the stream back to the SETUP state
 425		 */
 426		retval = snd_compr_get_poll(stream);
 427		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 428		break;
 429	case SNDRV_PCM_STATE_RUNNING:
 430	case SNDRV_PCM_STATE_PREPARED:
 431	case SNDRV_PCM_STATE_PAUSED:
 432		if (avail >= stream->runtime->fragment_size)
 433			retval = snd_compr_get_poll(stream);
 434		break;
 435	default:
 436		retval = snd_compr_get_poll(stream) | EPOLLERR;
 437		break;
 438	}
 439out:
 440	mutex_unlock(&stream->device->lock);
 441	return retval;
 442}
 443
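/*
 * Editor's note -- illustrative only: poll() reports the stream ready once
 * at least one whole fragment can be written (playback) or read (capture),
 * so applications usually do fragment-sized I/O.  A minimal sketch, assuming
 * "fd", "buf", "fragment_size" and "timeout_ms" come from the application:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLOUT))
 *		write(fd, buf, fragment_size);
 */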
 444static int
 445snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
 446{
 447	int retval;
 448	struct snd_compr_caps caps;
 449
 450	if (!stream->ops->get_caps)
 451		return -ENXIO;
 452
 453	memset(&caps, 0, sizeof(caps));
 454	retval = stream->ops->get_caps(stream, &caps);
 455	if (retval)
 456		goto out;
 457	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 458		retval = -EFAULT;
 459out:
 460	return retval;
 461}
 462
 463#ifndef COMPR_CODEC_CAPS_OVERFLOW
 464static int
 465snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 466{
 467	int retval;
 468	struct snd_compr_codec_caps *caps;
 469
 470	if (!stream->ops->get_codec_caps)
 471		return -ENXIO;
 472
 473	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 474	if (!caps)
 475		return -ENOMEM;
 476
 477	retval = stream->ops->get_codec_caps(stream, caps);
 478	if (retval)
 479		goto out;
 480	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
 481		retval = -EFAULT;
 482
 483out:
 484	kfree(caps);
 485	return retval;
 486}
 487#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 488
 489int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
 490{
 491	struct snd_dma_buffer *dmab;
 492	int ret;
 493
 494	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 495		return -EINVAL;
 496	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
 497	if (!dmab)
 498		return -ENOMEM;
 499	dmab->dev = stream->dma_buffer.dev;
 500	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
 501	if (ret < 0) {
 502		kfree(dmab);
 503		return ret;
 504	}
 505
 506	snd_compr_set_runtime_buffer(stream, dmab);
 507	stream->runtime->dma_bytes = size;
 508	return 1;
 509}
 510EXPORT_SYMBOL(snd_compr_malloc_pages);
 511
 512int snd_compr_free_pages(struct snd_compr_stream *stream)
 513{
 514	struct snd_compr_runtime *runtime;
 515
 516	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 517		return -EINVAL;
 518	runtime = stream->runtime;
 519	if (runtime->dma_area == NULL)
 520		return 0;
 521	if (runtime->dma_buffer_p != &stream->dma_buffer) {
 522		/* It's a newly allocated buffer. Release it now. */
 523		snd_dma_free_pages(runtime->dma_buffer_p);
 524		kfree(runtime->dma_buffer_p);
 525	}
 526
 527	snd_compr_set_runtime_buffer(stream, NULL);
 528	return 0;
 529}
 530EXPORT_SYMBOL(snd_compr_free_pages);
 531
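/*
 * Editor's note -- a sketch of one possible driver usage, not a requirement
 * of the API: a driver that wants the core to allocate a DMA ring buffer can
 * describe its device in open() and then size the buffer from set_params():
 *
 *	// in the driver's open() callback
 *	stream->dma_buffer.dev.type = SNDRV_DMA_TYPE_DEV;
 *	stream->dma_buffer.dev.dev = dev;	// the driver's struct device
 *
 *	// in set_params(), once the buffer size is known
 *	ret = snd_compr_malloc_pages(stream, size);
 *
 *	// in free()
 *	snd_compr_free_pages(stream);
 */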
 532/* revisit this with snd_pcm_preallocate_xxx */
 533static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
 534		struct snd_compr_params *params)
 535{
 536	unsigned int buffer_size;
 537	void *buffer = NULL;
 538
 539	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
 540	if (stream->ops->copy) {
 541		buffer = NULL;
 542		/* if copy is defined, the driver is required to copy
 543		 * the data from the core
 544		 */
 545	} else {
 546		if (stream->runtime->dma_buffer_p) {
 547
 548			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
 549				dev_err(stream->device->dev,
 550						"Not enough DMA buffer\n");
 551			else
 552				buffer = stream->runtime->dma_buffer_p->area;
 553
 554		} else {
 555			buffer = kmalloc(buffer_size, GFP_KERNEL);
 556		}
 557
 558		if (!buffer)
 559			return -ENOMEM;
 560	}
 561	stream->runtime->fragment_size = params->buffer.fragment_size;
 562	stream->runtime->fragments = params->buffer.fragments;
 563	stream->runtime->buffer = buffer;
 564	stream->runtime->buffer_size = buffer_size;
 565	return 0;
 566}
 567
 568static int snd_compress_check_input(struct snd_compr_params *params)
 569{
 570	/* first let's check the buffer parameters */
 571	if (params->buffer.fragment_size == 0 ||
 572	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
 573	    params->buffer.fragments == 0)
 574		return -EINVAL;
 575
 576	/* now codec parameters */
 577	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
 578		return -EINVAL;
 579
 580	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
 581		return -EINVAL;
 582
 583	return 0;
 584}
 585
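/*
 * Editor's note: the "fragments > U32_MAX / fragment_size" test above is an
 * overflow guard; it rejects any combination whose product would not fit in
 * 32 bits before snd_compr_allocate_buffer() multiplies them.  Purely as an
 * illustration, fragment_size = 4096 with fragments = 8 describes a 32 KiB
 * buffer and passes, while fragment_size = 0x10000 with fragments = 0x10000
 * would need 2^32 bytes and is rejected.
 */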
 586static int
 587snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 588{
 589	struct snd_compr_params *params;
 590	int retval;
 591
 592	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
 593		/*
 594		 * we should allow parameter changes only when the stream has
 595		 * been opened, not in any other state
 596		 */
 597		params = memdup_user((void __user *)arg, sizeof(*params));
 598		if (IS_ERR(params))
 599			return PTR_ERR(params);
 600
 601		retval = snd_compress_check_input(params);
 602		if (retval)
 603			goto out;
 604
 605		retval = snd_compr_allocate_buffer(stream, params);
 606		if (retval) {
 607			retval = -ENOMEM;
 608			goto out;
 609		}
 610
 611		retval = stream->ops->set_params(stream, params);
 612		if (retval)
 613			goto out;
 614
 615		if (stream->next_track)
 616			goto out;
 617
 618		stream->metadata_set = false;
 619		stream->next_track = false;
 620
 621		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 622	} else {
 623		return -EPERM;
 624	}
 625out:
 626	kfree(params);
 627	return retval;
 628}
 629
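/*
 * Editor's note -- an illustrative sketch only; which codec fields a device
 * accepts is driver specific and should be checked against GET_CAPS /
 * GET_CODEC_CAPS: a typical playback setup fills struct snd_compr_params
 * and issues SNDRV_COMPRESS_SET_PARAMS while the stream is still OPEN:
 *
 *	struct snd_compr_params params = {0};
 *
 *	params.buffer.fragment_size = 4096;
 *	params.buffer.fragments = 8;
 *	params.codec.id = SND_AUDIOCODEC_MP3;
 *	params.codec.ch_in = 2;
 *	params.codec.ch_out = 2;
 *	params.codec.sample_rate = 44100;
 *	params.codec.bit_rate = 320000;
 *
 *	if (ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params) < 0)
 *		perror("SNDRV_COMPRESS_SET_PARAMS");
 */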
 630static int
 631snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
 632{
 633	struct snd_codec *params;
 634	int retval;
 635
 636	if (!stream->ops->get_params)
 637		return -EBADFD;
 638
 639	params = kzalloc(sizeof(*params), GFP_KERNEL);
 640	if (!params)
 641		return -ENOMEM;
 642	retval = stream->ops->get_params(stream, params);
 643	if (retval)
 644		goto out;
 645	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
 646		retval = -EFAULT;
 647
 648out:
 649	kfree(params);
 650	return retval;
 651}
 652
 653static int
 654snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
 655{
 656	struct snd_compr_metadata metadata;
 657	int retval;
 658
 659	if (!stream->ops->get_metadata)
 660		return -ENXIO;
 661
 662	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 663		return -EFAULT;
 664
 665	retval = stream->ops->get_metadata(stream, &metadata);
 666	if (retval != 0)
 667		return retval;
 668
 669	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
 670		return -EFAULT;
 671
 672	return 0;
 673}
 674
 675static int
 676snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
 677{
 678	struct snd_compr_metadata metadata;
 679	int retval;
 680
 681	if (!stream->ops->set_metadata)
 682		return -ENXIO;
 683	/*
 684	 * we should allow parameter changes only when the stream has
 685	 * been opened, not in any other state
 686	 */
 687	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 688		return -EFAULT;
 689
 690	retval = stream->ops->set_metadata(stream, &metadata);
 691	stream->metadata_set = true;
 692
 693	return retval;
 694}
 695
 696static inline int
 697snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
 698{
 699	struct snd_compr_tstamp tstamp = {0};
 700	int ret;
 701
 702	ret = snd_compr_update_tstamp(stream, &tstamp);
 703	if (ret == 0)
 704		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
 705			&tstamp, sizeof(tstamp)) ? -EFAULT : 0;
 706	return ret;
 707}
 708
 709static int snd_compr_pause(struct snd_compr_stream *stream)
 710{
 711	int retval;
 712
 713	switch (stream->runtime->state) {
 714	case SNDRV_PCM_STATE_RUNNING:
 715		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 716		if (!retval)
 717			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
 718		break;
 719	case SNDRV_PCM_STATE_DRAINING:
 720		if (!stream->device->use_pause_in_draining)
 721			return -EPERM;
 722		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 723		if (!retval)
 724			stream->pause_in_draining = true;
 725		break;
 726	default:
 727		return -EPERM;
 728	}
 729	return retval;
 730}
 731
 732static int snd_compr_resume(struct snd_compr_stream *stream)
 733{
 734	int retval;
 735
 736	switch (stream->runtime->state) {
 737	case SNDRV_PCM_STATE_PAUSED:
 738		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 739		if (!retval)
 740			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 741		break;
 742	case SNDRV_PCM_STATE_DRAINING:
 743		if (!stream->pause_in_draining)
 744			return -EPERM;
 745		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 746		if (!retval)
 747			stream->pause_in_draining = false;
 748		break;
 749	default:
 750		return -EPERM;
 751	}
 752	return retval;
 753}
 754
 755static int snd_compr_start(struct snd_compr_stream *stream)
 756{
 757	int retval;
 758
 759	switch (stream->runtime->state) {
 760	case SNDRV_PCM_STATE_SETUP:
 761		if (stream->direction != SND_COMPRESS_CAPTURE)
 762			return -EPERM;
 763		break;
 764	case SNDRV_PCM_STATE_PREPARED:
 765		break;
 766	default:
 767		return -EPERM;
 768	}
 769
 770	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 771	if (!retval)
 772		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 773	return retval;
 774}
 775
 776static int snd_compr_stop(struct snd_compr_stream *stream)
 777{
 778	int retval;
 779
 780	switch (stream->runtime->state) {
 781	case SNDRV_PCM_STATE_OPEN:
 782	case SNDRV_PCM_STATE_SETUP:
 783	case SNDRV_PCM_STATE_PREPARED:
 784		return -EPERM;
 785	default:
 786		break;
 787	}
 788
 789	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 790	if (!retval) {
 791		/* clear flags and stop any drain wait */
 792		stream->partial_drain = false;
 793		stream->metadata_set = false;
 794		stream->pause_in_draining = false;
 795		snd_compr_drain_notify(stream);
 796		stream->runtime->total_bytes_available = 0;
 797		stream->runtime->total_bytes_transferred = 0;
 798	}
 799	return retval;
 800}
 801
 802static void error_delayed_work(struct work_struct *work)
 803{
 804	struct snd_compr_stream *stream;
 805
 806	stream = container_of(work, struct snd_compr_stream, error_work.work);
 807
 808	mutex_lock(&stream->device->lock);
 809
 810	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 811	wake_up(&stream->runtime->sleep);
 812
 813	mutex_unlock(&stream->device->lock);
 814}
 815
 816/**
 817 * snd_compr_stop_error: Report a fatal error on a stream
 818 * @stream: pointer to stream
 819 * @state: state to transition the stream to
 820 *
 821 * Stop the stream and set its state.
 822 *
 823 * Should be called with compressed device lock held.
 824 *
 825 * Return: zero if successful, or a negative error code
 826 */
 827int snd_compr_stop_error(struct snd_compr_stream *stream,
 828			 snd_pcm_state_t state)
 829{
 830	if (stream->runtime->state == state)
 831		return 0;
 832
 833	stream->runtime->state = state;
 834
 835	pr_debug("Changing state to: %d\n", state);
 836
 837	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
 838
 839	return 0;
 840}
 841EXPORT_SYMBOL_GPL(snd_compr_stop_error);
 842
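/*
 * Editor's note -- a minimal driver-side sketch, assuming "cstream" is the
 * driver's active stream and the compressed device lock is already held as
 * required by the kernel-doc above: a driver that detects an unrecoverable
 * DSP fault can report it so that subsequent reads/writes/ioctls return
 * -EPIPE and userspace can recover:
 *
 *	snd_compr_stop_error(cstream, SNDRV_PCM_STATE_XRUN);
 */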
 843static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
 844{
 845	int ret;
 846
 847	/*
 848	 * We are called with lock held. So drop the lock while we wait for
 849	 * drain complete notification from the driver
 850	 *
 851	 * It is expected that driver will notify the drain completion and then
 852	 * stream will be moved to SETUP state, even if draining resulted in an
 853	 * error. We can trigger next track after this.
 854	 */
 855	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 856	mutex_unlock(&stream->device->lock);
 857
 858	/* we wait here for the drain to complete; the wait can return because
 859	 * it was interrupted by a signal, because it failed, or because the
 860	 * drain finished. For the first two cases we don't do anything special
 861	 * and simply return after waking up
 862	 */
 863
 864	ret = wait_event_interruptible(stream->runtime->sleep,
 865			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
 866	if (ret == -ERESTARTSYS)
 867		pr_debug("wait aborted by a signal\n");
 868	else if (ret)
 869		pr_debug("wait for drain failed with %d\n", ret);
 870
 871
 872	wake_up(&stream->runtime->sleep);
 873	mutex_lock(&stream->device->lock);
 874
 875	return ret;
 876}
 877
 878static int snd_compr_drain(struct snd_compr_stream *stream)
 879{
 880	int retval;
 881
 882	switch (stream->runtime->state) {
 883	case SNDRV_PCM_STATE_OPEN:
 884	case SNDRV_PCM_STATE_SETUP:
 885	case SNDRV_PCM_STATE_PREPARED:
 886	case SNDRV_PCM_STATE_PAUSED:
 887		return -EPERM;
 888	case SNDRV_PCM_STATE_XRUN:
 889		return -EPIPE;
 890	default:
 891		break;
 892	}
 893
 894	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
 895	if (retval) {
 896		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 897		wake_up(&stream->runtime->sleep);
 898		return retval;
 899	}
 900
 901	return snd_compress_wait_for_drain(stream);
 902}
 903
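/*
 * Editor's note: snd_compress_wait_for_drain() sleeps until the stream
 * leaves the DRAINING state, so the driver must call snd_compr_drain_notify()
 * once the hardware has actually consumed the queued data.  A minimal
 * sketch, assuming "cstream" is the stream the driver saved when it received
 * the drain trigger:
 *
 *	// in the driver's "drain finished" interrupt or work handler
 *	snd_compr_drain_notify(cstream);
 */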
 904static int snd_compr_next_track(struct snd_compr_stream *stream)
 905{
 906	int retval;
 907
 908	/* only a running stream can transition to next track */
 909	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 910		return -EPERM;
 911
 912	/* next track doesn't have any meaning for capture streams */
 913	if (stream->direction == SND_COMPRESS_CAPTURE)
 914		return -EPERM;
 915
 916	/* you can signal next track if this is intended to be a gapless stream
 917	 * and current track metadata is set
 918	 */
 919	if (stream->metadata_set == false)
 920		return -EPERM;
 921
 922	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
 923	if (retval != 0)
 924		return retval;
 925	stream->metadata_set = false;
 926	stream->next_track = true;
 927	return 0;
 928}
 929
 930static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 931{
 932	int retval;
 933
 934	switch (stream->runtime->state) {
 935	case SNDRV_PCM_STATE_OPEN:
 936	case SNDRV_PCM_STATE_SETUP:
 937	case SNDRV_PCM_STATE_PREPARED:
 938	case SNDRV_PCM_STATE_PAUSED:
 939		return -EPERM;
 940	case SNDRV_PCM_STATE_XRUN:
 941		return -EPIPE;
 942	default:
 943		break;
 944	}
 945
 946	/* partial drain doesn't have any meaning for capture streams */
 947	if (stream->direction == SND_COMPRESS_CAPTURE)
 948		return -EPERM;
 949
 950	/* stream can be drained only when next track has been signalled */
 951	if (stream->next_track == false)
 952		return -EPERM;
 953
 954	stream->partial_drain = true;
 955	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
 956	if (retval) {
 957		pr_debug("Partial drain returned failure\n");
 958		wake_up(&stream->runtime->sleep);
 959		return retval;
 960	}
 961
 962	stream->next_track = false;
 963	return snd_compress_wait_for_drain(stream);
 964}
 965
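/*
 * Editor's note -- an illustrative ordering only; see the compress-offload
 * documentation for the authoritative gapless sequence: the checks above
 * enforce that metadata is set before NEXT_TRACK and that NEXT_TRACK has
 * been issued before PARTIAL_DRAIN, so a running gapless stream is switched
 * roughly like this:
 *
 *	ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &metadata);  // next track's gapless info
 *	ioctl(fd, SNDRV_COMPRESS_NEXT_TRACK);
 *	ioctl(fd, SNDRV_COMPRESS_PARTIAL_DRAIN);  // returns when the old track is done
 *	// keep write()ing; subsequent data belongs to the new track
 */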
 966static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 967{
 968	struct snd_compr_file *data = f->private_data;
 969	struct snd_compr_stream *stream;
 970	int retval = -ENOTTY;
 971
 972	if (snd_BUG_ON(!data))
 973		return -EFAULT;
 974
 975	stream = &data->stream;
 976
 977	mutex_lock(&stream->device->lock);
 978	switch (_IOC_NR(cmd)) {
 979	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
 980		retval = put_user(SNDRV_COMPRESS_VERSION,
 981				(int __user *)arg) ? -EFAULT : 0;
 982		break;
 983	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
 984		retval = snd_compr_get_caps(stream, arg);
 985		break;
 986#ifndef COMPR_CODEC_CAPS_OVERFLOW
 987	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
 988		retval = snd_compr_get_codec_caps(stream, arg);
 989		break;
 990#endif
 991	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
 992		retval = snd_compr_set_params(stream, arg);
 993		break;
 994	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
 995		retval = snd_compr_get_params(stream, arg);
 996		break;
 997	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
 998		retval = snd_compr_set_metadata(stream, arg);
 999		break;
1000	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
1001		retval = snd_compr_get_metadata(stream, arg);
1002		break;
1003	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
1004		retval = snd_compr_tstamp(stream, arg);
1005		break;
1006	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
1007		retval = snd_compr_ioctl_avail(stream, arg);
1008		break;
1009	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
1010		retval = snd_compr_pause(stream);
1011		break;
1012	case _IOC_NR(SNDRV_COMPRESS_RESUME):
1013		retval = snd_compr_resume(stream);
1014		break;
1015	case _IOC_NR(SNDRV_COMPRESS_START):
1016		retval = snd_compr_start(stream);
1017		break;
1018	case _IOC_NR(SNDRV_COMPRESS_STOP):
1019		retval = snd_compr_stop(stream);
1020		break;
1021	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
1022		retval = snd_compr_drain(stream);
1023		break;
1024	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
1025		retval = snd_compr_partial_drain(stream);
1026		break;
1027	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
1028		retval = snd_compr_next_track(stream);
1029		break;
1030
1031	}
1032	mutex_unlock(&stream->device->lock);
1033	return retval;
1034}
1035
1036/* support of 32bit userspace on 64bit platforms */
1037#ifdef CONFIG_COMPAT
1038static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1039						unsigned long arg)
1040{
1041	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1042}
1043#endif
1044
1045static const struct file_operations snd_compr_file_ops = {
1046		.owner =	THIS_MODULE,
1047		.open =		snd_compr_open,
1048		.release =	snd_compr_free,
1049		.write =	snd_compr_write,
1050		.read =		snd_compr_read,
1051		.unlocked_ioctl = snd_compr_ioctl,
1052#ifdef CONFIG_COMPAT
1053		.compat_ioctl = snd_compr_ioctl_compat,
1054#endif
1055		.mmap =		snd_compr_mmap,
1056		.poll =		snd_compr_poll,
1057};
1058
1059static int snd_compress_dev_register(struct snd_device *device)
1060{
1061	int ret;
1062	struct snd_compr *compr;
1063
1064	if (snd_BUG_ON(!device || !device->device_data))
1065		return -EBADFD;
1066	compr = device->device_data;
1067
1068	pr_debug("reg device %s, direction %d\n", compr->name,
1069			compr->direction);
1070	/* register compressed device */
1071	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1072				  compr->card, compr->device,
1073				  &snd_compr_file_ops, compr, compr->dev);
1074	if (ret < 0) {
1075		pr_err("snd_register_device failed %d\n", ret);
1076		return ret;
1077	}
1078	return ret;
1079
1080}
1081
1082static int snd_compress_dev_disconnect(struct snd_device *device)
1083{
1084	struct snd_compr *compr;
1085
1086	compr = device->device_data;
1087	snd_unregister_device(compr->dev);
1088	return 0;
1089}
1090
1091#ifdef CONFIG_SND_VERBOSE_PROCFS
1092static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1093					struct snd_info_buffer *buffer)
1094{
1095	struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1096
1097	snd_iprintf(buffer, "card: %d\n", compr->card->number);
1098	snd_iprintf(buffer, "device: %d\n", compr->device);
1099	snd_iprintf(buffer, "stream: %s\n",
1100			compr->direction == SND_COMPRESS_PLAYBACK
1101				? "PLAYBACK" : "CAPTURE");
1102	snd_iprintf(buffer, "id: %s\n", compr->id);
1103}
1104
1105static int snd_compress_proc_init(struct snd_compr *compr)
1106{
1107	struct snd_info_entry *entry;
1108	char name[16];
1109
1110	sprintf(name, "compr%i", compr->device);
1111	entry = snd_info_create_card_entry(compr->card, name,
1112					   compr->card->proc_root);
1113	if (!entry)
1114		return -ENOMEM;
1115	entry->mode = S_IFDIR | 0555;
1116	compr->proc_root = entry;
1117
1118	entry = snd_info_create_card_entry(compr->card, "info",
1119					   compr->proc_root);
1120	if (entry)
1121		snd_info_set_text_ops(entry, compr,
1122				      snd_compress_proc_info_read);
1123	compr->proc_info_entry = entry;
1124
1125	return 0;
1126}
1127
1128static void snd_compress_proc_done(struct snd_compr *compr)
1129{
1130	snd_info_free_entry(compr->proc_info_entry);
1131	compr->proc_info_entry = NULL;
1132	snd_info_free_entry(compr->proc_root);
1133	compr->proc_root = NULL;
1134}
1135
1136static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1137{
1138	strscpy(compr->id, id, sizeof(compr->id));
1139}
1140#else
1141static inline int snd_compress_proc_init(struct snd_compr *compr)
1142{
1143	return 0;
1144}
1145
1146static inline void snd_compress_proc_done(struct snd_compr *compr)
1147{
1148}
1149
1150static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1151{
1152}
1153#endif
1154
1155static int snd_compress_dev_free(struct snd_device *device)
1156{
1157	struct snd_compr *compr;
1158
1159	compr = device->device_data;
1160	snd_compress_proc_done(compr);
1161	put_device(compr->dev);
1162	return 0;
1163}
1164
1165/**
1166 * snd_compress_new: create new compress device
1167 * @card: sound card pointer
1168 * @device: device number
1169 * @dirn: device direction, should be of type enum snd_compr_direction
1170 * @id: ID string
1171 * @compr: compress device pointer
1172 *
1173 * Return: zero if successful, or a negative error code
1174 */
1175int snd_compress_new(struct snd_card *card, int device,
1176			int dirn, const char *id, struct snd_compr *compr)
1177{
1178	static const struct snd_device_ops ops = {
1179		.dev_free = snd_compress_dev_free,
1180		.dev_register = snd_compress_dev_register,
1181		.dev_disconnect = snd_compress_dev_disconnect,
1182	};
1183	int ret;
1184
1185	compr->card = card;
1186	compr->device = device;
1187	compr->direction = dirn;
1188	mutex_init(&compr->lock);
1189
1190	snd_compress_set_id(compr, id);
1191
1192	ret = snd_device_alloc(&compr->dev, card);
1193	if (ret)
1194		return ret;
1195	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1196
1197	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1198	if (ret == 0)
1199		snd_compress_proc_init(compr);
1200	else
1201		put_device(compr->dev);
1202
1203	return ret;
1204}
1205EXPORT_SYMBOL_GPL(snd_compress_new);
1206
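/*
 * Editor's note -- a minimal sketch of one possible non-ASoC user, with the
 * names "mydrv_compr_ops" and "mydrv" invented for illustration; ASoC
 * drivers normally have this done for them by the soc-compress layer: a
 * driver fills a struct snd_compr and registers it against an existing card:
 *
 *	compr->ops = &mydrv_compr_ops;		// struct snd_compr_ops
 *	compr->private_data = mydrv;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK,
 *			       "mydrv-compr", compr);
 */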
1207MODULE_DESCRIPTION("ALSA Compressed offload framework");
1208MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1209MODULE_LICENSE("GPL v2");