Linux Audio

v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  compress_core.c - compress offload core
   4 *
   5 *  Copyright (C) 2011 Intel Corporation
   6 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
   7 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
   8 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   9 *
  10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11 */
  12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
  14
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/list.h>
  18#include <linux/math64.h>
  19#include <linux/mm.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/slab.h>
  23#include <linux/sched.h>
  24#include <linux/types.h>
  25#include <linux/uio.h>
  26#include <linux/uaccess.h>
  27#include <linux/module.h>
  28#include <linux/compat.h>
  29#include <sound/core.h>
  30#include <sound/initval.h>
  31#include <sound/info.h>
  32#include <sound/compress_params.h>
  33#include <sound/compress_offload.h>
  34#include <sound/compress_driver.h>
  35
  36/* struct snd_compr_codec_caps overflows the ioctl bit size for some
  37 * architectures, so we need to disable the relevant ioctls.
  38 */
  39#if _IOC_SIZEBITS < 14
  40#define COMPR_CODEC_CAPS_OVERFLOW
  41#endif
  42
  43/* TODO:
  44 * - add substream support for multiple devices in case of
  45 *	SND_DYNAMIC_MINORS is not used
  46 * - Multiple node representation
  47 *	driver should be able to register multiple nodes
  48 */
  49
  50struct snd_compr_file {
  51	unsigned long caps;
  52	struct snd_compr_stream stream;
  53};
  54
  55static void error_delayed_work(struct work_struct *work);
  56
  57/*
  58 * a note on stream states used:
  59 * we use following states in the compressed core
  60 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
  61 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
  62 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
  63 *	state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
  64 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
  65 *	playback only). User after setting up stream writes the data buffer
  66 *	before starting the stream.
  67 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
  68 *	decoding/encoding and rendering/capturing data.
  69 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
  70 *	by calling SNDRV_COMPRESS_DRAIN.
  71 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
  72 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
  73 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
  74 */
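
The block comment above describes the state machine that userspace drives through the compress offload ioctls (SET_PARAMS, write, START, DRAIN, STOP, PAUSE/RESUME). As an illustration only, and not part of compress_offload.c, here is a minimal userspace playback sketch that walks a stream through OPEN -> SETUP -> PREPARED -> RUNNING -> drain; the device path, codec choice and buffer geometry are assumptions for the example.

/* Illustrative userspace sketch (not part of compress_offload.c):
 * drives a playback stream through SETUP -> PREPARED -> RUNNING -> drain.
 * Device path and parameter values are assumptions for the example.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/compress_params.h>
#include <sound/compress_offload.h>

int play_one_track(const char *path, const void *bitstream, size_t len)
{
	struct snd_compr_params params;
	int fd = open(path, O_WRONLY);	/* e.g. "/dev/snd/comprC0D0" (assumed) */

	if (fd < 0)
		return -1;

	memset(&params, 0, sizeof(params));
	params.buffer.fragment_size = 4096;	/* assumed values */
	params.buffer.fragments = 8;
	params.codec.id = SND_AUDIOCODEC_MP3;
	params.codec.ch_in = 2;
	params.codec.ch_out = 2;
	params.codec.sample_rate = 44100;

	/* OPEN -> SETUP */
	if (ioctl(fd, SNDRV_COMPRESS_SET_PARAMS, &params) < 0)
		goto err;
	/* first write moves SETUP -> PREPARED */
	if (write(fd, bitstream, len) < 0)
		goto err;
	/* PREPARED -> RUNNING */
	if (ioctl(fd, SNDRV_COMPRESS_START) < 0)
		goto err;
	/* ...keep feeding data, then let the DSP finish: back to SETUP */
	if (ioctl(fd, SNDRV_COMPRESS_DRAIN) < 0)
		goto err;
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
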
  75static int snd_compr_open(struct inode *inode, struct file *f)
  76{
  77	struct snd_compr *compr;
  78	struct snd_compr_file *data;
  79	struct snd_compr_runtime *runtime;
  80	enum snd_compr_direction dirn;
  81	int maj = imajor(inode);
  82	int ret;
  83
  84	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
  85		dirn = SND_COMPRESS_PLAYBACK;
  86	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
  87		dirn = SND_COMPRESS_CAPTURE;
  88	else
  89		return -EINVAL;
  90
  91	if (maj == snd_major)
  92		compr = snd_lookup_minor_data(iminor(inode),
  93					SNDRV_DEVICE_TYPE_COMPRESS);
  94	else
  95		return -EBADFD;
  96
  97	if (compr == NULL) {
  98		pr_err("no device data!!!\n");
  99		return -ENODEV;
 100	}
 101
 102	if (dirn != compr->direction) {
 103		pr_err("this device doesn't support this direction\n");
 104		snd_card_unref(compr->card);
 105		return -EINVAL;
 106	}
 107
 108	data = kzalloc(sizeof(*data), GFP_KERNEL);
 109	if (!data) {
 110		snd_card_unref(compr->card);
 111		return -ENOMEM;
 112	}
 113
 114	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
 115
 116	data->stream.ops = compr->ops;
 117	data->stream.direction = dirn;
 118	data->stream.private_data = compr->private_data;
 119	data->stream.device = compr;
 120	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
 121	if (!runtime) {
 122		kfree(data);
 123		snd_card_unref(compr->card);
 124		return -ENOMEM;
 125	}
 126	runtime->state = SNDRV_PCM_STATE_OPEN;
 127	init_waitqueue_head(&runtime->sleep);
 128	data->stream.runtime = runtime;
 129	f->private_data = (void *)data;
 130	mutex_lock(&compr->lock);
 131	ret = compr->ops->open(&data->stream);
 132	mutex_unlock(&compr->lock);
 133	if (ret) {
 134		kfree(runtime);
 135		kfree(data);
 136	}
 137	snd_card_unref(compr->card);
 138	return ret;
 139}
 140
 141static int snd_compr_free(struct inode *inode, struct file *f)
 142{
 143	struct snd_compr_file *data = f->private_data;
 144	struct snd_compr_runtime *runtime = data->stream.runtime;
 145
 146	cancel_delayed_work_sync(&data->stream.error_work);
 147
 148	switch (runtime->state) {
 149	case SNDRV_PCM_STATE_RUNNING:
 150	case SNDRV_PCM_STATE_DRAINING:
 151	case SNDRV_PCM_STATE_PAUSED:
 152		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
 153		break;
 154	default:
 155		break;
 156	}
 157
 158	data->stream.ops->free(&data->stream);
 159	if (!data->stream.runtime->dma_buffer_p)
 160		kfree(data->stream.runtime->buffer);
 161	kfree(data->stream.runtime);
 162	kfree(data);
 163	return 0;
 164}
 165
 166static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
 167		struct snd_compr_tstamp *tstamp)
 168{
 169	if (!stream->ops->pointer)
 170		return -ENOTSUPP;
 171	stream->ops->pointer(stream, tstamp);
 172	pr_debug("dsp consumed till %d total %d bytes\n",
 173		tstamp->byte_offset, tstamp->copied_total);
 174	if (stream->direction == SND_COMPRESS_PLAYBACK)
 175		stream->runtime->total_bytes_transferred = tstamp->copied_total;
 176	else
 177		stream->runtime->total_bytes_available = tstamp->copied_total;
 178	return 0;
 179}
 180
 181static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
 182		struct snd_compr_avail *avail)
 183{
 184	memset(avail, 0, sizeof(*avail));
 185	snd_compr_update_tstamp(stream, &avail->tstamp);
 186	/* Still need to return avail even if tstamp can't be filled in */
 187
 188	if (stream->runtime->total_bytes_available == 0 &&
 189			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
 190			stream->direction == SND_COMPRESS_PLAYBACK) {
 191		pr_debug("detected init and someone forgot to do a write\n");
 192		return stream->runtime->buffer_size;
 193	}
 194	pr_debug("app wrote %lld, DSP consumed %lld\n",
 195			stream->runtime->total_bytes_available,
 196			stream->runtime->total_bytes_transferred);
 197	if (stream->runtime->total_bytes_available ==
 198				stream->runtime->total_bytes_transferred) {
 199		if (stream->direction == SND_COMPRESS_PLAYBACK) {
 200			pr_debug("both pointers are same, returning full avail\n");
 201			return stream->runtime->buffer_size;
 202		} else {
 203			pr_debug("both pointers are same, returning no avail\n");
 204			return 0;
 205		}
 206	}
 207
 208	avail->avail = stream->runtime->total_bytes_available -
 209			stream->runtime->total_bytes_transferred;
 210	if (stream->direction == SND_COMPRESS_PLAYBACK)
 211		avail->avail = stream->runtime->buffer_size - avail->avail;
 212
 213	pr_debug("ret avail as %lld\n", avail->avail);
 214	return avail->avail;
 215}
 216
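
snd_compr_calc_avail() above reduces to one formula per direction: for playback, avail is the free space left in the ring buffer (buffer size minus the bytes written but not yet consumed by the DSP); for capture, it is the data captured but not yet read. A standalone worked sketch (illustration only, not kernel code):

/* Illustrative sketch of the avail accounting above (not kernel code). */
#include <stdint.h>
#include <stdio.h>

static uint64_t playback_avail(uint64_t written, uint64_t consumed,
			       uint64_t buffer_size)
{
	/* free space in the ring buffer the app may still write */
	return buffer_size - (written - consumed);
}

static uint64_t capture_avail(uint64_t captured, uint64_t read_bytes)
{
	/* data sitting in the ring buffer the app may still read */
	return captured - read_bytes;
}

int main(void)
{
	/* app wrote 24000 bytes, DSP consumed 16000, 32768-byte buffer */
	printf("playback avail: %llu\n",
	       (unsigned long long)playback_avail(24000, 16000, 32768));
	/* prints 24768: 32768 - (24000 - 16000) */
	printf("capture avail: %llu\n",
	       (unsigned long long)capture_avail(24000, 16000));
	return 0;
}
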
 217static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
 218{
 219	struct snd_compr_avail avail;
 220
 221	return snd_compr_calc_avail(stream, &avail);
 222}
 223
 224static int
 225snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
 226{
 227	struct snd_compr_avail ioctl_avail;
 228	size_t avail;
 229
 230	avail = snd_compr_calc_avail(stream, &ioctl_avail);
 231	ioctl_avail.avail = avail;
 232
 233	switch (stream->runtime->state) {
 234	case SNDRV_PCM_STATE_OPEN:
 235		return -EBADFD;
 236	case SNDRV_PCM_STATE_XRUN:
 237		return -EPIPE;
 238	default:
 239		break;
 240	}
 241
 242	if (copy_to_user((__u64 __user *)arg,
 243				&ioctl_avail, sizeof(ioctl_avail)))
 244		return -EFAULT;
 245	return 0;
 246}
 247
 248static int snd_compr_write_data(struct snd_compr_stream *stream,
 249	       const char __user *buf, size_t count)
 250{
 251	void *dstn;
 252	size_t copy;
 253	struct snd_compr_runtime *runtime = stream->runtime;
 254	/* 64-bit Modulus */
 255	u64 app_pointer = div64_u64(runtime->total_bytes_available,
 256				    runtime->buffer_size);
 257	app_pointer = runtime->total_bytes_available -
 258		      (app_pointer * runtime->buffer_size);
 259
 260	dstn = runtime->buffer + app_pointer;
 261	pr_debug("copying %ld at %lld\n",
 262			(unsigned long)count, app_pointer);
 263	if (count < runtime->buffer_size - app_pointer) {
 264		if (copy_from_user(dstn, buf, count))
 265			return -EFAULT;
 266	} else {
 267		copy = runtime->buffer_size - app_pointer;
 268		if (copy_from_user(dstn, buf, copy))
 269			return -EFAULT;
 270		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
 271			return -EFAULT;
 272	}
 273	/* if DSP cares, let it know data has been written */
 274	if (stream->ops->ack)
 275		stream->ops->ack(stream, count);
 276	return count;
 277}
 278
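
snd_compr_write_data() above derives the application pointer with a 64-bit modulus and splits the copy in two when it would run past the end of the ring buffer. A minimal standalone sketch of the same wrap logic (illustration only; plain memcpy() stands in for copy_from_user()):

/* Illustrative sketch of the ring-buffer wrap in snd_compr_write_data()
 * (not kernel code): plain memcpy() stands in for copy_from_user().
 */
#include <stdint.h>
#include <string.h>

static void ring_write(uint8_t *ring, uint64_t ring_size,
		       uint64_t total_written, const uint8_t *src, size_t count)
{
	uint64_t app_pointer = total_written % ring_size; /* div64_u64() in-kernel */

	if (count < ring_size - app_pointer) {
		/* fits without crossing the end of the buffer */
		memcpy(ring + app_pointer, src, count);
	} else {
		/* split: fill up to the end, then wrap to the start */
		size_t first = ring_size - app_pointer;

		memcpy(ring + app_pointer, src, first);
		memcpy(ring, src + first, count - first);
	}
}
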
 279static ssize_t snd_compr_write(struct file *f, const char __user *buf,
 280		size_t count, loff_t *offset)
 281{
 282	struct snd_compr_file *data = f->private_data;
 283	struct snd_compr_stream *stream;
 284	size_t avail;
 285	int retval;
 286
 287	if (snd_BUG_ON(!data))
 288		return -EFAULT;
 289
 290	stream = &data->stream;
 291	mutex_lock(&stream->device->lock);
  292	/* write is allowed when stream is running or has been setup */
 293	switch (stream->runtime->state) {
 294	case SNDRV_PCM_STATE_SETUP:
 295	case SNDRV_PCM_STATE_PREPARED:
 296	case SNDRV_PCM_STATE_RUNNING:
 297		break;
 298	default:
 299		mutex_unlock(&stream->device->lock);
 300		return -EBADFD;
 301	}
 302
 303	avail = snd_compr_get_avail(stream);
 304	pr_debug("avail returned %ld\n", (unsigned long)avail);
 305	/* calculate how much we can write to buffer */
 306	if (avail > count)
 307		avail = count;
 308
 309	if (stream->ops->copy) {
 310		char __user* cbuf = (char __user*)buf;
 311		retval = stream->ops->copy(stream, cbuf, avail);
 312	} else {
 313		retval = snd_compr_write_data(stream, buf, avail);
 314	}
 315	if (retval > 0)
 316		stream->runtime->total_bytes_available += retval;
 317
 318	/* while initiating the stream, write should be called before START
 319	 * call, so in setup move state */
 320	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
 321		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
 322		pr_debug("stream prepared, Houston we are good to go\n");
 323	}
 324
 325	mutex_unlock(&stream->device->lock);
 326	return retval;
 327}
 328
 329
 330static ssize_t snd_compr_read(struct file *f, char __user *buf,
 331		size_t count, loff_t *offset)
 332{
 333	struct snd_compr_file *data = f->private_data;
 334	struct snd_compr_stream *stream;
 335	size_t avail;
 336	int retval;
 337
 338	if (snd_BUG_ON(!data))
 339		return -EFAULT;
 340
 341	stream = &data->stream;
 342	mutex_lock(&stream->device->lock);
 343
 344	/* read is allowed when stream is running, paused, draining and setup
 345	 * (yes setup is state which we transition to after stop, so if user
 346	 * wants to read data after stop we allow that)
 347	 */
 348	switch (stream->runtime->state) {
 349	case SNDRV_PCM_STATE_OPEN:
 350	case SNDRV_PCM_STATE_PREPARED:
 351	case SNDRV_PCM_STATE_SUSPENDED:
 352	case SNDRV_PCM_STATE_DISCONNECTED:
 353		retval = -EBADFD;
 354		goto out;
 355	case SNDRV_PCM_STATE_XRUN:
 356		retval = -EPIPE;
 357		goto out;
 358	}
 359
 360	avail = snd_compr_get_avail(stream);
 361	pr_debug("avail returned %ld\n", (unsigned long)avail);
 362	/* calculate how much we can read from buffer */
 363	if (avail > count)
 364		avail = count;
 365
 366	if (stream->ops->copy) {
 367		retval = stream->ops->copy(stream, buf, avail);
 368	} else {
 369		retval = -ENXIO;
 370		goto out;
 371	}
 372	if (retval > 0)
 373		stream->runtime->total_bytes_transferred += retval;
 374
 375out:
 376	mutex_unlock(&stream->device->lock);
 377	return retval;
 378}
 379
 380static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
 381{
 382	return -ENXIO;
 383}
 384
 385static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
 386{
 387	if (stream->direction == SND_COMPRESS_PLAYBACK)
 388		return EPOLLOUT | EPOLLWRNORM;
 389	else
 390		return EPOLLIN | EPOLLRDNORM;
 391}
 392
 393static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
 394{
 395	struct snd_compr_file *data = f->private_data;
 396	struct snd_compr_stream *stream;
 397	size_t avail;
 398	__poll_t retval = 0;
 399
 400	if (snd_BUG_ON(!data))
 401		return EPOLLERR;
 402
 403	stream = &data->stream;
 404
 405	mutex_lock(&stream->device->lock);
 406
 407	switch (stream->runtime->state) {
 408	case SNDRV_PCM_STATE_OPEN:
 409	case SNDRV_PCM_STATE_XRUN:
 410		retval = snd_compr_get_poll(stream) | EPOLLERR;
 411		goto out;
 412	default:
 413		break;
 414	}
 415
 416	poll_wait(f, &stream->runtime->sleep, wait);
 417
 418	avail = snd_compr_get_avail(stream);
 419	pr_debug("avail is %ld\n", (unsigned long)avail);
 420	/* check if we have at least one fragment to fill */
 421	switch (stream->runtime->state) {
 422	case SNDRV_PCM_STATE_DRAINING:
 423		/* stream has been woken up after drain is complete
 424		 * draining done so set stream state to stopped
 425		 */
 426		retval = snd_compr_get_poll(stream);
 427		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 428		break;
 429	case SNDRV_PCM_STATE_RUNNING:
 430	case SNDRV_PCM_STATE_PREPARED:
 431	case SNDRV_PCM_STATE_PAUSED:
 432		if (avail >= stream->runtime->fragment_size)
 433			retval = snd_compr_get_poll(stream);
 434		break;
 435	default:
 436		retval = snd_compr_get_poll(stream) | EPOLLERR;
 437		break;
 438	}
 439out:
 440	mutex_unlock(&stream->device->lock);
 441	return retval;
 442}
 443
 444static int
 445snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
 446{
 447	int retval;
 448	struct snd_compr_caps caps;
 449
 450	if (!stream->ops->get_caps)
 451		return -ENXIO;
 452
 453	memset(&caps, 0, sizeof(caps));
 454	retval = stream->ops->get_caps(stream, &caps);
 455	if (retval)
 456		goto out;
 457	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 458		retval = -EFAULT;
 459out:
 460	return retval;
 461}
 462
 463#ifndef COMPR_CODEC_CAPS_OVERFLOW
 464static int
 465snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 466{
 467	int retval;
 468	struct snd_compr_codec_caps *caps;
 469
 470	if (!stream->ops->get_codec_caps)
 471		return -ENXIO;
 472
 473	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 474	if (!caps)
 475		return -ENOMEM;
 476
 477	retval = stream->ops->get_codec_caps(stream, caps);
 478	if (retval)
 479		goto out;
 480	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
 481		retval = -EFAULT;
 482
 483out:
 484	kfree(caps);
 485	return retval;
 486}
 487#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 488
 489int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
 490{
 491	struct snd_dma_buffer *dmab;
 492	int ret;
 493
 494	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 495		return -EINVAL;
 496	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
 497	if (!dmab)
 498		return -ENOMEM;
 499	dmab->dev = stream->dma_buffer.dev;
 500	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
 501	if (ret < 0) {
 502		kfree(dmab);
 503		return ret;
 504	}
 505
 506	snd_compr_set_runtime_buffer(stream, dmab);
 507	stream->runtime->dma_bytes = size;
 508	return 1;
 509}
 510EXPORT_SYMBOL(snd_compr_malloc_pages);
 511
 512int snd_compr_free_pages(struct snd_compr_stream *stream)
 513{
 514	struct snd_compr_runtime *runtime;
 515
 516	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 517		return -EINVAL;
 518	runtime = stream->runtime;
 519	if (runtime->dma_area == NULL)
 520		return 0;
 521	if (runtime->dma_buffer_p != &stream->dma_buffer) {
 522		/* It's a newly allocated buffer. Release it now. */
 523		snd_dma_free_pages(runtime->dma_buffer_p);
 524		kfree(runtime->dma_buffer_p);
 525	}
 526
 527	snd_compr_set_runtime_buffer(stream, NULL);
 528	return 0;
 529}
 530EXPORT_SYMBOL(snd_compr_free_pages);
 531
 532/* revisit this with snd_pcm_preallocate_xxx */
 533static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
 534		struct snd_compr_params *params)
 535{
 536	unsigned int buffer_size;
 537	void *buffer = NULL;
 538
 539	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
 540	if (stream->ops->copy) {
 541		buffer = NULL;
 542		/* if copy is defined the driver will be required to copy
 543		 * the data from core
 544		 */
 545	} else {
 546		if (stream->runtime->dma_buffer_p) {
 547
 548			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
 549				dev_err(&stream->device->dev,
 550						"Not enough DMA buffer");
 551			else
 552				buffer = stream->runtime->dma_buffer_p->area;
 553
 554		} else {
 555			buffer = kmalloc(buffer_size, GFP_KERNEL);
 556		}
 557
 558		if (!buffer)
 559			return -ENOMEM;
 560	}
 561	stream->runtime->fragment_size = params->buffer.fragment_size;
 562	stream->runtime->fragments = params->buffer.fragments;
 563	stream->runtime->buffer = buffer;
 564	stream->runtime->buffer_size = buffer_size;
 565	return 0;
 566}
 567
 568static int snd_compress_check_input(struct snd_compr_params *params)
 569{
  570	/* first let's check the buffer parameters */
 571	if (params->buffer.fragment_size == 0 ||
 572	    params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
 573	    params->buffer.fragments == 0)
 574		return -EINVAL;
 575
 576	/* now codec parameters */
 577	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
 578		return -EINVAL;
 579
 580	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
 581		return -EINVAL;
 582
 583	return 0;
 584}
 585
 586static int
 587snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 588{
 589	struct snd_compr_params *params;
 590	int retval;
 591
 592	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN) {
 593		/*
 594		 * we should allow parameter change only when stream has been
 595		 * opened not in other cases
 596		 */
 597		params = memdup_user((void __user *)arg, sizeof(*params));
 598		if (IS_ERR(params))
 599			return PTR_ERR(params);
 600
 601		retval = snd_compress_check_input(params);
 602		if (retval)
 603			goto out;
 604
 605		retval = snd_compr_allocate_buffer(stream, params);
 606		if (retval) {
 607			retval = -ENOMEM;
 608			goto out;
 609		}
 610
 611		retval = stream->ops->set_params(stream, params);
 612		if (retval)
 613			goto out;
 614
 615		stream->metadata_set = false;
 616		stream->next_track = false;
 617
 618		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 619	} else {
 620		return -EPERM;
 621	}
 622out:
 623	kfree(params);
 624	return retval;
 625}
 626
 627static int
 628snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
 629{
 630	struct snd_codec *params;
 631	int retval;
 632
 633	if (!stream->ops->get_params)
 634		return -EBADFD;
 635
 636	params = kzalloc(sizeof(*params), GFP_KERNEL);
 637	if (!params)
 638		return -ENOMEM;
 639	retval = stream->ops->get_params(stream, params);
 640	if (retval)
 641		goto out;
 642	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
 643		retval = -EFAULT;
 644
 645out:
 646	kfree(params);
 647	return retval;
 648}
 649
 650static int
 651snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
 652{
 653	struct snd_compr_metadata metadata;
 654	int retval;
 655
 656	if (!stream->ops->get_metadata)
 657		return -ENXIO;
 658
 659	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 660		return -EFAULT;
 661
 662	retval = stream->ops->get_metadata(stream, &metadata);
 663	if (retval != 0)
 664		return retval;
 665
 666	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
 667		return -EFAULT;
 668
 669	return 0;
 670}
 671
 672static int
 673snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
 674{
 675	struct snd_compr_metadata metadata;
 676	int retval;
 677
 678	if (!stream->ops->set_metadata)
 679		return -ENXIO;
 680	/*
 681	* we should allow parameter change only when stream has been
 682	* opened not in other cases
 683	*/
 684	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 685		return -EFAULT;
 686
 687	retval = stream->ops->set_metadata(stream, &metadata);
 688	stream->metadata_set = true;
 689
 690	return retval;
 691}
 692
 693static inline int
 694snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
 695{
 696	struct snd_compr_tstamp tstamp = {0};
 697	int ret;
 698
 699	ret = snd_compr_update_tstamp(stream, &tstamp);
 700	if (ret == 0)
 701		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
 702			&tstamp, sizeof(tstamp)) ? -EFAULT : 0;
 703	return ret;
 704}
 705
 706static int snd_compr_pause(struct snd_compr_stream *stream)
 707{
 708	int retval;
 709
 710	switch (stream->runtime->state) {
 711	case SNDRV_PCM_STATE_RUNNING:
 712		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 713		if (!retval)
 714			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
 715		break;
 716	case SNDRV_PCM_STATE_DRAINING:
 717		if (!stream->device->use_pause_in_draining)
 718			return -EPERM;
 719		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 720		if (!retval)
 721			stream->pause_in_draining = true;
 722		break;
 723	default:
 724		return -EPERM;
 725	}
 726	return retval;
 727}
 728
 729static int snd_compr_resume(struct snd_compr_stream *stream)
 730{
 731	int retval;
 732
 733	switch (stream->runtime->state) {
 734	case SNDRV_PCM_STATE_PAUSED:
 735		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 736		if (!retval)
 737			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 738		break;
 739	case SNDRV_PCM_STATE_DRAINING:
 740		if (!stream->pause_in_draining)
 741			return -EPERM;
 742		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 743		if (!retval)
 744			stream->pause_in_draining = false;
 745		break;
 746	default:
 747		return -EPERM;
 748	}
 749	return retval;
 750}
 751
 752static int snd_compr_start(struct snd_compr_stream *stream)
 753{
 754	int retval;
 755
 756	switch (stream->runtime->state) {
 757	case SNDRV_PCM_STATE_SETUP:
 758		if (stream->direction != SND_COMPRESS_CAPTURE)
 759			return -EPERM;
 760		break;
 761	case SNDRV_PCM_STATE_PREPARED:
 762		break;
 763	default:
 764		return -EPERM;
 765	}
 766
 767	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 768	if (!retval)
 769		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 770	return retval;
 771}
 772
 773static int snd_compr_stop(struct snd_compr_stream *stream)
 774{
 775	int retval;
 776
 777	switch (stream->runtime->state) {
 778	case SNDRV_PCM_STATE_OPEN:
 779	case SNDRV_PCM_STATE_SETUP:
 780	case SNDRV_PCM_STATE_PREPARED:
 781		return -EPERM;
 782	default:
 783		break;
 784	}
 785
 786	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 787	if (!retval) {
 788		/* clear flags and stop any drain wait */
 789		stream->partial_drain = false;
 790		stream->metadata_set = false;
 791		stream->pause_in_draining = false;
 792		snd_compr_drain_notify(stream);
 793		stream->runtime->total_bytes_available = 0;
 794		stream->runtime->total_bytes_transferred = 0;
 795	}
 796	return retval;
 797}
 798
 799static void error_delayed_work(struct work_struct *work)
 800{
 801	struct snd_compr_stream *stream;
 802
 803	stream = container_of(work, struct snd_compr_stream, error_work.work);
 804
 805	mutex_lock(&stream->device->lock);
 806
 807	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 808	wake_up(&stream->runtime->sleep);
 809
 810	mutex_unlock(&stream->device->lock);
 811}
 812
 813/**
 814 * snd_compr_stop_error: Report a fatal error on a stream
 815 * @stream: pointer to stream
 816 * @state: state to transition the stream to
 817 *
 818 * Stop the stream and set its state.
 819 *
 820 * Should be called with compressed device lock held.
 821 *
 822 * Return: zero if successful, or a negative error code
 823 */
 824int snd_compr_stop_error(struct snd_compr_stream *stream,
 825			 snd_pcm_state_t state)
 826{
 827	if (stream->runtime->state == state)
 828		return 0;
 829
 830	stream->runtime->state = state;
 831
 832	pr_debug("Changing state to: %d\n", state);
 833
 834	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
 835
 836	return 0;
 837}
 838EXPORT_SYMBOL_GPL(snd_compr_stop_error);
 839
 840static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
 841{
 842	int ret;
 843
 844	/*
 845	 * We are called with lock held. So drop the lock while we wait for
 846	 * drain complete notification from the driver
 847	 *
 848	 * It is expected that driver will notify the drain completion and then
 849	 * stream will be moved to SETUP state, even if draining resulted in an
 850	 * error. We can trigger next track after this.
 851	 */
 852	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 853	mutex_unlock(&stream->device->lock);
 854
 855	/* we wait for drain to complete here, drain can return when
 856	 * interruption occurred, wait returned error or success.
 857	 * For the first two cases we don't do anything different here and
 858	 * return after waking up
 859	 */
 860
 861	ret = wait_event_interruptible(stream->runtime->sleep,
 862			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
 863	if (ret == -ERESTARTSYS)
 864		pr_debug("wait aborted by a signal\n");
 865	else if (ret)
 866		pr_debug("wait for drain failed with %d\n", ret);
 867
 868
 869	wake_up(&stream->runtime->sleep);
 870	mutex_lock(&stream->device->lock);
 871
 872	return ret;
 873}
 874
 875static int snd_compr_drain(struct snd_compr_stream *stream)
 876{
 877	int retval;
 878
 879	switch (stream->runtime->state) {
 880	case SNDRV_PCM_STATE_OPEN:
 881	case SNDRV_PCM_STATE_SETUP:
 882	case SNDRV_PCM_STATE_PREPARED:
 883	case SNDRV_PCM_STATE_PAUSED:
 884		return -EPERM;
 885	case SNDRV_PCM_STATE_XRUN:
 886		return -EPIPE;
 887	default:
 888		break;
 889	}
 890
 891	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
 892	if (retval) {
 893		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 894		wake_up(&stream->runtime->sleep);
 895		return retval;
 896	}
 897
 898	return snd_compress_wait_for_drain(stream);
 899}
 900
 901static int snd_compr_next_track(struct snd_compr_stream *stream)
 902{
 903	int retval;
 904
 905	/* only a running stream can transition to next track */
 906	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 907		return -EPERM;
 908
 909	/* next track doesn't have any meaning for capture streams */
 910	if (stream->direction == SND_COMPRESS_CAPTURE)
 911		return -EPERM;
 912
 913	/* you can signal next track if this is intended to be a gapless stream
 914	 * and current track metadata is set
 915	 */
 916	if (stream->metadata_set == false)
 917		return -EPERM;
 918
 919	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
 920	if (retval != 0)
 921		return retval;
 922	stream->metadata_set = false;
 923	stream->next_track = true;
 924	return 0;
 925}
 926
 927static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 928{
 929	int retval;
 930
 931	switch (stream->runtime->state) {
 932	case SNDRV_PCM_STATE_OPEN:
 933	case SNDRV_PCM_STATE_SETUP:
 934	case SNDRV_PCM_STATE_PREPARED:
 935	case SNDRV_PCM_STATE_PAUSED:
 936		return -EPERM;
 937	case SNDRV_PCM_STATE_XRUN:
 938		return -EPIPE;
 939	default:
 940		break;
 941	}
 942
 943	/* partial drain doesn't have any meaning for capture streams */
 944	if (stream->direction == SND_COMPRESS_CAPTURE)
 945		return -EPERM;
 946
 947	/* stream can be drained only when next track has been signalled */
 948	if (stream->next_track == false)
 949		return -EPERM;
 950
 951	stream->partial_drain = true;
 952	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
 953	if (retval) {
 954		pr_debug("Partial drain returned failure\n");
 955		wake_up(&stream->runtime->sleep);
 956		return retval;
 957	}
 958
 959	stream->next_track = false;
 960	return snd_compress_wait_for_drain(stream);
 961}
 962
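
The checks in snd_compr_next_track() and snd_compr_partial_drain() above impose an ordering on gapless playback: track metadata must be set before NEXT_TRACK, and PARTIAL_DRAIN is only accepted once the next track has been signalled. An illustrative userspace sketch of that sequence (not part of this file; the metadata key and value are placeholders for the example):

/* Illustrative gapless transition sketch (not part of compress_offload.c).
 * Ordering follows the checks above: SET_METADATA -> NEXT_TRACK -> PARTIAL_DRAIN.
 * The metadata key/value shown are placeholders for the example.
 */
#include <sys/ioctl.h>
#include <sound/compress_offload.h>

static int switch_to_next_track(int fd)
{
	struct snd_compr_metadata meta = { 0 };

	/* gapless metadata for the track boundary (placeholder value) */
	meta.key = SNDRV_COMPRESS_ENCODER_PADDING;
	meta.value[0] = 576;
	if (ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &meta) < 0)
		return -1;

	/* signal that the following writes belong to the next track */
	if (ioctl(fd, SNDRV_COMPRESS_NEXT_TRACK) < 0)
		return -1;

	/* drain only the current track; playback continues into the next one */
	if (ioctl(fd, SNDRV_COMPRESS_PARTIAL_DRAIN) < 0)
		return -1;

	return 0;
}
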
 963static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 964{
 965	struct snd_compr_file *data = f->private_data;
 966	struct snd_compr_stream *stream;
 967	int retval = -ENOTTY;
 968
 969	if (snd_BUG_ON(!data))
 970		return -EFAULT;
 971
 972	stream = &data->stream;
 973
 974	mutex_lock(&stream->device->lock);
 975	switch (_IOC_NR(cmd)) {
 976	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
 977		retval = put_user(SNDRV_COMPRESS_VERSION,
 978				(int __user *)arg) ? -EFAULT : 0;
 979		break;
 980	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
 981		retval = snd_compr_get_caps(stream, arg);
 982		break;
 983#ifndef COMPR_CODEC_CAPS_OVERFLOW
 984	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
 985		retval = snd_compr_get_codec_caps(stream, arg);
 986		break;
 987#endif
 988	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
 989		retval = snd_compr_set_params(stream, arg);
 990		break;
 991	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
 992		retval = snd_compr_get_params(stream, arg);
 993		break;
 994	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
 995		retval = snd_compr_set_metadata(stream, arg);
 996		break;
 997	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
 998		retval = snd_compr_get_metadata(stream, arg);
 999		break;
1000	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
1001		retval = snd_compr_tstamp(stream, arg);
1002		break;
1003	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
1004		retval = snd_compr_ioctl_avail(stream, arg);
1005		break;
1006	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
1007		retval = snd_compr_pause(stream);
1008		break;
1009	case _IOC_NR(SNDRV_COMPRESS_RESUME):
1010		retval = snd_compr_resume(stream);
1011		break;
1012	case _IOC_NR(SNDRV_COMPRESS_START):
1013		retval = snd_compr_start(stream);
1014		break;
1015	case _IOC_NR(SNDRV_COMPRESS_STOP):
1016		retval = snd_compr_stop(stream);
1017		break;
1018	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
1019		retval = snd_compr_drain(stream);
1020		break;
1021	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
1022		retval = snd_compr_partial_drain(stream);
1023		break;
1024	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
1025		retval = snd_compr_next_track(stream);
1026		break;
1027
1028	}
1029	mutex_unlock(&stream->device->lock);
1030	return retval;
1031}
1032
1033/* support of 32bit userspace on 64bit platforms */
1034#ifdef CONFIG_COMPAT
1035static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1036						unsigned long arg)
1037{
1038	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1039}
1040#endif
1041
1042static const struct file_operations snd_compr_file_ops = {
1043		.owner =	THIS_MODULE,
1044		.open =		snd_compr_open,
1045		.release =	snd_compr_free,
1046		.write =	snd_compr_write,
1047		.read =		snd_compr_read,
1048		.unlocked_ioctl = snd_compr_ioctl,
1049#ifdef CONFIG_COMPAT
1050		.compat_ioctl = snd_compr_ioctl_compat,
1051#endif
1052		.mmap =		snd_compr_mmap,
1053		.poll =		snd_compr_poll,
1054};
1055
1056static int snd_compress_dev_register(struct snd_device *device)
1057{
1058	int ret;
1059	struct snd_compr *compr;
1060
1061	if (snd_BUG_ON(!device || !device->device_data))
1062		return -EBADFD;
1063	compr = device->device_data;
1064
1065	pr_debug("reg device %s, direction %d\n", compr->name,
1066			compr->direction);
1067	/* register compressed device */
1068	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1069				  compr->card, compr->device,
1070				  &snd_compr_file_ops, compr, &compr->dev);
1071	if (ret < 0) {
1072		pr_err("snd_register_device failed %d\n", ret);
1073		return ret;
1074	}
1075	return ret;
1076
1077}
1078
1079static int snd_compress_dev_disconnect(struct snd_device *device)
1080{
1081	struct snd_compr *compr;
1082
1083	compr = device->device_data;
1084	snd_unregister_device(&compr->dev);
1085	return 0;
1086}
1087
1088#ifdef CONFIG_SND_VERBOSE_PROCFS
1089static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1090					struct snd_info_buffer *buffer)
1091{
1092	struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1093
1094	snd_iprintf(buffer, "card: %d\n", compr->card->number);
1095	snd_iprintf(buffer, "device: %d\n", compr->device);
1096	snd_iprintf(buffer, "stream: %s\n",
1097			compr->direction == SND_COMPRESS_PLAYBACK
1098				? "PLAYBACK" : "CAPTURE");
1099	snd_iprintf(buffer, "id: %s\n", compr->id);
1100}
1101
1102static int snd_compress_proc_init(struct snd_compr *compr)
1103{
1104	struct snd_info_entry *entry;
1105	char name[16];
1106
1107	sprintf(name, "compr%i", compr->device);
1108	entry = snd_info_create_card_entry(compr->card, name,
1109					   compr->card->proc_root);
1110	if (!entry)
1111		return -ENOMEM;
1112	entry->mode = S_IFDIR | 0555;
1113	compr->proc_root = entry;
1114
1115	entry = snd_info_create_card_entry(compr->card, "info",
1116					   compr->proc_root);
1117	if (entry)
1118		snd_info_set_text_ops(entry, compr,
1119				      snd_compress_proc_info_read);
1120	compr->proc_info_entry = entry;
1121
1122	return 0;
1123}
1124
1125static void snd_compress_proc_done(struct snd_compr *compr)
1126{
1127	snd_info_free_entry(compr->proc_info_entry);
1128	compr->proc_info_entry = NULL;
1129	snd_info_free_entry(compr->proc_root);
1130	compr->proc_root = NULL;
1131}
1132
1133static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1134{
1135	strscpy(compr->id, id, sizeof(compr->id));
1136}
1137#else
1138static inline int snd_compress_proc_init(struct snd_compr *compr)
1139{
1140	return 0;
1141}
1142
1143static inline void snd_compress_proc_done(struct snd_compr *compr)
1144{
1145}
1146
1147static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1148{
1149}
1150#endif
1151
1152static int snd_compress_dev_free(struct snd_device *device)
1153{
1154	struct snd_compr *compr;
1155
1156	compr = device->device_data;
1157	snd_compress_proc_done(compr);
1158	put_device(&compr->dev);
1159	return 0;
1160}
1161
1162/**
1163 * snd_compress_new: create new compress device
1164 * @card: sound card pointer
1165 * @device: device number
1166 * @dirn: device direction, should be of type enum snd_compr_direction
1167 * @id: ID string
1168 * @compr: compress device pointer
1169 *
1170 * Return: zero if successful, or a negative error code
1171 */
1172int snd_compress_new(struct snd_card *card, int device,
1173			int dirn, const char *id, struct snd_compr *compr)
1174{
1175	static const struct snd_device_ops ops = {
1176		.dev_free = snd_compress_dev_free,
1177		.dev_register = snd_compress_dev_register,
1178		.dev_disconnect = snd_compress_dev_disconnect,
1179	};
1180	int ret;
1181
1182	compr->card = card;
1183	compr->device = device;
1184	compr->direction = dirn;
1185	mutex_init(&compr->lock);
1186
1187	snd_compress_set_id(compr, id);
1188
1189	snd_device_initialize(&compr->dev, card);
1190	dev_set_name(&compr->dev, "comprC%iD%i", card->number, device);
1191
1192	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1193	if (ret == 0)
1194		snd_compress_proc_init(compr);
1195
1196	return ret;
1197}
1198EXPORT_SYMBOL_GPL(snd_compress_new);
1199
1200MODULE_DESCRIPTION("ALSA Compressed offload framework");
1201MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1202MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  compress_core.c - compress offload core
   4 *
   5 *  Copyright (C) 2011 Intel Corporation
   6 *  Authors:	Vinod Koul <vinod.koul@linux.intel.com>
   7 *		Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
   8 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   9 *
  10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  11 */
  12#define FORMAT(fmt) "%s: %d: " fmt, __func__, __LINE__
  13#define pr_fmt(fmt) KBUILD_MODNAME ": " FORMAT(fmt)
  14
  15#include <linux/file.h>
  16#include <linux/fs.h>
  17#include <linux/list.h>
  18#include <linux/math64.h>
  19#include <linux/mm.h>
  20#include <linux/mutex.h>
  21#include <linux/poll.h>
  22#include <linux/slab.h>
  23#include <linux/sched.h>
  24#include <linux/types.h>
  25#include <linux/uio.h>
  26#include <linux/uaccess.h>
  27#include <linux/dma-buf.h>
  28#include <linux/module.h>
  29#include <linux/compat.h>
  30#include <sound/core.h>
  31#include <sound/initval.h>
  32#include <sound/info.h>
  33#include <sound/compress_params.h>
  34#include <sound/compress_offload.h>
  35#include <sound/compress_driver.h>
  36
  37/* struct snd_compr_codec_caps overflows the ioctl bit size for some
  38 * architectures, so we need to disable the relevant ioctls.
  39 */
  40#if _IOC_SIZEBITS < 14
  41#define COMPR_CODEC_CAPS_OVERFLOW
  42#endif
  43
  44/* TODO:
  45 * - add substream support for multiple devices in case of
  46 *	SND_DYNAMIC_MINORS is not used
  47 * - Multiple node representation
  48 *	driver should be able to register multiple nodes
  49 */
  50
  51struct snd_compr_file {
  52	unsigned long caps;
  53	struct snd_compr_stream stream;
  54};
  55
  56static void error_delayed_work(struct work_struct *work);
  57
  58#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
  59static void snd_compr_task_free_all(struct snd_compr_stream *stream);
  60#else
  61static inline void snd_compr_task_free_all(struct snd_compr_stream *stream) { }
  62#endif
  63
  64/*
  65 * a note on stream states used:
  66 * we use following states in the compressed core
  67 * SNDRV_PCM_STATE_OPEN: When stream has been opened.
  68 * SNDRV_PCM_STATE_SETUP: When stream has been initialized. This is done by
  69 *	calling SNDRV_COMPRESS_SET_PARAMS. Running streams will come to this
  70 *	state at stop by calling SNDRV_COMPRESS_STOP, or at end of drain.
  71 * SNDRV_PCM_STATE_PREPARED: When a stream has been written to (for
  72 *	playback only). User after setting up stream writes the data buffer
  73 *	before starting the stream.
  74 * SNDRV_PCM_STATE_RUNNING: When stream has been started and is
  75 *	decoding/encoding and rendering/capturing data.
  76 * SNDRV_PCM_STATE_DRAINING: When stream is draining current data. This is done
  77 *	by calling SNDRV_COMPRESS_DRAIN.
  78 * SNDRV_PCM_STATE_PAUSED: When stream is paused. This is done by calling
  79 *	SNDRV_COMPRESS_PAUSE. It can be stopped or resumed by calling
  80 *	SNDRV_COMPRESS_STOP or SNDRV_COMPRESS_RESUME respectively.
  81 */
  82static int snd_compr_open(struct inode *inode, struct file *f)
  83{
  84	struct snd_compr *compr;
  85	struct snd_compr_file *data;
  86	struct snd_compr_runtime *runtime;
  87	enum snd_compr_direction dirn;
  88	int maj = imajor(inode);
  89	int ret;
  90
  91	if ((f->f_flags & O_ACCMODE) == O_WRONLY)
  92		dirn = SND_COMPRESS_PLAYBACK;
  93	else if ((f->f_flags & O_ACCMODE) == O_RDONLY)
  94		dirn = SND_COMPRESS_CAPTURE;
  95	else if ((f->f_flags & O_ACCMODE) == O_RDWR)
  96		dirn = SND_COMPRESS_ACCEL;
  97	else
  98		return -EINVAL;
  99
 100	if (maj == snd_major)
 101		compr = snd_lookup_minor_data(iminor(inode),
 102					SNDRV_DEVICE_TYPE_COMPRESS);
 103	else
 104		return -EBADFD;
 105
 106	if (compr == NULL) {
 107		pr_err("no device data!!!\n");
 108		return -ENODEV;
 109	}
 110
 111	if (dirn != compr->direction) {
 112		pr_err("this device doesn't support this direction\n");
 113		snd_card_unref(compr->card);
 114		return -EINVAL;
 115	}
 116
 117	data = kzalloc(sizeof(*data), GFP_KERNEL);
 118	if (!data) {
 119		snd_card_unref(compr->card);
 120		return -ENOMEM;
 121	}
 122
 123	INIT_DELAYED_WORK(&data->stream.error_work, error_delayed_work);
 124
 125	data->stream.ops = compr->ops;
 126	data->stream.direction = dirn;
 127	data->stream.private_data = compr->private_data;
 128	data->stream.device = compr;
 129	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
 130	if (!runtime) {
 131		kfree(data);
 132		snd_card_unref(compr->card);
 133		return -ENOMEM;
 134	}
 135	runtime->state = SNDRV_PCM_STATE_OPEN;
 136	init_waitqueue_head(&runtime->sleep);
 137#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 138	INIT_LIST_HEAD(&runtime->tasks);
 139#endif
 140	data->stream.runtime = runtime;
 141	f->private_data = (void *)data;
 142	scoped_guard(mutex, &compr->lock)
 143		ret = compr->ops->open(&data->stream);
 144	if (ret) {
 145		kfree(runtime);
 146		kfree(data);
 147	}
 148	snd_card_unref(compr->card);
 149	return ret;
 150}
 151
 152static int snd_compr_free(struct inode *inode, struct file *f)
 153{
 154	struct snd_compr_file *data = f->private_data;
 155	struct snd_compr_runtime *runtime = data->stream.runtime;
 156
 157	cancel_delayed_work_sync(&data->stream.error_work);
 158
 159	switch (runtime->state) {
 160	case SNDRV_PCM_STATE_RUNNING:
 161	case SNDRV_PCM_STATE_DRAINING:
 162	case SNDRV_PCM_STATE_PAUSED:
 163		data->stream.ops->trigger(&data->stream, SNDRV_PCM_TRIGGER_STOP);
 164		break;
 165	default:
 166		break;
 167	}
 168
 169	snd_compr_task_free_all(&data->stream);
 170
 171	data->stream.ops->free(&data->stream);
 172	if (!data->stream.runtime->dma_buffer_p)
 173		kfree(data->stream.runtime->buffer);
 174	kfree(data->stream.runtime);
 175	kfree(data);
 176	return 0;
 177}
 178
 179static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
 180		struct snd_compr_tstamp *tstamp)
 181{
 182	if (!stream->ops->pointer)
 183		return -ENOTSUPP;
 184	stream->ops->pointer(stream, tstamp);
 185	pr_debug("dsp consumed till %d total %d bytes\n",
 186		tstamp->byte_offset, tstamp->copied_total);
 187	if (stream->direction == SND_COMPRESS_PLAYBACK)
 188		stream->runtime->total_bytes_transferred = tstamp->copied_total;
 189	else
 190		stream->runtime->total_bytes_available = tstamp->copied_total;
 191	return 0;
 192}
 193
 194static size_t snd_compr_calc_avail(struct snd_compr_stream *stream,
 195		struct snd_compr_avail *avail)
 196{
 197	memset(avail, 0, sizeof(*avail));
 198	snd_compr_update_tstamp(stream, &avail->tstamp);
 199	/* Still need to return avail even if tstamp can't be filled in */
 200
 201	if (stream->runtime->total_bytes_available == 0 &&
 202			stream->runtime->state == SNDRV_PCM_STATE_SETUP &&
 203			stream->direction == SND_COMPRESS_PLAYBACK) {
 204		pr_debug("detected init and someone forgot to do a write\n");
 205		return stream->runtime->buffer_size;
 206	}
 207	pr_debug("app wrote %lld, DSP consumed %lld\n",
 208			stream->runtime->total_bytes_available,
 209			stream->runtime->total_bytes_transferred);
 210	if (stream->runtime->total_bytes_available ==
 211				stream->runtime->total_bytes_transferred) {
 212		if (stream->direction == SND_COMPRESS_PLAYBACK) {
 213			pr_debug("both pointers are same, returning full avail\n");
 214			return stream->runtime->buffer_size;
 215		} else {
 216			pr_debug("both pointers are same, returning no avail\n");
 217			return 0;
 218		}
 219	}
 220
 221	avail->avail = stream->runtime->total_bytes_available -
 222			stream->runtime->total_bytes_transferred;
 223	if (stream->direction == SND_COMPRESS_PLAYBACK)
 224		avail->avail = stream->runtime->buffer_size - avail->avail;
 225
 226	pr_debug("ret avail as %lld\n", avail->avail);
 227	return avail->avail;
 228}
 229
 230static inline size_t snd_compr_get_avail(struct snd_compr_stream *stream)
 231{
 232	struct snd_compr_avail avail;
 233
 234	return snd_compr_calc_avail(stream, &avail);
 235}
 236
 237static int
 238snd_compr_ioctl_avail(struct snd_compr_stream *stream, unsigned long arg)
 239{
 240	struct snd_compr_avail ioctl_avail;
 241	size_t avail;
 242
 243	if (stream->direction == SND_COMPRESS_ACCEL)
 244		return -EBADFD;
 245
 246	avail = snd_compr_calc_avail(stream, &ioctl_avail);
 247	ioctl_avail.avail = avail;
 248
 249	switch (stream->runtime->state) {
 250	case SNDRV_PCM_STATE_OPEN:
 251		return -EBADFD;
 252	case SNDRV_PCM_STATE_XRUN:
 253		return -EPIPE;
 254	default:
 255		break;
 256	}
 257
 258	if (copy_to_user((__u64 __user *)arg,
 259				&ioctl_avail, sizeof(ioctl_avail)))
 260		return -EFAULT;
 261	return 0;
 262}
 263
 264static int snd_compr_write_data(struct snd_compr_stream *stream,
 265	       const char __user *buf, size_t count)
 266{
 267	void *dstn;
 268	size_t copy;
 269	struct snd_compr_runtime *runtime = stream->runtime;
 270	/* 64-bit Modulus */
 271	u64 app_pointer = div64_u64(runtime->total_bytes_available,
 272				    runtime->buffer_size);
 273	app_pointer = runtime->total_bytes_available -
 274		      (app_pointer * runtime->buffer_size);
 275
 276	dstn = runtime->buffer + app_pointer;
 277	pr_debug("copying %ld at %lld\n",
 278			(unsigned long)count, app_pointer);
 279	if (count < runtime->buffer_size - app_pointer) {
 280		if (copy_from_user(dstn, buf, count))
 281			return -EFAULT;
 282	} else {
 283		copy = runtime->buffer_size - app_pointer;
 284		if (copy_from_user(dstn, buf, copy))
 285			return -EFAULT;
 286		if (copy_from_user(runtime->buffer, buf + copy, count - copy))
 287			return -EFAULT;
 288	}
 289	/* if DSP cares, let it know data has been written */
 290	if (stream->ops->ack)
 291		stream->ops->ack(stream, count);
 292	return count;
 293}
 294
 295static ssize_t snd_compr_write(struct file *f, const char __user *buf,
 296		size_t count, loff_t *offset)
 297{
 298	struct snd_compr_file *data = f->private_data;
 299	struct snd_compr_stream *stream;
 300	size_t avail;
 301	int retval;
 302
 303	if (snd_BUG_ON(!data))
 304		return -EFAULT;
 305
 306	stream = &data->stream;
 307	if (stream->direction == SND_COMPRESS_ACCEL)
 308		return -EBADFD;
 309	guard(mutex)(&stream->device->lock);
 310	/* write is allowed when stream is running or has been setup */
 311	switch (stream->runtime->state) {
 312	case SNDRV_PCM_STATE_SETUP:
 313	case SNDRV_PCM_STATE_PREPARED:
 314	case SNDRV_PCM_STATE_RUNNING:
 315		break;
 316	default:
 317		return -EBADFD;
 318	}
 319
 320	avail = snd_compr_get_avail(stream);
 321	pr_debug("avail returned %ld\n", (unsigned long)avail);
 322	/* calculate how much we can write to buffer */
 323	if (avail > count)
 324		avail = count;
 325
 326	if (stream->ops->copy) {
 327		char __user* cbuf = (char __user*)buf;
 328		retval = stream->ops->copy(stream, cbuf, avail);
 329	} else {
 330		retval = snd_compr_write_data(stream, buf, avail);
 331	}
 332	if (retval > 0)
 333		stream->runtime->total_bytes_available += retval;
 334
 335	/* while initiating the stream, write should be called before START
 336	 * call, so in setup move state */
 337	if (stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
 338		stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
 339		pr_debug("stream prepared, Houston we are good to go\n");
 340	}
 341
 342	return retval;
 343}
 344
 345
 346static ssize_t snd_compr_read(struct file *f, char __user *buf,
 347		size_t count, loff_t *offset)
 348{
 349	struct snd_compr_file *data = f->private_data;
 350	struct snd_compr_stream *stream;
 351	size_t avail;
 352	int retval;
 353
 354	if (snd_BUG_ON(!data))
 355		return -EFAULT;
 356
 357	stream = &data->stream;
 358	if (stream->direction == SND_COMPRESS_ACCEL)
 359		return -EBADFD;
 360	guard(mutex)(&stream->device->lock);
 361
 362	/* read is allowed when stream is running, paused, draining and setup
 363	 * (yes setup is state which we transition to after stop, so if user
 364	 * wants to read data after stop we allow that)
 365	 */
 366	switch (stream->runtime->state) {
 367	case SNDRV_PCM_STATE_OPEN:
 368	case SNDRV_PCM_STATE_PREPARED:
 369	case SNDRV_PCM_STATE_SUSPENDED:
 370	case SNDRV_PCM_STATE_DISCONNECTED:
 371		return -EBADFD;
 372	case SNDRV_PCM_STATE_XRUN:
 373		return -EPIPE;
 374	}
 375
 376	avail = snd_compr_get_avail(stream);
 377	pr_debug("avail returned %ld\n", (unsigned long)avail);
 378	/* calculate how much we can read from buffer */
 379	if (avail > count)
 380		avail = count;
 381
 382	if (stream->ops->copy)
 383		retval = stream->ops->copy(stream, buf, avail);
 384	else
 385		return -ENXIO;
 386	if (retval > 0)
 387		stream->runtime->total_bytes_transferred += retval;
 388
 389	return retval;
 390}
 391
 392static int snd_compr_mmap(struct file *f, struct vm_area_struct *vma)
 393{
 394	return -ENXIO;
 395}
 396
 397static __poll_t snd_compr_get_poll(struct snd_compr_stream *stream)
 398{
 399	if (stream->direction == SND_COMPRESS_PLAYBACK)
 400		return EPOLLOUT | EPOLLWRNORM;
 401	else
 402		return EPOLLIN | EPOLLRDNORM;
 403}
 404
 405static __poll_t snd_compr_poll(struct file *f, poll_table *wait)
 406{
 407	struct snd_compr_file *data = f->private_data;
 408	struct snd_compr_stream *stream;
 409	struct snd_compr_runtime *runtime;
 410	size_t avail;
 411	__poll_t retval = 0;
 412
 413	if (snd_BUG_ON(!data))
 414		return EPOLLERR;
 415
 416	stream = &data->stream;
 417	runtime = stream->runtime;
 418
 419	guard(mutex)(&stream->device->lock);
 420
 421	switch (runtime->state) {
 422	case SNDRV_PCM_STATE_OPEN:
 423	case SNDRV_PCM_STATE_XRUN:
 424		return snd_compr_get_poll(stream) | EPOLLERR;
 425	default:
 426		break;
 427	}
 428
 429	poll_wait(f, &runtime->sleep, wait);
 430
 431#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 432	if (stream->direction == SND_COMPRESS_ACCEL) {
 433		struct snd_compr_task_runtime *task;
 434		if (runtime->fragments > runtime->active_tasks)
 435			retval |= EPOLLOUT | EPOLLWRNORM;
 436		task = list_first_entry_or_null(&runtime->tasks,
 437						struct snd_compr_task_runtime,
 438						list);
 439		if (task && task->state == SND_COMPRESS_TASK_STATE_FINISHED)
 440			retval |= EPOLLIN | EPOLLRDNORM;
 441		return retval;
 442	}
 443#endif
 444
 445	avail = snd_compr_get_avail(stream);
 446	pr_debug("avail is %ld\n", (unsigned long)avail);
 447	/* check if we have at least one fragment to fill */
 448	switch (runtime->state) {
 449	case SNDRV_PCM_STATE_DRAINING:
 450		/* stream has been woken up after drain is complete
 451		 * draining done so set stream state to stopped
 452		 */
 453		retval = snd_compr_get_poll(stream);
 454		runtime->state = SNDRV_PCM_STATE_SETUP;
 455		break;
 456	case SNDRV_PCM_STATE_RUNNING:
 457	case SNDRV_PCM_STATE_PREPARED:
 458	case SNDRV_PCM_STATE_PAUSED:
 459		if (avail >= runtime->fragment_size)
 460			retval = snd_compr_get_poll(stream);
 461		break;
 462	default:
 463		return snd_compr_get_poll(stream) | EPOLLERR;
 464	}
 465
 466	return retval;
 467}
 468
 469static int
 470snd_compr_get_caps(struct snd_compr_stream *stream, unsigned long arg)
 471{
 472	int retval;
 473	struct snd_compr_caps caps;
 474
 475	if (!stream->ops->get_caps)
 476		return -ENXIO;
 477
 478	memset(&caps, 0, sizeof(caps));
 479	retval = stream->ops->get_caps(stream, &caps);
 480	if (retval)
 481		goto out;
 482	if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
 483		retval = -EFAULT;
 484out:
 485	return retval;
 486}
 487
 488#ifndef COMPR_CODEC_CAPS_OVERFLOW
 489static int
 490snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 491{
 492	int retval;
 493	struct snd_compr_codec_caps *caps __free(kfree) = NULL;
 494
 495	if (!stream->ops->get_codec_caps)
 496		return -ENXIO;
 497
 498	caps = kzalloc(sizeof(*caps), GFP_KERNEL);
 499	if (!caps)
 500		return -ENOMEM;
 501
 502	retval = stream->ops->get_codec_caps(stream, caps);
 503	if (retval)
 504		return retval;
 505	if (copy_to_user((void __user *)arg, caps, sizeof(*caps)))
 506		return -EFAULT;
 507	return retval;
 508}
 509#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 510
 511int snd_compr_malloc_pages(struct snd_compr_stream *stream, size_t size)
 512{
 513	struct snd_dma_buffer *dmab;
 514	int ret;
 515
 516	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 517		return -EINVAL;
 518	dmab = kzalloc(sizeof(*dmab), GFP_KERNEL);
 519	if (!dmab)
 520		return -ENOMEM;
 521	dmab->dev = stream->dma_buffer.dev;
 522	ret = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev, size, dmab);
 523	if (ret < 0) {
 524		kfree(dmab);
 525		return ret;
 526	}
 527
 528	snd_compr_set_runtime_buffer(stream, dmab);
 529	stream->runtime->dma_bytes = size;
 530	return 1;
 531}
 532EXPORT_SYMBOL(snd_compr_malloc_pages);
 533
 534int snd_compr_free_pages(struct snd_compr_stream *stream)
 535{
 536	struct snd_compr_runtime *runtime;
 537
 538	if (snd_BUG_ON(!(stream) || !(stream)->runtime))
 539		return -EINVAL;
 540	runtime = stream->runtime;
 541	if (runtime->dma_area == NULL)
 542		return 0;
 543	if (runtime->dma_buffer_p != &stream->dma_buffer) {
 544		/* It's a newly allocated buffer. Release it now. */
 545		snd_dma_free_pages(runtime->dma_buffer_p);
 546		kfree(runtime->dma_buffer_p);
 547	}
 548
 549	snd_compr_set_runtime_buffer(stream, NULL);
 550	return 0;
 551}
 552EXPORT_SYMBOL(snd_compr_free_pages);
 553
 554/* revisit this with snd_pcm_preallocate_xxx */
 555static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
 556		struct snd_compr_params *params)
 557{
 558	unsigned int buffer_size;
 559	void *buffer = NULL;
 560
 561	if (stream->direction == SND_COMPRESS_ACCEL)
 562		goto params;
 563
 564	buffer_size = params->buffer.fragment_size * params->buffer.fragments;
 565	if (stream->ops->copy) {
 566		buffer = NULL;
 567		/* if copy is defined the driver will be required to copy
 568		 * the data from core
 569		 */
 570	} else {
 571		if (stream->runtime->dma_buffer_p) {
 572
 573			if (buffer_size > stream->runtime->dma_buffer_p->bytes)
 574				dev_err(stream->device->dev,
 575						"Not enough DMA buffer");
 576			else
 577				buffer = stream->runtime->dma_buffer_p->area;
 578
 579		} else {
 580			buffer = kmalloc(buffer_size, GFP_KERNEL);
 581		}
 582
 583		if (!buffer)
 584			return -ENOMEM;
 585	}
 586
 587	stream->runtime->buffer = buffer;
 588	stream->runtime->buffer_size = buffer_size;
 589params:
 590	stream->runtime->fragment_size = params->buffer.fragment_size;
 591	stream->runtime->fragments = params->buffer.fragments;
 592	return 0;
 593}
 594
 595static int
 596snd_compress_check_input(struct snd_compr_stream *stream, struct snd_compr_params *params)
 597{
 598	u32 max_fragments;
 599
  600	/* first let's check the buffer parameters */
 601	if (params->buffer.fragment_size == 0)
 602		return -EINVAL;
 603
 604	if (stream->direction == SND_COMPRESS_ACCEL)
 605		max_fragments = 64;			/* safe value */
 606	else
 607		max_fragments = U32_MAX / params->buffer.fragment_size;
 608
 609	if (params->buffer.fragments > max_fragments ||
 610	    params->buffer.fragments == 0)
 611		return -EINVAL;
 612
 613	/* now codec parameters */
 614	if (params->codec.id == 0 || params->codec.id > SND_AUDIOCODEC_MAX)
 615		return -EINVAL;
 616
 617	if (params->codec.ch_in == 0 || params->codec.ch_out == 0)
 618		return -EINVAL;
 619
 620	return 0;
 621}
 622
 623static int
 624snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
 625{
 626	struct snd_compr_params *params __free(kfree) = NULL;
 627	int retval;
 628
 629	if (stream->runtime->state == SNDRV_PCM_STATE_OPEN || stream->next_track) {
 630		/*
 631		 * we should allow parameter change only when stream has been
 632		 * opened not in other cases
 633		 */
 634		params = memdup_user((void __user *)arg, sizeof(*params));
 635		if (IS_ERR(params))
 636			return PTR_ERR(params);
 637
 638		retval = snd_compress_check_input(stream, params);
 639		if (retval)
 640			return retval;
 641
 642		retval = snd_compr_allocate_buffer(stream, params);
 643		if (retval)
 644			return -ENOMEM;
 645
 646		retval = stream->ops->set_params(stream, params);
 647		if (retval)
 648			return retval;
 649
 650		if (stream->next_track)
 651			return retval;
 652
 653		stream->metadata_set = false;
 654		stream->next_track = false;
 655
 656		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
 657	} else {
 658		return -EPERM;
 659	}
 660	return retval;
 661}
 662
 663static int
 664snd_compr_get_params(struct snd_compr_stream *stream, unsigned long arg)
 665{
 666	struct snd_codec *params __free(kfree) = NULL;
 667	int retval;
 668
 669	if (!stream->ops->get_params)
 670		return -EBADFD;
 671
 672	params = kzalloc(sizeof(*params), GFP_KERNEL);
 673	if (!params)
 674		return -ENOMEM;
 675	retval = stream->ops->get_params(stream, params);
 676	if (retval)
 677		return retval;
 678	if (copy_to_user((char __user *)arg, params, sizeof(*params)))
 679		return -EFAULT;
 680	return retval;
 681}
 682
 683static int
 684snd_compr_get_metadata(struct snd_compr_stream *stream, unsigned long arg)
 685{
 686	struct snd_compr_metadata metadata;
 687	int retval;
 688
 689	if (!stream->ops->get_metadata)
 690		return -ENXIO;
 691
 692	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 693		return -EFAULT;
 694
 695	retval = stream->ops->get_metadata(stream, &metadata);
 696	if (retval != 0)
 697		return retval;
 698
 699	if (copy_to_user((void __user *)arg, &metadata, sizeof(metadata)))
 700		return -EFAULT;
 701
 702	return 0;
 703}
 704
 705static int
 706snd_compr_set_metadata(struct snd_compr_stream *stream, unsigned long arg)
 707{
 708	struct snd_compr_metadata metadata;
 709	int retval;
 710
 711	if (!stream->ops->set_metadata)
 712		return -ENXIO;
 713	/*
 714	 * we should allow parameter change only when the stream has been
 715	 * opened, not in other cases
 716	 */
 717	if (copy_from_user(&metadata, (void __user *)arg, sizeof(metadata)))
 718		return -EFAULT;
 719
 720	retval = stream->ops->set_metadata(stream, &metadata);
 721	stream->metadata_set = true;
 722
 723	return retval;
 724}
 725
 726static inline int
 727snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
 728{
 729	struct snd_compr_tstamp tstamp = {0};
 730	int ret;
 731
 732	ret = snd_compr_update_tstamp(stream, &tstamp);
 733	if (ret == 0)
 734		ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
 735			&tstamp, sizeof(tstamp)) ? -EFAULT : 0;
 736	return ret;
 737}
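/*
 * Userspace sketch (illustrative): the timestamp ioctl handled above is a
 * plain read of struct snd_compr_tstamp, e.g.:
 *
 *	struct snd_compr_tstamp ts;
 *
 *	if (ioctl(fd, SNDRV_COMPRESS_TSTAMP, &ts) == 0)
 *		printf("copied %u bytes, rendered %u frames\n",
 *		       ts.copied_total, ts.pcm_io_frames);
 */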
 738
 739static int snd_compr_pause(struct snd_compr_stream *stream)
 740{
 741	int retval;
 742
 743	switch (stream->runtime->state) {
 744	case SNDRV_PCM_STATE_RUNNING:
 745		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 746		if (!retval)
 747			stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
 748		break;
 749	case SNDRV_PCM_STATE_DRAINING:
 750		if (!stream->device->use_pause_in_draining)
 751			return -EPERM;
 752		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
 753		if (!retval)
 754			stream->pause_in_draining = true;
 755		break;
 756	default:
 757		return -EPERM;
 758	}
 759	return retval;
 760}
 761
 762static int snd_compr_resume(struct snd_compr_stream *stream)
 763{
 764	int retval;
 765
 766	switch (stream->runtime->state) {
 767	case SNDRV_PCM_STATE_PAUSED:
 768		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 769		if (!retval)
 770			stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 771		break;
 772	case SNDRV_PCM_STATE_DRAINING:
 773		if (!stream->pause_in_draining)
 774			return -EPERM;
 775		retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
 776		if (!retval)
 777			stream->pause_in_draining = false;
 778		break;
 779	default:
 780		return -EPERM;
 781	}
 782	return retval;
 783}
 784
 785static int snd_compr_start(struct snd_compr_stream *stream)
 786{
 787	int retval;
 788
 789	switch (stream->runtime->state) {
 790	case SNDRV_PCM_STATE_SETUP:
 791		if (stream->direction != SND_COMPRESS_CAPTURE)
 792			return -EPERM;
 793		break;
 794	case SNDRV_PCM_STATE_PREPARED:
 795		break;
 796	default:
 797		return -EPERM;
 798	}
 799
 800	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
 801	if (!retval)
 802		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
 803	return retval;
 804}
 805
 806static int snd_compr_stop(struct snd_compr_stream *stream)
 807{
 808	int retval;
 809
 810	switch (stream->runtime->state) {
 811	case SNDRV_PCM_STATE_OPEN:
 812	case SNDRV_PCM_STATE_SETUP:
 813	case SNDRV_PCM_STATE_PREPARED:
 814		return -EPERM;
 815	default:
 816		break;
 817	}
 818
 819	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 820	if (!retval) {
 821		/* clear flags and stop any drain wait */
 822		stream->partial_drain = false;
 823		stream->metadata_set = false;
 824		stream->pause_in_draining = false;
 825		snd_compr_drain_notify(stream);
 826		stream->runtime->total_bytes_available = 0;
 827		stream->runtime->total_bytes_transferred = 0;
 828	}
 829	return retval;
 830}
 831
 832static void error_delayed_work(struct work_struct *work)
 833{
 834	struct snd_compr_stream *stream;
 835
 836	stream = container_of(work, struct snd_compr_stream, error_work.work);
 837
 838	guard(mutex)(&stream->device->lock);
 839
 840	stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 841	wake_up(&stream->runtime->sleep);
 842}
 843
 844/**
 845 * snd_compr_stop_error: Report a fatal error on a stream
 846 * @stream: pointer to stream
 847 * @state: state to transition the stream to
 848 *
 849 * Stop the stream and set its state.
 850 *
 851 * Should be called with compressed device lock held.
 852 *
 853 * Return: zero if successful, or a negative error code
 854 */
 855int snd_compr_stop_error(struct snd_compr_stream *stream,
 856			 snd_pcm_state_t state)
 857{
 858	if (stream->runtime->state == state)
 859		return 0;
 860
 861	stream->runtime->state = state;
 862
 863	pr_debug("Changing state to: %d\n", state);
 864
 865	queue_delayed_work(system_power_efficient_wq, &stream->error_work, 0);
 866
 867	return 0;
 868}
 869EXPORT_SYMBOL_GPL(snd_compr_stop_error);
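/*
 * Driver-side sketch (illustrative, not taken from any particular driver):
 * a driver that detects an unrecoverable error, e.g. a firmware crash,
 * while holding the device lock could report it as below; XRUN is just one
 * plausible state to transition to.
 *
 *	snd_compr_stop_error(cstream, SNDRV_PCM_STATE_XRUN);
 */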
 870
 871static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
 872{
 873	int ret;
 874
 875	/*
 876	 * We are called with the device lock held, so drop the lock while we
 877	 * wait for the drain-complete notification from the driver.
 878	 *
 879	 * The driver is expected to notify drain completion, after which the
 880	 * stream is moved to the SETUP state, even if draining resulted in an
 881	 * error. The next track can be triggered after this.
 882	 */
 883	stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
 884	mutex_unlock(&stream->device->lock);
 885
 886	/* wait here for the drain to complete; the wait can end because a
 887	 * signal interrupted it, the wait returned an error, or it succeeded.
 888	 * In the first two cases we do nothing special and simply return
 889	 * after waking up.
 890	 */
 891
 892	ret = wait_event_interruptible(stream->runtime->sleep,
 893			(stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
 894	if (ret == -ERESTARTSYS)
 895		pr_debug("wait aborted by a signal\n");
 896	else if (ret)
 897		pr_debug("wait for drain failed with %d\n", ret);
 898
 899
 900	wake_up(&stream->runtime->sleep);
 901	mutex_lock(&stream->device->lock);
 902
 903	return ret;
 904}
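/*
 * Driver-side note (illustrative): the wait above is ended by the driver
 * calling snd_compr_drain_notify() once its drain has actually completed,
 * typically from an interrupt handler or a completion callback:
 *
 *	snd_compr_drain_notify(cstream);
 */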
 905
 906static int snd_compr_drain(struct snd_compr_stream *stream)
 907{
 908	int retval;
 909
 910	switch (stream->runtime->state) {
 911	case SNDRV_PCM_STATE_OPEN:
 912	case SNDRV_PCM_STATE_SETUP:
 913	case SNDRV_PCM_STATE_PREPARED:
 914	case SNDRV_PCM_STATE_PAUSED:
 915		return -EPERM;
 916	case SNDRV_PCM_STATE_XRUN:
 917		return -EPIPE;
 918	default:
 919		break;
 920	}
 921
 922	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
 923	if (retval) {
 924		pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
 925		wake_up(&stream->runtime->sleep);
 926		return retval;
 927	}
 928
 929	return snd_compress_wait_for_drain(stream);
 930}
 931
 932static int snd_compr_next_track(struct snd_compr_stream *stream)
 933{
 934	int retval;
 935
 936	/* only a running stream can transition to next track */
 937	if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
 938		return -EPERM;
 939
 940	/* next track doesn't have any meaning for capture streams */
 941	if (stream->direction == SND_COMPRESS_CAPTURE)
 942		return -EPERM;
 943
 944	/* next track may only be signalled on a stream intended to be
 945	 * gapless, i.e. when the current track's metadata has been set
 946	 */
 947	if (stream->metadata_set == false)
 948		return -EPERM;
 949
 950	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_NEXT_TRACK);
 951	if (retval != 0)
 952		return retval;
 953	stream->metadata_set = false;
 954	stream->next_track = true;
 955	return 0;
 956}
 957
 958static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 959{
 960	int retval;
 961
 962	switch (stream->runtime->state) {
 963	case SNDRV_PCM_STATE_OPEN:
 964	case SNDRV_PCM_STATE_SETUP:
 965	case SNDRV_PCM_STATE_PREPARED:
 966	case SNDRV_PCM_STATE_PAUSED:
 967		return -EPERM;
 968	case SNDRV_PCM_STATE_XRUN:
 969		return -EPIPE;
 970	default:
 971		break;
 972	}
 973
 974	/* partial drain doesn't have any meaning for capture streams */
 975	if (stream->direction == SND_COMPRESS_CAPTURE)
 976		return -EPERM;
 977
 978	/* stream can be drained only when next track has been signalled */
 979	if (stream->next_track == false)
 980		return -EPERM;
 981
 982	stream->partial_drain = true;
 983	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
 984	if (retval) {
 985		pr_debug("Partial drain returned failure\n");
 986		wake_up(&stream->runtime->sleep);
 987		return retval;
 988	}
 989
 990	stream->next_track = false;
 991	return snd_compress_wait_for_drain(stream);
 992}
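/*
 * Userspace sketch of the gapless sequence implemented by the metadata,
 * next-track and partial-drain handlers above (illustrative; values are
 * placeholders and error handling is omitted):
 *
 *	struct snd_compr_metadata md = { 0 };
 *
 *	md.key = SNDRV_COMPRESS_ENCODER_PADDING;
 *	md.value[0] = next_track_padding;
 *	ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &md);
 *	md.key = SNDRV_COMPRESS_ENCODER_DELAY;
 *	md.value[0] = next_track_delay;
 *	ioctl(fd, SNDRV_COMPRESS_SET_METADATA, &md);
 *	ioctl(fd, SNDRV_COMPRESS_NEXT_TRACK);
 *	ioctl(fd, SNDRV_COMPRESS_PARTIAL_DRAIN);
 *	... then write() the next track's data ...
 */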
 993
 994#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
 995
 996static struct snd_compr_task_runtime *
 997snd_compr_find_task(struct snd_compr_stream *stream, __u64 seqno)
 998{
 999	struct snd_compr_task_runtime *task;
1000
1001	list_for_each_entry(task, &stream->runtime->tasks, list) {
1002		if (task->seqno == seqno)
1003			return task;
1004	}
1005	return NULL;
1006}
1007
1008static void snd_compr_task_free(struct snd_compr_task_runtime *task)
1009{
1010	if (task->output)
1011		dma_buf_put(task->output);
1012	if (task->input)
1013		dma_buf_put(task->input);
1014	kfree(task);
1015}
1016
1017static u64 snd_compr_seqno_next(struct snd_compr_stream *stream)
1018{
1019	u64 seqno = ++stream->runtime->task_seqno;
1020	if (seqno == 0)
1021		seqno = ++stream->runtime->task_seqno;
1022	return seqno;
1023}
1024
1025static int snd_compr_task_new(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1026{
1027	struct snd_compr_task_runtime *task;
1028	int retval, fd_i, fd_o;
1029
1030	if (stream->runtime->total_tasks >= stream->runtime->fragments)
1031		return -EBUSY;
1032	if (utask->origin_seqno != 0 || utask->input_size != 0)
1033		return -EINVAL;
1034	task = kzalloc(sizeof(*task), GFP_KERNEL);
1035	if (task == NULL)
1036		return -ENOMEM;
1037	task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1038	task->input_size = utask->input_size;
1039	retval = stream->ops->task_create(stream, task);
1040	if (retval < 0)
1041		goto cleanup;
1042	/* similar functionality to dma_buf_fd(), but ensure that both
1043	   file descriptors are allocated before fd_install() */
1044	if (!task->input || !task->input->file || !task->output || !task->output->file) {
1045		retval = -EINVAL;
1046		goto cleanup;
1047	}
1048	fd_i = get_unused_fd_flags(O_WRONLY|O_CLOEXEC);
1049	if (fd_i < 0)
1050		goto cleanup;
1051	fd_o = get_unused_fd_flags(O_RDONLY|O_CLOEXEC);
1052	if (fd_o < 0) {
1053		put_unused_fd(fd_i);
1054		goto cleanup;
1055	}
1056	/* keep dmabuf reference until freed with task free ioctl */
1057	get_dma_buf(task->input);
1058	get_dma_buf(task->output);
1059	fd_install(fd_i, task->input->file);
1060	fd_install(fd_o, task->output->file);
1061	utask->input_fd = fd_i;
1062	utask->output_fd = fd_o;
1063	list_add_tail(&task->list, &stream->runtime->tasks);
1064	stream->runtime->total_tasks++;
1065	return 0;
1066cleanup:
1067	snd_compr_task_free(task);
1068	return retval;
1069}
1070
1071static int snd_compr_task_create(struct snd_compr_stream *stream, unsigned long arg)
1072{
1073	struct snd_compr_task *task __free(kfree) = NULL;
1074	int retval;
1075
1076	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1077		return -EPERM;
1078	task = memdup_user((void __user *)arg, sizeof(*task));
1079	if (IS_ERR(task))
1080		return PTR_ERR(task);
1081	retval = snd_compr_task_new(stream, task);
1082	if (retval >= 0)
1083		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1084			retval = -EFAULT;
1085	return retval;
1086}
1087
1088static int snd_compr_task_start_prepare(struct snd_compr_task_runtime *task,
1089					struct snd_compr_task *utask)
1090{
1091	if (task == NULL)
1092		return -EINVAL;
1093	if (task->state >= SND_COMPRESS_TASK_STATE_FINISHED)
1094		return -EBUSY;
1095	if (utask->input_size > task->input->size)
1096		return -EINVAL;
1097	task->flags = utask->flags;
1098	task->input_size = utask->input_size;
1099	task->state = SND_COMPRESS_TASK_STATE_IDLE;
1100	return 0;
1101}
1102
1103static int snd_compr_task_start(struct snd_compr_stream *stream, struct snd_compr_task *utask)
1104{
1105	struct snd_compr_task_runtime *task;
1106	int retval;
1107
1108	if (utask->origin_seqno > 0) {
1109		task = snd_compr_find_task(stream, utask->origin_seqno);
1110		retval = snd_compr_task_start_prepare(task, utask);
1111		if (retval < 0)
1112			return retval;
1113		task->seqno = utask->seqno = snd_compr_seqno_next(stream);
1114		utask->origin_seqno = 0;
1115		list_move_tail(&task->list, &stream->runtime->tasks);
1116	} else {
1117		task = snd_compr_find_task(stream, utask->seqno);
1118		if (task && task->state != SND_COMPRESS_TASK_STATE_IDLE)
1119			return -EBUSY;
1120		retval = snd_compr_task_start_prepare(task, utask);
1121		if (retval < 0)
1122			return retval;
1123	}
1124	retval = stream->ops->task_start(stream, task);
1125	if (retval >= 0) {
1126		task->state = SND_COMPRESS_TASK_STATE_ACTIVE;
1127		stream->runtime->active_tasks++;
1128	}
1129	return retval;
1130}
1131
1132static int snd_compr_task_start_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1133{
1134	struct snd_compr_task *task __free(kfree) = NULL;
1135	int retval;
1136
1137	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1138		return -EPERM;
1139	task = memdup_user((void __user *)arg, sizeof(*task));
1140	if (IS_ERR(task))
1141		return PTR_ERR(task);
1142	retval = snd_compr_task_start(stream, task);
1143	if (retval >= 0)
1144		if (copy_to_user((void __user *)arg, task, sizeof(*task)))
1145			retval = -EFAULT;
1146	return retval;
1147}
1148
1149static void snd_compr_task_stop_one(struct snd_compr_stream *stream,
1150					struct snd_compr_task_runtime *task)
1151{
1152	if (task->state != SND_COMPRESS_TASK_STATE_ACTIVE)
1153		return;
1154	stream->ops->task_stop(stream, task);
1155	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1156		stream->runtime->active_tasks--;
1157	list_move_tail(&task->list, &stream->runtime->tasks);
1158	task->state = SND_COMPRESS_TASK_STATE_IDLE;
1159}
1160
1161static void snd_compr_task_free_one(struct snd_compr_stream *stream,
1162					struct snd_compr_task_runtime *task)
1163{
1164	snd_compr_task_stop_one(stream, task);
1165	stream->ops->task_free(stream, task);
1166	list_del(&task->list);
1167	snd_compr_task_free(task);
1168	stream->runtime->total_tasks--;
1169}
1170
1171static void snd_compr_task_free_all(struct snd_compr_stream *stream)
1172{
1173	struct snd_compr_task_runtime *task, *temp;
1174
1175	list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1176		snd_compr_task_free_one(stream, task);
1177}
1178
1179typedef void (*snd_compr_seq_func_t)(struct snd_compr_stream *stream,
1180					struct snd_compr_task_runtime *task);
1181
1182static int snd_compr_task_seq(struct snd_compr_stream *stream, unsigned long arg,
1183					snd_compr_seq_func_t fcn)
1184{
1185	struct snd_compr_task_runtime *task, *temp;
1186	__u64 seqno;
1187	int retval;
1188
1189	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1190		return -EPERM;
1191	retval = copy_from_user(&seqno, (__u64 __user *)arg, sizeof(seqno));
1192	if (retval)
1193		return -EFAULT;
1194	retval = 0;
1195	if (seqno == 0) {
1196		list_for_each_entry_safe_reverse(task, temp, &stream->runtime->tasks, list)
1197			fcn(stream, task);
1198	} else {
1199		task = snd_compr_find_task(stream, seqno);
1200		if (task == NULL) {
1201			retval = -EINVAL;
1202		} else {
1203			fcn(stream, task);
1204		}
1205	}
1206	return retval;
1207}
1208
1209static int snd_compr_task_status(struct snd_compr_stream *stream,
1210					struct snd_compr_task_status *status)
1211{
1212	struct snd_compr_task_runtime *task;
1213
1214	task = snd_compr_find_task(stream, status->seqno);
1215	if (task == NULL)
1216		return -EINVAL;
1217	status->input_size = task->input_size;
1218	status->output_size = task->output_size;
1219	status->state = task->state;
1220	return 0;
1221}
1222
1223static int snd_compr_task_status_ioctl(struct snd_compr_stream *stream, unsigned long arg)
1224{
1225	struct snd_compr_task_status *status __free(kfree) = NULL;
1226	int retval;
1227
1228	if (stream->runtime->state != SNDRV_PCM_STATE_SETUP)
1229		return -EPERM;
1230	status = memdup_user((void __user *)arg, sizeof(*status));
1231	if (IS_ERR(status))
1232		return PTR_ERR(status);
1233	retval = snd_compr_task_status(stream, status);
1234	if (retval >= 0)
1235		if (copy_to_user((void __user *)arg, status, sizeof(*status)))
1236			retval = -EFAULT;
1237	return retval;
1238}
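/*
 * Userspace sketch for the accel task ioctls handled above (illustrative;
 * the flow is inferred from the handlers in this file and from the UAPI in
 * <sound/compress_offload.h>): create a task to obtain dma-buf fds for its
 * input and output, fill the input buffer, start it, then query its status:
 *
 *	struct snd_compr_task t = { 0 };
 *	struct snd_compr_task_status st = { 0 };
 *
 *	ioctl(fd, SNDRV_COMPRESS_TASK_CREATE, &t);
 *	... fill the buffer behind t.input_fd, set t.input_size ...
 *	ioctl(fd, SNDRV_COMPRESS_TASK_START, &t);
 *	st.seqno = t.seqno;
 *	ioctl(fd, SNDRV_COMPRESS_TASK_STATUS, &st);
 */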
1239
1240/**
1241 * snd_compr_task_finished: Notify that the task was finished
1242 * @stream: pointer to stream
1243 * @task: runtime task structure
1244 *
1245 * Set the finished task state and notify waiters.
1246 */
1247void snd_compr_task_finished(struct snd_compr_stream *stream,
1248			    struct snd_compr_task_runtime *task)
1249{
1250	guard(mutex)(&stream->device->lock);
1251	if (!snd_BUG_ON(stream->runtime->active_tasks == 0))
1252		stream->runtime->active_tasks--;
1253	task->state = SND_COMPRESS_TASK_STATE_FINISHED;
1254	wake_up(&stream->runtime->sleep);
1255}
1256EXPORT_SYMBOL_GPL(snd_compr_task_finished);
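/*
 * Driver-side note (illustrative): a driver typically calls
 * snd_compr_task_finished() from its completion path once the task's output
 * dma-buf has been filled and, judging from the status handler above, is
 * also expected to have updated task->output_size by then:
 *
 *	task->output_size = produced_bytes;
 *	snd_compr_task_finished(stream, task);
 */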
1257
1258MODULE_IMPORT_NS("DMA_BUF");
1259#endif /* CONFIG_SND_COMPRESS_ACCEL */
1260
1261static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1262{
1263	struct snd_compr_file *data = f->private_data;
1264	struct snd_compr_stream *stream;
1265
1266	if (snd_BUG_ON(!data))
1267		return -EFAULT;
1268
1269	stream = &data->stream;
1270
1271	guard(mutex)(&stream->device->lock);
1272	switch (_IOC_NR(cmd)) {
1273	case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
1274		return put_user(SNDRV_COMPRESS_VERSION,
1275				(int __user *)arg) ? -EFAULT : 0;
1276	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
1277		return snd_compr_get_caps(stream, arg);
1278#ifndef COMPR_CODEC_CAPS_OVERFLOW
1279	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
1280		return snd_compr_get_codec_caps(stream, arg);
1281#endif
1282	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
1283		return snd_compr_set_params(stream, arg);
1284	case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
1285		return snd_compr_get_params(stream, arg);
1286	case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
1287		return snd_compr_set_metadata(stream, arg);
1288	case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
1289		return snd_compr_get_metadata(stream, arg);
1290	}
1291
1292	if (stream->direction == SND_COMPRESS_ACCEL) {
1293#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1294		switch (_IOC_NR(cmd)) {
1295		case _IOC_NR(SNDRV_COMPRESS_TASK_CREATE):
1296			return snd_compr_task_create(stream, arg);
1297		case _IOC_NR(SNDRV_COMPRESS_TASK_FREE):
1298			return snd_compr_task_seq(stream, arg, snd_compr_task_free_one);
1299		case _IOC_NR(SNDRV_COMPRESS_TASK_START):
1300			return snd_compr_task_start_ioctl(stream, arg);
1301		case _IOC_NR(SNDRV_COMPRESS_TASK_STOP):
1302			return snd_compr_task_seq(stream, arg, snd_compr_task_stop_one);
1303		case _IOC_NR(SNDRV_COMPRESS_TASK_STATUS):
1304			return snd_compr_task_status_ioctl(stream, arg);
1305		}
1306#endif
1307		return -ENOTTY;
1308	}
1309
1310	switch (_IOC_NR(cmd)) {
1311	case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
1312		return snd_compr_tstamp(stream, arg);
1313	case _IOC_NR(SNDRV_COMPRESS_AVAIL):
1314		return snd_compr_ioctl_avail(stream, arg);
1315	case _IOC_NR(SNDRV_COMPRESS_PAUSE):
1316		return snd_compr_pause(stream);
1317	case _IOC_NR(SNDRV_COMPRESS_RESUME):
1318		return snd_compr_resume(stream);
1319	case _IOC_NR(SNDRV_COMPRESS_START):
1320		return snd_compr_start(stream);
1321	case _IOC_NR(SNDRV_COMPRESS_STOP):
1322		return snd_compr_stop(stream);
1323	case _IOC_NR(SNDRV_COMPRESS_DRAIN):
1324		return snd_compr_drain(stream);
1325	case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
1326		return snd_compr_partial_drain(stream);
1327	case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
1328		return snd_compr_next_track(stream);
1329	}
1330
1331	return -ENOTTY;
1332}
1333
1334/* support of 32bit userspace on 64bit platforms */
1335#ifdef CONFIG_COMPAT
1336static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
1337						unsigned long arg)
1338{
1339	return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
1340}
1341#endif
1342
1343static const struct file_operations snd_compr_file_ops = {
1344		.owner =	THIS_MODULE,
1345		.open =		snd_compr_open,
1346		.release =	snd_compr_free,
1347		.write =	snd_compr_write,
1348		.read =		snd_compr_read,
1349		.unlocked_ioctl = snd_compr_ioctl,
1350#ifdef CONFIG_COMPAT
1351		.compat_ioctl = snd_compr_ioctl_compat,
1352#endif
1353		.mmap =		snd_compr_mmap,
1354		.poll =		snd_compr_poll,
1355};
1356
1357static int snd_compress_dev_register(struct snd_device *device)
1358{
1359	int ret;
1360	struct snd_compr *compr;
1361
1362	if (snd_BUG_ON(!device || !device->device_data))
1363		return -EBADFD;
1364	compr = device->device_data;
1365
1366	pr_debug("reg device %s, direction %d\n", compr->name,
1367			compr->direction);
1368	/* register compressed device */
1369	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
1370				  compr->card, compr->device,
1371				  &snd_compr_file_ops, compr, compr->dev);
1372	if (ret < 0) {
1373		pr_err("snd_register_device failed %d\n", ret);
1374		return ret;
1375	}
1376	return ret;
1377
1378}
1379
1380static int snd_compress_dev_disconnect(struct snd_device *device)
1381{
1382	struct snd_compr *compr;
1383
1384	compr = device->device_data;
1385	snd_unregister_device(compr->dev);
1386	return 0;
1387}
1388
1389#ifdef CONFIG_SND_VERBOSE_PROCFS
1390static void snd_compress_proc_info_read(struct snd_info_entry *entry,
1391					struct snd_info_buffer *buffer)
1392{
1393	struct snd_compr *compr = (struct snd_compr *)entry->private_data;
1394
1395	snd_iprintf(buffer, "card: %d\n", compr->card->number);
1396	snd_iprintf(buffer, "device: %d\n", compr->device);
1397	snd_iprintf(buffer, "stream: %s\n",
1398			compr->direction == SND_COMPRESS_PLAYBACK
1399				? "PLAYBACK" : "CAPTURE");
1400	snd_iprintf(buffer, "id: %s\n", compr->id);
1401}
1402
1403static int snd_compress_proc_init(struct snd_compr *compr)
1404{
1405	struct snd_info_entry *entry;
1406	char name[16];
1407
1408	sprintf(name, "compr%i", compr->device);
1409	entry = snd_info_create_card_entry(compr->card, name,
1410					   compr->card->proc_root);
1411	if (!entry)
1412		return -ENOMEM;
1413	entry->mode = S_IFDIR | 0555;
1414	compr->proc_root = entry;
1415
1416	entry = snd_info_create_card_entry(compr->card, "info",
1417					   compr->proc_root);
1418	if (entry)
1419		snd_info_set_text_ops(entry, compr,
1420				      snd_compress_proc_info_read);
1421	compr->proc_info_entry = entry;
1422
1423	return 0;
1424}
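/*
 * With CONFIG_SND_VERBOSE_PROCFS enabled, the entries created above appear
 * under procfs as /proc/asound/cardN/comprM/info, e.g.:
 *
 *	$ cat /proc/asound/card0/compr0/info
 */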
1425
1426static void snd_compress_proc_done(struct snd_compr *compr)
1427{
1428	snd_info_free_entry(compr->proc_info_entry);
1429	compr->proc_info_entry = NULL;
1430	snd_info_free_entry(compr->proc_root);
1431	compr->proc_root = NULL;
1432}
1433
1434static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1435{
1436	strscpy(compr->id, id, sizeof(compr->id));
1437}
1438#else
1439static inline int snd_compress_proc_init(struct snd_compr *compr)
1440{
1441	return 0;
1442}
1443
1444static inline void snd_compress_proc_done(struct snd_compr *compr)
1445{
1446}
1447
1448static inline void snd_compress_set_id(struct snd_compr *compr, const char *id)
1449{
1450}
1451#endif
1452
1453static int snd_compress_dev_free(struct snd_device *device)
1454{
1455	struct snd_compr *compr;
1456
1457	compr = device->device_data;
1458	snd_compress_proc_done(compr);
1459	put_device(compr->dev);
1460	return 0;
1461}
1462
1463/**
1464 * snd_compress_new: create new compress device
1465 * @card: sound card pointer
1466 * @device: device number
1467 * @dirn: device direction, should be of type enum snd_compr_direction
1468 * @id: ID string
1469 * @compr: compress device pointer
1470 *
1471 * Return: zero if successful, or a negative error code
1472 */
1473int snd_compress_new(struct snd_card *card, int device,
1474			int dirn, const char *id, struct snd_compr *compr)
1475{
1476	static const struct snd_device_ops ops = {
1477		.dev_free = snd_compress_dev_free,
1478		.dev_register = snd_compress_dev_register,
1479		.dev_disconnect = snd_compress_dev_disconnect,
1480	};
1481	int ret;
1482
1483#if !IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL)
1484	if (snd_BUG_ON(dirn == SND_COMPRESS_ACCEL))
1485		return -EINVAL;
1486#endif
1487
1488	compr->card = card;
1489	compr->device = device;
1490	compr->direction = dirn;
1491	mutex_init(&compr->lock);
1492
1493	snd_compress_set_id(compr, id);
1494
1495	ret = snd_device_alloc(&compr->dev, card);
1496	if (ret)
1497		return ret;
1498	dev_set_name(compr->dev, "comprC%iD%i", card->number, device);
1499
1500	ret = snd_device_new(card, SNDRV_DEV_COMPRESS, compr, &ops);
1501	if (ret == 0)
1502		snd_compress_proc_init(compr);
1503	else
1504		put_device(compr->dev);
1505
1506	return ret;
1507}
1508EXPORT_SYMBOL_GPL(snd_compress_new);
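/*
 * Driver-side sketch (illustrative; error handling is trimmed and the ops
 * table name is made up): a card driver fills in a struct snd_compr and
 * registers it while building the card; ASoC drivers normally reach this
 * through snd_soc_new_compress() rather than calling it directly.
 *
 *	compr->ops = &my_compr_ops;
 *	ret = snd_compress_new(card, 0, SND_COMPRESS_PLAYBACK,
 *			       "my-compress", compr);
 *	if (ret < 0)
 *		return ret;
 */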
1509
1510MODULE_DESCRIPTION("ALSA Compressed offload framework");
1511MODULE_AUTHOR("Vinod Koul <vinod.koul@linux.intel.com>");
1512MODULE_LICENSE("GPL v2");