   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Digital Audio (PCM) abstract layer
   4 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
   5 *                   Abramo Bagnara <abramo@alsa-project.org>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/sched/signal.h>
  10#include <linux/time.h>
  11#include <linux/math64.h>
  12#include <linux/export.h>
  13#include <sound/core.h>
  14#include <sound/control.h>
  15#include <sound/tlv.h>
  16#include <sound/info.h>
  17#include <sound/pcm.h>
  18#include <sound/pcm_params.h>
  19#include <sound/timer.h>
  20
  21#include "pcm_local.h"
  22
  23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
  24#define CREATE_TRACE_POINTS
  25#include "pcm_trace.h"
  26#else
  27#define trace_hwptr(substream, pos, in_interrupt)
  28#define trace_xrun(substream)
  29#define trace_hw_ptr_error(substream, reason)
  30#define trace_applptr(substream, prev, curr)
  31#endif
  32
  33static int fill_silence_frames(struct snd_pcm_substream *substream,
  34			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
  35
  36
  37static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
  38				       snd_pcm_uframes_t ptr,
  39				       snd_pcm_uframes_t new_ptr)
  40{
  41	snd_pcm_sframes_t delta;
  42
  43	delta = new_ptr - ptr;
  44	if (delta == 0)
  45		return;
  46	if (delta < 0)
  47		delta += runtime->boundary;
  48	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
  49		runtime->silence_filled -= delta;
  50	else
  51		runtime->silence_filled = 0;
  52	runtime->silence_start = new_ptr;
  53}
  54
  55/*
  56 * fill ring buffer with silence
  57 * runtime->silence_start: starting pointer to silence area
  58 * runtime->silence_filled: size filled with silence
  59 * runtime->silence_threshold: threshold from application
  60 * runtime->silence_size: maximal size from application
  61 *
   62 * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
  63 */
  64void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
  65{
  66	struct snd_pcm_runtime *runtime = substream->runtime;
  67	snd_pcm_uframes_t frames, ofs, transfer;
  68	int err;
  69
  70	if (runtime->silence_size < runtime->boundary) {
  71		snd_pcm_sframes_t noise_dist;
  72		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
  73		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
  74		/* initialization outside pointer updates */
  75		if (new_hw_ptr == ULONG_MAX)
  76			new_hw_ptr = runtime->status->hw_ptr;
  77		/* get hw_avail with the boundary crossing */
  78		noise_dist = appl_ptr - new_hw_ptr;
  79		if (noise_dist < 0)
  80			noise_dist += runtime->boundary;
  81		/* total noise distance */
  82		noise_dist += runtime->silence_filled;
  83		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
  84			return;
  85		frames = runtime->silence_threshold - noise_dist;
  86		if (frames > runtime->silence_size)
  87			frames = runtime->silence_size;
  88	} else {
  89		/*
  90		 * This filling mode aims at free-running mode (used for example by dmix),
  91		 * which doesn't update the application pointer.
  92		 */
  93		snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
  94		if (new_hw_ptr == ULONG_MAX) {
  95			/*
  96			 * Initialization, fill the whole unused buffer with silence.
  97			 *
  98			 * Usually, this is entered while stopped, before data is queued,
  99			 * so both pointers are expected to be zero.
 100			 */
 101			snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
 102			if (avail < 0)
 103				avail += runtime->boundary;
 104			/*
 105			 * In free-running mode, appl_ptr will be zero even while running,
 106			 * so we end up with a huge number. There is no useful way to
 107			 * handle this, so we just clear the whole buffer.
 108			 */
 109			runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
 110			runtime->silence_start = hw_ptr;
 111		} else {
 112			/* Silence the just played area immediately */
 113			update_silence_vars(runtime, hw_ptr, new_hw_ptr);
 114		}
 115		/*
 116		 * In this mode, silence_filled actually includes the valid
 117		 * sample data from the user.
 118		 */
 119		frames = runtime->buffer_size - runtime->silence_filled;
 120	}
 121	if (snd_BUG_ON(frames > runtime->buffer_size))
 122		return;
 123	if (frames == 0)
 124		return;
 125	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
 126	do {
 127		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
 128		err = fill_silence_frames(substream, ofs, transfer);
 129		snd_BUG_ON(err < 0);
 130		runtime->silence_filled += transfer;
 131		frames -= transfer;
 132		ofs = 0;
 133	} while (frames > 0);
 134	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 135}
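/*
 * Editorial note (not part of the original file): a small worked example of
 * the thresholded mode above, with illustrative numbers.  Assume
 * silence_threshold = 1024, silence_size = 256, appl_ptr - hw_ptr = 600 and
 * silence_filled = 100.  Then noise_dist = 600 + 100 = 700 < 1024, so
 * frames = 1024 - 700 = 324, clamped to silence_size = 256, and 256 frames
 * of silence are written starting at silence_start + silence_filled.
 */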
 136
 137#ifdef CONFIG_SND_DEBUG
 138void snd_pcm_debug_name(struct snd_pcm_substream *substream,
 139			   char *name, size_t len)
 140{
 141	snprintf(name, len, "pcmC%dD%d%c:%d",
 142		 substream->pcm->card->number,
 143		 substream->pcm->device,
 144		 substream->stream ? 'c' : 'p',
 145		 substream->number);
 146}
 147EXPORT_SYMBOL(snd_pcm_debug_name);
 148#endif
 149
 150#define XRUN_DEBUG_BASIC	(1<<0)
 151#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
 152#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
 153
 154#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 155
 156#define xrun_debug(substream, mask) \
 157			((substream)->pstr->xrun_debug & (mask))
 158#else
 159#define xrun_debug(substream, mask)	0
 160#endif
 161
 162#define dump_stack_on_xrun(substream) do {			\
 163		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
 164			dump_stack();				\
 165	} while (0)
 166
 167/* call with stream lock held */
 168void __snd_pcm_xrun(struct snd_pcm_substream *substream)
 169{
 170	struct snd_pcm_runtime *runtime = substream->runtime;
 171
 172	trace_xrun(substream);
 173	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 174		struct timespec64 tstamp;
 175
 176		snd_pcm_gettime(runtime, &tstamp);
 177		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
 178		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
 179	}
 180	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 181	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
 182		char name[16];
 183		snd_pcm_debug_name(substream, name, sizeof(name));
 184		pcm_warn(substream->pcm, "XRUN: %s\n", name);
 185		dump_stack_on_xrun(substream);
 186	}
 187}
 188
 189#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 190#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
 191	do {								\
 192		trace_hw_ptr_error(substream, reason);	\
 193		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
 194			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
 195					   (in_interrupt) ? 'Q' : 'P', ##args);	\
 196			dump_stack_on_xrun(substream);			\
 197		}							\
 198	} while (0)
 199
 200#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
 201
 202#define hw_ptr_error(substream, fmt, args...) do { } while (0)
 203
 204#endif
 205
 206int snd_pcm_update_state(struct snd_pcm_substream *substream,
 207			 struct snd_pcm_runtime *runtime)
 208{
 209	snd_pcm_uframes_t avail;
 210
 211	avail = snd_pcm_avail(substream);
 212	if (avail > runtime->avail_max)
 213		runtime->avail_max = avail;
 214	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
 215		if (avail >= runtime->buffer_size) {
 216			snd_pcm_drain_done(substream);
 217			return -EPIPE;
 218		}
 219	} else {
 220		if (avail >= runtime->stop_threshold) {
 221			__snd_pcm_xrun(substream);
 222			return -EPIPE;
 223		}
 224	}
 225	if (runtime->twake) {
 226		if (avail >= runtime->twake)
 227			wake_up(&runtime->tsleep);
 228	} else if (avail >= runtime->control->avail_min)
 229		wake_up(&runtime->sleep);
 230	return 0;
 231}
 232
 233static void update_audio_tstamp(struct snd_pcm_substream *substream,
 234				struct timespec64 *curr_tstamp,
 235				struct timespec64 *audio_tstamp)
 236{
 237	struct snd_pcm_runtime *runtime = substream->runtime;
 238	u64 audio_frames, audio_nsecs;
 239	struct timespec64 driver_tstamp;
 240
 241	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
 242		return;
 243
 244	if (!(substream->ops->get_time_info) ||
 245		(runtime->audio_tstamp_report.actual_type ==
 246			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 247
 248		/*
 249		 * provide audio timestamp derived from pointer position
 250		 * add delay only if requested
 251		 */
 252
 253		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
 254
 255		if (runtime->audio_tstamp_config.report_delay) {
 256			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 257				audio_frames -=  runtime->delay;
 258			else
 259				audio_frames +=  runtime->delay;
 260		}
 261		audio_nsecs = div_u64(audio_frames * 1000000000LL,
 262				runtime->rate);
 263		*audio_tstamp = ns_to_timespec64(audio_nsecs);
 264	}
 265
 266	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
 267	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
 268		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
 269		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
 270		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
 271		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
 272	}
 273
 274
 275	/*
 276	 * re-take a driver timestamp to let apps detect if the reference tstamp
 277	 * read by low-level hardware was provided with a delay
 278	 */
 279	snd_pcm_gettime(substream->runtime, &driver_tstamp);
 280	runtime->driver_tstamp = driver_tstamp;
 281}
 282
 283static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 284				  unsigned int in_interrupt)
 285{
 286	struct snd_pcm_runtime *runtime = substream->runtime;
 287	snd_pcm_uframes_t pos;
 288	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
 289	snd_pcm_sframes_t hdelta, delta;
 290	unsigned long jdelta;
 291	unsigned long curr_jiffies;
 292	struct timespec64 curr_tstamp;
 293	struct timespec64 audio_tstamp;
 294	int crossed_boundary = 0;
 295
 296	old_hw_ptr = runtime->status->hw_ptr;
 297
 298	/*
 299	 * group pointer, time and jiffies reads to allow for more
 300	 * accurate correlations/corrections.
 301	 * The values are stored at the end of this routine after
 302	 * corrections for hw_ptr position
 303	 */
 304	pos = substream->ops->pointer(substream);
 305	curr_jiffies = jiffies;
 306	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 307		if ((substream->ops->get_time_info) &&
 308			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 309			substream->ops->get_time_info(substream, &curr_tstamp,
 310						&audio_tstamp,
 311						&runtime->audio_tstamp_config,
 312						&runtime->audio_tstamp_report);
 313
 314			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
 315			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
 316				snd_pcm_gettime(runtime, &curr_tstamp);
 317		} else
 318			snd_pcm_gettime(runtime, &curr_tstamp);
 319	}
 320
 321	if (pos == SNDRV_PCM_POS_XRUN) {
 322		__snd_pcm_xrun(substream);
 323		return -EPIPE;
 324	}
 325	if (pos >= runtime->buffer_size) {
 326		if (printk_ratelimit()) {
 327			char name[16];
 328			snd_pcm_debug_name(substream, name, sizeof(name));
 329			pcm_err(substream->pcm,
 330				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
 331				name, pos, runtime->buffer_size,
 332				runtime->period_size);
 333		}
 334		pos = 0;
 335	}
 336	pos -= pos % runtime->min_align;
 337	trace_hwptr(substream, pos, in_interrupt);
 338	hw_base = runtime->hw_ptr_base;
 339	new_hw_ptr = hw_base + pos;
 340	if (in_interrupt) {
 341		/* we know that one period was processed */
 342		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
 343		delta = runtime->hw_ptr_interrupt + runtime->period_size;
 344		if (delta > new_hw_ptr) {
 345			/* check for double acknowledged interrupts */
 346			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 347			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
 348				hw_base += runtime->buffer_size;
 349				if (hw_base >= runtime->boundary) {
 350					hw_base = 0;
 351					crossed_boundary++;
 352				}
 353				new_hw_ptr = hw_base + pos;
 354				goto __delta;
 355			}
 356		}
 357	}
  358	/* new_hw_ptr might be lower than old_hw_ptr when the */
  359	/* pointer crosses the end of the ring buffer */
 360	if (new_hw_ptr < old_hw_ptr) {
 361		hw_base += runtime->buffer_size;
 362		if (hw_base >= runtime->boundary) {
 363			hw_base = 0;
 364			crossed_boundary++;
 365		}
 366		new_hw_ptr = hw_base + pos;
 367	}
 368      __delta:
 369	delta = new_hw_ptr - old_hw_ptr;
 370	if (delta < 0)
 371		delta += runtime->boundary;
 372
 373	if (runtime->no_period_wakeup) {
 374		snd_pcm_sframes_t xrun_threshold;
 375		/*
 376		 * Without regular period interrupts, we have to check
 377		 * the elapsed time to detect xruns.
 378		 */
 379		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 380		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
 381			goto no_delta_check;
 382		hdelta = jdelta - delta * HZ / runtime->rate;
 383		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
 384		while (hdelta > xrun_threshold) {
 385			delta += runtime->buffer_size;
 386			hw_base += runtime->buffer_size;
 387			if (hw_base >= runtime->boundary) {
 388				hw_base = 0;
 389				crossed_boundary++;
 390			}
 391			new_hw_ptr = hw_base + pos;
 392			hdelta -= runtime->hw_ptr_buffer_jiffies;
 393		}
 394		goto no_delta_check;
 395	}
 396
 397	/* something must be really wrong */
 398	if (delta >= runtime->buffer_size + runtime->period_size) {
 399		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
 400			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 401			     substream->stream, (long)pos,
 402			     (long)new_hw_ptr, (long)old_hw_ptr);
 403		return 0;
 404	}
 405
 406	/* Do jiffies check only in xrun_debug mode */
 407	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
 408		goto no_jiffies_check;
 409
  410	/* Skip the jiffies check for hardware with the BATCH flag.
 411	 * Such hardware usually just increases the position at each IRQ,
 412	 * thus it can't give any strange position.
 413	 */
 414	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
 415		goto no_jiffies_check;
 416	hdelta = delta;
 417	if (hdelta < runtime->delay)
 418		goto no_jiffies_check;
 419	hdelta -= runtime->delay;
 420	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 421	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 422		delta = jdelta /
 423			(((runtime->period_size * HZ) / runtime->rate)
 424								+ HZ/100);
  425		/* move new_hw_ptr according to jiffies, not the pos variable */
 426		new_hw_ptr = old_hw_ptr;
 427		hw_base = delta;
 428		/* use loop to avoid checks for delta overflows */
 429		/* the delta value is small or zero in most cases */
 430		while (delta > 0) {
 431			new_hw_ptr += runtime->period_size;
 432			if (new_hw_ptr >= runtime->boundary) {
 433				new_hw_ptr -= runtime->boundary;
 434				crossed_boundary--;
 435			}
 436			delta--;
 437		}
 438		/* align hw_base to buffer_size */
 439		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
 440			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
 441			     (long)pos, (long)hdelta,
 442			     (long)runtime->period_size, jdelta,
 443			     ((hdelta * HZ) / runtime->rate), hw_base,
 444			     (unsigned long)old_hw_ptr,
 445			     (unsigned long)new_hw_ptr);
 446		/* reset values to proper state */
 447		delta = 0;
 448		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
 449	}
 450 no_jiffies_check:
 451	if (delta > runtime->period_size + runtime->period_size / 2) {
 452		hw_ptr_error(substream, in_interrupt,
 453			     "Lost interrupts?",
 454			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 455			     substream->stream, (long)delta,
 456			     (long)new_hw_ptr,
 457			     (long)old_hw_ptr);
 458	}
 459
 460 no_delta_check:
 461	if (runtime->status->hw_ptr == new_hw_ptr) {
 462		runtime->hw_ptr_jiffies = curr_jiffies;
 463		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 464		return 0;
 465	}
 466
 467	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 468	    runtime->silence_size > 0)
 469		snd_pcm_playback_silence(substream, new_hw_ptr);
 470
 471	if (in_interrupt) {
 472		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
 473		if (delta < 0)
 474			delta += runtime->boundary;
 475		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
 476		runtime->hw_ptr_interrupt += delta;
 477		if (runtime->hw_ptr_interrupt >= runtime->boundary)
 478			runtime->hw_ptr_interrupt -= runtime->boundary;
 479	}
 480	runtime->hw_ptr_base = hw_base;
 481	runtime->status->hw_ptr = new_hw_ptr;
 482	runtime->hw_ptr_jiffies = curr_jiffies;
 483	if (crossed_boundary) {
 484		snd_BUG_ON(crossed_boundary != 1);
 485		runtime->hw_ptr_wrap += runtime->boundary;
 486	}
 487
 488	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 489
 490	return snd_pcm_update_state(substream, runtime);
 491}
 492
 493/* CAUTION: call it with irq disabled */
 494int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
 495{
 496	return snd_pcm_update_hw_ptr0(substream, 0);
 497}
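/*
 * Editorial note (not part of the original file): the bookkeeping above keeps
 * three related values.  ->pointer() returns a position inside the ring
 * buffer (0..buffer_size-1); hw_ptr_base is the last multiple of buffer_size
 * the stream has passed; status->hw_ptr = hw_ptr_base + pos is the linear
 * position modulo runtime->boundary.  For example, with buffer_size = 1024, a
 * pos wrap from 1000 to 24 bumps hw_ptr_base by 1024 so that hw_ptr keeps
 * increasing monotonically until it wraps at the boundary.
 */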
 498
 499/**
 500 * snd_pcm_set_ops - set the PCM operators
 501 * @pcm: the pcm instance
 502 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 503 * @ops: the operator table
 504 *
 505 * Sets the given PCM operators to the pcm instance.
 506 */
 507void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
 508		     const struct snd_pcm_ops *ops)
 509{
 510	struct snd_pcm_str *stream = &pcm->streams[direction];
 511	struct snd_pcm_substream *substream;
 512	
 513	for (substream = stream->substream; substream != NULL; substream = substream->next)
 514		substream->ops = ops;
 515}
 516EXPORT_SYMBOL(snd_pcm_set_ops);
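/*
 * Illustrative sketch (not part of the original file): how a driver typically
 * installs its PCM operators after creating the PCM device.  The foo_*
 * identifiers and the set of implemented callbacks are hypothetical.
 */
#if 0
static const struct snd_pcm_ops foo_playback_ops = {
	.open		= foo_pcm_open,
	.close		= foo_pcm_close,
	.hw_params	= foo_pcm_hw_params,
	.prepare	= foo_pcm_prepare,
	.trigger	= foo_pcm_trigger,
	.pointer	= foo_pcm_pointer,
};

/* called from the driver's probe routine after snd_pcm_new() */
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &foo_playback_ops);
#endif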
 517
 518/**
 519 * snd_pcm_set_sync - set the PCM sync id
 520 * @substream: the pcm substream
 521 *
 522 * Sets the PCM sync identifier for the card.
 523 */
 524void snd_pcm_set_sync(struct snd_pcm_substream *substream)
 525{
 526	struct snd_pcm_runtime *runtime = substream->runtime;
 527	
 528	runtime->sync.id32[0] = substream->pcm->card->number;
 529	runtime->sync.id32[1] = -1;
 530	runtime->sync.id32[2] = -1;
 531	runtime->sync.id32[3] = -1;
 532}
 533EXPORT_SYMBOL(snd_pcm_set_sync);
 534
 535/*
 536 *  Standard ioctl routine
 537 */
 538
 539static inline unsigned int div32(unsigned int a, unsigned int b, 
 540				 unsigned int *r)
 541{
 542	if (b == 0) {
 543		*r = 0;
 544		return UINT_MAX;
 545	}
 546	*r = a % b;
 547	return a / b;
 548}
 549
 550static inline unsigned int div_down(unsigned int a, unsigned int b)
 551{
 552	if (b == 0)
 553		return UINT_MAX;
 554	return a / b;
 555}
 556
 557static inline unsigned int div_up(unsigned int a, unsigned int b)
 558{
 559	unsigned int r;
 560	unsigned int q;
 561	if (b == 0)
 562		return UINT_MAX;
 563	q = div32(a, b, &r);
 564	if (r)
 565		++q;
 566	return q;
 567}
 568
 569static inline unsigned int mul(unsigned int a, unsigned int b)
 570{
 571	if (a == 0)
 572		return 0;
 573	if (div_down(UINT_MAX, a) < b)
 574		return UINT_MAX;
 575	return a * b;
 576}
 577
 578static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 579				    unsigned int c, unsigned int *r)
 580{
 581	u_int64_t n = (u_int64_t) a * b;
 582	if (c == 0) {
 583		*r = 0;
 584		return UINT_MAX;
 585	}
 586	n = div_u64_rem(n, c, r);
 587	if (n >= UINT_MAX) {
 588		*r = 0;
 589		return UINT_MAX;
 590	}
 591	return n;
 592}
 593
 594/**
 595 * snd_interval_refine - refine the interval value of configurator
 596 * @i: the interval value to refine
 597 * @v: the interval value to refer to
 598 *
 599 * Refines the interval value with the reference value.
 600 * The interval is changed to the range satisfying both intervals.
 601 * The interval status (min, max, integer, etc.) are evaluated.
 602 *
 603 * Return: Positive if the value is changed, zero if it's not changed, or a
 604 * negative error code.
 605 */
 606int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
 607{
 608	int changed = 0;
 609	if (snd_BUG_ON(snd_interval_empty(i)))
 610		return -EINVAL;
 611	if (i->min < v->min) {
 612		i->min = v->min;
 613		i->openmin = v->openmin;
 614		changed = 1;
 615	} else if (i->min == v->min && !i->openmin && v->openmin) {
 616		i->openmin = 1;
 617		changed = 1;
 618	}
 619	if (i->max > v->max) {
 620		i->max = v->max;
 621		i->openmax = v->openmax;
 622		changed = 1;
 623	} else if (i->max == v->max && !i->openmax && v->openmax) {
 624		i->openmax = 1;
 625		changed = 1;
 626	}
 627	if (!i->integer && v->integer) {
 628		i->integer = 1;
 629		changed = 1;
 630	}
 631	if (i->integer) {
 632		if (i->openmin) {
 633			i->min++;
 634			i->openmin = 0;
 635		}
 636		if (i->openmax) {
 637			i->max--;
 638			i->openmax = 0;
 639		}
 640	} else if (!i->openmin && !i->openmax && i->min == i->max)
 641		i->integer = 1;
 642	if (snd_interval_checkempty(i)) {
 643		snd_interval_none(i);
 644		return -EINVAL;
 645	}
 646	return changed;
 647}
 648EXPORT_SYMBOL(snd_interval_refine);
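/*
 * Editorial note (not part of the original file): a concrete illustration of
 * the refinement above.  Refining i = [8000, 48000] with v = [44100, 192000]
 * narrows i to [44100, 48000] and returns 1 (changed); refining again with
 * the same v returns 0.  If the two intervals do not overlap, the interval is
 * marked empty and -EINVAL is returned.
 */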
 649
 650static int snd_interval_refine_first(struct snd_interval *i)
 651{
 652	const unsigned int last_max = i->max;
 653
 654	if (snd_BUG_ON(snd_interval_empty(i)))
 655		return -EINVAL;
 656	if (snd_interval_single(i))
 657		return 0;
 658	i->max = i->min;
 659	if (i->openmin)
 660		i->max++;
 661	/* only exclude max value if also excluded before refine */
 662	i->openmax = (i->openmax && i->max >= last_max);
 663	return 1;
 664}
 665
 666static int snd_interval_refine_last(struct snd_interval *i)
 667{
 668	const unsigned int last_min = i->min;
 669
 670	if (snd_BUG_ON(snd_interval_empty(i)))
 671		return -EINVAL;
 672	if (snd_interval_single(i))
 673		return 0;
 674	i->min = i->max;
 675	if (i->openmax)
 676		i->min--;
 677	/* only exclude min value if also excluded before refine */
 678	i->openmin = (i->openmin && i->min <= last_min);
 679	return 1;
 680}
 681
 682void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 683{
 684	if (a->empty || b->empty) {
 685		snd_interval_none(c);
 686		return;
 687	}
 688	c->empty = 0;
 689	c->min = mul(a->min, b->min);
 690	c->openmin = (a->openmin || b->openmin);
 691	c->max = mul(a->max,  b->max);
 692	c->openmax = (a->openmax || b->openmax);
 693	c->integer = (a->integer && b->integer);
 694}
 695
 696/**
 697 * snd_interval_div - refine the interval value with division
 698 * @a: dividend
 699 * @b: divisor
 700 * @c: quotient
 701 *
 702 * c = a / b
 703 *
  704 * The result is stored in @c.
 705 */
 706void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 707{
 708	unsigned int r;
 709	if (a->empty || b->empty) {
 710		snd_interval_none(c);
 711		return;
 712	}
 713	c->empty = 0;
 714	c->min = div32(a->min, b->max, &r);
 715	c->openmin = (r || a->openmin || b->openmax);
 716	if (b->min > 0) {
 717		c->max = div32(a->max, b->min, &r);
 718		if (r) {
 719			c->max++;
 720			c->openmax = 1;
 721		} else
 722			c->openmax = (a->openmax || b->openmin);
 723	} else {
 724		c->max = UINT_MAX;
 725		c->openmax = 0;
 726	}
 727	c->integer = 0;
 728}
 729
 730/**
 731 * snd_interval_muldivk - refine the interval value
 732 * @a: dividend 1
 733 * @b: dividend 2
 734 * @k: divisor (as integer)
 735 * @c: result
  736 *
 737 * c = a * b / k
 738 *
  739 * The result is stored in @c.
 740 */
 741void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
 742		      unsigned int k, struct snd_interval *c)
 743{
 744	unsigned int r;
 745	if (a->empty || b->empty) {
 746		snd_interval_none(c);
 747		return;
 748	}
 749	c->empty = 0;
 750	c->min = muldiv32(a->min, b->min, k, &r);
 751	c->openmin = (r || a->openmin || b->openmin);
 752	c->max = muldiv32(a->max, b->max, k, &r);
 753	if (r) {
 754		c->max++;
 755		c->openmax = 1;
 756	} else
 757		c->openmax = (a->openmax || b->openmax);
 758	c->integer = 0;
 759}
 760
 761/**
 762 * snd_interval_mulkdiv - refine the interval value
 763 * @a: dividend 1
 764 * @k: dividend 2 (as integer)
 765 * @b: divisor
 766 * @c: result
 767 *
 768 * c = a * k / b
 769 *
  770 * The result is stored in @c.
 771 */
 772void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
 773		      const struct snd_interval *b, struct snd_interval *c)
 774{
 775	unsigned int r;
 776	if (a->empty || b->empty) {
 777		snd_interval_none(c);
 778		return;
 779	}
 780	c->empty = 0;
 781	c->min = muldiv32(a->min, k, b->max, &r);
 782	c->openmin = (r || a->openmin || b->openmax);
 783	if (b->min > 0) {
 784		c->max = muldiv32(a->max, k, b->min, &r);
 785		if (r) {
 786			c->max++;
 787			c->openmax = 1;
 788		} else
 789			c->openmax = (a->openmax || b->openmin);
 790	} else {
 791		c->max = UINT_MAX;
 792		c->openmax = 0;
 793	}
 794	c->integer = 0;
 795}
 796
 797/* ---- */
 798
 799
 800/**
 801 * snd_interval_ratnum - refine the interval value
 802 * @i: interval to refine
 803 * @rats_count: number of ratnum_t 
 804 * @rats: ratnum_t array
 805 * @nump: pointer to store the resultant numerator
 806 * @denp: pointer to store the resultant denominator
 807 *
 808 * Return: Positive if the value is changed, zero if it's not changed, or a
 809 * negative error code.
 810 */
 811int snd_interval_ratnum(struct snd_interval *i,
 812			unsigned int rats_count, const struct snd_ratnum *rats,
 813			unsigned int *nump, unsigned int *denp)
 814{
 815	unsigned int best_num, best_den;
 816	int best_diff;
 817	unsigned int k;
 818	struct snd_interval t;
 819	int err;
 820	unsigned int result_num, result_den;
 821	int result_diff;
 822
 823	best_num = best_den = best_diff = 0;
 824	for (k = 0; k < rats_count; ++k) {
 825		unsigned int num = rats[k].num;
 826		unsigned int den;
 827		unsigned int q = i->min;
 828		int diff;
 829		if (q == 0)
 830			q = 1;
 831		den = div_up(num, q);
 832		if (den < rats[k].den_min)
 833			continue;
 834		if (den > rats[k].den_max)
 835			den = rats[k].den_max;
 836		else {
 837			unsigned int r;
 838			r = (den - rats[k].den_min) % rats[k].den_step;
 839			if (r != 0)
 840				den -= r;
 841		}
 842		diff = num - q * den;
 843		if (diff < 0)
 844			diff = -diff;
 845		if (best_num == 0 ||
 846		    diff * best_den < best_diff * den) {
 847			best_diff = diff;
 848			best_den = den;
 849			best_num = num;
 850		}
 851	}
 852	if (best_den == 0) {
 853		i->empty = 1;
 854		return -EINVAL;
 855	}
 856	t.min = div_down(best_num, best_den);
 857	t.openmin = !!(best_num % best_den);
 858	
 859	result_num = best_num;
 860	result_diff = best_diff;
 861	result_den = best_den;
 862	best_num = best_den = best_diff = 0;
 863	for (k = 0; k < rats_count; ++k) {
 864		unsigned int num = rats[k].num;
 865		unsigned int den;
 866		unsigned int q = i->max;
 867		int diff;
 868		if (q == 0) {
 869			i->empty = 1;
 870			return -EINVAL;
 871		}
 872		den = div_down(num, q);
 873		if (den > rats[k].den_max)
 874			continue;
 875		if (den < rats[k].den_min)
 876			den = rats[k].den_min;
 877		else {
 878			unsigned int r;
 879			r = (den - rats[k].den_min) % rats[k].den_step;
 880			if (r != 0)
 881				den += rats[k].den_step - r;
 882		}
 883		diff = q * den - num;
 884		if (diff < 0)
 885			diff = -diff;
 886		if (best_num == 0 ||
 887		    diff * best_den < best_diff * den) {
 888			best_diff = diff;
 889			best_den = den;
 890			best_num = num;
 891		}
 892	}
 893	if (best_den == 0) {
 894		i->empty = 1;
 895		return -EINVAL;
 896	}
 897	t.max = div_up(best_num, best_den);
 898	t.openmax = !!(best_num % best_den);
 899	t.integer = 0;
 900	err = snd_interval_refine(i, &t);
 901	if (err < 0)
 902		return err;
 903
 904	if (snd_interval_single(i)) {
 905		if (best_diff * result_den < result_diff * best_den) {
 906			result_num = best_num;
 907			result_den = best_den;
 908		}
 909		if (nump)
 910			*nump = result_num;
 911		if (denp)
 912			*denp = result_den;
 913	}
 914	return err;
 915}
 916EXPORT_SYMBOL(snd_interval_ratnum);
 917
 918/**
 919 * snd_interval_ratden - refine the interval value
 920 * @i: interval to refine
 921 * @rats_count: number of struct ratden
 922 * @rats: struct ratden array
 923 * @nump: pointer to store the resultant numerator
 924 * @denp: pointer to store the resultant denominator
 925 *
 926 * Return: Positive if the value is changed, zero if it's not changed, or a
 927 * negative error code.
 928 */
 929static int snd_interval_ratden(struct snd_interval *i,
 930			       unsigned int rats_count,
 931			       const struct snd_ratden *rats,
 932			       unsigned int *nump, unsigned int *denp)
 933{
 934	unsigned int best_num, best_diff, best_den;
 935	unsigned int k;
 936	struct snd_interval t;
 937	int err;
 938
 939	best_num = best_den = best_diff = 0;
 940	for (k = 0; k < rats_count; ++k) {
 941		unsigned int num;
 942		unsigned int den = rats[k].den;
 943		unsigned int q = i->min;
 944		int diff;
 945		num = mul(q, den);
 946		if (num > rats[k].num_max)
 947			continue;
 948		if (num < rats[k].num_min)
  949			num = rats[k].num_min;
 950		else {
 951			unsigned int r;
 952			r = (num - rats[k].num_min) % rats[k].num_step;
 953			if (r != 0)
 954				num += rats[k].num_step - r;
 955		}
 956		diff = num - q * den;
 957		if (best_num == 0 ||
 958		    diff * best_den < best_diff * den) {
 959			best_diff = diff;
 960			best_den = den;
 961			best_num = num;
 962		}
 963	}
 964	if (best_den == 0) {
 965		i->empty = 1;
 966		return -EINVAL;
 967	}
 968	t.min = div_down(best_num, best_den);
 969	t.openmin = !!(best_num % best_den);
 970	
 971	best_num = best_den = best_diff = 0;
 972	for (k = 0; k < rats_count; ++k) {
 973		unsigned int num;
 974		unsigned int den = rats[k].den;
 975		unsigned int q = i->max;
 976		int diff;
 977		num = mul(q, den);
 978		if (num < rats[k].num_min)
 979			continue;
 980		if (num > rats[k].num_max)
 981			num = rats[k].num_max;
 982		else {
 983			unsigned int r;
 984			r = (num - rats[k].num_min) % rats[k].num_step;
 985			if (r != 0)
 986				num -= r;
 987		}
 988		diff = q * den - num;
 989		if (best_num == 0 ||
 990		    diff * best_den < best_diff * den) {
 991			best_diff = diff;
 992			best_den = den;
 993			best_num = num;
 994		}
 995	}
 996	if (best_den == 0) {
 997		i->empty = 1;
 998		return -EINVAL;
 999	}
1000	t.max = div_up(best_num, best_den);
1001	t.openmax = !!(best_num % best_den);
1002	t.integer = 0;
1003	err = snd_interval_refine(i, &t);
1004	if (err < 0)
1005		return err;
1006
1007	if (snd_interval_single(i)) {
1008		if (nump)
1009			*nump = best_num;
1010		if (denp)
1011			*denp = best_den;
1012	}
1013	return err;
1014}
1015
1016/**
1017 * snd_interval_list - refine the interval value from the list
1018 * @i: the interval value to refine
1019 * @count: the number of elements in the list
1020 * @list: the value list
1021 * @mask: the bit-mask to evaluate
1022 *
1023 * Refines the interval value from the list.
1024 * When mask is non-zero, only the elements corresponding to bit 1 are
1025 * evaluated.
1026 *
1027 * Return: Positive if the value is changed, zero if it's not changed, or a
1028 * negative error code.
1029 */
1030int snd_interval_list(struct snd_interval *i, unsigned int count,
1031		      const unsigned int *list, unsigned int mask)
1032{
1033        unsigned int k;
1034	struct snd_interval list_range;
1035
1036	if (!count) {
1037		i->empty = 1;
1038		return -EINVAL;
1039	}
1040	snd_interval_any(&list_range);
1041	list_range.min = UINT_MAX;
1042	list_range.max = 0;
1043        for (k = 0; k < count; k++) {
1044		if (mask && !(mask & (1 << k)))
1045			continue;
1046		if (!snd_interval_test(i, list[k]))
1047			continue;
1048		list_range.min = min(list_range.min, list[k]);
1049		list_range.max = max(list_range.max, list[k]);
1050        }
1051	return snd_interval_refine(i, &list_range);
1052}
1053EXPORT_SYMBOL(snd_interval_list);
1054
1055/**
1056 * snd_interval_ranges - refine the interval value from the list of ranges
1057 * @i: the interval value to refine
1058 * @count: the number of elements in the list of ranges
1059 * @ranges: the ranges list
1060 * @mask: the bit-mask to evaluate
1061 *
1062 * Refines the interval value from the list of ranges.
1063 * When mask is non-zero, only the elements corresponding to bit 1 are
1064 * evaluated.
1065 *
1066 * Return: Positive if the value is changed, zero if it's not changed, or a
1067 * negative error code.
1068 */
1069int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1070			const struct snd_interval *ranges, unsigned int mask)
1071{
1072	unsigned int k;
1073	struct snd_interval range_union;
1074	struct snd_interval range;
1075
1076	if (!count) {
1077		snd_interval_none(i);
1078		return -EINVAL;
1079	}
1080	snd_interval_any(&range_union);
1081	range_union.min = UINT_MAX;
1082	range_union.max = 0;
1083	for (k = 0; k < count; k++) {
1084		if (mask && !(mask & (1 << k)))
1085			continue;
1086		snd_interval_copy(&range, &ranges[k]);
1087		if (snd_interval_refine(&range, i) < 0)
1088			continue;
1089		if (snd_interval_empty(&range))
1090			continue;
1091
1092		if (range.min < range_union.min) {
1093			range_union.min = range.min;
1094			range_union.openmin = 1;
1095		}
1096		if (range.min == range_union.min && !range.openmin)
1097			range_union.openmin = 0;
1098		if (range.max > range_union.max) {
1099			range_union.max = range.max;
1100			range_union.openmax = 1;
1101		}
1102		if (range.max == range_union.max && !range.openmax)
1103			range_union.openmax = 0;
1104	}
1105	return snd_interval_refine(i, &range_union);
1106}
1107EXPORT_SYMBOL(snd_interval_ranges);
1108
1109static int snd_interval_step(struct snd_interval *i, unsigned int step)
1110{
1111	unsigned int n;
1112	int changed = 0;
1113	n = i->min % step;
1114	if (n != 0 || i->openmin) {
1115		i->min += step - n;
1116		i->openmin = 0;
1117		changed = 1;
1118	}
1119	n = i->max % step;
1120	if (n != 0 || i->openmax) {
1121		i->max -= n;
1122		i->openmax = 0;
1123		changed = 1;
1124	}
1125	if (snd_interval_checkempty(i)) {
1126		i->empty = 1;
1127		return -EINVAL;
1128	}
1129	return changed;
1130}
1131
1132/* Info constraints helpers */
1133
1134/**
1135 * snd_pcm_hw_rule_add - add the hw-constraint rule
1136 * @runtime: the pcm runtime instance
1137 * @cond: condition bits
1138 * @var: the variable to evaluate
1139 * @func: the evaluation function
1140 * @private: the private data pointer passed to function
1141 * @dep: the dependent variables
1142 *
1143 * Return: Zero if successful, or a negative error code on failure.
1144 */
1145int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1146			int var,
1147			snd_pcm_hw_rule_func_t func, void *private,
1148			int dep, ...)
1149{
1150	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1151	struct snd_pcm_hw_rule *c;
1152	unsigned int k;
1153	va_list args;
1154	va_start(args, dep);
1155	if (constrs->rules_num >= constrs->rules_all) {
1156		struct snd_pcm_hw_rule *new;
1157		unsigned int new_rules = constrs->rules_all + 16;
1158		new = krealloc_array(constrs->rules, new_rules,
1159				     sizeof(*c), GFP_KERNEL);
1160		if (!new) {
1161			va_end(args);
1162			return -ENOMEM;
1163		}
1164		constrs->rules = new;
1165		constrs->rules_all = new_rules;
1166	}
1167	c = &constrs->rules[constrs->rules_num];
1168	c->cond = cond;
1169	c->func = func;
1170	c->var = var;
1171	c->private = private;
1172	k = 0;
1173	while (1) {
1174		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1175			va_end(args);
1176			return -EINVAL;
1177		}
1178		c->deps[k++] = dep;
1179		if (dep < 0)
1180			break;
1181		dep = va_arg(args, int);
1182	}
1183	constrs->rules_num++;
1184	va_end(args);
1185	return 0;
1186}
1187EXPORT_SYMBOL(snd_pcm_hw_rule_add);
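/*
 * Illustrative sketch (not part of the original file): a driver-private rule
 * registered with snd_pcm_hw_rule_add(), limiting the channel count at high
 * rates.  The foo_* identifiers and the 96 kHz / stereo policy are
 * hypothetical.
 */
#if 0
static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
				     struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	const struct snd_interval *r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t;

	snd_interval_any(&t);
	if (r->min > 96000)
		t.max = 2;	/* allow at most stereo above 96 kHz */
	return snd_interval_refine(c, &t);
}

/* from the driver's .open callback */
err = snd_pcm_hw_rule_add(substream->runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
			  foo_rule_channels_by_rate, NULL,
			  SNDRV_PCM_HW_PARAM_RATE, -1);
#endif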
1188
1189/**
1190 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1191 * @runtime: PCM runtime instance
1192 * @var: hw_params variable to apply the mask
1193 * @mask: the bitmap mask
1194 *
1195 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1196 *
1197 * Return: Zero if successful, or a negative error code on failure.
1198 */
1199int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1200			       u_int32_t mask)
1201{
1202	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1203	struct snd_mask *maskp = constrs_mask(constrs, var);
1204	*maskp->bits &= mask;
1205	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1206	if (*maskp->bits == 0)
1207		return -EINVAL;
1208	return 0;
1209}
1210
1211/**
1212 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1213 * @runtime: PCM runtime instance
1214 * @var: hw_params variable to apply the mask
1215 * @mask: the 64bit bitmap mask
1216 *
1217 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1218 *
1219 * Return: Zero if successful, or a negative error code on failure.
1220 */
1221int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1222				 u_int64_t mask)
1223{
1224	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1225	struct snd_mask *maskp = constrs_mask(constrs, var);
1226	maskp->bits[0] &= (u_int32_t)mask;
1227	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1228	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1229	if (! maskp->bits[0] && ! maskp->bits[1])
1230		return -EINVAL;
1231	return 0;
1232}
1233EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1234
1235/**
1236 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1237 * @runtime: PCM runtime instance
1238 * @var: hw_params variable to apply the integer constraint
1239 *
1240 * Apply the constraint of integer to an interval parameter.
1241 *
1242 * Return: Positive if the value is changed, zero if it's not changed, or a
1243 * negative error code.
1244 */
1245int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1246{
1247	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1248	return snd_interval_setinteger(constrs_interval(constrs, var));
1249}
1250EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
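/*
 * Illustrative sketch (not part of the original file): the most common use of
 * the helper above is forcing a whole number of periods per buffer from a
 * driver's .open callback.  The foo_* identifier is hypothetical.
 */
#if 0
static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_integer(substream->runtime,
					     SNDRV_PCM_HW_PARAM_PERIODS);
}
#endif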
1251
1252/**
1253 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1254 * @runtime: PCM runtime instance
1255 * @var: hw_params variable to apply the range
1256 * @min: the minimal value
1257 * @max: the maximal value
1258 * 
1259 * Apply the min/max range constraint to an interval parameter.
1260 *
1261 * Return: Positive if the value is changed, zero if it's not changed, or a
1262 * negative error code.
1263 */
1264int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1265				 unsigned int min, unsigned int max)
1266{
1267	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268	struct snd_interval t;
1269	t.min = min;
1270	t.max = max;
1271	t.openmin = t.openmax = 0;
1272	t.integer = 0;
1273	return snd_interval_refine(constrs_interval(constrs, var), &t);
1274}
1275EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
1276
1277static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1278				struct snd_pcm_hw_rule *rule)
1279{
1280	struct snd_pcm_hw_constraint_list *list = rule->private;
1281	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1282}		
1283
1284
1285/**
1286 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1287 * @runtime: PCM runtime instance
1288 * @cond: condition bits
1289 * @var: hw_params variable to apply the list constraint
1290 * @l: list
1291 * 
1292 * Apply the list of constraints to an interval parameter.
1293 *
1294 * Return: Zero if successful, or a negative error code on failure.
1295 */
1296int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1297			       unsigned int cond,
1298			       snd_pcm_hw_param_t var,
1299			       const struct snd_pcm_hw_constraint_list *l)
1300{
1301	return snd_pcm_hw_rule_add(runtime, cond, var,
1302				   snd_pcm_hw_rule_list, (void *)l,
1303				   var, -1);
1304}
1305EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
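/*
 * Illustrative sketch (not part of the original file): restricting the rate
 * to a fixed set of values with snd_pcm_hw_constraint_list() from a driver's
 * .open callback.  The foo_* identifiers and the rate set are hypothetical.
 */
#if 0
static const unsigned int foo_rates[] = { 44100, 48000, 96000 };
static const struct snd_pcm_hw_constraint_list foo_rate_list = {
	.count = ARRAY_SIZE(foo_rates),
	.list = foo_rates,
	.mask = 0,
};

static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &foo_rate_list);
}
#endif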
1306
1307static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1308				  struct snd_pcm_hw_rule *rule)
1309{
1310	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1311	return snd_interval_ranges(hw_param_interval(params, rule->var),
1312				   r->count, r->ranges, r->mask);
1313}
1314
1315
1316/**
1317 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1318 * @runtime: PCM runtime instance
1319 * @cond: condition bits
1320 * @var: hw_params variable to apply the list of range constraints
1321 * @r: ranges
1322 *
1323 * Apply the list of range constraints to an interval parameter.
1324 *
1325 * Return: Zero if successful, or a negative error code on failure.
1326 */
1327int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1328				 unsigned int cond,
1329				 snd_pcm_hw_param_t var,
1330				 const struct snd_pcm_hw_constraint_ranges *r)
1331{
1332	return snd_pcm_hw_rule_add(runtime, cond, var,
1333				   snd_pcm_hw_rule_ranges, (void *)r,
1334				   var, -1);
1335}
1336EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1337
1338static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1339				   struct snd_pcm_hw_rule *rule)
1340{
1341	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1342	unsigned int num = 0, den = 0;
1343	int err;
1344	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1345				  r->nrats, r->rats, &num, &den);
1346	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1347		params->rate_num = num;
1348		params->rate_den = den;
1349	}
1350	return err;
1351}
1352
1353/**
1354 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1355 * @runtime: PCM runtime instance
1356 * @cond: condition bits
1357 * @var: hw_params variable to apply the ratnums constraint
 1358 * @r: struct snd_pcm_hw_constraint_ratnums constraints
1359 *
1360 * Return: Zero if successful, or a negative error code on failure.
1361 */
1362int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, 
1363				  unsigned int cond,
1364				  snd_pcm_hw_param_t var,
1365				  const struct snd_pcm_hw_constraint_ratnums *r)
1366{
1367	return snd_pcm_hw_rule_add(runtime, cond, var,
1368				   snd_pcm_hw_rule_ratnums, (void *)r,
1369				   var, -1);
1370}
1371EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
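/*
 * Illustrative sketch (not part of the original file): describing rates
 * derived from a master clock with an integer divider via struct snd_ratnum.
 * The 24.576 MHz clock, the divider range and the foo_* identifiers are
 * hypothetical (here the reachable rates are 96000 and 48000).
 */
#if 0
static const struct snd_ratnum foo_clock = {
	.num = 24576000,	/* master clock in Hz */
	.den_min = 256,		/* smallest divider -> highest rate */
	.den_max = 512,
	.den_step = 256,
};
static const struct snd_pcm_hw_constraint_ratnums foo_clock_rates = {
	.nrats = 1,
	.rats = &foo_clock,
};

static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_ratnums(substream->runtime, 0,
					     SNDRV_PCM_HW_PARAM_RATE,
					     &foo_clock_rates);
}
#endif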
1372
1373static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1374				   struct snd_pcm_hw_rule *rule)
1375{
1376	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1377	unsigned int num = 0, den = 0;
1378	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1379				  r->nrats, r->rats, &num, &den);
1380	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1381		params->rate_num = num;
1382		params->rate_den = den;
1383	}
1384	return err;
1385}
1386
1387/**
1388 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1389 * @runtime: PCM runtime instance
1390 * @cond: condition bits
1391 * @var: hw_params variable to apply the ratdens constraint
 1392 * @r: struct snd_pcm_hw_constraint_ratdens constraints
1393 *
1394 * Return: Zero if successful, or a negative error code on failure.
1395 */
1396int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, 
1397				  unsigned int cond,
1398				  snd_pcm_hw_param_t var,
1399				  const struct snd_pcm_hw_constraint_ratdens *r)
1400{
1401	return snd_pcm_hw_rule_add(runtime, cond, var,
1402				   snd_pcm_hw_rule_ratdens, (void *)r,
1403				   var, -1);
1404}
1405EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1406
1407static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1408				  struct snd_pcm_hw_rule *rule)
1409{
1410	unsigned int l = (unsigned long) rule->private;
1411	int width = l & 0xffff;
1412	unsigned int msbits = l >> 16;
1413	const struct snd_interval *i =
1414		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1415
1416	if (!snd_interval_single(i))
1417		return 0;
1418
1419	if ((snd_interval_value(i) == width) ||
1420	    (width == 0 && snd_interval_value(i) > msbits))
1421		params->msbits = min_not_zero(params->msbits, msbits);
1422
1423	return 0;
1424}
1425
1426/**
1427 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1428 * @runtime: PCM runtime instance
1429 * @cond: condition bits
1430 * @width: sample bits width
1431 * @msbits: msbits width
1432 *
1433 * This constraint will set the number of most significant bits (msbits) if a
 1434 * sample format with the specified width has been selected. If width is set to 0,
1435 * the msbits will be set for any sample format with a width larger than the
1436 * specified msbits.
1437 *
1438 * Return: Zero if successful, or a negative error code on failure.
1439 */
1440int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, 
1441				 unsigned int cond,
1442				 unsigned int width,
1443				 unsigned int msbits)
1444{
1445	unsigned long l = (msbits << 16) | width;
1446	return snd_pcm_hw_rule_add(runtime, cond, -1,
1447				    snd_pcm_hw_rule_msbits,
1448				    (void*) l,
1449				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1450}
1451EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
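/*
 * Illustrative sketch (not part of the original file): a codec that accepts
 * 32-bit containers but only resolves 24 bits could advertise that as below;
 * userspace then sees msbits = 24 for the 32-bit sample formats.
 */
#if 0
/* from a driver's .open callback */
err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
#endif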
1452
1453static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1454				struct snd_pcm_hw_rule *rule)
1455{
1456	unsigned long step = (unsigned long) rule->private;
1457	return snd_interval_step(hw_param_interval(params, rule->var), step);
1458}
1459
1460/**
1461 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1462 * @runtime: PCM runtime instance
1463 * @cond: condition bits
1464 * @var: hw_params variable to apply the step constraint
1465 * @step: step size
1466 *
1467 * Return: Zero if successful, or a negative error code on failure.
1468 */
1469int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1470			       unsigned int cond,
1471			       snd_pcm_hw_param_t var,
1472			       unsigned long step)
1473{
1474	return snd_pcm_hw_rule_add(runtime, cond, var, 
1475				   snd_pcm_hw_rule_step, (void *) step,
1476				   var, -1);
1477}
1478EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1479
1480static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1481{
1482	static const unsigned int pow2_sizes[] = {
1483		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1484		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1485		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1486		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1487	};
1488	return snd_interval_list(hw_param_interval(params, rule->var),
1489				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1490}		
1491
1492/**
1493 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1494 * @runtime: PCM runtime instance
1495 * @cond: condition bits
1496 * @var: hw_params variable to apply the power-of-2 constraint
1497 *
1498 * Return: Zero if successful, or a negative error code on failure.
1499 */
1500int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1501			       unsigned int cond,
1502			       snd_pcm_hw_param_t var)
1503{
1504	return snd_pcm_hw_rule_add(runtime, cond, var, 
1505				   snd_pcm_hw_rule_pow2, NULL,
1506				   var, -1);
1507}
1508EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1509
1510static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1511					   struct snd_pcm_hw_rule *rule)
1512{
1513	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1514	struct snd_interval *rate;
1515
1516	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1517	return snd_interval_list(rate, 1, &base_rate, 0);
1518}
1519
1520/**
1521 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1522 * @runtime: PCM runtime instance
1523 * @base_rate: the rate at which the hardware does not resample
1524 *
1525 * Return: Zero if successful, or a negative error code on failure.
1526 */
1527int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1528			       unsigned int base_rate)
1529{
1530	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1531				   SNDRV_PCM_HW_PARAM_RATE,
1532				   snd_pcm_hw_rule_noresample_func,
1533				   (void *)(uintptr_t)base_rate,
1534				   SNDRV_PCM_HW_PARAM_RATE, -1);
1535}
1536EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
1537
1538static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1539				  snd_pcm_hw_param_t var)
1540{
1541	if (hw_is_mask(var)) {
1542		snd_mask_any(hw_param_mask(params, var));
1543		params->cmask |= 1 << var;
1544		params->rmask |= 1 << var;
1545		return;
1546	}
1547	if (hw_is_interval(var)) {
1548		snd_interval_any(hw_param_interval(params, var));
1549		params->cmask |= 1 << var;
1550		params->rmask |= 1 << var;
1551		return;
1552	}
1553	snd_BUG();
1554}
1555
1556void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1557{
1558	unsigned int k;
1559	memset(params, 0, sizeof(*params));
1560	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1561		_snd_pcm_hw_param_any(params, k);
1562	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1563		_snd_pcm_hw_param_any(params, k);
1564	params->info = ~0U;
1565}
1566EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1567
1568/**
1569 * snd_pcm_hw_param_value - return @params field @var value
1570 * @params: the hw_params instance
1571 * @var: parameter to retrieve
1572 * @dir: pointer to the direction (-1,0,1) or %NULL
1573 *
1574 * Return: The value for field @var if it's fixed in configuration space
1575 * defined by @params. -%EINVAL otherwise.
1576 */
1577int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1578			   snd_pcm_hw_param_t var, int *dir)
1579{
1580	if (hw_is_mask(var)) {
1581		const struct snd_mask *mask = hw_param_mask_c(params, var);
1582		if (!snd_mask_single(mask))
1583			return -EINVAL;
1584		if (dir)
1585			*dir = 0;
1586		return snd_mask_value(mask);
1587	}
1588	if (hw_is_interval(var)) {
1589		const struct snd_interval *i = hw_param_interval_c(params, var);
1590		if (!snd_interval_single(i))
1591			return -EINVAL;
1592		if (dir)
1593			*dir = i->openmin;
1594		return snd_interval_value(i);
1595	}
1596	return -EINVAL;
1597}
1598EXPORT_SYMBOL(snd_pcm_hw_param_value);
1599
1600void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1601				snd_pcm_hw_param_t var)
1602{
1603	if (hw_is_mask(var)) {
1604		snd_mask_none(hw_param_mask(params, var));
1605		params->cmask |= 1 << var;
1606		params->rmask |= 1 << var;
1607	} else if (hw_is_interval(var)) {
1608		snd_interval_none(hw_param_interval(params, var));
1609		params->cmask |= 1 << var;
1610		params->rmask |= 1 << var;
1611	} else {
1612		snd_BUG();
1613	}
1614}
1615EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1616
1617static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1618				   snd_pcm_hw_param_t var)
1619{
1620	int changed;
1621	if (hw_is_mask(var))
1622		changed = snd_mask_refine_first(hw_param_mask(params, var));
1623	else if (hw_is_interval(var))
1624		changed = snd_interval_refine_first(hw_param_interval(params, var));
1625	else
1626		return -EINVAL;
1627	if (changed > 0) {
1628		params->cmask |= 1 << var;
1629		params->rmask |= 1 << var;
1630	}
1631	return changed;
1632}
1633
1634
1635/**
1636 * snd_pcm_hw_param_first - refine config space and return minimum value
1637 * @pcm: PCM instance
1638 * @params: the hw_params instance
1639 * @var: parameter to retrieve
1640 * @dir: pointer to the direction (-1,0,1) or %NULL
1641 *
1642 * Inside configuration space defined by @params remove from @var all
1643 * values > minimum. Reduce configuration space accordingly.
1644 *
1645 * Return: The minimum, or a negative error code on failure.
1646 */
1647int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, 
1648			   struct snd_pcm_hw_params *params, 
1649			   snd_pcm_hw_param_t var, int *dir)
1650{
1651	int changed = _snd_pcm_hw_param_first(params, var);
1652	if (changed < 0)
1653		return changed;
1654	if (params->rmask) {
1655		int err = snd_pcm_hw_refine(pcm, params);
1656		if (err < 0)
1657			return err;
1658	}
1659	return snd_pcm_hw_param_value(params, var, dir);
1660}
1661EXPORT_SYMBOL(snd_pcm_hw_param_first);
1662
1663static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1664				  snd_pcm_hw_param_t var)
1665{
1666	int changed;
1667	if (hw_is_mask(var))
1668		changed = snd_mask_refine_last(hw_param_mask(params, var));
1669	else if (hw_is_interval(var))
1670		changed = snd_interval_refine_last(hw_param_interval(params, var));
1671	else
1672		return -EINVAL;
1673	if (changed > 0) {
1674		params->cmask |= 1 << var;
1675		params->rmask |= 1 << var;
1676	}
1677	return changed;
1678}
1679
1680
1681/**
1682 * snd_pcm_hw_param_last - refine config space and return maximum value
1683 * @pcm: PCM instance
1684 * @params: the hw_params instance
1685 * @var: parameter to retrieve
1686 * @dir: pointer to the direction (-1,0,1) or %NULL
1687 *
1688 * Inside configuration space defined by @params remove from @var all
1689 * values < maximum. Reduce configuration space accordingly.
1690 *
1691 * Return: The maximum, or a negative error code on failure.
1692 */
1693int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, 
1694			  struct snd_pcm_hw_params *params,
1695			  snd_pcm_hw_param_t var, int *dir)
1696{
1697	int changed = _snd_pcm_hw_param_last(params, var);
1698	if (changed < 0)
1699		return changed;
1700	if (params->rmask) {
1701		int err = snd_pcm_hw_refine(pcm, params);
1702		if (err < 0)
1703			return err;
1704	}
1705	return snd_pcm_hw_param_value(params, var, dir);
1706}
1707EXPORT_SYMBOL(snd_pcm_hw_param_last);
1708
1709/**
 1710 * snd_pcm_hw_params_bits - Get the number of bits per sample.
1711 * @p: hardware parameters
1712 *
1713 * Return: The number of bits per sample based on the format,
1714 * subformat and msbits the specified hw params has.
1715 */
1716int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1717{
1718	snd_pcm_subformat_t subformat = params_subformat(p);
1719	snd_pcm_format_t format = params_format(p);
1720
1721	switch (format) {
1722	case SNDRV_PCM_FORMAT_S32_LE:
1723	case SNDRV_PCM_FORMAT_U32_LE:
1724	case SNDRV_PCM_FORMAT_S32_BE:
1725	case SNDRV_PCM_FORMAT_U32_BE:
1726		switch (subformat) {
1727		case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1728			return 20;
1729		case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1730			return 24;
1731		case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1732		case SNDRV_PCM_SUBFORMAT_STD:
1733		default:
1734			break;
1735		}
1736		fallthrough;
1737	default:
1738		return snd_pcm_format_width(format);
1739	}
1740}
1741EXPORT_SYMBOL(snd_pcm_hw_params_bits);
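/*
 * Editorial note (not part of the original file): for example, hw_params with
 * format = S32_LE and subformat = MSBITS_24 yields 24 here, while S16_LE with
 * the standard subformat falls through to snd_pcm_format_width() and yields 16.
 */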
1742
1743static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1744				   void *arg)
1745{
1746	struct snd_pcm_runtime *runtime = substream->runtime;
1747	unsigned long flags;
1748	snd_pcm_stream_lock_irqsave(substream, flags);
1749	if (snd_pcm_running(substream) &&
1750	    snd_pcm_update_hw_ptr(substream) >= 0)
1751		runtime->status->hw_ptr %= runtime->buffer_size;
1752	else {
1753		runtime->status->hw_ptr = 0;
1754		runtime->hw_ptr_wrap = 0;
1755	}
1756	snd_pcm_stream_unlock_irqrestore(substream, flags);
1757	return 0;
1758}
1759
1760static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1761					  void *arg)
1762{
1763	struct snd_pcm_channel_info *info = arg;
1764	struct snd_pcm_runtime *runtime = substream->runtime;
1765	int width;
1766	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1767		info->offset = -1;
1768		return 0;
1769	}
1770	width = snd_pcm_format_physical_width(runtime->format);
1771	if (width < 0)
1772		return width;
1773	info->offset = 0;
1774	switch (runtime->access) {
1775	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1776	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1777		info->first = info->channel * width;
1778		info->step = runtime->channels * width;
1779		break;
1780	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1781	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1782	{
1783		size_t size = runtime->dma_bytes / runtime->channels;
1784		info->first = info->channel * size * 8;
1785		info->step = width;
1786		break;
1787	}
1788	default:
1789		snd_BUG();
1790		break;
1791	}
1792	return 0;
1793}
1794
1795static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1796				       void *arg)
1797{
1798	struct snd_pcm_hw_params *params = arg;
1799	snd_pcm_format_t format;
1800	int channels;
1801	ssize_t frame_size;
1802
1803	params->fifo_size = substream->runtime->hw.fifo_size;
1804	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1805		format = params_format(params);
1806		channels = params_channels(params);
1807		frame_size = snd_pcm_format_size(format, channels);
1808		if (frame_size > 0)
1809			params->fifo_size /= frame_size;
1810	}
1811	return 0;
1812}
1813
1814/**
1815 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1816 * @substream: the pcm substream instance
1817 * @cmd: ioctl command
1818 * @arg: ioctl argument
1819 *
1820 * Processes the generic ioctl commands for PCM.
1821 * Can be passed as the ioctl callback for PCM ops.
1822 *
1823 * Return: Zero if successful, or a negative error code on failure.
1824 */
1825int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1826		      unsigned int cmd, void *arg)
1827{
1828	switch (cmd) {
1829	case SNDRV_PCM_IOCTL1_RESET:
1830		return snd_pcm_lib_ioctl_reset(substream, arg);
1831	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1832		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1833	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1834		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1835	}
1836	return -ENXIO;
1837}
1838EXPORT_SYMBOL(snd_pcm_lib_ioctl);
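/*
 * Editorial usage sketch (not part of the original file): a driver can
 * simply plug this helper into its PCM ops to get the generic ioctl
 * handling; the foo_* names below are hypothetical.
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open		= foo_pcm_open,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= foo_pcm_hw_params,
 *		.pointer	= foo_pcm_pointer,
 *	};
 */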
1839
1840/**
1841 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1842 *						under acquired lock of PCM substream.
1843 * @substream: the instance of pcm substream.
1844 *
1845 * This function is called when a batch of audio data frames, the same size as the period of the
1846 * buffer, has been processed in the audio data transfer.
1847 *
1848 * The call updates the runtime status with the latest position of the audio data transfer, checks
1849 * for overrun and underrun of the buffer, wakes up user processes waiting for available audio data
1850 * frames, samples the audio timestamp, and stops or drains the PCM substream according to the
1851 * configured thresholds.
1852 *
1853 * The function is intended for the case that the PCM driver handles audio data frames with the
1854 * lock of the PCM substream already acquired; e.g. in a callback of any operation of &snd_pcm_ops
1855 * in process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1856 * instead, since the lock of the PCM substream must be acquired in advance for this function.
1857 *
1858 * Developers should keep in mind that some callbacks in &snd_pcm_ops are invoked by the call of
1859 * this function:
1860 *
1861 * - .pointer - to retrieve the current position of the audio data transfer as a frame count or an XRUN state.
1862 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1863 * - .get_time_info - to retrieve the audio timestamp if needed.
1864 *
1865 * Even if more than one period has elapsed since the last call, call this only once.
1866 */
1867void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1868{
1869	struct snd_pcm_runtime *runtime;
1870
1871	if (PCM_RUNTIME_CHECK(substream))
1872		return;
1873	runtime = substream->runtime;
1874
1875	if (!snd_pcm_running(substream) ||
1876	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1877		goto _end;
1878
1879#ifdef CONFIG_SND_PCM_TIMER
1880	if (substream->timer_running)
1881		snd_timer_interrupt(substream->timer, 1);
1882#endif
1883 _end:
1884	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1885}
1886EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
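/*
 * Editorial usage sketch (not part of the original file): a driver that
 * finishes a period while already holding the stream lock, e.g. inside
 * its .ack callback, can report it directly; foo_pcm_ack is hypothetical.
 *
 *	static int foo_pcm_ack(struct snd_pcm_substream *substream)
 *	{
 *		... queue the newly written data to the hardware ...
 *		snd_pcm_period_elapsed_under_stream_lock(substream);
 *		return 0;
 *	}
 */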
1887
1888/**
1889 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1890 *			      PCM substream.
1891 * @substream: the instance of PCM substream.
1892 *
1893 * This function is mostly the same as ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1894 * it acquires the lock of the PCM substream by itself.
1895 *
1896 * It's typically called from an IRQ handler when a hardware IRQ occurs, to notify that a batch of
1897 * audio data frames, the same size as the period of the buffer, has been processed in the audio
1898 * data transfer.
1899 */
1900void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1901{
1902	unsigned long flags;
1903
1904	if (snd_BUG_ON(!substream))
1905		return;
1906
1907	snd_pcm_stream_lock_irqsave(substream, flags);
1908	snd_pcm_period_elapsed_under_stream_lock(substream);
1909	snd_pcm_stream_unlock_irqrestore(substream, flags);
1910}
1911EXPORT_SYMBOL(snd_pcm_period_elapsed);
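/*
 * Editorial usage sketch (not part of the original file): the typical
 * caller is a hardware interrupt handler, invoked once per completed
 * period; all foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		if (!foo_irq_pending(chip))
 *			return IRQ_NONE;
 *		foo_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */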
1912
1913/*
1914 * Wait until avail_min data becomes available
1915 * Returns a negative error code if any error occurs during operation.
1916 * The available space is stored in availp.  When err = 0 and avail = 0
1917 * on a capture stream, it indicates that the stream is in the DRAINING state.
1918 */
1919static int wait_for_avail(struct snd_pcm_substream *substream,
1920			      snd_pcm_uframes_t *availp)
1921{
1922	struct snd_pcm_runtime *runtime = substream->runtime;
1923	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1924	wait_queue_entry_t wait;
1925	int err = 0;
1926	snd_pcm_uframes_t avail = 0;
1927	long wait_time, tout;
1928
1929	init_waitqueue_entry(&wait, current);
1930	set_current_state(TASK_INTERRUPTIBLE);
1931	add_wait_queue(&runtime->tsleep, &wait);
1932
1933	if (runtime->no_period_wakeup)
1934		wait_time = MAX_SCHEDULE_TIMEOUT;
1935	else {
1936		/* use wait time from substream if available */
1937		if (substream->wait_time) {
1938			wait_time = substream->wait_time;
1939		} else {
1940			wait_time = 100;
1941
1942			if (runtime->rate) {
1943				long t = runtime->buffer_size * 1100 / runtime->rate;
1944				wait_time = max(t, wait_time);
1945			}
1946		}
1947		wait_time = msecs_to_jiffies(wait_time);
1948	}
1949
1950	for (;;) {
1951		if (signal_pending(current)) {
1952			err = -ERESTARTSYS;
1953			break;
1954		}
1955
1956		/*
1957		 * We need to check first whether space has already become
1958		 * available (and thus the wakeup has already happened), to
1959		 * close that race.  This check must happen after we have
1960		 * been added to the waitqueue and the current task state
1961		 * has been set to INTERRUPTIBLE.
1962		 */
1963		avail = snd_pcm_avail(substream);
1964		if (avail >= runtime->twake)
1965			break;
1966		snd_pcm_stream_unlock_irq(substream);
1967
1968		tout = schedule_timeout(wait_time);
1969
1970		snd_pcm_stream_lock_irq(substream);
1971		set_current_state(TASK_INTERRUPTIBLE);
1972		switch (runtime->state) {
1973		case SNDRV_PCM_STATE_SUSPENDED:
1974			err = -ESTRPIPE;
1975			goto _endloop;
1976		case SNDRV_PCM_STATE_XRUN:
1977			err = -EPIPE;
1978			goto _endloop;
1979		case SNDRV_PCM_STATE_DRAINING:
1980			if (is_playback)
1981				err = -EPIPE;
1982			else 
1983				avail = 0; /* indicate draining */
1984			goto _endloop;
1985		case SNDRV_PCM_STATE_OPEN:
1986		case SNDRV_PCM_STATE_SETUP:
1987		case SNDRV_PCM_STATE_DISCONNECTED:
1988			err = -EBADFD;
1989			goto _endloop;
1990		case SNDRV_PCM_STATE_PAUSED:
1991			continue;
1992		}
1993		if (!tout) {
1994			pcm_dbg(substream->pcm,
1995				"%s timeout (DMA or IRQ trouble?)\n",
1996				is_playback ? "playback write" : "capture read");
1997			err = -EIO;
1998			break;
1999		}
2000	}
2001 _endloop:
2002	set_current_state(TASK_RUNNING);
2003	remove_wait_queue(&runtime->tsleep, &wait);
2004	*availp = avail;
2005	return err;
2006}
2007	
2008typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2009			      int channel, unsigned long hwoff,
2010			      struct iov_iter *iter, unsigned long bytes);
2011
2012typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2013			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2014			  bool);
2015
2016/* calculate the target DMA-buffer position to be written/read */
2017static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2018			   int channel, unsigned long hwoff)
2019{
2020	return runtime->dma_area + hwoff +
2021		channel * (runtime->dma_bytes / runtime->channels);
2022}
2023
2024/* default copy ops for write; used for both interleaved and non-interleaved modes */
2025static int default_write_copy(struct snd_pcm_substream *substream,
2026			      int channel, unsigned long hwoff,
2027			      struct iov_iter *iter, unsigned long bytes)
2028{
2029	if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2030			   bytes, iter) != bytes)
2031		return -EFAULT;
2032	return 0;
2033}
2034
2035/* fill silence instead of copying data; called as a transfer helper
2036 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2037 * a NULL buffer is passed
2038 */
2039static int fill_silence(struct snd_pcm_substream *substream, int channel,
2040			unsigned long hwoff, struct iov_iter *iter,
2041			unsigned long bytes)
2042{
2043	struct snd_pcm_runtime *runtime = substream->runtime;
2044
2045	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2046		return 0;
2047	if (substream->ops->fill_silence)
2048		return substream->ops->fill_silence(substream, channel,
2049						    hwoff, bytes);
2050
2051	snd_pcm_format_set_silence(runtime->format,
2052				   get_dma_ptr(runtime, channel, hwoff),
2053				   bytes_to_samples(runtime, bytes));
2054	return 0;
2055}
2056
2057/* default copy ops for read; used for both interleaved and non-interleaved modes */
2058static int default_read_copy(struct snd_pcm_substream *substream,
2059			     int channel, unsigned long hwoff,
2060			     struct iov_iter *iter, unsigned long bytes)
2061{
2062	if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2063			 bytes, iter) != bytes)
2064		return -EFAULT;
2065	return 0;
2066}
2067
2068/* call transfer with the filled iov_iter */
2069static int do_transfer(struct snd_pcm_substream *substream, int c,
2070		       unsigned long hwoff, void *data, unsigned long bytes,
2071		       pcm_transfer_f transfer, bool in_kernel)
2072{
2073	struct iov_iter iter;
2074	int err, type;
2075
2076	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2077		type = ITER_SOURCE;
2078	else
2079		type = ITER_DEST;
2080
2081	if (in_kernel) {
2082		struct kvec kvec = { data, bytes };
2083
2084		iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2085		return transfer(substream, c, hwoff, &iter, bytes);
2086	}
2087
2088	err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2089	if (err)
2090		return err;
2091	return transfer(substream, c, hwoff, &iter, bytes);
2092}
2093
2094/* call transfer function with the converted pointers and sizes;
2095 * for interleaved mode, it's one shot for all samples
2096 */
2097static int interleaved_copy(struct snd_pcm_substream *substream,
2098			    snd_pcm_uframes_t hwoff, void *data,
2099			    snd_pcm_uframes_t off,
2100			    snd_pcm_uframes_t frames,
2101			    pcm_transfer_f transfer,
2102			    bool in_kernel)
2103{
2104	struct snd_pcm_runtime *runtime = substream->runtime;
2105
2106	/* convert to bytes */
2107	hwoff = frames_to_bytes(runtime, hwoff);
2108	off = frames_to_bytes(runtime, off);
2109	frames = frames_to_bytes(runtime, frames);
2110
2111	return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2112			   in_kernel);
2113}
2114
2115/* call transfer function with the converted pointers and sizes for each
2116 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2117 */
2118static int noninterleaved_copy(struct snd_pcm_substream *substream,
2119			       snd_pcm_uframes_t hwoff, void *data,
2120			       snd_pcm_uframes_t off,
2121			       snd_pcm_uframes_t frames,
2122			       pcm_transfer_f transfer,
2123			       bool in_kernel)
2124{
2125	struct snd_pcm_runtime *runtime = substream->runtime;
2126	int channels = runtime->channels;
2127	void **bufs = data;
2128	int c, err;
2129
2130	/* convert to bytes; note that it's not frames_to_bytes() here.
2131	 * in non-interleaved mode, we copy for each channel, thus
2132	 * each copy is n_samples bytes x channels = whole frames.
2133	 */
2134	off = samples_to_bytes(runtime, off);
2135	frames = samples_to_bytes(runtime, frames);
2136	hwoff = samples_to_bytes(runtime, hwoff);
2137	for (c = 0; c < channels; ++c, ++bufs) {
2138		if (!data || !*bufs)
2139			err = fill_silence(substream, c, hwoff, NULL, frames);
2140		else
2141			err = do_transfer(substream, c, hwoff, *bufs + off,
2142					  frames, transfer, in_kernel);
2143		if (err < 0)
2144			return err;
2145	}
2146	return 0;
2147}
2148
2149/* fill silence on the given buffer position;
2150 * called from snd_pcm_playback_silence()
2151 */
2152static int fill_silence_frames(struct snd_pcm_substream *substream,
2153			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2154{
2155	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2156	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2157		return interleaved_copy(substream, off, NULL, 0, frames,
2158					fill_silence, true);
2159	else
2160		return noninterleaved_copy(substream, off, NULL, 0, frames,
2161					   fill_silence, true);
2162}
2163
2164/* sanity-check for read/write methods */
2165static int pcm_sanity_check(struct snd_pcm_substream *substream)
2166{
2167	struct snd_pcm_runtime *runtime;
2168	if (PCM_RUNTIME_CHECK(substream))
2169		return -ENXIO;
2170	runtime = substream->runtime;
2171	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2172		return -EINVAL;
2173	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2174		return -EBADFD;
2175	return 0;
2176}
2177
2178static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2179{
2180	switch (runtime->state) {
2181	case SNDRV_PCM_STATE_PREPARED:
2182	case SNDRV_PCM_STATE_RUNNING:
2183	case SNDRV_PCM_STATE_PAUSED:
2184		return 0;
2185	case SNDRV_PCM_STATE_XRUN:
2186		return -EPIPE;
2187	case SNDRV_PCM_STATE_SUSPENDED:
2188		return -ESTRPIPE;
2189	default:
2190		return -EBADFD;
2191	}
2192}
2193
2194/* update to the given appl_ptr and call ack callback if needed;
2195 * when an error is returned, roll back to the original value
2196 */
2197int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2198			   snd_pcm_uframes_t appl_ptr)
2199{
2200	struct snd_pcm_runtime *runtime = substream->runtime;
2201	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2202	snd_pcm_sframes_t diff;
2203	int ret;
2204
2205	if (old_appl_ptr == appl_ptr)
2206		return 0;
2207
2208	if (appl_ptr >= runtime->boundary)
2209		return -EINVAL;
2210	/*
2211	 * check if a rewind is requested by the application
2212	 */
2213	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2214		diff = appl_ptr - old_appl_ptr;
2215		if (diff >= 0) {
2216			if (diff > runtime->buffer_size)
2217				return -EINVAL;
2218		} else {
2219			if (runtime->boundary + diff > runtime->buffer_size)
2220				return -EINVAL;
2221		}
2222	}
2223
2224	runtime->control->appl_ptr = appl_ptr;
2225	if (substream->ops->ack) {
2226		ret = substream->ops->ack(substream);
2227		if (ret < 0) {
2228			runtime->control->appl_ptr = old_appl_ptr;
2229			if (ret == -EPIPE)
2230				__snd_pcm_xrun(substream);
2231			return ret;
2232		}
2233	}
2234
2235	trace_applptr(substream, old_appl_ptr, appl_ptr);
2236
2237	return 0;
2238}
2239
2240/* the common loop for read/write data */
2241snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2242				     void *data, bool interleaved,
2243				     snd_pcm_uframes_t size, bool in_kernel)
2244{
2245	struct snd_pcm_runtime *runtime = substream->runtime;
2246	snd_pcm_uframes_t xfer = 0;
2247	snd_pcm_uframes_t offset = 0;
2248	snd_pcm_uframes_t avail;
2249	pcm_copy_f writer;
2250	pcm_transfer_f transfer;
2251	bool nonblock;
2252	bool is_playback;
2253	int err;
2254
2255	err = pcm_sanity_check(substream);
2256	if (err < 0)
2257		return err;
2258
2259	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2260	if (interleaved) {
2261		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2262		    runtime->channels > 1)
2263			return -EINVAL;
2264		writer = interleaved_copy;
2265	} else {
2266		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2267			return -EINVAL;
2268		writer = noninterleaved_copy;
2269	}
2270
2271	if (!data) {
2272		if (is_playback)
2273			transfer = fill_silence;
2274		else
2275			return -EINVAL;
2276	} else {
2277		if (substream->ops->copy)
2278			transfer = substream->ops->copy;
2279		else
2280			transfer = is_playback ?
2281				default_write_copy : default_read_copy;
2282	}
2283
2284	if (size == 0)
2285		return 0;
2286
2287	nonblock = !!(substream->f_flags & O_NONBLOCK);
2288
2289	snd_pcm_stream_lock_irq(substream);
2290	err = pcm_accessible_state(runtime);
2291	if (err < 0)
2292		goto _end_unlock;
2293
2294	runtime->twake = runtime->control->avail_min ? : 1;
2295	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2296		snd_pcm_update_hw_ptr(substream);
2297
2298	/*
2299	 * If size < start_threshold, we may wait indefinitely here; another
2300	 * thread may start the capture stream meanwhile.
2301	 */
2302	if (!is_playback &&
2303	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2304	    size >= runtime->start_threshold) {
2305		err = snd_pcm_start(substream);
2306		if (err < 0)
2307			goto _end_unlock;
2308	}
2309
2310	avail = snd_pcm_avail(substream);
2311
2312	while (size > 0) {
2313		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2314		snd_pcm_uframes_t cont;
2315		if (!avail) {
2316			if (!is_playback &&
2317			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2318				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2319				goto _end_unlock;
2320			}
2321			if (nonblock) {
2322				err = -EAGAIN;
2323				goto _end_unlock;
2324			}
2325			runtime->twake = min_t(snd_pcm_uframes_t, size,
2326					runtime->control->avail_min ? : 1);
2327			err = wait_for_avail(substream, &avail);
2328			if (err < 0)
2329				goto _end_unlock;
2330			if (!avail)
2331				continue; /* draining */
2332		}
2333		frames = size > avail ? avail : size;
2334		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2335		appl_ofs = appl_ptr % runtime->buffer_size;
2336		cont = runtime->buffer_size - appl_ofs;
2337		if (frames > cont)
2338			frames = cont;
2339		if (snd_BUG_ON(!frames)) {
2340			err = -EINVAL;
2341			goto _end_unlock;
2342		}
2343		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2344			err = -EBUSY;
2345			goto _end_unlock;
2346		}
2347		snd_pcm_stream_unlock_irq(substream);
2348		if (!is_playback)
2349			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2350		err = writer(substream, appl_ofs, data, offset, frames,
2351			     transfer, in_kernel);
2352		if (is_playback)
2353			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2354		snd_pcm_stream_lock_irq(substream);
2355		atomic_dec(&runtime->buffer_accessing);
2356		if (err < 0)
2357			goto _end_unlock;
2358		err = pcm_accessible_state(runtime);
2359		if (err < 0)
2360			goto _end_unlock;
2361		appl_ptr += frames;
2362		if (appl_ptr >= runtime->boundary)
2363			appl_ptr -= runtime->boundary;
2364		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2365		if (err < 0)
2366			goto _end_unlock;
2367
2368		offset += frames;
2369		size -= frames;
2370		xfer += frames;
2371		avail -= frames;
2372		if (is_playback &&
2373		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2374		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2375			err = snd_pcm_start(substream);
2376			if (err < 0)
2377				goto _end_unlock;
2378		}
2379	}
2380 _end_unlock:
2381	runtime->twake = 0;
2382	if (xfer > 0 && err >= 0)
2383		snd_pcm_update_state(substream, runtime);
2384	snd_pcm_stream_unlock_irq(substream);
2385	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2386}
2387EXPORT_SYMBOL(__snd_pcm_lib_xfer);
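/*
 * Editorial note (not part of the original file): both the user-space
 * read/write paths and the in-kernel helpers funnel into this loop; a
 * kernel-space interleaved write, for instance, is expected to boil down
 * to something like:
 *
 *	frames = __snd_pcm_lib_xfer(substream, (void *)buf, true,
 *				    frames, true);
 *
 * with interleaved = true and in_kernel = true.
 */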
2388
2389/*
2390 * standard channel mapping helpers
2391 */
2392
2393/* default channel maps for multi-channel playback, up to 8 channels */
2394const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2395	{ .channels = 1,
2396	  .map = { SNDRV_CHMAP_MONO } },
2397	{ .channels = 2,
2398	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2399	{ .channels = 4,
2400	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2401		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2402	{ .channels = 6,
2403	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2404		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2405		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2406	{ .channels = 8,
2407	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2408		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2409		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2410		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2411	{ }
2412};
2413EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2414
2415/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2416const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2417	{ .channels = 1,
2418	  .map = { SNDRV_CHMAP_MONO } },
2419	{ .channels = 2,
2420	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2421	{ .channels = 4,
2422	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2423		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2424	{ .channels = 6,
2425	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2426		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2427		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2428	{ .channels = 8,
2429	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2430		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2431		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2432		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2433	{ }
2434};
2435EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2436
2437static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2438{
2439	if (ch > info->max_channels)
2440		return false;
2441	return !info->channel_mask || (info->channel_mask & (1U << ch));
2442}
2443
2444static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2445			      struct snd_ctl_elem_info *uinfo)
2446{
2447	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2448
2449	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2450	uinfo->count = info->max_channels;
2451	uinfo->value.integer.min = 0;
2452	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2453	return 0;
2454}
2455
2456/* get callback for channel map ctl element
2457 * stores the channel positions of the first map matching the current channel count
2458 */
2459static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2460			     struct snd_ctl_elem_value *ucontrol)
2461{
2462	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2463	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2464	struct snd_pcm_substream *substream;
2465	const struct snd_pcm_chmap_elem *map;
2466
2467	if (!info->chmap)
2468		return -EINVAL;
2469	substream = snd_pcm_chmap_substream(info, idx);
2470	if (!substream)
2471		return -ENODEV;
2472	memset(ucontrol->value.integer.value, 0,
2473	       sizeof(long) * info->max_channels);
2474	if (!substream->runtime)
2475		return 0; /* no channels set */
2476	for (map = info->chmap; map->channels; map++) {
2477		int i;
2478		if (map->channels == substream->runtime->channels &&
2479		    valid_chmap_channels(info, map->channels)) {
2480			for (i = 0; i < map->channels; i++)
2481				ucontrol->value.integer.value[i] = map->map[i];
2482			return 0;
2483		}
2484	}
2485	return -EINVAL;
2486}
2487
2488/* tlv callback for channel map ctl element
2489 * expands the pre-defined channel maps in the form of TLV
2490 */
2491static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2492			     unsigned int size, unsigned int __user *tlv)
2493{
2494	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2495	const struct snd_pcm_chmap_elem *map;
2496	unsigned int __user *dst;
2497	int c, count = 0;
2498
2499	if (!info->chmap)
2500		return -EINVAL;
2501	if (size < 8)
2502		return -ENOMEM;
2503	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2504		return -EFAULT;
2505	size -= 8;
2506	dst = tlv + 2;
2507	for (map = info->chmap; map->channels; map++) {
2508		int chs_bytes = map->channels * 4;
2509		if (!valid_chmap_channels(info, map->channels))
2510			continue;
2511		if (size < 8)
2512			return -ENOMEM;
2513		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2514		    put_user(chs_bytes, dst + 1))
2515			return -EFAULT;
2516		dst += 2;
2517		size -= 8;
2518		count += 8;
2519		if (size < chs_bytes)
2520			return -ENOMEM;
2521		size -= chs_bytes;
2522		count += chs_bytes;
2523		for (c = 0; c < map->channels; c++) {
2524			if (put_user(map->map[c], dst))
2525				return -EFAULT;
2526			dst++;
2527		}
2528	}
2529	if (put_user(count, tlv + 1))
2530		return -EFAULT;
2531	return 0;
2532}
2533
2534static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2535{
2536	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2537	info->pcm->streams[info->stream].chmap_kctl = NULL;
2538	kfree(info);
2539}
2540
2541/**
2542 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2543 * @pcm: the assigned PCM instance
2544 * @stream: stream direction
2545 * @chmap: channel map elements (for query)
2546 * @max_channels: the max number of channels for the stream
2547 * @private_value: the value passed to each kcontrol's private_value field
2548 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2549 *
2550 * Create channel-mapping control elements assigned to the given PCM stream(s).
2551 * Return: Zero if successful, or a negative error value.
2552 */
2553int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2554			   const struct snd_pcm_chmap_elem *chmap,
2555			   int max_channels,
2556			   unsigned long private_value,
2557			   struct snd_pcm_chmap **info_ret)
2558{
2559	struct snd_pcm_chmap *info;
2560	struct snd_kcontrol_new knew = {
2561		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2562		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2563			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2564			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2565		.info = pcm_chmap_ctl_info,
2566		.get = pcm_chmap_ctl_get,
2567		.tlv.c = pcm_chmap_ctl_tlv,
2568	};
2569	int err;
2570
2571	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2572		return -EBUSY;
2573	info = kzalloc(sizeof(*info), GFP_KERNEL);
2574	if (!info)
2575		return -ENOMEM;
2576	info->pcm = pcm;
2577	info->stream = stream;
2578	info->chmap = chmap;
2579	info->max_channels = max_channels;
2580	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2581		knew.name = "Playback Channel Map";
2582	else
2583		knew.name = "Capture Channel Map";
2584	knew.device = pcm->device;
2585	knew.count = pcm->streams[stream].substream_count;
2586	knew.private_value = private_value;
2587	info->kctl = snd_ctl_new1(&knew, info);
2588	if (!info->kctl) {
2589		kfree(info);
2590		return -ENOMEM;
2591	}
2592	info->kctl->private_free = pcm_chmap_ctl_private_free;
2593	err = snd_ctl_add(pcm->card, info->kctl);
2594	if (err < 0)
2595		return err;
2596	pcm->streams[stream].chmap_kctl = info->kctl;
2597	if (info_ret)
2598		*info_ret = info;
2599	return 0;
2600}
2601EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
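/*
 * Editorial usage sketch (not part of the original file): registering the
 * standard channel maps above for an 8-channel playback stream, typically
 * done once at probe time:
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */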
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Digital Audio (PCM) abstract layer
   4 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
   5 *                   Abramo Bagnara <abramo@alsa-project.org>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/sched/signal.h>
  10#include <linux/time.h>
  11#include <linux/math64.h>
  12#include <linux/export.h>
  13#include <sound/core.h>
  14#include <sound/control.h>
  15#include <sound/tlv.h>
  16#include <sound/info.h>
  17#include <sound/pcm.h>
  18#include <sound/pcm_params.h>
  19#include <sound/timer.h>
  20
  21#include "pcm_local.h"
  22
  23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
  24#define CREATE_TRACE_POINTS
  25#include "pcm_trace.h"
  26#else
  27#define trace_hwptr(substream, pos, in_interrupt)
  28#define trace_xrun(substream)
  29#define trace_hw_ptr_error(substream, reason)
  30#define trace_applptr(substream, prev, curr)
  31#endif
  32
  33static int fill_silence_frames(struct snd_pcm_substream *substream,
  34			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
  35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  36/*
  37 * fill ring buffer with silence
  38 * runtime->silence_start: starting pointer to silence area
  39 * runtime->silence_filled: size filled with silence
  40 * runtime->silence_threshold: threshold from application
  41 * runtime->silence_size: maximal size from application
  42 *
  43 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
  44 */
  45void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
  46{
  47	struct snd_pcm_runtime *runtime = substream->runtime;
  48	snd_pcm_uframes_t frames, ofs, transfer;
  49	int err;
  50
  51	if (runtime->silence_size < runtime->boundary) {
  52		snd_pcm_sframes_t noise_dist, n;
  53		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
  54		if (runtime->silence_start != appl_ptr) {
  55			n = appl_ptr - runtime->silence_start;
  56			if (n < 0)
  57				n += runtime->boundary;
  58			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
  59				runtime->silence_filled -= n;
  60			else
  61				runtime->silence_filled = 0;
  62			runtime->silence_start = appl_ptr;
  63		}
  64		if (runtime->silence_filled >= runtime->buffer_size)
  65			return;
  66		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
  67		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
  68			return;
  69		frames = runtime->silence_threshold - noise_dist;
  70		if (frames > runtime->silence_size)
  71			frames = runtime->silence_size;
  72	} else {
  73		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
  74			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
  75			if (avail > runtime->buffer_size)
  76				avail = runtime->buffer_size;
  77			runtime->silence_filled = avail > 0 ? avail : 0;
  78			runtime->silence_start = (runtime->status->hw_ptr +
  79						  runtime->silence_filled) %
  80						 runtime->boundary;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  81		} else {
  82			ofs = runtime->status->hw_ptr;
  83			frames = new_hw_ptr - ofs;
  84			if ((snd_pcm_sframes_t)frames < 0)
  85				frames += runtime->boundary;
  86			runtime->silence_filled -= frames;
  87			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
  88				runtime->silence_filled = 0;
  89				runtime->silence_start = new_hw_ptr;
  90			} else {
  91				runtime->silence_start = ofs;
  92			}
  93		}
 
 
 
 
  94		frames = runtime->buffer_size - runtime->silence_filled;
  95	}
  96	if (snd_BUG_ON(frames > runtime->buffer_size))
  97		return;
  98	if (frames == 0)
  99		return;
 100	ofs = runtime->silence_start % runtime->buffer_size;
 101	while (frames > 0) {
 102		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
 103		err = fill_silence_frames(substream, ofs, transfer);
 104		snd_BUG_ON(err < 0);
 105		runtime->silence_filled += transfer;
 106		frames -= transfer;
 107		ofs = 0;
 108	}
 
 109}
 110
 111#ifdef CONFIG_SND_DEBUG
 112void snd_pcm_debug_name(struct snd_pcm_substream *substream,
 113			   char *name, size_t len)
 114{
 115	snprintf(name, len, "pcmC%dD%d%c:%d",
 116		 substream->pcm->card->number,
 117		 substream->pcm->device,
 118		 substream->stream ? 'c' : 'p',
 119		 substream->number);
 120}
 121EXPORT_SYMBOL(snd_pcm_debug_name);
 122#endif
 123
 124#define XRUN_DEBUG_BASIC	(1<<0)
 125#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
 126#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
 127
 128#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 129
 130#define xrun_debug(substream, mask) \
 131			((substream)->pstr->xrun_debug & (mask))
 132#else
 133#define xrun_debug(substream, mask)	0
 134#endif
 135
 136#define dump_stack_on_xrun(substream) do {			\
 137		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
 138			dump_stack();				\
 139	} while (0)
 140
 141/* call with stream lock held */
 142void __snd_pcm_xrun(struct snd_pcm_substream *substream)
 143{
 144	struct snd_pcm_runtime *runtime = substream->runtime;
 145
 146	trace_xrun(substream);
 147	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 148		struct timespec64 tstamp;
 149
 150		snd_pcm_gettime(runtime, &tstamp);
 151		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
 152		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
 153	}
 154	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 155	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
 156		char name[16];
 157		snd_pcm_debug_name(substream, name, sizeof(name));
 158		pcm_warn(substream->pcm, "XRUN: %s\n", name);
 159		dump_stack_on_xrun(substream);
 160	}
 161}
 162
 163#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 164#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
 165	do {								\
 166		trace_hw_ptr_error(substream, reason);	\
 167		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
 168			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
 169					   (in_interrupt) ? 'Q' : 'P', ##args);	\
 170			dump_stack_on_xrun(substream);			\
 171		}							\
 172	} while (0)
 173
 174#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
 175
 176#define hw_ptr_error(substream, fmt, args...) do { } while (0)
 177
 178#endif
 179
 180int snd_pcm_update_state(struct snd_pcm_substream *substream,
 181			 struct snd_pcm_runtime *runtime)
 182{
 183	snd_pcm_uframes_t avail;
 184
 185	avail = snd_pcm_avail(substream);
 186	if (avail > runtime->avail_max)
 187		runtime->avail_max = avail;
 188	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
 189		if (avail >= runtime->buffer_size) {
 190			snd_pcm_drain_done(substream);
 191			return -EPIPE;
 192		}
 193	} else {
 194		if (avail >= runtime->stop_threshold) {
 195			__snd_pcm_xrun(substream);
 196			return -EPIPE;
 197		}
 198	}
 199	if (runtime->twake) {
 200		if (avail >= runtime->twake)
 201			wake_up(&runtime->tsleep);
 202	} else if (avail >= runtime->control->avail_min)
 203		wake_up(&runtime->sleep);
 204	return 0;
 205}
 206
 207static void update_audio_tstamp(struct snd_pcm_substream *substream,
 208				struct timespec64 *curr_tstamp,
 209				struct timespec64 *audio_tstamp)
 210{
 211	struct snd_pcm_runtime *runtime = substream->runtime;
 212	u64 audio_frames, audio_nsecs;
 213	struct timespec64 driver_tstamp;
 214
 215	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
 216		return;
 217
 218	if (!(substream->ops->get_time_info) ||
 219		(runtime->audio_tstamp_report.actual_type ==
 220			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 221
 222		/*
 223		 * provide audio timestamp derived from pointer position
 224		 * add delay only if requested
 225		 */
 226
 227		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
 228
 229		if (runtime->audio_tstamp_config.report_delay) {
 230			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 231				audio_frames -=  runtime->delay;
 232			else
 233				audio_frames +=  runtime->delay;
 234		}
 235		audio_nsecs = div_u64(audio_frames * 1000000000LL,
 236				runtime->rate);
 237		*audio_tstamp = ns_to_timespec64(audio_nsecs);
 238	}
 239
 240	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
 241	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
 242		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
 243		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
 244		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
 245		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
 246	}
 247
 248
 249	/*
 250	 * re-take a driver timestamp to let apps detect if the reference tstamp
 251	 * read by low-level hardware was provided with a delay
 252	 */
 253	snd_pcm_gettime(substream->runtime, &driver_tstamp);
 254	runtime->driver_tstamp = driver_tstamp;
 255}
 256
 257static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 258				  unsigned int in_interrupt)
 259{
 260	struct snd_pcm_runtime *runtime = substream->runtime;
 261	snd_pcm_uframes_t pos;
 262	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
 263	snd_pcm_sframes_t hdelta, delta;
 264	unsigned long jdelta;
 265	unsigned long curr_jiffies;
 266	struct timespec64 curr_tstamp;
 267	struct timespec64 audio_tstamp;
 268	int crossed_boundary = 0;
 269
 270	old_hw_ptr = runtime->status->hw_ptr;
 271
 272	/*
 273	 * group pointer, time and jiffies reads to allow for more
 274	 * accurate correlations/corrections.
 275	 * The values are stored at the end of this routine after
 276	 * corrections for hw_ptr position
 277	 */
 278	pos = substream->ops->pointer(substream);
 279	curr_jiffies = jiffies;
 280	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 281		if ((substream->ops->get_time_info) &&
 282			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 283			substream->ops->get_time_info(substream, &curr_tstamp,
 284						&audio_tstamp,
 285						&runtime->audio_tstamp_config,
 286						&runtime->audio_tstamp_report);
 287
 288			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
 289			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
 290				snd_pcm_gettime(runtime, &curr_tstamp);
 291		} else
 292			snd_pcm_gettime(runtime, &curr_tstamp);
 293	}
 294
 295	if (pos == SNDRV_PCM_POS_XRUN) {
 296		__snd_pcm_xrun(substream);
 297		return -EPIPE;
 298	}
 299	if (pos >= runtime->buffer_size) {
 300		if (printk_ratelimit()) {
 301			char name[16];
 302			snd_pcm_debug_name(substream, name, sizeof(name));
 303			pcm_err(substream->pcm,
 304				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
 305				name, pos, runtime->buffer_size,
 306				runtime->period_size);
 307		}
 308		pos = 0;
 309	}
 310	pos -= pos % runtime->min_align;
 311	trace_hwptr(substream, pos, in_interrupt);
 312	hw_base = runtime->hw_ptr_base;
 313	new_hw_ptr = hw_base + pos;
 314	if (in_interrupt) {
 315		/* we know that one period was processed */
 316		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
 317		delta = runtime->hw_ptr_interrupt + runtime->period_size;
 318		if (delta > new_hw_ptr) {
 319			/* check for double acknowledged interrupts */
 320			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 321			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
 322				hw_base += runtime->buffer_size;
 323				if (hw_base >= runtime->boundary) {
 324					hw_base = 0;
 325					crossed_boundary++;
 326				}
 327				new_hw_ptr = hw_base + pos;
 328				goto __delta;
 329			}
 330		}
 331	}
 332	/* new_hw_ptr might be lower than old_hw_ptr in case when */
 333	/* pointer crosses the end of the ring buffer */
 334	if (new_hw_ptr < old_hw_ptr) {
 335		hw_base += runtime->buffer_size;
 336		if (hw_base >= runtime->boundary) {
 337			hw_base = 0;
 338			crossed_boundary++;
 339		}
 340		new_hw_ptr = hw_base + pos;
 341	}
 342      __delta:
 343	delta = new_hw_ptr - old_hw_ptr;
 344	if (delta < 0)
 345		delta += runtime->boundary;
 346
 347	if (runtime->no_period_wakeup) {
 348		snd_pcm_sframes_t xrun_threshold;
 349		/*
 350		 * Without regular period interrupts, we have to check
 351		 * the elapsed time to detect xruns.
 352		 */
 353		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 354		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
 355			goto no_delta_check;
 356		hdelta = jdelta - delta * HZ / runtime->rate;
 357		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
 358		while (hdelta > xrun_threshold) {
 359			delta += runtime->buffer_size;
 360			hw_base += runtime->buffer_size;
 361			if (hw_base >= runtime->boundary) {
 362				hw_base = 0;
 363				crossed_boundary++;
 364			}
 365			new_hw_ptr = hw_base + pos;
 366			hdelta -= runtime->hw_ptr_buffer_jiffies;
 367		}
 368		goto no_delta_check;
 369	}
 370
 371	/* something must be really wrong */
 372	if (delta >= runtime->buffer_size + runtime->period_size) {
 373		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
 374			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 375			     substream->stream, (long)pos,
 376			     (long)new_hw_ptr, (long)old_hw_ptr);
 377		return 0;
 378	}
 379
 380	/* Do jiffies check only in xrun_debug mode */
 381	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
 382		goto no_jiffies_check;
 383
 384	/* Skip the jiffies check for hardwares with BATCH flag.
 385	 * Such hardware usually just increases the position at each IRQ,
 386	 * thus it can't give any strange position.
 387	 */
 388	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
 389		goto no_jiffies_check;
 390	hdelta = delta;
 391	if (hdelta < runtime->delay)
 392		goto no_jiffies_check;
 393	hdelta -= runtime->delay;
 394	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 395	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 396		delta = jdelta /
 397			(((runtime->period_size * HZ) / runtime->rate)
 398								+ HZ/100);
 399		/* move new_hw_ptr according jiffies not pos variable */
 400		new_hw_ptr = old_hw_ptr;
 401		hw_base = delta;
 402		/* use loop to avoid checks for delta overflows */
 403		/* the delta value is small or zero in most cases */
 404		while (delta > 0) {
 405			new_hw_ptr += runtime->period_size;
 406			if (new_hw_ptr >= runtime->boundary) {
 407				new_hw_ptr -= runtime->boundary;
 408				crossed_boundary--;
 409			}
 410			delta--;
 411		}
 412		/* align hw_base to buffer_size */
 413		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
 414			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
 415			     (long)pos, (long)hdelta,
 416			     (long)runtime->period_size, jdelta,
 417			     ((hdelta * HZ) / runtime->rate), hw_base,
 418			     (unsigned long)old_hw_ptr,
 419			     (unsigned long)new_hw_ptr);
 420		/* reset values to proper state */
 421		delta = 0;
 422		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
 423	}
 424 no_jiffies_check:
 425	if (delta > runtime->period_size + runtime->period_size / 2) {
 426		hw_ptr_error(substream, in_interrupt,
 427			     "Lost interrupts?",
 428			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 429			     substream->stream, (long)delta,
 430			     (long)new_hw_ptr,
 431			     (long)old_hw_ptr);
 432	}
 433
 434 no_delta_check:
 435	if (runtime->status->hw_ptr == new_hw_ptr) {
 436		runtime->hw_ptr_jiffies = curr_jiffies;
 437		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 438		return 0;
 439	}
 440
 441	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 442	    runtime->silence_size > 0)
 443		snd_pcm_playback_silence(substream, new_hw_ptr);
 444
 445	if (in_interrupt) {
 446		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
 447		if (delta < 0)
 448			delta += runtime->boundary;
 449		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
 450		runtime->hw_ptr_interrupt += delta;
 451		if (runtime->hw_ptr_interrupt >= runtime->boundary)
 452			runtime->hw_ptr_interrupt -= runtime->boundary;
 453	}
 454	runtime->hw_ptr_base = hw_base;
 455	runtime->status->hw_ptr = new_hw_ptr;
 456	runtime->hw_ptr_jiffies = curr_jiffies;
 457	if (crossed_boundary) {
 458		snd_BUG_ON(crossed_boundary != 1);
 459		runtime->hw_ptr_wrap += runtime->boundary;
 460	}
 461
 462	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 463
 464	return snd_pcm_update_state(substream, runtime);
 465}
 466
 467/* CAUTION: call it with irq disabled */
 468int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
 469{
 470	return snd_pcm_update_hw_ptr0(substream, 0);
 471}
 472
 473/**
 474 * snd_pcm_set_ops - set the PCM operators
 475 * @pcm: the pcm instance
 476 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 477 * @ops: the operator table
 478 *
 479 * Sets the given PCM operators to the pcm instance.
 480 */
 481void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
 482		     const struct snd_pcm_ops *ops)
 483{
 484	struct snd_pcm_str *stream = &pcm->streams[direction];
 485	struct snd_pcm_substream *substream;
 486	
 487	for (substream = stream->substream; substream != NULL; substream = substream->next)
 488		substream->ops = ops;
 489}
 490EXPORT_SYMBOL(snd_pcm_set_ops);
 491
 492/**
 493 * snd_pcm_set_sync - set the PCM sync id
 494 * @substream: the pcm substream
 495 *
 496 * Sets the PCM sync identifier for the card.
 497 */
 498void snd_pcm_set_sync(struct snd_pcm_substream *substream)
 499{
 500	struct snd_pcm_runtime *runtime = substream->runtime;
 501	
 502	runtime->sync.id32[0] = substream->pcm->card->number;
 503	runtime->sync.id32[1] = -1;
 504	runtime->sync.id32[2] = -1;
 505	runtime->sync.id32[3] = -1;
 506}
 507EXPORT_SYMBOL(snd_pcm_set_sync);
 508
 509/*
 510 *  Standard ioctl routine
 511 */
 512
 513static inline unsigned int div32(unsigned int a, unsigned int b, 
 514				 unsigned int *r)
 515{
 516	if (b == 0) {
 517		*r = 0;
 518		return UINT_MAX;
 519	}
 520	*r = a % b;
 521	return a / b;
 522}
 523
 524static inline unsigned int div_down(unsigned int a, unsigned int b)
 525{
 526	if (b == 0)
 527		return UINT_MAX;
 528	return a / b;
 529}
 530
 531static inline unsigned int div_up(unsigned int a, unsigned int b)
 532{
 533	unsigned int r;
 534	unsigned int q;
 535	if (b == 0)
 536		return UINT_MAX;
 537	q = div32(a, b, &r);
 538	if (r)
 539		++q;
 540	return q;
 541}
 542
 543static inline unsigned int mul(unsigned int a, unsigned int b)
 544{
 545	if (a == 0)
 546		return 0;
 547	if (div_down(UINT_MAX, a) < b)
 548		return UINT_MAX;
 549	return a * b;
 550}
 551
 552static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 553				    unsigned int c, unsigned int *r)
 554{
 555	u_int64_t n = (u_int64_t) a * b;
 556	if (c == 0) {
 557		*r = 0;
 558		return UINT_MAX;
 559	}
 560	n = div_u64_rem(n, c, r);
 561	if (n >= UINT_MAX) {
 562		*r = 0;
 563		return UINT_MAX;
 564	}
 565	return n;
 566}
 567
 568/**
 569 * snd_interval_refine - refine the interval value of configurator
 570 * @i: the interval value to refine
 571 * @v: the interval value to refer to
 572 *
 573 * Refines the interval value with the reference value.
 574 * The interval is changed to the range satisfying both intervals.
 575 * The interval status (min, max, integer, etc.) are evaluated.
 576 *
 577 * Return: Positive if the value is changed, zero if it's not changed, or a
 578 * negative error code.
 579 */
 580int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
 581{
 582	int changed = 0;
 583	if (snd_BUG_ON(snd_interval_empty(i)))
 584		return -EINVAL;
 585	if (i->min < v->min) {
 586		i->min = v->min;
 587		i->openmin = v->openmin;
 588		changed = 1;
 589	} else if (i->min == v->min && !i->openmin && v->openmin) {
 590		i->openmin = 1;
 591		changed = 1;
 592	}
 593	if (i->max > v->max) {
 594		i->max = v->max;
 595		i->openmax = v->openmax;
 596		changed = 1;
 597	} else if (i->max == v->max && !i->openmax && v->openmax) {
 598		i->openmax = 1;
 599		changed = 1;
 600	}
 601	if (!i->integer && v->integer) {
 602		i->integer = 1;
 603		changed = 1;
 604	}
 605	if (i->integer) {
 606		if (i->openmin) {
 607			i->min++;
 608			i->openmin = 0;
 609		}
 610		if (i->openmax) {
 611			i->max--;
 612			i->openmax = 0;
 613		}
 614	} else if (!i->openmin && !i->openmax && i->min == i->max)
 615		i->integer = 1;
 616	if (snd_interval_checkempty(i)) {
 617		snd_interval_none(i);
 618		return -EINVAL;
 619	}
 620	return changed;
 621}
 622EXPORT_SYMBOL(snd_interval_refine);
 623
 624static int snd_interval_refine_first(struct snd_interval *i)
 625{
 626	const unsigned int last_max = i->max;
 627
 628	if (snd_BUG_ON(snd_interval_empty(i)))
 629		return -EINVAL;
 630	if (snd_interval_single(i))
 631		return 0;
 632	i->max = i->min;
 633	if (i->openmin)
 634		i->max++;
 635	/* only exclude max value if also excluded before refine */
 636	i->openmax = (i->openmax && i->max >= last_max);
 637	return 1;
 638}
 639
 640static int snd_interval_refine_last(struct snd_interval *i)
 641{
 642	const unsigned int last_min = i->min;
 643
 644	if (snd_BUG_ON(snd_interval_empty(i)))
 645		return -EINVAL;
 646	if (snd_interval_single(i))
 647		return 0;
 648	i->min = i->max;
 649	if (i->openmax)
 650		i->min--;
 651	/* only exclude min value if also excluded before refine */
 652	i->openmin = (i->openmin && i->min <= last_min);
 653	return 1;
 654}
 655
 656void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 657{
 658	if (a->empty || b->empty) {
 659		snd_interval_none(c);
 660		return;
 661	}
 662	c->empty = 0;
 663	c->min = mul(a->min, b->min);
 664	c->openmin = (a->openmin || b->openmin);
 665	c->max = mul(a->max,  b->max);
 666	c->openmax = (a->openmax || b->openmax);
 667	c->integer = (a->integer && b->integer);
 668}
 669
 670/**
 671 * snd_interval_div - refine the interval value with division
 672 * @a: dividend
 673 * @b: divisor
 674 * @c: quotient
 675 *
 676 * c = a / b
 677 *
 678 * Returns non-zero if the value is changed, zero if not changed.
 679 */
 680void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 681{
 682	unsigned int r;
 683	if (a->empty || b->empty) {
 684		snd_interval_none(c);
 685		return;
 686	}
 687	c->empty = 0;
 688	c->min = div32(a->min, b->max, &r);
 689	c->openmin = (r || a->openmin || b->openmax);
 690	if (b->min > 0) {
 691		c->max = div32(a->max, b->min, &r);
 692		if (r) {
 693			c->max++;
 694			c->openmax = 1;
 695		} else
 696			c->openmax = (a->openmax || b->openmin);
 697	} else {
 698		c->max = UINT_MAX;
 699		c->openmax = 0;
 700	}
 701	c->integer = 0;
 702}
 703
 704/**
 705 * snd_interval_muldivk - refine the interval value
 706 * @a: dividend 1
 707 * @b: dividend 2
 708 * @k: divisor (as integer)
 709 * @c: result
 710  *
 711 * c = a * b / k
 712 *
 713 * Returns non-zero if the value is changed, zero if not changed.
 714 */
 715void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
 716		      unsigned int k, struct snd_interval *c)
 717{
 718	unsigned int r;
 719	if (a->empty || b->empty) {
 720		snd_interval_none(c);
 721		return;
 722	}
 723	c->empty = 0;
 724	c->min = muldiv32(a->min, b->min, k, &r);
 725	c->openmin = (r || a->openmin || b->openmin);
 726	c->max = muldiv32(a->max, b->max, k, &r);
 727	if (r) {
 728		c->max++;
 729		c->openmax = 1;
 730	} else
 731		c->openmax = (a->openmax || b->openmax);
 732	c->integer = 0;
 733}
 734
 735/**
 736 * snd_interval_mulkdiv - refine the interval value
 737 * @a: dividend 1
 738 * @k: dividend 2 (as integer)
 739 * @b: divisor
 740 * @c: result
 741 *
 742 * c = a * k / b
 743 *
 744 * Returns non-zero if the value is changed, zero if not changed.
 745 */
 746void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
 747		      const struct snd_interval *b, struct snd_interval *c)
 748{
 749	unsigned int r;
 750	if (a->empty || b->empty) {
 751		snd_interval_none(c);
 752		return;
 753	}
 754	c->empty = 0;
 755	c->min = muldiv32(a->min, k, b->max, &r);
 756	c->openmin = (r || a->openmin || b->openmax);
 757	if (b->min > 0) {
 758		c->max = muldiv32(a->max, k, b->min, &r);
 759		if (r) {
 760			c->max++;
 761			c->openmax = 1;
 762		} else
 763			c->openmax = (a->openmax || b->openmin);
 764	} else {
 765		c->max = UINT_MAX;
 766		c->openmax = 0;
 767	}
 768	c->integer = 0;
 769}
 770
 771/* ---- */
 772
 773
 774/**
 775 * snd_interval_ratnum - refine the interval value
 776 * @i: interval to refine
 777 * @rats_count: number of ratnum_t 
 778 * @rats: ratnum_t array
 779 * @nump: pointer to store the resultant numerator
 780 * @denp: pointer to store the resultant denominator
 781 *
 782 * Return: Positive if the value is changed, zero if it's not changed, or a
 783 * negative error code.
 784 */
 785int snd_interval_ratnum(struct snd_interval *i,
 786			unsigned int rats_count, const struct snd_ratnum *rats,
 787			unsigned int *nump, unsigned int *denp)
 788{
 789	unsigned int best_num, best_den;
 790	int best_diff;
 791	unsigned int k;
 792	struct snd_interval t;
 793	int err;
 794	unsigned int result_num, result_den;
 795	int result_diff;
 796
 797	best_num = best_den = best_diff = 0;
 798	for (k = 0; k < rats_count; ++k) {
 799		unsigned int num = rats[k].num;
 800		unsigned int den;
 801		unsigned int q = i->min;
 802		int diff;
 803		if (q == 0)
 804			q = 1;
 805		den = div_up(num, q);
 806		if (den < rats[k].den_min)
 807			continue;
 808		if (den > rats[k].den_max)
 809			den = rats[k].den_max;
 810		else {
 811			unsigned int r;
 812			r = (den - rats[k].den_min) % rats[k].den_step;
 813			if (r != 0)
 814				den -= r;
 815		}
 816		diff = num - q * den;
 817		if (diff < 0)
 818			diff = -diff;
 819		if (best_num == 0 ||
 820		    diff * best_den < best_diff * den) {
 821			best_diff = diff;
 822			best_den = den;
 823			best_num = num;
 824		}
 825	}
 826	if (best_den == 0) {
 827		i->empty = 1;
 828		return -EINVAL;
 829	}
 830	t.min = div_down(best_num, best_den);
 831	t.openmin = !!(best_num % best_den);
 832	
 833	result_num = best_num;
 834	result_diff = best_diff;
 835	result_den = best_den;
 836	best_num = best_den = best_diff = 0;
 837	for (k = 0; k < rats_count; ++k) {
 838		unsigned int num = rats[k].num;
 839		unsigned int den;
 840		unsigned int q = i->max;
 841		int diff;
 842		if (q == 0) {
 843			i->empty = 1;
 844			return -EINVAL;
 845		}
 846		den = div_down(num, q);
 847		if (den > rats[k].den_max)
 848			continue;
 849		if (den < rats[k].den_min)
 850			den = rats[k].den_min;
 851		else {
 852			unsigned int r;
 853			r = (den - rats[k].den_min) % rats[k].den_step;
 854			if (r != 0)
 855				den += rats[k].den_step - r;
 856		}
 857		diff = q * den - num;
 858		if (diff < 0)
 859			diff = -diff;
 860		if (best_num == 0 ||
 861		    diff * best_den < best_diff * den) {
 862			best_diff = diff;
 863			best_den = den;
 864			best_num = num;
 865		}
 866	}
 867	if (best_den == 0) {
 868		i->empty = 1;
 869		return -EINVAL;
 870	}
 871	t.max = div_up(best_num, best_den);
 872	t.openmax = !!(best_num % best_den);
 873	t.integer = 0;
 874	err = snd_interval_refine(i, &t);
 875	if (err < 0)
 876		return err;
 877
 878	if (snd_interval_single(i)) {
 879		if (best_diff * result_den < result_diff * best_den) {
 880			result_num = best_num;
 881			result_den = best_den;
 882		}
 883		if (nump)
 884			*nump = result_num;
 885		if (denp)
 886			*denp = result_den;
 887	}
 888	return err;
 889}
 890EXPORT_SYMBOL(snd_interval_ratnum);
 891
 892/**
 893 * snd_interval_ratden - refine the interval value
 894 * @i: interval to refine
 895 * @rats_count: number of struct ratden
 896 * @rats: struct ratden array
 897 * @nump: pointer to store the resultant numerator
 898 * @denp: pointer to store the resultant denominator
 899 *
 900 * Return: Positive if the value is changed, zero if it's not changed, or a
 901 * negative error code.
 902 */
 903static int snd_interval_ratden(struct snd_interval *i,
 904			       unsigned int rats_count,
 905			       const struct snd_ratden *rats,
 906			       unsigned int *nump, unsigned int *denp)
 907{
 908	unsigned int best_num, best_diff, best_den;
 909	unsigned int k;
 910	struct snd_interval t;
 911	int err;
 912
 913	best_num = best_den = best_diff = 0;
 914	for (k = 0; k < rats_count; ++k) {
 915		unsigned int num;
 916		unsigned int den = rats[k].den;
 917		unsigned int q = i->min;
 918		int diff;
 919		num = mul(q, den);
 920		if (num > rats[k].num_max)
 921			continue;
 922		if (num < rats[k].num_min)
 923			num = rats[k].num_max;
 924		else {
 925			unsigned int r;
 926			r = (num - rats[k].num_min) % rats[k].num_step;
 927			if (r != 0)
 928				num += rats[k].num_step - r;
 929		}
 930		diff = num - q * den;
 931		if (best_num == 0 ||
 932		    diff * best_den < best_diff * den) {
 933			best_diff = diff;
 934			best_den = den;
 935			best_num = num;
 936		}
 937	}
 938	if (best_den == 0) {
 939		i->empty = 1;
 940		return -EINVAL;
 941	}
 942	t.min = div_down(best_num, best_den);
 943	t.openmin = !!(best_num % best_den);
 944	
 945	best_num = best_den = best_diff = 0;
 946	for (k = 0; k < rats_count; ++k) {
 947		unsigned int num;
 948		unsigned int den = rats[k].den;
 949		unsigned int q = i->max;
 950		int diff;
 951		num = mul(q, den);
 952		if (num < rats[k].num_min)
 953			continue;
 954		if (num > rats[k].num_max)
 955			num = rats[k].num_max;
 956		else {
 957			unsigned int r;
 958			r = (num - rats[k].num_min) % rats[k].num_step;
 959			if (r != 0)
 960				num -= r;
 961		}
 962		diff = q * den - num;
 963		if (best_num == 0 ||
 964		    diff * best_den < best_diff * den) {
 965			best_diff = diff;
 966			best_den = den;
 967			best_num = num;
 968		}
 969	}
 970	if (best_den == 0) {
 971		i->empty = 1;
 972		return -EINVAL;
 973	}
 974	t.max = div_up(best_num, best_den);
 975	t.openmax = !!(best_num % best_den);
 976	t.integer = 0;
 977	err = snd_interval_refine(i, &t);
 978	if (err < 0)
 979		return err;
 980
 981	if (snd_interval_single(i)) {
 982		if (nump)
 983			*nump = best_num;
 984		if (denp)
 985			*denp = best_den;
 986	}
 987	return err;
 988}
 989
 990/**
 991 * snd_interval_list - refine the interval value from the list
 992 * @i: the interval value to refine
 993 * @count: the number of elements in the list
 994 * @list: the value list
 995 * @mask: the bit-mask to evaluate
 996 *
 997 * Refines the interval value from the list.
 998 * When mask is non-zero, only the elements corresponding to bit 1 are
 999 * evaluated.
1000 *
1001 * Return: Positive if the value is changed, zero if it's not changed, or a
1002 * negative error code.
1003 */
1004int snd_interval_list(struct snd_interval *i, unsigned int count,
1005		      const unsigned int *list, unsigned int mask)
1006{
1007        unsigned int k;
1008	struct snd_interval list_range;
1009
1010	if (!count) {
1011		i->empty = 1;
1012		return -EINVAL;
1013	}
1014	snd_interval_any(&list_range);
1015	list_range.min = UINT_MAX;
1016	list_range.max = 0;
1017        for (k = 0; k < count; k++) {
1018		if (mask && !(mask & (1 << k)))
1019			continue;
1020		if (!snd_interval_test(i, list[k]))
1021			continue;
1022		list_range.min = min(list_range.min, list[k]);
1023		list_range.max = max(list_range.max, list[k]);
1024        }
1025	return snd_interval_refine(i, &list_range);
1026}
1027EXPORT_SYMBOL(snd_interval_list);
1028
1029/**
1030 * snd_interval_ranges - refine the interval value from the list of ranges
1031 * @i: the interval value to refine
1032 * @count: the number of elements in the list of ranges
1033 * @ranges: the ranges list
1034 * @mask: the bit-mask to evaluate
1035 *
1036 * Refines the interval value from the list of ranges.
1037 * When mask is non-zero, only the elements corresponding to bit 1 are
1038 * evaluated.
1039 *
1040 * Return: Positive if the value is changed, zero if it's not changed, or a
1041 * negative error code.
1042 */
1043int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1044			const struct snd_interval *ranges, unsigned int mask)
1045{
1046	unsigned int k;
1047	struct snd_interval range_union;
1048	struct snd_interval range;
1049
1050	if (!count) {
1051		snd_interval_none(i);
1052		return -EINVAL;
1053	}
1054	snd_interval_any(&range_union);
1055	range_union.min = UINT_MAX;
1056	range_union.max = 0;
1057	for (k = 0; k < count; k++) {
1058		if (mask && !(mask & (1 << k)))
1059			continue;
1060		snd_interval_copy(&range, &ranges[k]);
1061		if (snd_interval_refine(&range, i) < 0)
1062			continue;
1063		if (snd_interval_empty(&range))
1064			continue;
1065
1066		if (range.min < range_union.min) {
1067			range_union.min = range.min;
1068			range_union.openmin = 1;
1069		}
1070		if (range.min == range_union.min && !range.openmin)
1071			range_union.openmin = 0;
1072		if (range.max > range_union.max) {
1073			range_union.max = range.max;
1074			range_union.openmax = 1;
1075		}
1076		if (range.max == range_union.max && !range.openmax)
1077			range_union.openmax = 0;
1078	}
1079	return snd_interval_refine(i, &range_union);
1080}
1081EXPORT_SYMBOL(snd_interval_ranges);
1082
1083static int snd_interval_step(struct snd_interval *i, unsigned int step)
1084{
1085	unsigned int n;
1086	int changed = 0;
1087	n = i->min % step;
1088	if (n != 0 || i->openmin) {
1089		i->min += step - n;
1090		i->openmin = 0;
1091		changed = 1;
1092	}
1093	n = i->max % step;
1094	if (n != 0 || i->openmax) {
1095		i->max -= n;
1096		i->openmax = 0;
1097		changed = 1;
1098	}
1099	if (snd_interval_checkempty(i)) {
1100		i->empty = 1;
1101		return -EINVAL;
1102	}
1103	return changed;
1104}
1105
1106/* hw_params constraints helpers */
1107
1108/**
1109 * snd_pcm_hw_rule_add - add the hw-constraint rule
1110 * @runtime: the pcm runtime instance
1111 * @cond: condition bits
1112 * @var: the variable to evaluate
1113 * @func: the evaluation function
1114 * @private: the private data pointer passed to function
1115 * @dep: the dependent variables
1116 *
1117 * Return: Zero if successful, or a negative error code on failure.
1118 */
1119int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1120			int var,
1121			snd_pcm_hw_rule_func_t func, void *private,
1122			int dep, ...)
1123{
1124	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1125	struct snd_pcm_hw_rule *c;
1126	unsigned int k;
1127	va_list args;
1128	va_start(args, dep);
1129	if (constrs->rules_num >= constrs->rules_all) {
1130		struct snd_pcm_hw_rule *new;
1131		unsigned int new_rules = constrs->rules_all + 16;
1132		new = krealloc_array(constrs->rules, new_rules,
1133				     sizeof(*c), GFP_KERNEL);
1134		if (!new) {
1135			va_end(args);
1136			return -ENOMEM;
1137		}
1138		constrs->rules = new;
1139		constrs->rules_all = new_rules;
1140	}
1141	c = &constrs->rules[constrs->rules_num];
1142	c->cond = cond;
1143	c->func = func;
1144	c->var = var;
1145	c->private = private;
1146	k = 0;
1147	while (1) {
1148		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1149			va_end(args);
1150			return -EINVAL;
1151		}
1152		c->deps[k++] = dep;
1153		if (dep < 0)
1154			break;
1155		dep = va_arg(args, int);
1156	}
1157	constrs->rules_num++;
1158	va_end(args);
1159	return 0;
1160}
1161EXPORT_SYMBOL(snd_pcm_hw_rule_add);
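
/*
 * Example: a driver may register its own dependency rule from the .open
 * callback with snd_pcm_hw_rule_add().  A minimal sketch, restricting a
 * hypothetical device to at most two channels at rates above 48 kHz
 * (the mydrv_* names are made up for illustration):
 *
 *	static int mydrv_rule_channels(struct snd_pcm_hw_params *params,
 *				       struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (r->min > 48000)
 *			return snd_interval_refine(c, &t);
 *		return 0;
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  mydrv_rule_channels, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */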
1162
1163/**
1164 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1165 * @runtime: PCM runtime instance
1166 * @var: hw_params variable to apply the mask
1167 * @mask: the bitmap mask
1168 *
1169 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1170 *
1171 * Return: Zero if successful, or a negative error code on failure.
1172 */
1173int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1174			       u_int32_t mask)
1175{
1176	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1177	struct snd_mask *maskp = constrs_mask(constrs, var);
1178	*maskp->bits &= mask;
1179	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1180	if (*maskp->bits == 0)
1181		return -EINVAL;
1182	return 0;
1183}
1184
1185/**
1186 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1187 * @runtime: PCM runtime instance
1188 * @var: hw_params variable to apply the mask
1189 * @mask: the 64bit bitmap mask
1190 *
1191 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1192 *
1193 * Return: Zero if successful, or a negative error code on failure.
1194 */
1195int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1196				 u_int64_t mask)
1197{
1198	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1199	struct snd_mask *maskp = constrs_mask(constrs, var);
1200	maskp->bits[0] &= (u_int32_t)mask;
1201	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1202	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1203	if (! maskp->bits[0] && ! maskp->bits[1])
1204		return -EINVAL;
1205	return 0;
1206}
1207EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1208
1209/**
1210 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the integer constraint
1213 *
1214 * Apply the constraint of integer to an interval parameter.
1215 *
1216 * Return: Positive if the value is changed, zero if it's not changed, or a
1217 * negative error code.
1218 */
1219int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1220{
1221	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1222	return snd_interval_setinteger(constrs_interval(constrs, var));
1223}
1224EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1225
1226/**
1227 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1228 * @runtime: PCM runtime instance
1229 * @var: hw_params variable to apply the range
1230 * @min: the minimal value
1231 * @max: the maximal value
1232 * 
1233 * Apply the min/max range constraint to an interval parameter.
1234 *
1235 * Return: Positive if the value is changed, zero if it's not changed, or a
1236 * negative error code.
1237 */
1238int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1239				 unsigned int min, unsigned int max)
1240{
1241	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1242	struct snd_interval t;
1243	t.min = min;
1244	t.max = max;
1245	t.openmin = t.openmax = 0;
1246	t.integer = 0;
1247	return snd_interval_refine(constrs_interval(constrs, var), &t);
1248}
1249EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
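
/*
 * Example: these simple constraints are typically applied from a driver's
 * .open callback.  A minimal sketch with made-up limits, forcing an integer
 * number of periods and bounding the buffer size:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime,
 *					    SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 65536);
 *	if (err < 0)
 *		return err;
 */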
1250
1251static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1252				struct snd_pcm_hw_rule *rule)
1253{
1254	struct snd_pcm_hw_constraint_list *list = rule->private;
1255	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1256}		
1257
1258
1259/**
1260 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1261 * @runtime: PCM runtime instance
1262 * @cond: condition bits
1263 * @var: hw_params variable to apply the list constraint
1264 * @l: list
1265 * 
1266 * Apply the list of constraints to an interval parameter.
1267 *
1268 * Return: Zero if successful, or a negative error code on failure.
1269 */
1270int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1271			       unsigned int cond,
1272			       snd_pcm_hw_param_t var,
1273			       const struct snd_pcm_hw_constraint_list *l)
1274{
1275	return snd_pcm_hw_rule_add(runtime, cond, var,
1276				   snd_pcm_hw_rule_list, (void *)l,
1277				   var, -1);
1278}
1279EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
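
/*
 * Example: restricting the rate to a fixed set of values.  A minimal sketch
 * assuming hypothetical hardware that supports only these rates; the table
 * must stay valid while the substream is open, so it is typically static:
 *
 *	static const unsigned int mydrv_rates[] = { 32000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list mydrv_rate_list = {
 *		.count = ARRAY_SIZE(mydrv_rates),
 *		.list = mydrv_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &mydrv_rate_list);
 */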
1280
1281static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1282				  struct snd_pcm_hw_rule *rule)
1283{
1284	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1285	return snd_interval_ranges(hw_param_interval(params, rule->var),
1286				   r->count, r->ranges, r->mask);
1287}
1288
1289
1290/**
1291 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1292 * @runtime: PCM runtime instance
1293 * @cond: condition bits
1294 * @var: hw_params variable to apply the list of range constraints
1295 * @r: ranges
1296 *
1297 * Apply the list of range constraints to an interval parameter.
1298 *
1299 * Return: Zero if successful, or a negative error code on failure.
1300 */
1301int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1302				 unsigned int cond,
1303				 snd_pcm_hw_param_t var,
1304				 const struct snd_pcm_hw_constraint_ranges *r)
1305{
1306	return snd_pcm_hw_rule_add(runtime, cond, var,
1307				   snd_pcm_hw_rule_ranges, (void *)r,
1308				   var, -1);
1309}
1310EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
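
/*
 * Example: when the supported values form a few continuous spans rather than
 * discrete points, a list of ranges is more compact.  A minimal sketch with
 * two assumed rate spans of a hypothetical device:
 *
 *	static const struct snd_interval mydrv_rate_ranges[] = {
 *		{ .min = 8000,  .max = 48000 },
 *		{ .min = 88200, .max = 96000 },
 *	};
 *	static const struct snd_pcm_hw_constraint_ranges mydrv_rates = {
 *		.count = ARRAY_SIZE(mydrv_rate_ranges),
 *		.ranges = mydrv_rate_ranges,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ranges(runtime, 0,
 *					   SNDRV_PCM_HW_PARAM_RATE,
 *					   &mydrv_rates);
 */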
1311
1312static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1313				   struct snd_pcm_hw_rule *rule)
1314{
1315	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1316	unsigned int num = 0, den = 0;
1317	int err;
1318	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1319				  r->nrats, r->rats, &num, &den);
1320	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1321		params->rate_num = num;
1322		params->rate_den = den;
1323	}
1324	return err;
1325}
1326
1327/**
1328 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1329 * @runtime: PCM runtime instance
1330 * @cond: condition bits
1331 * @var: hw_params variable to apply the ratnums constraint
1332 * @r: struct snd_ratnum constraints
1333 *
1334 * Return: Zero if successful, or a negative error code on failure.
1335 */
1336int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, 
1337				  unsigned int cond,
1338				  snd_pcm_hw_param_t var,
1339				  const struct snd_pcm_hw_constraint_ratnums *r)
1340{
1341	return snd_pcm_hw_rule_add(runtime, cond, var,
1342				   snd_pcm_hw_rule_ratnums, (void *)r,
1343				   var, -1);
1344}
1345EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
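
/*
 * Example: rates derived from a master clock divided by a programmable
 * integer divider can be described with a single struct snd_ratnum.  A
 * minimal sketch with a hypothetical 24.576 MHz clock and dividers from
 * 256 to 4096 in steps of 256:
 *
 *	static const struct snd_ratnum mydrv_clock = {
 *		.num = 24576000,
 *		.den_min = 256,
 *		.den_max = 4096,
 *		.den_step = 256,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums mydrv_ratnums = {
 *		.nrats = 1,
 *		.rats = &mydrv_clock,
 *	};
 *
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE,
 *					    &mydrv_ratnums);
 */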
1346
1347static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1348				   struct snd_pcm_hw_rule *rule)
1349{
1350	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1351	unsigned int num = 0, den = 0;
1352	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1353				  r->nrats, r->rats, &num, &den);
1354	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1355		params->rate_num = num;
1356		params->rate_den = den;
1357	}
1358	return err;
1359}
1360
1361/**
1362 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1363 * @runtime: PCM runtime instance
1364 * @cond: condition bits
1365 * @var: hw_params variable to apply the ratdens constraint
1366 * @r: struct snd_ratden constraints
1367 *
1368 * Return: Zero if successful, or a negative error code on failure.
1369 */
1370int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, 
1371				  unsigned int cond,
1372				  snd_pcm_hw_param_t var,
1373				  const struct snd_pcm_hw_constraint_ratdens *r)
1374{
1375	return snd_pcm_hw_rule_add(runtime, cond, var,
1376				   snd_pcm_hw_rule_ratdens, (void *)r,
1377				   var, -1);
1378}
1379EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1380
1381static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1382				  struct snd_pcm_hw_rule *rule)
1383{
1384	unsigned int l = (unsigned long) rule->private;
1385	int width = l & 0xffff;
1386	unsigned int msbits = l >> 16;
1387	const struct snd_interval *i =
1388		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1389
1390	if (!snd_interval_single(i))
1391		return 0;
1392
1393	if ((snd_interval_value(i) == width) ||
1394	    (width == 0 && snd_interval_value(i) > msbits))
1395		params->msbits = min_not_zero(params->msbits, msbits);
1396
1397	return 0;
1398}
1399
1400/**
1401 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1402 * @runtime: PCM runtime instance
1403 * @cond: condition bits
1404 * @width: sample bits width
1405 * @msbits: msbits width
1406 *
1407 * This constraint will set the number of most significant bits (msbits) if a
1408 * sample format with the specified width has been selected. If width is set to 0
1409 * the msbits will be set for any sample format with a width larger than the
1410 * specified msbits.
1411 *
1412 * Return: Zero if successful, or a negative error code on failure.
1413 */
1414int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, 
1415				 unsigned int cond,
1416				 unsigned int width,
1417				 unsigned int msbits)
1418{
1419	unsigned long l = (msbits << 16) | width;
1420	return snd_pcm_hw_rule_add(runtime, cond, -1,
1421				    snd_pcm_hw_rule_msbits,
1422				    (void*) l,
1423				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1424}
1425EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
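
/*
 * Example: hardware that keeps 24 valid bits in a 32-bit sample container
 * would declare it like this (a minimal sketch):
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */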
1426
1427static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1428				struct snd_pcm_hw_rule *rule)
1429{
1430	unsigned long step = (unsigned long) rule->private;
1431	return snd_interval_step(hw_param_interval(params, rule->var), step);
1432}
1433
1434/**
1435 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1436 * @runtime: PCM runtime instance
1437 * @cond: condition bits
1438 * @var: hw_params variable to apply the step constraint
1439 * @step: step size
1440 *
1441 * Return: Zero if successful, or a negative error code on failure.
1442 */
1443int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1444			       unsigned int cond,
1445			       snd_pcm_hw_param_t var,
1446			       unsigned long step)
1447{
1448	return snd_pcm_hw_rule_add(runtime, cond, var, 
1449				   snd_pcm_hw_rule_step, (void *) step,
1450				   var, -1);
1451}
1452EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1453
1454static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1455{
1456	static const unsigned int pow2_sizes[] = {
1457		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1458		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1459		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1460		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1461	};
1462	return snd_interval_list(hw_param_interval(params, rule->var),
1463				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1464}		
1465
1466/**
1467 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1468 * @runtime: PCM runtime instance
1469 * @cond: condition bits
1470 * @var: hw_params variable to apply the power-of-2 constraint
1471 *
1472 * Return: Zero if successful, or a negative error code on failure.
1473 */
1474int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1475			       unsigned int cond,
1476			       snd_pcm_hw_param_t var)
1477{
1478	return snd_pcm_hw_rule_add(runtime, cond, var, 
1479				   snd_pcm_hw_rule_pow2, NULL,
1480				   var, -1);
1481}
1482EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
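
/*
 * Example: aligning the period size to a DMA burst and keeping the buffer a
 * power of two.  A minimal sketch with made-up alignment requirements:
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 */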
1483
1484static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1485					   struct snd_pcm_hw_rule *rule)
1486{
1487	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1488	struct snd_interval *rate;
1489
1490	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1491	return snd_interval_list(rate, 1, &base_rate, 0);
1492}
1493
1494/**
1495 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1496 * @runtime: PCM runtime instance
1497 * @base_rate: the rate at which the hardware does not resample
1498 *
1499 * Return: Zero if successful, or a negative error code on failure.
1500 */
1501int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1502			       unsigned int base_rate)
1503{
1504	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1505				   SNDRV_PCM_HW_PARAM_RATE,
1506				   snd_pcm_hw_rule_noresample_func,
1507				   (void *)(uintptr_t)base_rate,
1508				   SNDRV_PCM_HW_PARAM_RATE, -1);
1509}
1510EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
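
/*
 * Example: a device whose hardware resampler can be bypassed at its native
 * rate would add the rule below (a minimal sketch, 48 kHz assumed).  The
 * constraint only takes effect when the application requests
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 */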
1511
1512static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1513				  snd_pcm_hw_param_t var)
1514{
1515	if (hw_is_mask(var)) {
1516		snd_mask_any(hw_param_mask(params, var));
1517		params->cmask |= 1 << var;
1518		params->rmask |= 1 << var;
1519		return;
1520	}
1521	if (hw_is_interval(var)) {
1522		snd_interval_any(hw_param_interval(params, var));
1523		params->cmask |= 1 << var;
1524		params->rmask |= 1 << var;
1525		return;
1526	}
1527	snd_BUG();
1528}
1529
1530void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1531{
1532	unsigned int k;
1533	memset(params, 0, sizeof(*params));
1534	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1535		_snd_pcm_hw_param_any(params, k);
1536	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1537		_snd_pcm_hw_param_any(params, k);
1538	params->info = ~0U;
1539}
1540EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1541
1542/**
1543 * snd_pcm_hw_param_value - return @params field @var value
1544 * @params: the hw_params instance
1545 * @var: parameter to retrieve
1546 * @dir: pointer to the direction (-1,0,1) or %NULL
1547 *
1548 * Return: The value for field @var if it's fixed in configuration space
1549 * defined by @params. -%EINVAL otherwise.
1550 */
1551int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1552			   snd_pcm_hw_param_t var, int *dir)
1553{
1554	if (hw_is_mask(var)) {
1555		const struct snd_mask *mask = hw_param_mask_c(params, var);
1556		if (!snd_mask_single(mask))
1557			return -EINVAL;
1558		if (dir)
1559			*dir = 0;
1560		return snd_mask_value(mask);
1561	}
1562	if (hw_is_interval(var)) {
1563		const struct snd_interval *i = hw_param_interval_c(params, var);
1564		if (!snd_interval_single(i))
1565			return -EINVAL;
1566		if (dir)
1567			*dir = i->openmin;
1568		return snd_interval_value(i);
1569	}
1570	return -EINVAL;
1571}
1572EXPORT_SYMBOL(snd_pcm_hw_param_value);
1573
1574void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1575				snd_pcm_hw_param_t var)
1576{
1577	if (hw_is_mask(var)) {
1578		snd_mask_none(hw_param_mask(params, var));
1579		params->cmask |= 1 << var;
1580		params->rmask |= 1 << var;
1581	} else if (hw_is_interval(var)) {
1582		snd_interval_none(hw_param_interval(params, var));
1583		params->cmask |= 1 << var;
1584		params->rmask |= 1 << var;
1585	} else {
1586		snd_BUG();
1587	}
1588}
1589EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1590
1591static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1592				   snd_pcm_hw_param_t var)
1593{
1594	int changed;
1595	if (hw_is_mask(var))
1596		changed = snd_mask_refine_first(hw_param_mask(params, var));
1597	else if (hw_is_interval(var))
1598		changed = snd_interval_refine_first(hw_param_interval(params, var));
1599	else
1600		return -EINVAL;
1601	if (changed > 0) {
1602		params->cmask |= 1 << var;
1603		params->rmask |= 1 << var;
1604	}
1605	return changed;
1606}
1607
1608
1609/**
1610 * snd_pcm_hw_param_first - refine config space and return minimum value
1611 * @pcm: PCM instance
1612 * @params: the hw_params instance
1613 * @var: parameter to retrieve
1614 * @dir: pointer to the direction (-1,0,1) or %NULL
1615 *
1616 * Inside configuration space defined by @params remove from @var all
1617 * values > minimum. Reduce configuration space accordingly.
1618 *
1619 * Return: The minimum, or a negative error code on failure.
1620 */
1621int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, 
1622			   struct snd_pcm_hw_params *params, 
1623			   snd_pcm_hw_param_t var, int *dir)
1624{
1625	int changed = _snd_pcm_hw_param_first(params, var);
1626	if (changed < 0)
1627		return changed;
1628	if (params->rmask) {
1629		int err = snd_pcm_hw_refine(pcm, params);
1630		if (err < 0)
1631			return err;
1632	}
1633	return snd_pcm_hw_param_value(params, var, dir);
1634}
1635EXPORT_SYMBOL(snd_pcm_hw_param_first);
1636
1637static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1638				  snd_pcm_hw_param_t var)
1639{
1640	int changed;
1641	if (hw_is_mask(var))
1642		changed = snd_mask_refine_last(hw_param_mask(params, var));
1643	else if (hw_is_interval(var))
1644		changed = snd_interval_refine_last(hw_param_interval(params, var));
1645	else
1646		return -EINVAL;
1647	if (changed > 0) {
1648		params->cmask |= 1 << var;
1649		params->rmask |= 1 << var;
1650	}
1651	return changed;
1652}
1653
1654
1655/**
1656 * snd_pcm_hw_param_last - refine config space and return maximum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1661 *
1662 * Inside configuration space defined by @params remove from @var all
1663 * values < maximum. Reduce configuration space accordingly.
1664 *
1665 * Return: The maximum, or a negative error code on failure.
1666 */
1667int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, 
1668			  struct snd_pcm_hw_params *params,
1669			  snd_pcm_hw_param_t var, int *dir)
1670{
1671	int changed = _snd_pcm_hw_param_last(params, var);
1672	if (changed < 0)
1673		return changed;
1674	if (params->rmask) {
1675		int err = snd_pcm_hw_refine(pcm, params);
1676		if (err < 0)
1677			return err;
1678	}
1679	return snd_pcm_hw_param_value(params, var, dir);
1680}
1681EXPORT_SYMBOL(snd_pcm_hw_param_last);
1682
1683static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1684				   void *arg)
1685{
1686	struct snd_pcm_runtime *runtime = substream->runtime;
1687	unsigned long flags;
1688	snd_pcm_stream_lock_irqsave(substream, flags);
1689	if (snd_pcm_running(substream) &&
1690	    snd_pcm_update_hw_ptr(substream) >= 0)
1691		runtime->status->hw_ptr %= runtime->buffer_size;
1692	else {
1693		runtime->status->hw_ptr = 0;
1694		runtime->hw_ptr_wrap = 0;
1695	}
1696	snd_pcm_stream_unlock_irqrestore(substream, flags);
1697	return 0;
1698}
1699
1700static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1701					  void *arg)
1702{
1703	struct snd_pcm_channel_info *info = arg;
1704	struct snd_pcm_runtime *runtime = substream->runtime;
1705	int width;
1706	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1707		info->offset = -1;
1708		return 0;
1709	}
1710	width = snd_pcm_format_physical_width(runtime->format);
1711	if (width < 0)
1712		return width;
1713	info->offset = 0;
1714	switch (runtime->access) {
1715	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1716	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1717		info->first = info->channel * width;
1718		info->step = runtime->channels * width;
1719		break;
1720	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1721	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1722	{
1723		size_t size = runtime->dma_bytes / runtime->channels;
1724		info->first = info->channel * size * 8;
1725		info->step = width;
1726		break;
1727	}
1728	default:
1729		snd_BUG();
1730		break;
1731	}
1732	return 0;
1733}
1734
1735static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1736				       void *arg)
1737{
1738	struct snd_pcm_hw_params *params = arg;
1739	snd_pcm_format_t format;
1740	int channels;
1741	ssize_t frame_size;
1742
1743	params->fifo_size = substream->runtime->hw.fifo_size;
1744	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1745		format = params_format(params);
1746		channels = params_channels(params);
1747		frame_size = snd_pcm_format_size(format, channels);
1748		if (frame_size > 0)
1749			params->fifo_size /= frame_size;
1750	}
1751	return 0;
1752}
1753
1754/**
1755 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1756 * @substream: the pcm substream instance
1757 * @cmd: ioctl command
1758 * @arg: ioctl argument
1759 *
1760 * Processes the generic ioctl commands for PCM.
1761 * Can be passed as the ioctl callback for PCM ops.
1762 *
1763 * Return: Zero if successful, or a negative error code on failure.
1764 */
1765int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1766		      unsigned int cmd, void *arg)
1767{
1768	switch (cmd) {
1769	case SNDRV_PCM_IOCTL1_RESET:
1770		return snd_pcm_lib_ioctl_reset(substream, arg);
1771	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1772		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1773	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1774		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1775	}
1776	return -ENXIO;
1777}
1778EXPORT_SYMBOL(snd_pcm_lib_ioctl);
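
/*
 * Example: a driver that needs no special ioctl handling can point the
 * .ioctl field of its snd_pcm_ops at this helper.  A minimal sketch; the
 * mydrv_* callbacks are hypothetical placeholders:
 *
 *	static const struct snd_pcm_ops mydrv_pcm_ops = {
 *		.open		= mydrv_open,
 *		.close		= mydrv_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= mydrv_hw_params,
 *		.prepare	= mydrv_prepare,
 *		.trigger	= mydrv_trigger,
 *		.pointer	= mydrv_pointer,
 *	};
 */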
1779
1780/**
1781 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1782 *						under acquired lock of PCM substream.
1783 * @substream: the instance of pcm substream.
1784 *
1785 * This function is called when a batch of audio data frames of the same size as the buffer
1786 * period has been processed in the audio data transfer.
1787 *
1788 * The call updates the runtime status with the latest position of the audio data transfer,
1789 * checks for overrun and underrun of the buffer, wakes up user processes waiting for available
1790 * audio data frames, samples the audio timestamp, and stops or drains the PCM substream
1791 * according to the configured thresholds.
1792 *
1793 * The function is intended for cases where the PCM driver operates on audio data frames under
1794 * the acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in
1795 * process context. In any interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1796 * instead, since the lock of the PCM substream should be acquired in advance.
1797 *
1798 * Developers should note that the following callbacks in &snd_pcm_ops may be invoked by a call
1799 * to this function:
1800 *
1801 * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
1802 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1803 * - .get_time_info - to retrieve audio time stamp if needed.
1804 *
1805 * Even if more than one period has elapsed since the last call, this needs to be called only once.
1806 */
1807void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1808{
1809	struct snd_pcm_runtime *runtime;
1810
1811	if (PCM_RUNTIME_CHECK(substream))
1812		return;
1813	runtime = substream->runtime;
1814
1815	if (!snd_pcm_running(substream) ||
1816	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1817		goto _end;
1818
1819#ifdef CONFIG_SND_PCM_TIMER
1820	if (substream->timer_running)
1821		snd_timer_interrupt(substream->timer, 1);
1822#endif
1823 _end:
1824	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1825}
1826EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1827
1828/**
1829 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1830 *			      PCM substream.
1831 * @substream: the instance of PCM substream.
1832 *
1833 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()``, except that
1834 * it acquires the lock of the PCM substream by itself.
1835 *
1836 * It is typically called from an IRQ handler when a hardware IRQ occurs to notify that a batch
1837 * of audio data frames of the same size as the buffer period has been processed in the audio
1838 * data transfer.
1839 */
1840void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1841{
1842	unsigned long flags;
1843
1844	if (snd_BUG_ON(!substream))
1845		return;
1846
1847	snd_pcm_stream_lock_irqsave(substream, flags);
1848	snd_pcm_period_elapsed_under_stream_lock(substream);
1849	snd_pcm_stream_unlock_irqrestore(substream, flags);
1850}
1851EXPORT_SYMBOL(snd_pcm_period_elapsed);
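
/*
 * Example: a typical interrupt handler acknowledges the hardware and then
 * notifies the PCM core.  A minimal sketch; the mydrv_* helpers and the
 * chip structure are hypothetical:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct mydrv_chip *chip = dev_id;
 *
 *		if (!mydrv_irq_pending(chip))
 *			return IRQ_NONE;
 *		mydrv_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */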
1852
1853/*
1854 * Wait until avail_min data becomes available
1855 * Returns a negative error code if any error occurs during operation.
1856 * The available space is stored in availp.  When err = 0 and avail = 0
1857 * on the capture stream, it indicates the stream is in DRAINING state.
1858 */
1859static int wait_for_avail(struct snd_pcm_substream *substream,
1860			      snd_pcm_uframes_t *availp)
1861{
1862	struct snd_pcm_runtime *runtime = substream->runtime;
1863	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1864	wait_queue_entry_t wait;
1865	int err = 0;
1866	snd_pcm_uframes_t avail = 0;
1867	long wait_time, tout;
1868
1869	init_waitqueue_entry(&wait, current);
1870	set_current_state(TASK_INTERRUPTIBLE);
1871	add_wait_queue(&runtime->tsleep, &wait);
1872
1873	if (runtime->no_period_wakeup)
1874		wait_time = MAX_SCHEDULE_TIMEOUT;
1875	else {
1876		/* use wait time from substream if available */
1877		if (substream->wait_time) {
1878			wait_time = substream->wait_time;
1879		} else {
1880			wait_time = 10;
1881
1882			if (runtime->rate) {
1883				long t = runtime->period_size * 2 /
1884					 runtime->rate;
1885				wait_time = max(t, wait_time);
1886			}
1887			wait_time = msecs_to_jiffies(wait_time * 1000);
1888		}
1889	}
1890
1891	for (;;) {
1892		if (signal_pending(current)) {
1893			err = -ERESTARTSYS;
1894			break;
1895		}
1896
1897		/*
1898		 * We need to check if space became available already
1899		 * (and thus the wakeup happened already) first to close
1900		 * the race of space already having become available.
1901		 * This check must happen after having been added to the waitqueue
1902		 * and having the current state set to INTERRUPTIBLE.
1903		 */
1904		avail = snd_pcm_avail(substream);
1905		if (avail >= runtime->twake)
1906			break;
1907		snd_pcm_stream_unlock_irq(substream);
1908
1909		tout = schedule_timeout(wait_time);
1910
1911		snd_pcm_stream_lock_irq(substream);
1912		set_current_state(TASK_INTERRUPTIBLE);
1913		switch (runtime->status->state) {
1914		case SNDRV_PCM_STATE_SUSPENDED:
1915			err = -ESTRPIPE;
1916			goto _endloop;
1917		case SNDRV_PCM_STATE_XRUN:
1918			err = -EPIPE;
1919			goto _endloop;
1920		case SNDRV_PCM_STATE_DRAINING:
1921			if (is_playback)
1922				err = -EPIPE;
1923			else 
1924				avail = 0; /* indicate draining */
1925			goto _endloop;
1926		case SNDRV_PCM_STATE_OPEN:
1927		case SNDRV_PCM_STATE_SETUP:
1928		case SNDRV_PCM_STATE_DISCONNECTED:
1929			err = -EBADFD;
1930			goto _endloop;
1931		case SNDRV_PCM_STATE_PAUSED:
1932			continue;
1933		}
1934		if (!tout) {
1935			pcm_dbg(substream->pcm,
1936				"%s write error (DMA or IRQ trouble?)\n",
1937				is_playback ? "playback" : "capture");
1938			err = -EIO;
1939			break;
1940		}
1941	}
1942 _endloop:
1943	set_current_state(TASK_RUNNING);
1944	remove_wait_queue(&runtime->tsleep, &wait);
1945	*availp = avail;
1946	return err;
1947}
1948	
1949typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1950			      int channel, unsigned long hwoff,
1951			      void *buf, unsigned long bytes);
1952
1953typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1954			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1955
1956/* calculate the target DMA-buffer position to be written/read */
1957static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1958			   int channel, unsigned long hwoff)
1959{
1960	return runtime->dma_area + hwoff +
1961		channel * (runtime->dma_bytes / runtime->channels);
1962}
1963
1964/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1965static int default_write_copy(struct snd_pcm_substream *substream,
1966			      int channel, unsigned long hwoff,
1967			      void *buf, unsigned long bytes)
1968{
1969	if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1970			   (void __user *)buf, bytes))
1971		return -EFAULT;
1972	return 0;
1973}
1974
1975/* default copy_kernel ops for write */
1976static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1977				     int channel, unsigned long hwoff,
1978				     void *buf, unsigned long bytes)
1979{
1980	memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1981	return 0;
1982}
1983
1984/* fill silence instead of copy data; called as a transfer helper
1985 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
1986 * a NULL buffer is passed
1987 */
1988static int fill_silence(struct snd_pcm_substream *substream, int channel,
1989			unsigned long hwoff, void *buf, unsigned long bytes)
1990{
1991	struct snd_pcm_runtime *runtime = substream->runtime;
1992
1993	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1994		return 0;
1995	if (substream->ops->fill_silence)
1996		return substream->ops->fill_silence(substream, channel,
1997						    hwoff, bytes);
1998
1999	snd_pcm_format_set_silence(runtime->format,
2000				   get_dma_ptr(runtime, channel, hwoff),
2001				   bytes_to_samples(runtime, bytes));
2002	return 0;
2003}
2004
2005/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
2006static int default_read_copy(struct snd_pcm_substream *substream,
2007			     int channel, unsigned long hwoff,
2008			     void *buf, unsigned long bytes)
2009{
2010	if (copy_to_user((void __user *)buf,
2011			 get_dma_ptr(substream->runtime, channel, hwoff),
2012			 bytes))
2013		return -EFAULT;
2014	return 0;
2015}
2016
2017/* default copy_kernel ops for read */
2018static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2019				    int channel, unsigned long hwoff,
2020				    void *buf, unsigned long bytes)
2021{
2022	memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2023	return 0;
2024}
2025
2026/* call transfer function with the converted pointers and sizes;
2027 * for interleaved mode, it's one shot for all samples
2028 */
2029static int interleaved_copy(struct snd_pcm_substream *substream,
2030			    snd_pcm_uframes_t hwoff, void *data,
2031			    snd_pcm_uframes_t off,
2032			    snd_pcm_uframes_t frames,
2033			    pcm_transfer_f transfer)
2034{
2035	struct snd_pcm_runtime *runtime = substream->runtime;
2036
2037	/* convert to bytes */
2038	hwoff = frames_to_bytes(runtime, hwoff);
2039	off = frames_to_bytes(runtime, off);
2040	frames = frames_to_bytes(runtime, frames);
2041	return transfer(substream, 0, hwoff, data + off, frames);
2042}
2043
2044/* call transfer function with the converted pointers and sizes for each
2045 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2046 */
2047static int noninterleaved_copy(struct snd_pcm_substream *substream,
2048			       snd_pcm_uframes_t hwoff, void *data,
2049			       snd_pcm_uframes_t off,
2050			       snd_pcm_uframes_t frames,
2051			       pcm_transfer_f transfer)
2052{
2053	struct snd_pcm_runtime *runtime = substream->runtime;
2054	int channels = runtime->channels;
2055	void **bufs = data;
2056	int c, err;
2057
2058	/* convert to bytes; note that it's not frames_to_bytes() here.
2059	 * in non-interleaved mode, we copy for each channel, thus
2060	 * each copy is n_samples bytes x channels = whole frames.
2061	 */
2062	off = samples_to_bytes(runtime, off);
2063	frames = samples_to_bytes(runtime, frames);
2064	hwoff = samples_to_bytes(runtime, hwoff);
2065	for (c = 0; c < channels; ++c, ++bufs) {
2066		if (!data || !*bufs)
2067			err = fill_silence(substream, c, hwoff, NULL, frames);
2068		else
2069			err = transfer(substream, c, hwoff, *bufs + off,
2070				       frames);
2071		if (err < 0)
2072			return err;
2073	}
2074	return 0;
2075}
2076
2077/* fill silence on the given buffer position;
2078 * called from snd_pcm_playback_silence()
2079 */
2080static int fill_silence_frames(struct snd_pcm_substream *substream,
2081			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2082{
2083	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2084	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2085		return interleaved_copy(substream, off, NULL, 0, frames,
2086					fill_silence);
2087	else
2088		return noninterleaved_copy(substream, off, NULL, 0, frames,
2089					   fill_silence);
2090}
2091
2092/* sanity-check for read/write methods */
2093static int pcm_sanity_check(struct snd_pcm_substream *substream)
2094{
2095	struct snd_pcm_runtime *runtime;
2096	if (PCM_RUNTIME_CHECK(substream))
2097		return -ENXIO;
2098	runtime = substream->runtime;
2099	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2100		return -EINVAL;
2101	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2102		return -EBADFD;
2103	return 0;
2104}
2105
2106static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2107{
2108	switch (runtime->status->state) {
2109	case SNDRV_PCM_STATE_PREPARED:
2110	case SNDRV_PCM_STATE_RUNNING:
2111	case SNDRV_PCM_STATE_PAUSED:
2112		return 0;
2113	case SNDRV_PCM_STATE_XRUN:
2114		return -EPIPE;
2115	case SNDRV_PCM_STATE_SUSPENDED:
2116		return -ESTRPIPE;
2117	default:
2118		return -EBADFD;
2119	}
2120}
2121
2122/* update to the given appl_ptr and call ack callback if needed;
2123 * when an error is returned, take back to the original value
2124 */
2125int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2126			   snd_pcm_uframes_t appl_ptr)
2127{
2128	struct snd_pcm_runtime *runtime = substream->runtime;
2129	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2130	int ret;
2131
2132	if (old_appl_ptr == appl_ptr)
2133		return 0;
2134
2135	runtime->control->appl_ptr = appl_ptr;
2136	if (substream->ops->ack) {
2137		ret = substream->ops->ack(substream);
2138		if (ret < 0) {
2139			runtime->control->appl_ptr = old_appl_ptr;
2140			return ret;
2141		}
2142	}
2143
2144	trace_applptr(substream, old_appl_ptr, appl_ptr);
2145
2146	return 0;
2147}
2148
2149/* the common loop for read/write data */
2150snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2151				     void *data, bool interleaved,
2152				     snd_pcm_uframes_t size, bool in_kernel)
2153{
2154	struct snd_pcm_runtime *runtime = substream->runtime;
2155	snd_pcm_uframes_t xfer = 0;
2156	snd_pcm_uframes_t offset = 0;
2157	snd_pcm_uframes_t avail;
2158	pcm_copy_f writer;
2159	pcm_transfer_f transfer;
2160	bool nonblock;
2161	bool is_playback;
2162	int err;
2163
2164	err = pcm_sanity_check(substream);
2165	if (err < 0)
2166		return err;
2167
2168	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2169	if (interleaved) {
2170		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2171		    runtime->channels > 1)
2172			return -EINVAL;
2173		writer = interleaved_copy;
2174	} else {
2175		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2176			return -EINVAL;
2177		writer = noninterleaved_copy;
2178	}
2179
2180	if (!data) {
2181		if (is_playback)
2182			transfer = fill_silence;
2183		else
2184			return -EINVAL;
2185	} else if (in_kernel) {
2186		if (substream->ops->copy_kernel)
2187			transfer = substream->ops->copy_kernel;
2188		else
2189			transfer = is_playback ?
2190				default_write_copy_kernel : default_read_copy_kernel;
2191	} else {
2192		if (substream->ops->copy_user)
2193			transfer = (pcm_transfer_f)substream->ops->copy_user;
2194		else
2195			transfer = is_playback ?
2196				default_write_copy : default_read_copy;
2197	}
2198
2199	if (size == 0)
2200		return 0;
2201
2202	nonblock = !!(substream->f_flags & O_NONBLOCK);
2203
2204	snd_pcm_stream_lock_irq(substream);
2205	err = pcm_accessible_state(runtime);
2206	if (err < 0)
2207		goto _end_unlock;
2208
2209	runtime->twake = runtime->control->avail_min ? : 1;
2210	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2211		snd_pcm_update_hw_ptr(substream);
2212
2213	/*
2214	 * If size < start_threshold, wait indefinitely. Another
2215	 * thread may start capture
2216	 */
2217	if (!is_playback &&
2218	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2219	    size >= runtime->start_threshold) {
2220		err = snd_pcm_start(substream);
2221		if (err < 0)
2222			goto _end_unlock;
2223	}
2224
2225	avail = snd_pcm_avail(substream);
2226
2227	while (size > 0) {
2228		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2229		snd_pcm_uframes_t cont;
2230		if (!avail) {
2231			if (!is_playback &&
2232			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2233				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2234				goto _end_unlock;
2235			}
2236			if (nonblock) {
2237				err = -EAGAIN;
2238				goto _end_unlock;
2239			}
2240			runtime->twake = min_t(snd_pcm_uframes_t, size,
2241					runtime->control->avail_min ? : 1);
2242			err = wait_for_avail(substream, &avail);
2243			if (err < 0)
2244				goto _end_unlock;
2245			if (!avail)
2246				continue; /* draining */
2247		}
2248		frames = size > avail ? avail : size;
2249		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2250		appl_ofs = appl_ptr % runtime->buffer_size;
2251		cont = runtime->buffer_size - appl_ofs;
2252		if (frames > cont)
2253			frames = cont;
2254		if (snd_BUG_ON(!frames)) {
2255			err = -EINVAL;
2256			goto _end_unlock;
2257		}
2258		snd_pcm_stream_unlock_irq(substream);
2259		err = writer(substream, appl_ofs, data, offset, frames,
2260			     transfer);
2261		snd_pcm_stream_lock_irq(substream);
2262		if (err < 0)
2263			goto _end_unlock;
2264		err = pcm_accessible_state(runtime);
2265		if (err < 0)
2266			goto _end_unlock;
2267		appl_ptr += frames;
2268		if (appl_ptr >= runtime->boundary)
2269			appl_ptr -= runtime->boundary;
2270		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2271		if (err < 0)
2272			goto _end_unlock;
2273
2274		offset += frames;
2275		size -= frames;
2276		xfer += frames;
2277		avail -= frames;
2278		if (is_playback &&
2279		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2280		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2281			err = snd_pcm_start(substream);
2282			if (err < 0)
2283				goto _end_unlock;
2284		}
2285	}
2286 _end_unlock:
2287	runtime->twake = 0;
2288	if (xfer > 0 && err >= 0)
2289		snd_pcm_update_state(substream, runtime);
2290	snd_pcm_stream_unlock_irq(substream);
2291	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2292}
2293EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2294
2295/*
2296 * standard channel mapping helpers
2297 */
2298
2299/* default channel maps for multi-channel playbacks, up to 8 channels */
2300const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2301	{ .channels = 1,
2302	  .map = { SNDRV_CHMAP_MONO } },
2303	{ .channels = 2,
2304	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2305	{ .channels = 4,
2306	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2307		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2308	{ .channels = 6,
2309	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2310		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2311		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2312	{ .channels = 8,
2313	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2314		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2315		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2316		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2317	{ }
2318};
2319EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2320
2321/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2322const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2323	{ .channels = 1,
2324	  .map = { SNDRV_CHMAP_MONO } },
2325	{ .channels = 2,
2326	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2327	{ .channels = 4,
2328	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2329		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2330	{ .channels = 6,
2331	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2332		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2333		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2334	{ .channels = 8,
2335	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2336		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2337		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2338		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2339	{ }
2340};
2341EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2342
2343static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2344{
2345	if (ch > info->max_channels)
2346		return false;
2347	return !info->channel_mask || (info->channel_mask & (1U << ch));
2348}
2349
2350static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2351			      struct snd_ctl_elem_info *uinfo)
2352{
2353	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2354
2355	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2356	uinfo->count = info->max_channels;
2357	uinfo->value.integer.min = 0;
2358	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2359	return 0;
2360}
2361
2362/* get callback for channel map ctl element
2363 * stores the channel positions of the first map matching the current channel count
2364 */
2365static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2366			     struct snd_ctl_elem_value *ucontrol)
2367{
2368	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2369	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2370	struct snd_pcm_substream *substream;
2371	const struct snd_pcm_chmap_elem *map;
2372
2373	if (!info->chmap)
2374		return -EINVAL;
2375	substream = snd_pcm_chmap_substream(info, idx);
2376	if (!substream)
2377		return -ENODEV;
2378	memset(ucontrol->value.integer.value, 0,
2379	       sizeof(long) * info->max_channels);
2380	if (!substream->runtime)
2381		return 0; /* no channels set */
2382	for (map = info->chmap; map->channels; map++) {
2383		int i;
2384		if (map->channels == substream->runtime->channels &&
2385		    valid_chmap_channels(info, map->channels)) {
2386			for (i = 0; i < map->channels; i++)
2387				ucontrol->value.integer.value[i] = map->map[i];
2388			return 0;
2389		}
2390	}
2391	return -EINVAL;
2392}
2393
2394/* tlv callback for channel map ctl element
2395 * expands the pre-defined channel maps in a form of TLV
2396 */
2397static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2398			     unsigned int size, unsigned int __user *tlv)
2399{
2400	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2401	const struct snd_pcm_chmap_elem *map;
2402	unsigned int __user *dst;
2403	int c, count = 0;
2404
2405	if (!info->chmap)
2406		return -EINVAL;
2407	if (size < 8)
2408		return -ENOMEM;
2409	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2410		return -EFAULT;
2411	size -= 8;
2412	dst = tlv + 2;
2413	for (map = info->chmap; map->channels; map++) {
2414		int chs_bytes = map->channels * 4;
2415		if (!valid_chmap_channels(info, map->channels))
2416			continue;
2417		if (size < 8)
2418			return -ENOMEM;
2419		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2420		    put_user(chs_bytes, dst + 1))
2421			return -EFAULT;
2422		dst += 2;
2423		size -= 8;
2424		count += 8;
2425		if (size < chs_bytes)
2426			return -ENOMEM;
2427		size -= chs_bytes;
2428		count += chs_bytes;
2429		for (c = 0; c < map->channels; c++) {
2430			if (put_user(map->map[c], dst))
2431				return -EFAULT;
2432			dst++;
2433		}
2434	}
2435	if (put_user(count, tlv + 1))
2436		return -EFAULT;
2437	return 0;
2438}
2439
2440static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2441{
2442	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2443	info->pcm->streams[info->stream].chmap_kctl = NULL;
2444	kfree(info);
2445}
2446
2447/**
2448 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2449 * @pcm: the assigned PCM instance
2450 * @stream: stream direction
2451 * @chmap: channel map elements (for query)
2452 * @max_channels: the max number of channels for the stream
2453 * @private_value: the value passed to each kcontrol's private_value field
2454 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2455 *
2456 * Create channel-mapping control elements assigned to the given PCM stream(s).
2457 * Return: Zero if successful, or a negative error value.
2458 */
2459int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2460			   const struct snd_pcm_chmap_elem *chmap,
2461			   int max_channels,
2462			   unsigned long private_value,
2463			   struct snd_pcm_chmap **info_ret)
2464{
2465	struct snd_pcm_chmap *info;
2466	struct snd_kcontrol_new knew = {
2467		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2468		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2469			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2470			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2471		.info = pcm_chmap_ctl_info,
2472		.get = pcm_chmap_ctl_get,
2473		.tlv.c = pcm_chmap_ctl_tlv,
2474	};
2475	int err;
2476
2477	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2478		return -EBUSY;
2479	info = kzalloc(sizeof(*info), GFP_KERNEL);
2480	if (!info)
2481		return -ENOMEM;
2482	info->pcm = pcm;
2483	info->stream = stream;
2484	info->chmap = chmap;
2485	info->max_channels = max_channels;
2486	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2487		knew.name = "Playback Channel Map";
2488	else
2489		knew.name = "Capture Channel Map";
2490	knew.device = pcm->device;
2491	knew.count = pcm->streams[stream].substream_count;
2492	knew.private_value = private_value;
2493	info->kctl = snd_ctl_new1(&knew, info);
2494	if (!info->kctl) {
2495		kfree(info);
2496		return -ENOMEM;
2497	}
2498	info->kctl->private_free = pcm_chmap_ctl_private_free;
2499	err = snd_ctl_add(pcm->card, info->kctl);
2500	if (err < 0)
2501		return err;
2502	pcm->streams[stream].chmap_kctl = info->kctl;
2503	if (info_ret)
2504		*info_ret = info;
2505	return 0;
2506}
2507EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
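
/*
 * Example: a driver with standard speaker ordering can reuse
 * snd_pcm_std_chmaps when creating the control, usually right after
 * snd_pcm_new().  A minimal sketch; the 8-channel maximum is an assumed
 * hardware limit:
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */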