Linux Audio

v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Digital Audio (PCM) abstract layer
   4 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
   5 *                   Abramo Bagnara <abramo@alsa-project.org>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/sched/signal.h>
  10#include <linux/time.h>
  11#include <linux/math64.h>
  12#include <linux/export.h>
  13#include <sound/core.h>
  14#include <sound/control.h>
  15#include <sound/tlv.h>
  16#include <sound/info.h>
  17#include <sound/pcm.h>
  18#include <sound/pcm_params.h>
  19#include <sound/timer.h>
  20
  21#include "pcm_local.h"
  22
  23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
  24#define CREATE_TRACE_POINTS
  25#include "pcm_trace.h"
  26#else
  27#define trace_hwptr(substream, pos, in_interrupt)
  28#define trace_xrun(substream)
  29#define trace_hw_ptr_error(substream, reason)
  30#define trace_applptr(substream, prev, curr)
  31#endif
  32
  33static int fill_silence_frames(struct snd_pcm_substream *substream,
  34			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
  35
  36
  37static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
  38				       snd_pcm_uframes_t ptr,
  39				       snd_pcm_uframes_t new_ptr)
  40{
  41	snd_pcm_sframes_t delta;
  42
  43	delta = new_ptr - ptr;
  44	if (delta == 0)
  45		return;
  46	if (delta < 0)
  47		delta += runtime->boundary;
  48	if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
  49		runtime->silence_filled -= delta;
  50	else
  51		runtime->silence_filled = 0;
  52	runtime->silence_start = new_ptr;
  53}
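/*
 * Worked example of the wrap-around arithmetic above (hypothetical values,
 * for illustration only): with runtime->boundary == 0x40000000,
 * ptr == 0x3fffff00 and new_ptr == 0x00000100, new_ptr - ptr is negative,
 * so the boundary is added and delta becomes 0x200 frames of progress;
 * silence_filled then shrinks by 0x200 (or is clamped to zero).
 */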
  54
  55/*
  56 * fill ring buffer with silence
  57 * runtime->silence_start: starting pointer to silence area
  58 * runtime->silence_filled: size filled with silence
  59 * runtime->silence_threshold: threshold from application
  60 * runtime->silence_size: maximal size from application
  61 *
   62 * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
  63 */
  64void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
  65{
  66	struct snd_pcm_runtime *runtime = substream->runtime;
  67	snd_pcm_uframes_t frames, ofs, transfer;
  68	int err;
  69
  70	if (runtime->silence_size < runtime->boundary) {
  71		snd_pcm_sframes_t noise_dist;
  72		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
  73		update_silence_vars(runtime, runtime->silence_start, appl_ptr);
  74		/* initialization outside pointer updates */
  75		if (new_hw_ptr == ULONG_MAX)
  76			new_hw_ptr = runtime->status->hw_ptr;
  77		/* get hw_avail with the boundary crossing */
  78		noise_dist = appl_ptr - new_hw_ptr;
  79		if (noise_dist < 0)
  80			noise_dist += runtime->boundary;
  81		/* total noise distance */
   82		noise_dist += runtime->silence_filled;
   83		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
  84			return;
  85		frames = runtime->silence_threshold - noise_dist;
  86		if (frames > runtime->silence_size)
  87			frames = runtime->silence_size;
  88	} else {
  89		/*
  90		 * This filling mode aims at free-running mode (used for example by dmix),
  91		 * which doesn't update the application pointer.
  92		 */
  93		snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
  94		if (new_hw_ptr == ULONG_MAX) {
  95			/*
  96			 * Initialization, fill the whole unused buffer with silence.
  97			 *
  98			 * Usually, this is entered while stopped, before data is queued,
  99			 * so both pointers are expected to be zero.
 100			 */
 101			snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
 102			if (avail < 0)
 103				avail += runtime->boundary;
 104			/*
 105			 * In free-running mode, appl_ptr will be zero even while running,
 106			 * so we end up with a huge number. There is no useful way to
 107			 * handle this, so we just clear the whole buffer.
 108			 */
 109			runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
 110			runtime->silence_start = hw_ptr;
 111		} else {
 112			/* Silence the just played area immediately */
  113			update_silence_vars(runtime, hw_ptr, new_hw_ptr);
  114		}
 115		/*
 116		 * In this mode, silence_filled actually includes the valid
 117		 * sample data from the user.
 118		 */
 119		frames = runtime->buffer_size - runtime->silence_filled;
 120	}
 121	if (snd_BUG_ON(frames > runtime->buffer_size))
 122		return;
 123	if (frames == 0)
 124		return;
 125	ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
 126	do {
 127		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
 128		err = fill_silence_frames(substream, ofs, transfer);
 129		snd_BUG_ON(err < 0);
 130		runtime->silence_filled += transfer;
 131		frames -= transfer;
 132		ofs = 0;
 133	} while (frames > 0);
 134	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 135}
 136
 137#ifdef CONFIG_SND_DEBUG
 138void snd_pcm_debug_name(struct snd_pcm_substream *substream,
 139			   char *name, size_t len)
 140{
 141	snprintf(name, len, "pcmC%dD%d%c:%d",
 142		 substream->pcm->card->number,
 143		 substream->pcm->device,
 144		 substream->stream ? 'c' : 'p',
 145		 substream->number);
 146}
 147EXPORT_SYMBOL(snd_pcm_debug_name);
 148#endif
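/*
 * For example, with the format string above the first playback substream of
 * device 0 on card 0 is reported as "pcmC0D0p:0"; a capture substream would
 * use 'c' in place of 'p'.
 */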
 149
 150#define XRUN_DEBUG_BASIC	(1<<0)
 151#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
 152#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
 153
 154#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 155
 156#define xrun_debug(substream, mask) \
 157			((substream)->pstr->xrun_debug & (mask))
 158#else
 159#define xrun_debug(substream, mask)	0
 160#endif
 161
 162#define dump_stack_on_xrun(substream) do {			\
 163		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
 164			dump_stack();				\
 165	} while (0)
 166
 167/* call with stream lock held */
 168void __snd_pcm_xrun(struct snd_pcm_substream *substream)
 169{
 170	struct snd_pcm_runtime *runtime = substream->runtime;
 171
 172	trace_xrun(substream);
 173	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 174		struct timespec64 tstamp;
 175
 176		snd_pcm_gettime(runtime, &tstamp);
 177		runtime->status->tstamp.tv_sec = tstamp.tv_sec;
 178		runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
 179	}
 180	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 181	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
 182		char name[16];
 183		snd_pcm_debug_name(substream, name, sizeof(name));
 184		pcm_warn(substream->pcm, "XRUN: %s\n", name);
 185		dump_stack_on_xrun(substream);
 186	}
 187#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 188	substream->xrun_counter++;
 189#endif
 190}
 191
 192#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 193#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
 194	do {								\
 195		trace_hw_ptr_error(substream, reason);	\
 196		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
 197			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
 198					   (in_interrupt) ? 'Q' : 'P', ##args);	\
 199			dump_stack_on_xrun(substream);			\
 200		}							\
 201	} while (0)
 202
 203#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
 204
 205#define hw_ptr_error(substream, fmt, args...) do { } while (0)
 206
 207#endif
 208
 209int snd_pcm_update_state(struct snd_pcm_substream *substream,
 210			 struct snd_pcm_runtime *runtime)
 211{
 212	snd_pcm_uframes_t avail;
 213
 214	avail = snd_pcm_avail(substream);
 215	if (avail > runtime->avail_max)
 216		runtime->avail_max = avail;
 217	if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
 218		if (avail >= runtime->buffer_size) {
 219			snd_pcm_drain_done(substream);
 220			return -EPIPE;
 221		}
 222	} else {
 223		if (avail >= runtime->stop_threshold) {
 224			__snd_pcm_xrun(substream);
 225			return -EPIPE;
 226		}
 227	}
 228	if (runtime->twake) {
 229		if (avail >= runtime->twake)
 230			wake_up(&runtime->tsleep);
 231	} else if (avail >= runtime->control->avail_min)
 232		wake_up(&runtime->sleep);
 233	return 0;
 234}
 235
 236static void update_audio_tstamp(struct snd_pcm_substream *substream,
 237				struct timespec64 *curr_tstamp,
 238				struct timespec64 *audio_tstamp)
 239{
 240	struct snd_pcm_runtime *runtime = substream->runtime;
 241	u64 audio_frames, audio_nsecs;
 242	struct timespec64 driver_tstamp;
 243
 244	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
 245		return;
 246
 247	if (!(substream->ops->get_time_info) ||
 248		(runtime->audio_tstamp_report.actual_type ==
 249			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 250
 251		/*
 252		 * provide audio timestamp derived from pointer position
 253		 * add delay only if requested
 254		 */
 255
 256		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
 257
 258		if (runtime->audio_tstamp_config.report_delay) {
 259			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 260				audio_frames -=  runtime->delay;
 261			else
 262				audio_frames +=  runtime->delay;
 263		}
 264		audio_nsecs = div_u64(audio_frames * 1000000000LL,
 265				runtime->rate);
 266		*audio_tstamp = ns_to_timespec64(audio_nsecs);
 267	}
 268
 269	if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
 270	    runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
 271		runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
 272		runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
 273		runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
 274		runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
 275	}
 276
 277
 278	/*
 279	 * re-take a driver timestamp to let apps detect if the reference tstamp
 280	 * read by low-level hardware was provided with a delay
 281	 */
 282	snd_pcm_gettime(substream->runtime, &driver_tstamp);
 283	runtime->driver_tstamp = driver_tstamp;
 284}
 285
 286static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 287				  unsigned int in_interrupt)
 288{
 289	struct snd_pcm_runtime *runtime = substream->runtime;
 290	snd_pcm_uframes_t pos;
 291	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
 292	snd_pcm_sframes_t hdelta, delta;
 293	unsigned long jdelta;
 294	unsigned long curr_jiffies;
 295	struct timespec64 curr_tstamp;
 296	struct timespec64 audio_tstamp;
 297	int crossed_boundary = 0;
 298
 299	old_hw_ptr = runtime->status->hw_ptr;
 300
 301	/*
 302	 * group pointer, time and jiffies reads to allow for more
 303	 * accurate correlations/corrections.
 304	 * The values are stored at the end of this routine after
 305	 * corrections for hw_ptr position
 306	 */
 307	pos = substream->ops->pointer(substream);
 308	curr_jiffies = jiffies;
 309	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 310		if ((substream->ops->get_time_info) &&
 311			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 312			substream->ops->get_time_info(substream, &curr_tstamp,
 313						&audio_tstamp,
 314						&runtime->audio_tstamp_config,
 315						&runtime->audio_tstamp_report);
 316
 317			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
 318			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
 319				snd_pcm_gettime(runtime, &curr_tstamp);
 320		} else
 321			snd_pcm_gettime(runtime, &curr_tstamp);
 322	}
 323
 324	if (pos == SNDRV_PCM_POS_XRUN) {
 325		__snd_pcm_xrun(substream);
 326		return -EPIPE;
 327	}
 328	if (pos >= runtime->buffer_size) {
 329		if (printk_ratelimit()) {
 330			char name[16];
 331			snd_pcm_debug_name(substream, name, sizeof(name));
 332			pcm_err(substream->pcm,
 333				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
 334				name, pos, runtime->buffer_size,
 335				runtime->period_size);
 336		}
 337		pos = 0;
 338	}
 339	pos -= pos % runtime->min_align;
 340	trace_hwptr(substream, pos, in_interrupt);
 341	hw_base = runtime->hw_ptr_base;
 342	new_hw_ptr = hw_base + pos;
 343	if (in_interrupt) {
 344		/* we know that one period was processed */
 345		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
 346		delta = runtime->hw_ptr_interrupt + runtime->period_size;
 347		if (delta > new_hw_ptr) {
 348			/* check for double acknowledged interrupts */
 349			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 350			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
 351				hw_base += runtime->buffer_size;
 352				if (hw_base >= runtime->boundary) {
 353					hw_base = 0;
 354					crossed_boundary++;
 355				}
 356				new_hw_ptr = hw_base + pos;
 357				goto __delta;
 358			}
 359		}
 360	}
  361	/* new_hw_ptr might be lower than old_hw_ptr when the */
  362	/* pointer crosses the end of the ring buffer */
 363	if (new_hw_ptr < old_hw_ptr) {
 364		hw_base += runtime->buffer_size;
 365		if (hw_base >= runtime->boundary) {
 366			hw_base = 0;
 367			crossed_boundary++;
 368		}
 369		new_hw_ptr = hw_base + pos;
 370	}
 371      __delta:
 372	delta = new_hw_ptr - old_hw_ptr;
 373	if (delta < 0)
 374		delta += runtime->boundary;
 375
 376	if (runtime->no_period_wakeup) {
 377		snd_pcm_sframes_t xrun_threshold;
 378		/*
 379		 * Without regular period interrupts, we have to check
 380		 * the elapsed time to detect xruns.
 381		 */
 382		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 383		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
 384			goto no_delta_check;
 385		hdelta = jdelta - delta * HZ / runtime->rate;
 386		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
 387		while (hdelta > xrun_threshold) {
 388			delta += runtime->buffer_size;
 389			hw_base += runtime->buffer_size;
 390			if (hw_base >= runtime->boundary) {
 391				hw_base = 0;
 392				crossed_boundary++;
 393			}
 394			new_hw_ptr = hw_base + pos;
 395			hdelta -= runtime->hw_ptr_buffer_jiffies;
 396		}
 397		goto no_delta_check;
 398	}
 399
 400	/* something must be really wrong */
 401	if (delta >= runtime->buffer_size + runtime->period_size) {
 402		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
 403			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 404			     substream->stream, (long)pos,
 405			     (long)new_hw_ptr, (long)old_hw_ptr);
 406		return 0;
 407	}
 408
 409	/* Do jiffies check only in xrun_debug mode */
 410	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
 411		goto no_jiffies_check;
 412
  413	/* Skip the jiffies check for hardware with the BATCH flag.
 414	 * Such hardware usually just increases the position at each IRQ,
 415	 * thus it can't give any strange position.
 416	 */
 417	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
 418		goto no_jiffies_check;
 419	hdelta = delta;
 420	if (hdelta < runtime->delay)
 421		goto no_jiffies_check;
 422	hdelta -= runtime->delay;
 423	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 424	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 425		delta = jdelta /
 426			(((runtime->period_size * HZ) / runtime->rate)
 427								+ HZ/100);
  428		/* move new_hw_ptr according to jiffies, not the pos variable */
 429		new_hw_ptr = old_hw_ptr;
 430		hw_base = delta;
 431		/* use loop to avoid checks for delta overflows */
 432		/* the delta value is small or zero in most cases */
 433		while (delta > 0) {
 434			new_hw_ptr += runtime->period_size;
 435			if (new_hw_ptr >= runtime->boundary) {
 436				new_hw_ptr -= runtime->boundary;
 437				crossed_boundary--;
 438			}
 439			delta--;
 440		}
 441		/* align hw_base to buffer_size */
 442		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
 443			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
 444			     (long)pos, (long)hdelta,
 445			     (long)runtime->period_size, jdelta,
 446			     ((hdelta * HZ) / runtime->rate), hw_base,
 447			     (unsigned long)old_hw_ptr,
 448			     (unsigned long)new_hw_ptr);
 449		/* reset values to proper state */
 450		delta = 0;
 451		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
 452	}
 453 no_jiffies_check:
 454	if (delta > runtime->period_size + runtime->period_size / 2) {
 455		hw_ptr_error(substream, in_interrupt,
 456			     "Lost interrupts?",
 457			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 458			     substream->stream, (long)delta,
 459			     (long)new_hw_ptr,
 460			     (long)old_hw_ptr);
 461	}
 462
 463 no_delta_check:
 464	if (runtime->status->hw_ptr == new_hw_ptr) {
 465		runtime->hw_ptr_jiffies = curr_jiffies;
 466		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 467		return 0;
 468	}
 469
 470	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 471	    runtime->silence_size > 0)
 472		snd_pcm_playback_silence(substream, new_hw_ptr);
 473
 474	if (in_interrupt) {
 475		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
 476		if (delta < 0)
 477			delta += runtime->boundary;
 478		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
 479		runtime->hw_ptr_interrupt += delta;
 480		if (runtime->hw_ptr_interrupt >= runtime->boundary)
 481			runtime->hw_ptr_interrupt -= runtime->boundary;
 482	}
 483	runtime->hw_ptr_base = hw_base;
 484	runtime->status->hw_ptr = new_hw_ptr;
 485	runtime->hw_ptr_jiffies = curr_jiffies;
 486	if (crossed_boundary) {
 487		snd_BUG_ON(crossed_boundary != 1);
 488		runtime->hw_ptr_wrap += runtime->boundary;
 489	}
 490
 491	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 492
 493	return snd_pcm_update_state(substream, runtime);
 494}
 495
 496/* CAUTION: call it with irq disabled */
 497int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
 498{
 499	return snd_pcm_update_hw_ptr0(substream, 0);
 500}
 501
 502/**
 503 * snd_pcm_set_ops - set the PCM operators
 504 * @pcm: the pcm instance
 505 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 506 * @ops: the operator table
 507 *
 508 * Sets the given PCM operators to the pcm instance.
 509 */
 510void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
 511		     const struct snd_pcm_ops *ops)
 512{
 513	struct snd_pcm_str *stream = &pcm->streams[direction];
 514	struct snd_pcm_substream *substream;
 515	
 516	for (substream = stream->substream; substream != NULL; substream = substream->next)
 517		substream->ops = ops;
 518}
 519EXPORT_SYMBOL(snd_pcm_set_ops);
 520
 521/**
 522 * snd_pcm_set_sync_per_card - set the PCM sync id with card number
 523 * @substream: the pcm substream
 524 * @params: modified hardware parameters
 525 * @id: identifier (max 12 bytes)
 526 * @len: identifier length (max 12 bytes)
 527 *
 528 * Sets the PCM sync identifier for the card with zero padding.
 529 *
  530 * User space (or any other user) should use this 16-byte identifier only for
  531 * comparison, to check whether two IDs are equal or different. The special case
  532 * of an identifier containing only zeros means "empty" (not set).
  533 * The contents of the identifier should not be interpreted in any other way.
 534 *
  535 * The synchronization ID must be unique per clock source (usually one sound card,
  536 * but multiple sound cards may share one PCM word clock source, which means that
  537 * they are fully synchronized).
  538 *
  539 * This routine composes the ID from the card number in the first four bytes and
  540 * a 12-byte additional ID. When another ID composition scheme is used (e.g. for
  541 * multiple sound cards), make sure that it does not clash with this
  542 * composition scheme.
 543 */
 544void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
 545			       struct snd_pcm_hw_params *params,
 546			       const unsigned char *id, unsigned int len)
 547{
 548	*(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
 549	len = min(12, len);
 550	memcpy(params->sync + 4, id, len);
 551	memset(params->sync + 4 + len, 0, 12 - len);
 552}
 553EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);
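/*
 * Illustrative sketch (hypothetical driver; the foo_* names are placeholders,
 * not taken from the tree): a hw_params callback could publish a per-card
 * sync ID built from a fixed 12-byte tag; the card number is filled in by the
 * helper itself:
 *
 *	static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
 *				     struct snd_pcm_hw_params *params)
 *	{
 *		static const unsigned char foo_sync_tag[12] = "foo-wordclk";
 *
 *		snd_pcm_set_sync_per_card(substream, params, foo_sync_tag,
 *					  sizeof(foo_sync_tag));
 *		return 0;
 *	}
 */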
 554
 555/*
 556 *  Standard ioctl routine
 557 */
 558
 559static inline unsigned int div32(unsigned int a, unsigned int b, 
 560				 unsigned int *r)
 561{
 562	if (b == 0) {
 563		*r = 0;
 564		return UINT_MAX;
 565	}
 566	*r = a % b;
 567	return a / b;
 568}
 569
 570static inline unsigned int div_down(unsigned int a, unsigned int b)
 571{
 572	if (b == 0)
 573		return UINT_MAX;
 574	return a / b;
 575}
 576
 577static inline unsigned int div_up(unsigned int a, unsigned int b)
 578{
 579	unsigned int r;
 580	unsigned int q;
 581	if (b == 0)
 582		return UINT_MAX;
 583	q = div32(a, b, &r);
 584	if (r)
 585		++q;
 586	return q;
 587}
 588
 589static inline unsigned int mul(unsigned int a, unsigned int b)
 590{
 591	if (a == 0)
 592		return 0;
 593	if (div_down(UINT_MAX, a) < b)
 594		return UINT_MAX;
 595	return a * b;
 596}
 597
 598static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 599				    unsigned int c, unsigned int *r)
 600{
 601	u_int64_t n = (u_int64_t) a * b;
 602	if (c == 0) {
 603		*r = 0;
 604		return UINT_MAX;
 605	}
 606	n = div_u64_rem(n, c, r);
 607	if (n >= UINT_MAX) {
 608		*r = 0;
 609		return UINT_MAX;
 610	}
 611	return n;
 612}
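/*
 * Worked example: muldiv32(48000, 100000, 1000, &r) computes the product in
 * 64 bits (4,800,000,000, which would overflow a 32-bit multiplication) and
 * returns 4,800,000 with r == 0; a zero divisor or an over-range quotient
 * saturates to UINT_MAX with r == 0.
 */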
 613
 614/**
 615 * snd_interval_refine - refine the interval value of configurator
 616 * @i: the interval value to refine
 617 * @v: the interval value to refer to
 618 *
 619 * Refines the interval value with the reference value.
 620 * The interval is changed to the range satisfying both intervals.
  621 * The interval status (min, max, integer, etc.) is evaluated.
 622 *
 623 * Return: Positive if the value is changed, zero if it's not changed, or a
 624 * negative error code.
 625 */
 626int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
 627{
 628	int changed = 0;
 629	if (snd_BUG_ON(snd_interval_empty(i)))
 630		return -EINVAL;
 631	if (i->min < v->min) {
 632		i->min = v->min;
 633		i->openmin = v->openmin;
 634		changed = 1;
 635	} else if (i->min == v->min && !i->openmin && v->openmin) {
 636		i->openmin = 1;
 637		changed = 1;
 638	}
 639	if (i->max > v->max) {
 640		i->max = v->max;
 641		i->openmax = v->openmax;
 642		changed = 1;
 643	} else if (i->max == v->max && !i->openmax && v->openmax) {
 644		i->openmax = 1;
 645		changed = 1;
 646	}
 647	if (!i->integer && v->integer) {
 648		i->integer = 1;
 649		changed = 1;
 650	}
 651	if (i->integer) {
 652		if (i->openmin) {
 653			i->min++;
 654			i->openmin = 0;
 655		}
 656		if (i->openmax) {
 657			i->max--;
 658			i->openmax = 0;
 659		}
 660	} else if (!i->openmin && !i->openmax && i->min == i->max)
 661		i->integer = 1;
 662	if (snd_interval_checkempty(i)) {
 663		snd_interval_none(i);
 664		return -EINVAL;
 665	}
 666	return changed;
 667}
 668EXPORT_SYMBOL(snd_interval_refine);
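/*
 * Worked example: refining i = [8000, 48000] with v = [44100, 96000] raises
 * i->min to 44100, leaves i->max at 48000 and returns 1 (changed); refining
 * the resulting [44100, 48000] with the same v again returns 0.
 */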
 669
 670static int snd_interval_refine_first(struct snd_interval *i)
 671{
 672	const unsigned int last_max = i->max;
 673
 674	if (snd_BUG_ON(snd_interval_empty(i)))
 675		return -EINVAL;
 676	if (snd_interval_single(i))
 677		return 0;
 678	i->max = i->min;
 679	if (i->openmin)
 680		i->max++;
 681	/* only exclude max value if also excluded before refine */
 682	i->openmax = (i->openmax && i->max >= last_max);
 683	return 1;
 684}
 685
 686static int snd_interval_refine_last(struct snd_interval *i)
 687{
 688	const unsigned int last_min = i->min;
 689
 690	if (snd_BUG_ON(snd_interval_empty(i)))
 691		return -EINVAL;
 692	if (snd_interval_single(i))
 693		return 0;
 694	i->min = i->max;
 695	if (i->openmax)
 696		i->min--;
 697	/* only exclude min value if also excluded before refine */
 698	i->openmin = (i->openmin && i->min <= last_min);
 699	return 1;
 700}
 701
 702void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 703{
 704	if (a->empty || b->empty) {
 705		snd_interval_none(c);
 706		return;
 707	}
 708	c->empty = 0;
 709	c->min = mul(a->min, b->min);
 710	c->openmin = (a->openmin || b->openmin);
 711	c->max = mul(a->max,  b->max);
 712	c->openmax = (a->openmax || b->openmax);
 713	c->integer = (a->integer && b->integer);
 714}
 715
 716/**
 717 * snd_interval_div - refine the interval value with division
 718 * @a: dividend
 719 * @b: divisor
 720 * @c: quotient
 721 *
 722 * c = a / b
 723 *
  724 * The result is stored in @c; this function does not return a value.
 725 */
 726void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 727{
 728	unsigned int r;
 729	if (a->empty || b->empty) {
 730		snd_interval_none(c);
 731		return;
 732	}
 733	c->empty = 0;
 734	c->min = div32(a->min, b->max, &r);
 735	c->openmin = (r || a->openmin || b->openmax);
 736	if (b->min > 0) {
 737		c->max = div32(a->max, b->min, &r);
 738		if (r) {
 739			c->max++;
 740			c->openmax = 1;
 741		} else
 742			c->openmax = (a->openmax || b->openmin);
 743	} else {
 744		c->max = UINT_MAX;
 745		c->openmax = 0;
 746	}
 747	c->integer = 0;
 748}
 749
 750/**
 751 * snd_interval_muldivk - refine the interval value
 752 * @a: dividend 1
 753 * @b: dividend 2
 754 * @k: divisor (as integer)
 755 * @c: result
  756 *
  757 * c = a * b / k
  758 *
  759 * The result is stored in @c; this function does not return a value.
 760 */
 761void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
 762		      unsigned int k, struct snd_interval *c)
 763{
 764	unsigned int r;
 765	if (a->empty || b->empty) {
 766		snd_interval_none(c);
 767		return;
 768	}
 769	c->empty = 0;
 770	c->min = muldiv32(a->min, b->min, k, &r);
 771	c->openmin = (r || a->openmin || b->openmin);
 772	c->max = muldiv32(a->max, b->max, k, &r);
 773	if (r) {
 774		c->max++;
 775		c->openmax = 1;
 776	} else
 777		c->openmax = (a->openmax || b->openmax);
 778	c->integer = 0;
 779}
 780
 781/**
 782 * snd_interval_mulkdiv - refine the interval value
 783 * @a: dividend 1
 784 * @k: dividend 2 (as integer)
 785 * @b: divisor
 786 * @c: result
 787 *
 788 * c = a * k / b
 789 *
  790 * The result is stored in @c; this function does not return a value.
 791 */
 792void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
 793		      const struct snd_interval *b, struct snd_interval *c)
 794{
 795	unsigned int r;
 796	if (a->empty || b->empty) {
 797		snd_interval_none(c);
 798		return;
 799	}
 800	c->empty = 0;
 801	c->min = muldiv32(a->min, k, b->max, &r);
 802	c->openmin = (r || a->openmin || b->openmax);
 803	if (b->min > 0) {
 804		c->max = muldiv32(a->max, k, b->min, &r);
 805		if (r) {
 806			c->max++;
 807			c->openmax = 1;
 808		} else
 809			c->openmax = (a->openmax || b->openmin);
 810	} else {
 811		c->max = UINT_MAX;
 812		c->openmax = 0;
 813	}
 814	c->integer = 0;
 815}
 816
 817/* ---- */
 818
 819
 820/**
 821 * snd_interval_ratnum - refine the interval value
 822 * @i: interval to refine
  823 * @rats_count: number of struct snd_ratnum entries
  824 * @rats: struct snd_ratnum array
 825 * @nump: pointer to store the resultant numerator
 826 * @denp: pointer to store the resultant denominator
 827 *
 828 * Return: Positive if the value is changed, zero if it's not changed, or a
 829 * negative error code.
 830 */
 831int snd_interval_ratnum(struct snd_interval *i,
 832			unsigned int rats_count, const struct snd_ratnum *rats,
 833			unsigned int *nump, unsigned int *denp)
 834{
 835	unsigned int best_num, best_den;
 836	int best_diff;
 837	unsigned int k;
 838	struct snd_interval t;
 839	int err;
 840	unsigned int result_num, result_den;
 841	int result_diff;
 842
 843	best_num = best_den = best_diff = 0;
 844	for (k = 0; k < rats_count; ++k) {
 845		unsigned int num = rats[k].num;
 846		unsigned int den;
 847		unsigned int q = i->min;
 848		int diff;
 849		if (q == 0)
 850			q = 1;
 851		den = div_up(num, q);
 852		if (den < rats[k].den_min)
 853			continue;
 854		if (den > rats[k].den_max)
 855			den = rats[k].den_max;
 856		else {
 857			unsigned int r;
 858			r = (den - rats[k].den_min) % rats[k].den_step;
 859			if (r != 0)
 860				den -= r;
 861		}
 862		diff = num - q * den;
 863		if (diff < 0)
 864			diff = -diff;
 865		if (best_num == 0 ||
 866		    diff * best_den < best_diff * den) {
 867			best_diff = diff;
 868			best_den = den;
 869			best_num = num;
 870		}
 871	}
 872	if (best_den == 0) {
 873		i->empty = 1;
 874		return -EINVAL;
 875	}
 876	t.min = div_down(best_num, best_den);
 877	t.openmin = !!(best_num % best_den);
 878	
 879	result_num = best_num;
 880	result_diff = best_diff;
 881	result_den = best_den;
 882	best_num = best_den = best_diff = 0;
 883	for (k = 0; k < rats_count; ++k) {
 884		unsigned int num = rats[k].num;
 885		unsigned int den;
 886		unsigned int q = i->max;
 887		int diff;
 888		if (q == 0) {
 889			i->empty = 1;
 890			return -EINVAL;
 891		}
 892		den = div_down(num, q);
 893		if (den > rats[k].den_max)
 894			continue;
 895		if (den < rats[k].den_min)
 896			den = rats[k].den_min;
 897		else {
 898			unsigned int r;
 899			r = (den - rats[k].den_min) % rats[k].den_step;
 900			if (r != 0)
 901				den += rats[k].den_step - r;
 902		}
 903		diff = q * den - num;
 904		if (diff < 0)
 905			diff = -diff;
 906		if (best_num == 0 ||
 907		    diff * best_den < best_diff * den) {
 908			best_diff = diff;
 909			best_den = den;
 910			best_num = num;
 911		}
 912	}
 913	if (best_den == 0) {
 914		i->empty = 1;
 915		return -EINVAL;
 916	}
 917	t.max = div_up(best_num, best_den);
 918	t.openmax = !!(best_num % best_den);
 919	t.integer = 0;
 920	err = snd_interval_refine(i, &t);
 921	if (err < 0)
 922		return err;
 923
 924	if (snd_interval_single(i)) {
 925		if (best_diff * result_den < result_diff * best_den) {
 926			result_num = best_num;
 927			result_den = best_den;
 928		}
 929		if (nump)
 930			*nump = result_num;
 931		if (denp)
 932			*denp = result_den;
 933	}
 934	return err;
 935}
 936EXPORT_SYMBOL(snd_interval_ratnum);
 937
 938/**
 939 * snd_interval_ratden - refine the interval value
 940 * @i: interval to refine
 941 * @rats_count: number of struct ratden
 942 * @rats: struct ratden array
 943 * @nump: pointer to store the resultant numerator
 944 * @denp: pointer to store the resultant denominator
 945 *
 946 * Return: Positive if the value is changed, zero if it's not changed, or a
 947 * negative error code.
 948 */
 949static int snd_interval_ratden(struct snd_interval *i,
 950			       unsigned int rats_count,
 951			       const struct snd_ratden *rats,
 952			       unsigned int *nump, unsigned int *denp)
 953{
 954	unsigned int best_num, best_diff, best_den;
 955	unsigned int k;
 956	struct snd_interval t;
 957	int err;
 958
 959	best_num = best_den = best_diff = 0;
 960	for (k = 0; k < rats_count; ++k) {
 961		unsigned int num;
 962		unsigned int den = rats[k].den;
 963		unsigned int q = i->min;
 964		int diff;
 965		num = mul(q, den);
 966		if (num > rats[k].num_max)
 967			continue;
 968		if (num < rats[k].num_min)
 969			num = rats[k].num_max;
 970		else {
 971			unsigned int r;
 972			r = (num - rats[k].num_min) % rats[k].num_step;
 973			if (r != 0)
 974				num += rats[k].num_step - r;
 975		}
 976		diff = num - q * den;
 977		if (best_num == 0 ||
 978		    diff * best_den < best_diff * den) {
 979			best_diff = diff;
 980			best_den = den;
 981			best_num = num;
 982		}
 983	}
 984	if (best_den == 0) {
 985		i->empty = 1;
 986		return -EINVAL;
 987	}
 988	t.min = div_down(best_num, best_den);
 989	t.openmin = !!(best_num % best_den);
 990	
 991	best_num = best_den = best_diff = 0;
 992	for (k = 0; k < rats_count; ++k) {
 993		unsigned int num;
 994		unsigned int den = rats[k].den;
 995		unsigned int q = i->max;
 996		int diff;
 997		num = mul(q, den);
 998		if (num < rats[k].num_min)
 999			continue;
1000		if (num > rats[k].num_max)
1001			num = rats[k].num_max;
1002		else {
1003			unsigned int r;
1004			r = (num - rats[k].num_min) % rats[k].num_step;
1005			if (r != 0)
1006				num -= r;
1007		}
1008		diff = q * den - num;
1009		if (best_num == 0 ||
1010		    diff * best_den < best_diff * den) {
1011			best_diff = diff;
1012			best_den = den;
1013			best_num = num;
1014		}
1015	}
1016	if (best_den == 0) {
1017		i->empty = 1;
1018		return -EINVAL;
1019	}
1020	t.max = div_up(best_num, best_den);
1021	t.openmax = !!(best_num % best_den);
1022	t.integer = 0;
1023	err = snd_interval_refine(i, &t);
1024	if (err < 0)
1025		return err;
1026
1027	if (snd_interval_single(i)) {
1028		if (nump)
1029			*nump = best_num;
1030		if (denp)
1031			*denp = best_den;
1032	}
1033	return err;
1034}
1035
1036/**
1037 * snd_interval_list - refine the interval value from the list
1038 * @i: the interval value to refine
1039 * @count: the number of elements in the list
1040 * @list: the value list
1041 * @mask: the bit-mask to evaluate
1042 *
1043 * Refines the interval value from the list.
 1044 * When mask is non-zero, only the elements whose corresponding bits in the
 1045 * mask are set are evaluated.
1046 *
1047 * Return: Positive if the value is changed, zero if it's not changed, or a
1048 * negative error code.
1049 */
1050int snd_interval_list(struct snd_interval *i, unsigned int count,
1051		      const unsigned int *list, unsigned int mask)
1052{
1053        unsigned int k;
1054	struct snd_interval list_range;
1055
1056	if (!count) {
1057		i->empty = 1;
1058		return -EINVAL;
1059	}
1060	snd_interval_any(&list_range);
1061	list_range.min = UINT_MAX;
1062	list_range.max = 0;
1063        for (k = 0; k < count; k++) {
1064		if (mask && !(mask & (1 << k)))
1065			continue;
1066		if (!snd_interval_test(i, list[k]))
1067			continue;
1068		list_range.min = min(list_range.min, list[k]);
1069		list_range.max = max(list_range.max, list[k]);
1070        }
1071	return snd_interval_refine(i, &list_range);
1072}
1073EXPORT_SYMBOL(snd_interval_list);
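/*
 * Worked example: with i = [8000, 48000] and list = { 5512, 44100, 96000 }
 * (mask == 0), only 44100 passes snd_interval_test(), so the interval is
 * refined to the single value 44100 and a positive value is returned.
 */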
1074
1075/**
1076 * snd_interval_ranges - refine the interval value from the list of ranges
1077 * @i: the interval value to refine
1078 * @count: the number of elements in the list of ranges
1079 * @ranges: the ranges list
1080 * @mask: the bit-mask to evaluate
1081 *
1082 * Refines the interval value from the list of ranges.
 1083 * When mask is non-zero, only the elements whose corresponding bits in the
 1084 * mask are set are evaluated.
1085 *
1086 * Return: Positive if the value is changed, zero if it's not changed, or a
1087 * negative error code.
1088 */
1089int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1090			const struct snd_interval *ranges, unsigned int mask)
1091{
1092	unsigned int k;
1093	struct snd_interval range_union;
1094	struct snd_interval range;
1095
1096	if (!count) {
1097		snd_interval_none(i);
1098		return -EINVAL;
1099	}
1100	snd_interval_any(&range_union);
1101	range_union.min = UINT_MAX;
1102	range_union.max = 0;
1103	for (k = 0; k < count; k++) {
1104		if (mask && !(mask & (1 << k)))
1105			continue;
1106		snd_interval_copy(&range, &ranges[k]);
1107		if (snd_interval_refine(&range, i) < 0)
1108			continue;
1109		if (snd_interval_empty(&range))
1110			continue;
1111
1112		if (range.min < range_union.min) {
1113			range_union.min = range.min;
1114			range_union.openmin = 1;
1115		}
1116		if (range.min == range_union.min && !range.openmin)
1117			range_union.openmin = 0;
1118		if (range.max > range_union.max) {
1119			range_union.max = range.max;
1120			range_union.openmax = 1;
1121		}
1122		if (range.max == range_union.max && !range.openmax)
1123			range_union.openmax = 0;
1124	}
1125	return snd_interval_refine(i, &range_union);
1126}
1127EXPORT_SYMBOL(snd_interval_ranges);
1128
1129static int snd_interval_step(struct snd_interval *i, unsigned int step)
1130{
1131	unsigned int n;
1132	int changed = 0;
1133	n = i->min % step;
1134	if (n != 0 || i->openmin) {
1135		i->min += step - n;
1136		i->openmin = 0;
1137		changed = 1;
1138	}
1139	n = i->max % step;
1140	if (n != 0 || i->openmax) {
1141		i->max -= n;
1142		i->openmax = 0;
1143		changed = 1;
1144	}
1145	if (snd_interval_checkempty(i)) {
1146		i->empty = 1;
1147		return -EINVAL;
1148	}
1149	return changed;
1150}
1151
1152/* Info constraints helpers */
1153
1154/**
1155 * snd_pcm_hw_rule_add - add the hw-constraint rule
1156 * @runtime: the pcm runtime instance
1157 * @cond: condition bits
1158 * @var: the variable to evaluate
1159 * @func: the evaluation function
1160 * @private: the private data pointer passed to function
1161 * @dep: the dependent variables
1162 *
1163 * Return: Zero if successful, or a negative error code on failure.
1164 */
1165int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1166			int var,
1167			snd_pcm_hw_rule_func_t func, void *private,
1168			int dep, ...)
1169{
1170	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1171	struct snd_pcm_hw_rule *c;
1172	unsigned int k;
1173	va_list args;
1174	va_start(args, dep);
1175	if (constrs->rules_num >= constrs->rules_all) {
1176		struct snd_pcm_hw_rule *new;
1177		unsigned int new_rules = constrs->rules_all + 16;
1178		new = krealloc_array(constrs->rules, new_rules,
1179				     sizeof(*c), GFP_KERNEL);
1180		if (!new) {
1181			va_end(args);
1182			return -ENOMEM;
1183		}
1184		constrs->rules = new;
1185		constrs->rules_all = new_rules;
1186	}
1187	c = &constrs->rules[constrs->rules_num];
1188	c->cond = cond;
1189	c->func = func;
1190	c->var = var;
1191	c->private = private;
1192	k = 0;
1193	while (1) {
1194		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1195			va_end(args);
1196			return -EINVAL;
1197		}
1198		c->deps[k++] = dep;
1199		if (dep < 0)
1200			break;
1201		dep = va_arg(args, int);
1202	}
1203	constrs->rules_num++;
1204	va_end(args);
1205	return 0;
1206}
1207EXPORT_SYMBOL(snd_pcm_hw_rule_add);
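/*
 * Illustrative sketch (hypothetical driver; the foo_* names are placeholders,
 * not taken from the tree): a custom rule that limits the channel count to
 * two whenever the rate is above 96 kHz, registered from the substream open
 * callback:
 *
 *	static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					     struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (r->min > 96000)
 *			return snd_interval_refine(c, &t);
 *		return 0;
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  foo_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */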
1208
1209/**
1210 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the mask
1213 * @mask: the bitmap mask
1214 *
1215 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1216 *
1217 * Return: Zero if successful, or a negative error code on failure.
1218 */
1219int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1220			       u_int32_t mask)
1221{
1222	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223	struct snd_mask *maskp = constrs_mask(constrs, var);
1224	*maskp->bits &= mask;
1225	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1226	if (*maskp->bits == 0)
1227		return -EINVAL;
1228	return 0;
1229}
1230
1231/**
1232 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1233 * @runtime: PCM runtime instance
1234 * @var: hw_params variable to apply the mask
1235 * @mask: the 64bit bitmap mask
1236 *
1237 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1238 *
1239 * Return: Zero if successful, or a negative error code on failure.
1240 */
1241int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1242				 u_int64_t mask)
1243{
1244	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1245	struct snd_mask *maskp = constrs_mask(constrs, var);
1246	maskp->bits[0] &= (u_int32_t)mask;
1247	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1248	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1249	if (! maskp->bits[0] && ! maskp->bits[1])
1250		return -EINVAL;
1251	return 0;
1252}
1253EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1254
1255/**
1256 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1257 * @runtime: PCM runtime instance
1258 * @var: hw_params variable to apply the integer constraint
1259 *
1260 * Apply the constraint of integer to an interval parameter.
1261 *
1262 * Return: Positive if the value is changed, zero if it's not changed, or a
1263 * negative error code.
1264 */
1265int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1266{
1267	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268	return snd_interval_setinteger(constrs_interval(constrs, var));
1269}
1270EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
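/*
 * A common driver idiom (illustrative, not part of this file) is to force the
 * buffer to contain a whole number of periods:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime,
 *					    SNDRV_PCM_HW_PARAM_PERIODS);
 */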
1271
1272/**
1273 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1274 * @runtime: PCM runtime instance
1275 * @var: hw_params variable to apply the range
1276 * @min: the minimal value
1277 * @max: the maximal value
1278 * 
1279 * Apply the min/max range constraint to an interval parameter.
1280 *
1281 * Return: Positive if the value is changed, zero if it's not changed, or a
1282 * negative error code.
1283 */
1284int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1285				 unsigned int min, unsigned int max)
1286{
1287	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1288	struct snd_interval t;
1289	t.min = min;
1290	t.max = max;
1291	t.openmin = t.openmax = 0;
1292	t.integer = 0;
1293	return snd_interval_refine(constrs_interval(constrs, var), &t);
1294}
1295EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
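/*
 * Illustrative use (hypothetical values): limit the total buffer size to a
 * range a DMA engine can handle, e.g. 4 KiB to 64 KiB:
 *
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 65536);
 */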
1296
1297static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1298				struct snd_pcm_hw_rule *rule)
1299{
1300	struct snd_pcm_hw_constraint_list *list = rule->private;
1301	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1302}		
1303
1304
1305/**
1306 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1307 * @runtime: PCM runtime instance
1308 * @cond: condition bits
1309 * @var: hw_params variable to apply the list constraint
1310 * @l: list
1311 * 
1312 * Apply the list of constraints to an interval parameter.
1313 *
1314 * Return: Zero if successful, or a negative error code on failure.
1315 */
1316int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1317			       unsigned int cond,
1318			       snd_pcm_hw_param_t var,
1319			       const struct snd_pcm_hw_constraint_list *l)
1320{
1321	return snd_pcm_hw_rule_add(runtime, cond, var,
1322				   snd_pcm_hw_rule_list, (void *)l,
1323				   var, -1);
1324}
1325EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
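/*
 * Illustrative sketch (hypothetical driver; the foo_* names are placeholders):
 * restrict the rate to a fixed set of values from the substream open callback:
 *
 *	static const unsigned int foo_rates[] = { 8000, 16000, 48000 };
 *	static const struct snd_pcm_hw_constraint_list foo_rate_list = {
 *		.count = ARRAY_SIZE(foo_rates),
 *		.list = foo_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &foo_rate_list);
 */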
1326
1327static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1328				  struct snd_pcm_hw_rule *rule)
1329{
1330	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1331	return snd_interval_ranges(hw_param_interval(params, rule->var),
1332				   r->count, r->ranges, r->mask);
1333}
1334
1335
1336/**
1337 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1338 * @runtime: PCM runtime instance
1339 * @cond: condition bits
1340 * @var: hw_params variable to apply the list of range constraints
1341 * @r: ranges
1342 *
1343 * Apply the list of range constraints to an interval parameter.
1344 *
1345 * Return: Zero if successful, or a negative error code on failure.
1346 */
1347int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1348				 unsigned int cond,
1349				 snd_pcm_hw_param_t var,
1350				 const struct snd_pcm_hw_constraint_ranges *r)
1351{
1352	return snd_pcm_hw_rule_add(runtime, cond, var,
1353				   snd_pcm_hw_rule_ranges, (void *)r,
1354				   var, -1);
1355}
1356EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1357
1358static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1359				   struct snd_pcm_hw_rule *rule)
1360{
1361	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1362	unsigned int num = 0, den = 0;
1363	int err;
1364	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1365				  r->nrats, r->rats, &num, &den);
1366	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1367		params->rate_num = num;
1368		params->rate_den = den;
1369	}
1370	return err;
1371}
1372
1373/**
1374 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1375 * @runtime: PCM runtime instance
1376 * @cond: condition bits
1377 * @var: hw_params variable to apply the ratnums constraint
 1378 * @r: struct snd_ratnums constraints
1379 *
1380 * Return: Zero if successful, or a negative error code on failure.
1381 */
1382int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, 
1383				  unsigned int cond,
1384				  snd_pcm_hw_param_t var,
1385				  const struct snd_pcm_hw_constraint_ratnums *r)
1386{
1387	return snd_pcm_hw_rule_add(runtime, cond, var,
1388				   snd_pcm_hw_rule_ratnums, (void *)r,
1389				   var, -1);
1390}
1391EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1392
1393static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1394				   struct snd_pcm_hw_rule *rule)
1395{
1396	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1397	unsigned int num = 0, den = 0;
1398	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1399				  r->nrats, r->rats, &num, &den);
1400	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1401		params->rate_num = num;
1402		params->rate_den = den;
1403	}
1404	return err;
1405}
1406
1407/**
1408 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1409 * @runtime: PCM runtime instance
1410 * @cond: condition bits
1411 * @var: hw_params variable to apply the ratdens constraint
 1412 * @r: struct snd_ratdens constraints
1413 *
1414 * Return: Zero if successful, or a negative error code on failure.
1415 */
1416int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, 
1417				  unsigned int cond,
1418				  snd_pcm_hw_param_t var,
1419				  const struct snd_pcm_hw_constraint_ratdens *r)
1420{
1421	return snd_pcm_hw_rule_add(runtime, cond, var,
1422				   snd_pcm_hw_rule_ratdens, (void *)r,
1423				   var, -1);
1424}
1425EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1426
1427static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1428				  struct snd_pcm_hw_rule *rule)
1429{
1430	unsigned int l = (unsigned long) rule->private;
1431	int width = l & 0xffff;
1432	unsigned int msbits = l >> 16;
1433	const struct snd_interval *i =
1434		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1435
1436	if (!snd_interval_single(i))
1437		return 0;
1438
1439	if ((snd_interval_value(i) == width) ||
1440	    (width == 0 && snd_interval_value(i) > msbits))
1441		params->msbits = min_not_zero(params->msbits, msbits);
1442
1443	return 0;
1444}
1445
1446/**
1447 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1448 * @runtime: PCM runtime instance
1449 * @cond: condition bits
1450 * @width: sample bits width
1451 * @msbits: msbits width
1452 *
1453 * This constraint will set the number of most significant bits (msbits) if a
 1454 * sample format with the specified width has been selected. If width is set to 0,
1455 * the msbits will be set for any sample format with a width larger than the
1456 * specified msbits.
1457 *
1458 * Return: Zero if successful, or a negative error code on failure.
1459 */
1460int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, 
1461				 unsigned int cond,
1462				 unsigned int width,
1463				 unsigned int msbits)
1464{
1465	unsigned long l = (msbits << 16) | width;
1466	return snd_pcm_hw_rule_add(runtime, cond, -1,
1467				    snd_pcm_hw_rule_msbits,
1468				    (void*) l,
1469				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1470}
1471EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
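/*
 * Illustrative use (hypothetical codec): declare that 32-bit sample
 * containers carry only 24 valid bits:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */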
1472
1473static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1474				struct snd_pcm_hw_rule *rule)
1475{
1476	unsigned long step = (unsigned long) rule->private;
1477	return snd_interval_step(hw_param_interval(params, rule->var), step);
1478}
1479
1480/**
1481 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1482 * @runtime: PCM runtime instance
1483 * @cond: condition bits
1484 * @var: hw_params variable to apply the step constraint
1485 * @step: step size
1486 *
1487 * Return: Zero if successful, or a negative error code on failure.
1488 */
1489int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1490			       unsigned int cond,
1491			       snd_pcm_hw_param_t var,
1492			       unsigned long step)
1493{
1494	return snd_pcm_hw_rule_add(runtime, cond, var, 
1495				   snd_pcm_hw_rule_step, (void *) step,
1496				   var, -1);
1497}
1498EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1499
1500static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1501{
1502	static const unsigned int pow2_sizes[] = {
1503		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1504		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1505		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1506		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1507	};
1508	return snd_interval_list(hw_param_interval(params, rule->var),
1509				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1510}		
1511
1512/**
1513 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1514 * @runtime: PCM runtime instance
1515 * @cond: condition bits
1516 * @var: hw_params variable to apply the power-of-2 constraint
1517 *
1518 * Return: Zero if successful, or a negative error code on failure.
1519 */
1520int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1521			       unsigned int cond,
1522			       snd_pcm_hw_param_t var)
1523{
1524	return snd_pcm_hw_rule_add(runtime, cond, var, 
1525				   snd_pcm_hw_rule_pow2, NULL,
1526				   var, -1);
1527}
1528EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
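/*
 * Illustrative use (hypothetical hardware constraint): require the buffer
 * size in bytes to be a power of two:
 *
 *	err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 */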
1529
1530static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1531					   struct snd_pcm_hw_rule *rule)
1532{
1533	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1534	struct snd_interval *rate;
1535
1536	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1537	return snd_interval_list(rate, 1, &base_rate, 0);
1538}
1539
1540/**
1541 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1542 * @runtime: PCM runtime instance
1543 * @base_rate: the rate at which the hardware does not resample
1544 *
1545 * Return: Zero if successful, or a negative error code on failure.
1546 */
1547int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1548			       unsigned int base_rate)
1549{
1550	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1551				   SNDRV_PCM_HW_PARAM_RATE,
1552				   snd_pcm_hw_rule_noresample_func,
1553				   (void *)(uintptr_t)base_rate,
1554				   SNDRV_PCM_HW_PARAM_RATE, -1);
1555}
1556EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
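/*
 * Illustrative use (hypothetical driver): hardware that runs natively at
 * 48 kHz can let applications opt out of hardware resampling:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 */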
1557
1558static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1559				  snd_pcm_hw_param_t var)
1560{
1561	if (hw_is_mask(var)) {
1562		snd_mask_any(hw_param_mask(params, var));
1563		params->cmask |= 1 << var;
1564		params->rmask |= 1 << var;
1565		return;
1566	}
1567	if (hw_is_interval(var)) {
1568		snd_interval_any(hw_param_interval(params, var));
1569		params->cmask |= 1 << var;
1570		params->rmask |= 1 << var;
1571		return;
1572	}
1573	snd_BUG();
1574}
1575
1576void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1577{
1578	unsigned int k;
1579	memset(params, 0, sizeof(*params));
1580	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1581		_snd_pcm_hw_param_any(params, k);
1582	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1583		_snd_pcm_hw_param_any(params, k);
1584	params->info = ~0U;
1585}
1586EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1587
1588/**
1589 * snd_pcm_hw_param_value - return @params field @var value
1590 * @params: the hw_params instance
1591 * @var: parameter to retrieve
1592 * @dir: pointer to the direction (-1,0,1) or %NULL
1593 *
1594 * Return: The value for field @var if it's fixed in configuration space
1595 * defined by @params. -%EINVAL otherwise.
1596 */
1597int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1598			   snd_pcm_hw_param_t var, int *dir)
1599{
1600	if (hw_is_mask(var)) {
1601		const struct snd_mask *mask = hw_param_mask_c(params, var);
1602		if (!snd_mask_single(mask))
1603			return -EINVAL;
1604		if (dir)
1605			*dir = 0;
1606		return snd_mask_value(mask);
1607	}
1608	if (hw_is_interval(var)) {
1609		const struct snd_interval *i = hw_param_interval_c(params, var);
1610		if (!snd_interval_single(i))
1611			return -EINVAL;
1612		if (dir)
1613			*dir = i->openmin;
1614		return snd_interval_value(i);
1615	}
1616	return -EINVAL;
1617}
1618EXPORT_SYMBOL(snd_pcm_hw_param_value);
1619
1620void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1621				snd_pcm_hw_param_t var)
1622{
1623	if (hw_is_mask(var)) {
1624		snd_mask_none(hw_param_mask(params, var));
1625		params->cmask |= 1 << var;
1626		params->rmask |= 1 << var;
1627	} else if (hw_is_interval(var)) {
1628		snd_interval_none(hw_param_interval(params, var));
1629		params->cmask |= 1 << var;
1630		params->rmask |= 1 << var;
1631	} else {
1632		snd_BUG();
1633	}
1634}
1635EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1636
1637static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1638				   snd_pcm_hw_param_t var)
1639{
1640	int changed;
1641	if (hw_is_mask(var))
1642		changed = snd_mask_refine_first(hw_param_mask(params, var));
1643	else if (hw_is_interval(var))
1644		changed = snd_interval_refine_first(hw_param_interval(params, var));
1645	else
1646		return -EINVAL;
1647	if (changed > 0) {
1648		params->cmask |= 1 << var;
1649		params->rmask |= 1 << var;
1650	}
1651	return changed;
1652}
1653
1654
1655/**
1656 * snd_pcm_hw_param_first - refine config space and return minimum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1661 *
1662 * Inside configuration space defined by @params remove from @var all
1663 * values > minimum. Reduce configuration space accordingly.
1664 *
1665 * Return: The minimum, or a negative error code on failure.
1666 */
1667int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, 
1668			   struct snd_pcm_hw_params *params, 
1669			   snd_pcm_hw_param_t var, int *dir)
1670{
1671	int changed = _snd_pcm_hw_param_first(params, var);
1672	if (changed < 0)
1673		return changed;
1674	if (params->rmask) {
1675		int err = snd_pcm_hw_refine(pcm, params);
1676		if (err < 0)
1677			return err;
1678	}
1679	return snd_pcm_hw_param_value(params, var, dir);
1680}
1681EXPORT_SYMBOL(snd_pcm_hw_param_first);
1682
1683static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1684				  snd_pcm_hw_param_t var)
1685{
1686	int changed;
1687	if (hw_is_mask(var))
1688		changed = snd_mask_refine_last(hw_param_mask(params, var));
1689	else if (hw_is_interval(var))
1690		changed = snd_interval_refine_last(hw_param_interval(params, var));
1691	else
1692		return -EINVAL;
1693	if (changed > 0) {
1694		params->cmask |= 1 << var;
1695		params->rmask |= 1 << var;
1696	}
1697	return changed;
1698}
1699
1700
1701/**
1702 * snd_pcm_hw_param_last - refine config space and return maximum value
1703 * @pcm: PCM instance
1704 * @params: the hw_params instance
1705 * @var: parameter to retrieve
1706 * @dir: pointer to the direction (-1,0,1) or %NULL
1707 *
1708 * Inside configuration space defined by @params remove from @var all
1709 * values < maximum. Reduce configuration space accordingly.
1710 *
1711 * Return: The maximum, or a negative error code on failure.
1712 */
1713int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, 
1714			  struct snd_pcm_hw_params *params,
1715			  snd_pcm_hw_param_t var, int *dir)
1716{
1717	int changed = _snd_pcm_hw_param_last(params, var);
1718	if (changed < 0)
1719		return changed;
1720	if (params->rmask) {
1721		int err = snd_pcm_hw_refine(pcm, params);
1722		if (err < 0)
1723			return err;
1724	}
1725	return snd_pcm_hw_param_value(params, var, dir);
1726}
1727EXPORT_SYMBOL(snd_pcm_hw_param_last);
1728
1729/**
 1730 * snd_pcm_hw_params_bits - Get the number of bits per sample.
1731 * @p: hardware parameters
1732 *
 1733 * Return: The number of bits per sample based on the format,
 1734 * subformat and msbits of the specified hw params.
1735 */
1736int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1737{
1738	snd_pcm_subformat_t subformat = params_subformat(p);
1739	snd_pcm_format_t format = params_format(p);
1740
1741	switch (format) {
1742	case SNDRV_PCM_FORMAT_S32_LE:
1743	case SNDRV_PCM_FORMAT_U32_LE:
1744	case SNDRV_PCM_FORMAT_S32_BE:
1745	case SNDRV_PCM_FORMAT_U32_BE:
1746		switch (subformat) {
1747		case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1748			return 20;
1749		case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1750			return 24;
1751		case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1752		case SNDRV_PCM_SUBFORMAT_STD:
1753		default:
1754			break;
1755		}
1756		fallthrough;
1757	default:
1758		return snd_pcm_format_width(format);
1759	}
1760}
1761EXPORT_SYMBOL(snd_pcm_hw_params_bits);
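/*
 * For example, S32_LE with the MSBITS_24 subformat yields 24, while S16_LE
 * (or S32_LE with the STD subformat) falls back to snd_pcm_format_width(),
 * i.e. 16 or 32 bits respectively.
 */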
1762
1763static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1764				   void *arg)
1765{
1766	struct snd_pcm_runtime *runtime = substream->runtime;
1767
1768	guard(pcm_stream_lock_irqsave)(substream);
1769	if (snd_pcm_running(substream) &&
1770	    snd_pcm_update_hw_ptr(substream) >= 0)
1771		runtime->status->hw_ptr %= runtime->buffer_size;
1772	else {
1773		runtime->status->hw_ptr = 0;
1774		runtime->hw_ptr_wrap = 0;
 1775	}
 1776	return 0;
1777}
1778
1779static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1780					  void *arg)
1781{
1782	struct snd_pcm_channel_info *info = arg;
1783	struct snd_pcm_runtime *runtime = substream->runtime;
1784	int width;
1785	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1786		info->offset = -1;
1787		return 0;
1788	}
1789	width = snd_pcm_format_physical_width(runtime->format);
1790	if (width < 0)
1791		return width;
1792	info->offset = 0;
1793	switch (runtime->access) {
1794	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1795	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1796		info->first = info->channel * width;
1797		info->step = runtime->channels * width;
1798		break;
1799	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1800	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1801	{
1802		size_t size = runtime->dma_bytes / runtime->channels;
1803		info->first = info->channel * size * 8;
1804		info->step = width;
1805		break;
1806	}
1807	default:
1808		snd_BUG();
1809		break;
1810	}
1811	return 0;
1812}
1813
1814static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1815				       void *arg)
1816{
1817	struct snd_pcm_hw_params *params = arg;
1818	snd_pcm_format_t format;
1819	int channels;
1820	ssize_t frame_size;
1821
1822	params->fifo_size = substream->runtime->hw.fifo_size;
1823	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1824		format = params_format(params);
1825		channels = params_channels(params);
1826		frame_size = snd_pcm_format_size(format, channels);
1827		if (frame_size > 0)
1828			params->fifo_size /= frame_size;
1829	}
1830	return 0;
1831}
1832
1833static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream,
1834				     void *arg)
1835{
1836	static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff,
1837					      0xff, 0xff, 0xff, 0xff,
1838					      0xff, 0xff, 0xff, 0xff };
1839
1840	if (substream->runtime->std_sync_id)
1841		snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id));
1842	return 0;
1843}
1844
1845/**
1846 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1847 * @substream: the pcm substream instance
1848 * @cmd: ioctl command
1849 * @arg: ioctl argument
1850 *
1851 * Processes the generic ioctl commands for PCM.
1852 * Can be passed as the ioctl callback for PCM ops.
1853 *
1854 * Return: Zero if successful, or a negative error code on failure.
1855 */
1856int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1857		      unsigned int cmd, void *arg)
1858{
1859	switch (cmd) {
1860	case SNDRV_PCM_IOCTL1_RESET:
1861		return snd_pcm_lib_ioctl_reset(substream, arg);
1862	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1863		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1864	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1865		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1866	case SNDRV_PCM_IOCTL1_SYNC_ID:
1867		return snd_pcm_lib_ioctl_sync_id(substream, arg);
1868	}
1869	return -ENXIO;
1870}
1871EXPORT_SYMBOL(snd_pcm_lib_ioctl);
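/*
 * A sketch of wiring snd_pcm_lib_ioctl() into a driver's snd_pcm_ops.  The
 * example_pcm_* callbacks are hypothetical and assumed to be defined
 * elsewhere in the driver; in recent kernels the .ioctl field can typically
 * be left NULL as well, with the core falling back to this function.
 */
int example_pcm_open(struct snd_pcm_substream *substream);
int example_pcm_hw_params(struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params);
int example_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t example_pcm_pointer(struct snd_pcm_substream *substream);

static const struct snd_pcm_ops example_pcm_ops = {
	.open		= example_pcm_open,
	.hw_params	= example_pcm_hw_params,
	.trigger	= example_pcm_trigger,
	.pointer	= example_pcm_pointer,
	.ioctl		= snd_pcm_lib_ioctl,	/* generic IOCTL1 handling */
};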
1872
1873/**
1874 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1875 *						under acquired lock of PCM substream.
1876 * @substream: the instance of pcm substream.
1877 *
1878 * This function is called when a batch of audio data frames of the same size as the period of the
1879 * buffer has been processed in the audio data transfer.
1880 *
1881 * The call updates the runtime status with the latest position of the audio data transfer, checks
1882 * for an overrun or underrun of the buffer, wakes up user processes waiting for available audio
1883 * data frames, samples the audio timestamp, and stops or drains the PCM substream according to the
1884 * configured thresholds.
1885 *
1886 * The function is intended for the case that the PCM driver handles audio data frames under an
1887 * already acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops
1888 * in process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1889 * instead, since the lock of the PCM substream must be acquired in advance.
1890 *
1891 * Developers should be aware that some callbacks in &snd_pcm_ops are invoked by this
1892 * call:
1893 *
1894 * - .pointer - to retrieve the current position of the audio data transfer by frame count or XRUN state.
1895 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1896 * - .get_time_info - to retrieve the audio timestamp if needed.
1897 *
1898 * Even if more than one period has elapsed since the last call, you need to call this only once.
1899 */
1900void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1901{
1902	struct snd_pcm_runtime *runtime;
1903
1904	if (PCM_RUNTIME_CHECK(substream))
1905		return;
1906	runtime = substream->runtime;
1907
1908	if (!snd_pcm_running(substream) ||
1909	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1910		goto _end;
1911
1912#ifdef CONFIG_SND_PCM_TIMER
1913	if (substream->timer_running)
1914		snd_timer_interrupt(substream->timer, 1);
1915#endif
1916 _end:
1917	snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1918}
1919EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1920
1921/**
1922 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1923 *			      PCM substream.
1924 * @substream: the instance of PCM substream.
1925 *
1926 * This function is similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that it
1927 * acquires the lock of the PCM substream by itself.
1928 *
1929 * It's typically called from an IRQ handler, when a hardware IRQ occurs, to notify that a batch of
1930 * audio data frames of the same size as the period of the buffer has been processed in the audio
1931 * data transfer.
1932 */
1933void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1934{
1935	if (snd_BUG_ON(!substream))
1936		return;
1937
1938	guard(pcm_stream_lock_irqsave)(substream);
1939	snd_pcm_period_elapsed_under_stream_lock(substream);
1940}
1941EXPORT_SYMBOL(snd_pcm_period_elapsed);
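/*
 * A sketch of the typical call site: an interrupt handler acknowledges the
 * period IRQ and notifies the PCM core.  The chip structure, register
 * offsets and bit names are hypothetical; <linux/io.h> and
 * <linux/interrupt.h> are assumed to be included.
 */
#define EXAMPLE_IRQ_STATUS	0x08	/* hypothetical register offset */
#define EXAMPLE_IRQ_PERIOD	0x01	/* hypothetical period-done bit */

struct example_chip {
	void __iomem *regs;
	struct snd_pcm_substream *substream;
};

static irqreturn_t example_pcm_interrupt(int irq, void *dev_id)
{
	struct example_chip *chip = dev_id;
	u32 status = readl(chip->regs + EXAMPLE_IRQ_STATUS);

	if (!(status & EXAMPLE_IRQ_PERIOD))
		return IRQ_NONE;
	writel(status, chip->regs + EXAMPLE_IRQ_STATUS);	/* ack */
	if (chip->substream)
		snd_pcm_period_elapsed(chip->substream);
	return IRQ_HANDLED;
}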
1942
1943/*
1944 * Wait until avail_min data becomes available
1945 * Returns a negative error code if any error occurs during operation.
1946 * The available space is stored on availp.  When err = 0 and avail = 0
1947 * on the capture stream, it indicates the stream is in DRAINING state.
1948 */
1949static int wait_for_avail(struct snd_pcm_substream *substream,
1950			      snd_pcm_uframes_t *availp)
1951{
1952	struct snd_pcm_runtime *runtime = substream->runtime;
1953	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1954	wait_queue_entry_t wait;
1955	int err = 0;
1956	snd_pcm_uframes_t avail = 0;
1957	long wait_time, tout;
1958
1959	init_waitqueue_entry(&wait, current);
1960	set_current_state(TASK_INTERRUPTIBLE);
1961	add_wait_queue(&runtime->tsleep, &wait);
1962
1963	if (runtime->no_period_wakeup)
1964		wait_time = MAX_SCHEDULE_TIMEOUT;
1965	else {
1966		/* use wait time from substream if available */
1967		if (substream->wait_time) {
1968			wait_time = substream->wait_time;
1969		} else {
1970			wait_time = 100;
1971
1972			if (runtime->rate) {
1973				long t = runtime->buffer_size * 1100 / runtime->rate;
1974				wait_time = max(t, wait_time);
1975			}
1976		}
1977		wait_time = msecs_to_jiffies(wait_time);
1978	}
1979
1980	for (;;) {
1981		if (signal_pending(current)) {
1982			err = -ERESTARTSYS;
1983			break;
1984		}
1985
1986		/*
1987		 * We need to check if space became available already
1988		 * (and thus the wakeup happened already) first to close
1989		 * the race of space already having become available.
1990		 * This check must happen after being added to the waitqueue
1991		 * and having the current state be INTERRUPTIBLE.
1992		 */
1993		avail = snd_pcm_avail(substream);
1994		if (avail >= runtime->twake)
1995			break;
1996		snd_pcm_stream_unlock_irq(substream);
1997
1998		tout = schedule_timeout(wait_time);
1999
2000		snd_pcm_stream_lock_irq(substream);
2001		set_current_state(TASK_INTERRUPTIBLE);
2002		switch (runtime->state) {
2003		case SNDRV_PCM_STATE_SUSPENDED:
2004			err = -ESTRPIPE;
2005			goto _endloop;
2006		case SNDRV_PCM_STATE_XRUN:
2007			err = -EPIPE;
2008			goto _endloop;
2009		case SNDRV_PCM_STATE_DRAINING:
2010			if (is_playback)
2011				err = -EPIPE;
2012			else 
2013				avail = 0; /* indicate draining */
2014			goto _endloop;
2015		case SNDRV_PCM_STATE_OPEN:
2016		case SNDRV_PCM_STATE_SETUP:
2017		case SNDRV_PCM_STATE_DISCONNECTED:
2018			err = -EBADFD;
2019			goto _endloop;
2020		case SNDRV_PCM_STATE_PAUSED:
2021			continue;
2022		}
2023		if (!tout) {
2024			pcm_dbg(substream->pcm,
2025				"%s timeout (DMA or IRQ trouble?)\n",
2026				is_playback ? "playback write" : "capture read");
2027			err = -EIO;
2028			break;
2029		}
2030	}
2031 _endloop:
2032	set_current_state(TASK_RUNNING);
2033	remove_wait_queue(&runtime->tsleep, &wait);
2034	*availp = avail;
2035	return err;
2036}
2037	
2038typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2039			      int channel, unsigned long hwoff,
2040			      struct iov_iter *iter, unsigned long bytes);
2041
2042typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2043			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2044			  bool);
2045
2046/* calculate the target DMA-buffer position to be written/read */
2047static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2048			   int channel, unsigned long hwoff)
2049{
2050	return runtime->dma_area + hwoff +
2051		channel * (runtime->dma_bytes / runtime->channels);
2052}
2053
2054/* default copy ops for write; used for both interleaved and non-interleaved modes */
2055static int default_write_copy(struct snd_pcm_substream *substream,
2056			      int channel, unsigned long hwoff,
2057			      struct iov_iter *iter, unsigned long bytes)
2058{
2059	if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2060			   bytes, iter) != bytes)
2061		return -EFAULT;
2062	return 0;
2063}
2064
2065/* fill silence instead of copy data; called as a transfer helper
 2066 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2067 * a NULL buffer is passed
2068 */
2069static int fill_silence(struct snd_pcm_substream *substream, int channel,
2070			unsigned long hwoff, struct iov_iter *iter,
2071			unsigned long bytes)
2072{
2073	struct snd_pcm_runtime *runtime = substream->runtime;
2074
2075	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2076		return 0;
2077	if (substream->ops->fill_silence)
2078		return substream->ops->fill_silence(substream, channel,
2079						    hwoff, bytes);
2080
2081	snd_pcm_format_set_silence(runtime->format,
2082				   get_dma_ptr(runtime, channel, hwoff),
2083				   bytes_to_samples(runtime, bytes));
2084	return 0;
2085}
2086
2087/* default copy ops for read; used for both interleaved and non-interleaved modes */
2088static int default_read_copy(struct snd_pcm_substream *substream,
2089			     int channel, unsigned long hwoff,
2090			     struct iov_iter *iter, unsigned long bytes)
2091{
2092	if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2093			 bytes, iter) != bytes)
2094		return -EFAULT;
2095	return 0;
2096}
2097
2098/* call transfer with the filled iov_iter */
2099static int do_transfer(struct snd_pcm_substream *substream, int c,
2100		       unsigned long hwoff, void *data, unsigned long bytes,
2101		       pcm_transfer_f transfer, bool in_kernel)
2102{
2103	struct iov_iter iter;
2104	int err, type;
2105
2106	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2107		type = ITER_SOURCE;
2108	else
2109		type = ITER_DEST;
2110
2111	if (in_kernel) {
2112		struct kvec kvec = { data, bytes };
2113
2114		iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2115		return transfer(substream, c, hwoff, &iter, bytes);
2116	}
2117
2118	err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2119	if (err)
2120		return err;
2121	return transfer(substream, c, hwoff, &iter, bytes);
2122}
2123
2124/* call transfer function with the converted pointers and sizes;
2125 * for interleaved mode, it's one shot for all samples
2126 */
2127static int interleaved_copy(struct snd_pcm_substream *substream,
2128			    snd_pcm_uframes_t hwoff, void *data,
2129			    snd_pcm_uframes_t off,
2130			    snd_pcm_uframes_t frames,
2131			    pcm_transfer_f transfer,
2132			    bool in_kernel)
2133{
2134	struct snd_pcm_runtime *runtime = substream->runtime;
2135
2136	/* convert to bytes */
2137	hwoff = frames_to_bytes(runtime, hwoff);
2138	off = frames_to_bytes(runtime, off);
2139	frames = frames_to_bytes(runtime, frames);
2140
2141	return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2142			   in_kernel);
2143}
2144
2145/* call transfer function with the converted pointers and sizes for each
2146 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2147 */
2148static int noninterleaved_copy(struct snd_pcm_substream *substream,
2149			       snd_pcm_uframes_t hwoff, void *data,
2150			       snd_pcm_uframes_t off,
2151			       snd_pcm_uframes_t frames,
2152			       pcm_transfer_f transfer,
2153			       bool in_kernel)
2154{
2155	struct snd_pcm_runtime *runtime = substream->runtime;
2156	int channels = runtime->channels;
2157	void **bufs = data;
2158	int c, err;
2159
2160	/* convert to bytes; note that it's not frames_to_bytes() here.
2161	 * in non-interleaved mode, we copy for each channel, thus
2162	 * each copy is n_samples bytes x channels = whole frames.
2163	 */
2164	off = samples_to_bytes(runtime, off);
2165	frames = samples_to_bytes(runtime, frames);
2166	hwoff = samples_to_bytes(runtime, hwoff);
2167	for (c = 0; c < channels; ++c, ++bufs) {
2168		if (!data || !*bufs)
2169			err = fill_silence(substream, c, hwoff, NULL, frames);
2170		else
2171			err = do_transfer(substream, c, hwoff, *bufs + off,
2172					  frames, transfer, in_kernel);
2173		if (err < 0)
2174			return err;
2175	}
2176	return 0;
2177}
2178
2179/* fill silence on the given buffer position;
2180 * called from snd_pcm_playback_silence()
2181 */
2182static int fill_silence_frames(struct snd_pcm_substream *substream,
2183			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2184{
2185	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2186	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2187		return interleaved_copy(substream, off, NULL, 0, frames,
2188					fill_silence, true);
2189	else
2190		return noninterleaved_copy(substream, off, NULL, 0, frames,
2191					   fill_silence, true);
2192}
2193
2194/* sanity-check for read/write methods */
2195static int pcm_sanity_check(struct snd_pcm_substream *substream)
2196{
2197	struct snd_pcm_runtime *runtime;
2198	if (PCM_RUNTIME_CHECK(substream))
2199		return -ENXIO;
2200	runtime = substream->runtime;
2201	if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2202		return -EINVAL;
2203	if (runtime->state == SNDRV_PCM_STATE_OPEN)
2204		return -EBADFD;
2205	return 0;
2206}
2207
2208static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2209{
2210	switch (runtime->state) {
2211	case SNDRV_PCM_STATE_PREPARED:
2212	case SNDRV_PCM_STATE_RUNNING:
2213	case SNDRV_PCM_STATE_PAUSED:
2214		return 0;
2215	case SNDRV_PCM_STATE_XRUN:
2216		return -EPIPE;
2217	case SNDRV_PCM_STATE_SUSPENDED:
2218		return -ESTRPIPE;
2219	default:
2220		return -EBADFD;
2221	}
2222}
2223
2224/* update to the given appl_ptr and call ack callback if needed;
2225 * when an error is returned, take back to the original value
2226 */
2227int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2228			   snd_pcm_uframes_t appl_ptr)
2229{
2230	struct snd_pcm_runtime *runtime = substream->runtime;
2231	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2232	snd_pcm_sframes_t diff;
2233	int ret;
2234
2235	if (old_appl_ptr == appl_ptr)
2236		return 0;
2237
2238	if (appl_ptr >= runtime->boundary)
2239		return -EINVAL;
2240	/*
2241	 * check if a rewind is requested by the application
2242	 */
2243	if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2244		diff = appl_ptr - old_appl_ptr;
2245		if (diff >= 0) {
2246			if (diff > runtime->buffer_size)
2247				return -EINVAL;
2248		} else {
2249			if (runtime->boundary + diff > runtime->buffer_size)
2250				return -EINVAL;
2251		}
2252	}
2253
2254	runtime->control->appl_ptr = appl_ptr;
2255	if (substream->ops->ack) {
2256		ret = substream->ops->ack(substream);
2257		if (ret < 0) {
2258			runtime->control->appl_ptr = old_appl_ptr;
2259			if (ret == -EPIPE)
2260				__snd_pcm_xrun(substream);
2261			return ret;
2262		}
2263	}
2264
2265	trace_applptr(substream, old_appl_ptr, appl_ptr);
2266
2267	return 0;
2268}
2269
2270/* the common loop for read/write data */
2271snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2272				     void *data, bool interleaved,
2273				     snd_pcm_uframes_t size, bool in_kernel)
2274{
2275	struct snd_pcm_runtime *runtime = substream->runtime;
2276	snd_pcm_uframes_t xfer = 0;
2277	snd_pcm_uframes_t offset = 0;
2278	snd_pcm_uframes_t avail;
2279	pcm_copy_f writer;
2280	pcm_transfer_f transfer;
2281	bool nonblock;
2282	bool is_playback;
2283	int err;
2284
2285	err = pcm_sanity_check(substream);
2286	if (err < 0)
2287		return err;
2288
2289	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2290	if (interleaved) {
2291		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2292		    runtime->channels > 1)
2293			return -EINVAL;
2294		writer = interleaved_copy;
2295	} else {
2296		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2297			return -EINVAL;
2298		writer = noninterleaved_copy;
2299	}
2300
2301	if (!data) {
2302		if (is_playback)
2303			transfer = fill_silence;
2304		else
2305			return -EINVAL;
2306	} else {
2307		if (substream->ops->copy)
2308			transfer = substream->ops->copy;
2309		else
2310			transfer = is_playback ?
2311				default_write_copy : default_read_copy;
2312	}
2313
2314	if (size == 0)
2315		return 0;
2316
2317	nonblock = !!(substream->f_flags & O_NONBLOCK);
2318
2319	snd_pcm_stream_lock_irq(substream);
2320	err = pcm_accessible_state(runtime);
2321	if (err < 0)
2322		goto _end_unlock;
2323
2324	runtime->twake = runtime->control->avail_min ? : 1;
2325	if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2326		snd_pcm_update_hw_ptr(substream);
2327
2328	/*
2329	 * If size < start_threshold, wait indefinitely. Another
2330	 * thread may start capture
2331	 */
2332	if (!is_playback &&
2333	    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2334	    size >= runtime->start_threshold) {
2335		err = snd_pcm_start(substream);
2336		if (err < 0)
2337			goto _end_unlock;
2338	}
2339
2340	avail = snd_pcm_avail(substream);
2341
2342	while (size > 0) {
2343		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2344		snd_pcm_uframes_t cont;
2345		if (!avail) {
2346			if (!is_playback &&
2347			    runtime->state == SNDRV_PCM_STATE_DRAINING) {
2348				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2349				goto _end_unlock;
2350			}
2351			if (nonblock) {
2352				err = -EAGAIN;
2353				goto _end_unlock;
2354			}
2355			runtime->twake = min_t(snd_pcm_uframes_t, size,
2356					runtime->control->avail_min ? : 1);
2357			err = wait_for_avail(substream, &avail);
2358			if (err < 0)
2359				goto _end_unlock;
2360			if (!avail)
2361				continue; /* draining */
2362		}
2363		frames = size > avail ? avail : size;
2364		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2365		appl_ofs = appl_ptr % runtime->buffer_size;
2366		cont = runtime->buffer_size - appl_ofs;
2367		if (frames > cont)
2368			frames = cont;
2369		if (snd_BUG_ON(!frames)) {
2370			err = -EINVAL;
2371			goto _end_unlock;
2372		}
2373		if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2374			err = -EBUSY;
2375			goto _end_unlock;
2376		}
2377		snd_pcm_stream_unlock_irq(substream);
2378		if (!is_playback)
2379			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2380		err = writer(substream, appl_ofs, data, offset, frames,
2381			     transfer, in_kernel);
2382		if (is_playback)
2383			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2384		snd_pcm_stream_lock_irq(substream);
2385		atomic_dec(&runtime->buffer_accessing);
2386		if (err < 0)
2387			goto _end_unlock;
2388		err = pcm_accessible_state(runtime);
2389		if (err < 0)
2390			goto _end_unlock;
2391		appl_ptr += frames;
2392		if (appl_ptr >= runtime->boundary)
2393			appl_ptr -= runtime->boundary;
2394		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2395		if (err < 0)
2396			goto _end_unlock;
2397
2398		offset += frames;
2399		size -= frames;
2400		xfer += frames;
2401		avail -= frames;
2402		if (is_playback &&
2403		    runtime->state == SNDRV_PCM_STATE_PREPARED &&
2404		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2405			err = snd_pcm_start(substream);
2406			if (err < 0)
2407				goto _end_unlock;
2408		}
2409	}
2410 _end_unlock:
2411	runtime->twake = 0;
2412	if (xfer > 0 && err >= 0)
2413		snd_pcm_update_state(substream, runtime);
2414	snd_pcm_stream_unlock_irq(substream);
2415	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2416}
2417EXPORT_SYMBOL(__snd_pcm_lib_xfer);
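/*
 * A sketch of an in-kernel, interleaved playback write of "frames" frames
 * from a kernel buffer; the inline wrappers in <sound/pcm.h> (e.g.
 * snd_pcm_kernel_write()) expand to essentially this call.
 */
static snd_pcm_sframes_t example_kernel_write(struct snd_pcm_substream *substream,
					      void *buf,
					      snd_pcm_uframes_t frames)
{
	/* data=buf, interleaved=true, size=frames, in_kernel=true */
	return __snd_pcm_lib_xfer(substream, buf, true, frames, true);
}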
2418
2419/*
2420 * standard channel mapping helpers
2421 */
2422
2423/* default channel maps for multi-channel playbacks, up to 8 channels */
2424const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2425	{ .channels = 1,
2426	  .map = { SNDRV_CHMAP_MONO } },
2427	{ .channels = 2,
2428	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2429	{ .channels = 4,
2430	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2431		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2432	{ .channels = 6,
2433	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2434		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2435		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2436	{ .channels = 8,
2437	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2438		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2439		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2440		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2441	{ }
2442};
2443EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2444
2445/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2446const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2447	{ .channels = 1,
2448	  .map = { SNDRV_CHMAP_MONO } },
2449	{ .channels = 2,
2450	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2451	{ .channels = 4,
2452	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2453		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2454	{ .channels = 6,
2455	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2456		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2457		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2458	{ .channels = 8,
2459	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2460		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2461		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2462		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2463	{ }
2464};
2465EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2466
2467static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2468{
2469	if (ch > info->max_channels)
2470		return false;
2471	return !info->channel_mask || (info->channel_mask & (1U << ch));
2472}
2473
2474static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2475			      struct snd_ctl_elem_info *uinfo)
2476{
2477	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2478
2479	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2480	uinfo->count = info->max_channels;
2481	uinfo->value.integer.min = 0;
2482	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2483	return 0;
2484}
2485
2486/* get callback for channel map ctl element
2487 * stores the first channel map that matches the current channel count
2488 */
2489static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2490			     struct snd_ctl_elem_value *ucontrol)
2491{
2492	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2493	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2494	struct snd_pcm_substream *substream;
2495	const struct snd_pcm_chmap_elem *map;
2496
2497	if (!info->chmap)
2498		return -EINVAL;
2499	substream = snd_pcm_chmap_substream(info, idx);
2500	if (!substream)
2501		return -ENODEV;
2502	memset(ucontrol->value.integer.value, 0,
2503	       sizeof(long) * info->max_channels);
2504	if (!substream->runtime)
2505		return 0; /* no channels set */
2506	for (map = info->chmap; map->channels; map++) {
2507		int i;
2508		if (map->channels == substream->runtime->channels &&
2509		    valid_chmap_channels(info, map->channels)) {
2510			for (i = 0; i < map->channels; i++)
2511				ucontrol->value.integer.value[i] = map->map[i];
2512			return 0;
2513		}
2514	}
2515	return -EINVAL;
2516}
2517
2518/* tlv callback for channel map ctl element
2519 * expands the pre-defined channel maps in a form of TLV
2520 */
2521static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2522			     unsigned int size, unsigned int __user *tlv)
2523{
2524	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2525	const struct snd_pcm_chmap_elem *map;
2526	unsigned int __user *dst;
2527	int c, count = 0;
2528
2529	if (!info->chmap)
2530		return -EINVAL;
2531	if (size < 8)
2532		return -ENOMEM;
2533	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2534		return -EFAULT;
2535	size -= 8;
2536	dst = tlv + 2;
2537	for (map = info->chmap; map->channels; map++) {
2538		int chs_bytes = map->channels * 4;
2539		if (!valid_chmap_channels(info, map->channels))
2540			continue;
2541		if (size < 8)
2542			return -ENOMEM;
2543		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2544		    put_user(chs_bytes, dst + 1))
2545			return -EFAULT;
2546		dst += 2;
2547		size -= 8;
2548		count += 8;
2549		if (size < chs_bytes)
2550			return -ENOMEM;
2551		size -= chs_bytes;
2552		count += chs_bytes;
2553		for (c = 0; c < map->channels; c++) {
2554			if (put_user(map->map[c], dst))
2555				return -EFAULT;
2556			dst++;
2557		}
2558	}
2559	if (put_user(count, tlv + 1))
2560		return -EFAULT;
2561	return 0;
2562}
2563
2564static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2565{
2566	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2567	info->pcm->streams[info->stream].chmap_kctl = NULL;
2568	kfree(info);
2569}
2570
2571/**
2572 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2573 * @pcm: the assigned PCM instance
2574 * @stream: stream direction
2575 * @chmap: channel map elements (for query)
2576 * @max_channels: the max number of channels for the stream
2577 * @private_value: the value passed to each kcontrol's private_value field
2578 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2579 *
2580 * Create channel-mapping control elements assigned to the given PCM stream(s).
2581 * Return: Zero if successful, or a negative error value.
2582 */
2583int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2584			   const struct snd_pcm_chmap_elem *chmap,
2585			   int max_channels,
2586			   unsigned long private_value,
2587			   struct snd_pcm_chmap **info_ret)
2588{
2589	struct snd_pcm_chmap *info;
2590	struct snd_kcontrol_new knew = {
2591		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2592		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2593			SNDRV_CTL_ELEM_ACCESS_VOLATILE |
2594			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2595			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2596		.info = pcm_chmap_ctl_info,
2597		.get = pcm_chmap_ctl_get,
2598		.tlv.c = pcm_chmap_ctl_tlv,
2599	};
2600	int err;
2601
2602	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2603		return -EBUSY;
2604	info = kzalloc(sizeof(*info), GFP_KERNEL);
2605	if (!info)
2606		return -ENOMEM;
2607	info->pcm = pcm;
2608	info->stream = stream;
2609	info->chmap = chmap;
2610	info->max_channels = max_channels;
2611	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2612		knew.name = "Playback Channel Map";
2613	else
2614		knew.name = "Capture Channel Map";
2615	knew.device = pcm->device;
2616	knew.count = pcm->streams[stream].substream_count;
2617	knew.private_value = private_value;
2618	info->kctl = snd_ctl_new1(&knew, info);
2619	if (!info->kctl) {
2620		kfree(info);
2621		return -ENOMEM;
2622	}
2623	info->kctl->private_free = pcm_chmap_ctl_private_free;
2624	err = snd_ctl_add(pcm->card, info->kctl);
2625	if (err < 0)
2626		return err;
2627	pcm->streams[stream].chmap_kctl = info->kctl;
2628	if (info_ret)
2629		*info_ret = info;
2630	return 0;
2631}
2632EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
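/*
 * A sketch of registering the standard channel maps for a playback PCM,
 * typically right after snd_pcm_new(); the 8-channel limit and the plain
 * private_value of 0 are hypothetical driver choices.
 */
static int example_add_chmaps(struct snd_pcm *pcm)
{
	return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				      snd_pcm_std_chmaps, 8, 0, NULL);
}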
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  Digital Audio (PCM) abstract layer
   4 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
   5 *                   Abramo Bagnara <abramo@alsa-project.org>
   6 */
   7
   8#include <linux/slab.h>
   9#include <linux/sched/signal.h>
  10#include <linux/time.h>
  11#include <linux/math64.h>
  12#include <linux/export.h>
  13#include <sound/core.h>
  14#include <sound/control.h>
  15#include <sound/tlv.h>
  16#include <sound/info.h>
  17#include <sound/pcm.h>
  18#include <sound/pcm_params.h>
  19#include <sound/timer.h>
  20
  21#include "pcm_local.h"
  22
  23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
  24#define CREATE_TRACE_POINTS
  25#include "pcm_trace.h"
  26#else
  27#define trace_hwptr(substream, pos, in_interrupt)
  28#define trace_xrun(substream)
  29#define trace_hw_ptr_error(substream, reason)
  30#define trace_applptr(substream, prev, curr)
  31#endif
  32
  33static int fill_silence_frames(struct snd_pcm_substream *substream,
  34			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
  35
  36/*
  37 * fill ring buffer with silence
  38 * runtime->silence_start: starting pointer to silence area
  39 * runtime->silence_filled: size filled with silence
  40 * runtime->silence_threshold: threshold from application
  41 * runtime->silence_size: maximal size from application
  42 *
  43 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
  44 */
  45void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
  46{
  47	struct snd_pcm_runtime *runtime = substream->runtime;
  48	snd_pcm_uframes_t frames, ofs, transfer;
  49	int err;
  50
  51	if (runtime->silence_size < runtime->boundary) {
  52		snd_pcm_sframes_t noise_dist, n;
  53		snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
  54		if (runtime->silence_start != appl_ptr) {
  55			n = appl_ptr - runtime->silence_start;
  56			if (n < 0)
  57				n += runtime->boundary;
  58			if ((snd_pcm_uframes_t)n < runtime->silence_filled)
  59				runtime->silence_filled -= n;
  60			else
  61				runtime->silence_filled = 0;
  62			runtime->silence_start = appl_ptr;
  63		}
  64		if (runtime->silence_filled >= runtime->buffer_size)
  65			return;
  66		noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
  67		if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
  68			return;
  69		frames = runtime->silence_threshold - noise_dist;
  70		if (frames > runtime->silence_size)
  71			frames = runtime->silence_size;
  72	} else {
  73		if (new_hw_ptr == ULONG_MAX) {	/* initialization */
  74			snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
  75			if (avail > runtime->buffer_size)
  76				avail = runtime->buffer_size;
  77			runtime->silence_filled = avail > 0 ? avail : 0;
  78			runtime->silence_start = (runtime->status->hw_ptr +
  79						  runtime->silence_filled) %
  80						 runtime->boundary;
  81		} else {
  82			ofs = runtime->status->hw_ptr;
  83			frames = new_hw_ptr - ofs;
  84			if ((snd_pcm_sframes_t)frames < 0)
  85				frames += runtime->boundary;
  86			runtime->silence_filled -= frames;
  87			if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
  88				runtime->silence_filled = 0;
  89				runtime->silence_start = new_hw_ptr;
  90			} else {
  91				runtime->silence_start = ofs;
  92			}
  93		}
  94		frames = runtime->buffer_size - runtime->silence_filled;
  95	}
  96	if (snd_BUG_ON(frames > runtime->buffer_size))
  97		return;
  98	if (frames == 0)
  99		return;
 100	ofs = runtime->silence_start % runtime->buffer_size;
 101	while (frames > 0) {
 102		transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
 103		err = fill_silence_frames(substream, ofs, transfer);
 104		snd_BUG_ON(err < 0);
 105		runtime->silence_filled += transfer;
 106		frames -= transfer;
 107		ofs = 0;
 108	}
 109}
 110
 111#ifdef CONFIG_SND_DEBUG
 112void snd_pcm_debug_name(struct snd_pcm_substream *substream,
 113			   char *name, size_t len)
 114{
 115	snprintf(name, len, "pcmC%dD%d%c:%d",
 116		 substream->pcm->card->number,
 117		 substream->pcm->device,
 118		 substream->stream ? 'c' : 'p',
 119		 substream->number);
 120}
 121EXPORT_SYMBOL(snd_pcm_debug_name);
 122#endif
 123
 124#define XRUN_DEBUG_BASIC	(1<<0)
 125#define XRUN_DEBUG_STACK	(1<<1)	/* dump also stack */
 126#define XRUN_DEBUG_JIFFIESCHECK	(1<<2)	/* do jiffies check */
 127
 128#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 129
 130#define xrun_debug(substream, mask) \
 131			((substream)->pstr->xrun_debug & (mask))
 132#else
 133#define xrun_debug(substream, mask)	0
 134#endif
 135
 136#define dump_stack_on_xrun(substream) do {			\
 137		if (xrun_debug(substream, XRUN_DEBUG_STACK))	\
 138			dump_stack();				\
 139	} while (0)
 140
 141/* call with stream lock held */
 142void __snd_pcm_xrun(struct snd_pcm_substream *substream)
 143{
 144	struct snd_pcm_runtime *runtime = substream->runtime;
 145
 146	trace_xrun(substream);
 147	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
 148		snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
 149	snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
 150	if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
 151		char name[16];
 152		snd_pcm_debug_name(substream, name, sizeof(name));
 153		pcm_warn(substream->pcm, "XRUN: %s\n", name);
 154		dump_stack_on_xrun(substream);
 155	}
 156}
 157
 158#ifdef CONFIG_SND_PCM_XRUN_DEBUG
 159#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...)	\
 160	do {								\
 161		trace_hw_ptr_error(substream, reason);	\
 162		if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {		\
 163			pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
 164					   (in_interrupt) ? 'Q' : 'P', ##args);	\
 165			dump_stack_on_xrun(substream);			\
 166		}							\
 167	} while (0)
 168
 169#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
 170
 171#define hw_ptr_error(substream, fmt, args...) do { } while (0)
 172
 173#endif
 174
 175int snd_pcm_update_state(struct snd_pcm_substream *substream,
 176			 struct snd_pcm_runtime *runtime)
 177{
 178	snd_pcm_uframes_t avail;
 179
 180	avail = snd_pcm_avail(substream);
 181	if (avail > runtime->avail_max)
 182		runtime->avail_max = avail;
 183	if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
 184		if (avail >= runtime->buffer_size) {
 185			snd_pcm_drain_done(substream);
 186			return -EPIPE;
 187		}
 188	} else {
 189		if (avail >= runtime->stop_threshold) {
 190			__snd_pcm_xrun(substream);
 191			return -EPIPE;
 192		}
 193	}
 194	if (runtime->twake) {
 195		if (avail >= runtime->twake)
 196			wake_up(&runtime->tsleep);
 197	} else if (avail >= runtime->control->avail_min)
 198		wake_up(&runtime->sleep);
 199	return 0;
 200}
 201
 202static void update_audio_tstamp(struct snd_pcm_substream *substream,
 203				struct timespec *curr_tstamp,
 204				struct timespec *audio_tstamp)
 205{
 206	struct snd_pcm_runtime *runtime = substream->runtime;
 207	u64 audio_frames, audio_nsecs;
 208	struct timespec driver_tstamp;
 209
 210	if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
 211		return;
 212
 213	if (!(substream->ops->get_time_info) ||
 214		(runtime->audio_tstamp_report.actual_type ==
 215			SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 216
 217		/*
 218		 * provide an audio timestamp derived from the pointer position;
 219		 * add the delay only if requested
 220		 */
 221
 222		audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
 223
 224		if (runtime->audio_tstamp_config.report_delay) {
 225			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 226				audio_frames -=  runtime->delay;
 227			else
 228				audio_frames +=  runtime->delay;
 229		}
 230		audio_nsecs = div_u64(audio_frames * 1000000000LL,
 231				runtime->rate);
 232		*audio_tstamp = ns_to_timespec(audio_nsecs);
 233	}
 234	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
 235		runtime->status->audio_tstamp = *audio_tstamp;
 236		runtime->status->tstamp = *curr_tstamp;
 237	}
 238
 239	/*
 240	 * re-take a driver timestamp to let apps detect if the reference tstamp
 241	 * read by low-level hardware was provided with a delay
 242	 */
 243	snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
 244	runtime->driver_tstamp = driver_tstamp;
 245}
 246
 247static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
 248				  unsigned int in_interrupt)
 249{
 250	struct snd_pcm_runtime *runtime = substream->runtime;
 251	snd_pcm_uframes_t pos;
 252	snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
 253	snd_pcm_sframes_t hdelta, delta;
 254	unsigned long jdelta;
 255	unsigned long curr_jiffies;
 256	struct timespec curr_tstamp;
 257	struct timespec audio_tstamp;
 258	int crossed_boundary = 0;
 259
 260	old_hw_ptr = runtime->status->hw_ptr;
 261
 262	/*
 263	 * group pointer, time and jiffies reads to allow for more
 264	 * accurate correlations/corrections.
 265	 * The values are stored at the end of this routine after
 266	 * corrections for hw_ptr position
 267	 */
 268	pos = substream->ops->pointer(substream);
 269	curr_jiffies = jiffies;
 270	if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
 271		if ((substream->ops->get_time_info) &&
 272			(runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
 273			substream->ops->get_time_info(substream, &curr_tstamp,
 274						&audio_tstamp,
 275						&runtime->audio_tstamp_config,
 276						&runtime->audio_tstamp_report);
 277
 278			/* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
 279			if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
 280				snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
 281		} else
 282			snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
 283	}
 284
 285	if (pos == SNDRV_PCM_POS_XRUN) {
 286		__snd_pcm_xrun(substream);
 287		return -EPIPE;
 288	}
 289	if (pos >= runtime->buffer_size) {
 290		if (printk_ratelimit()) {
 291			char name[16];
 292			snd_pcm_debug_name(substream, name, sizeof(name));
 293			pcm_err(substream->pcm,
 294				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
 295				name, pos, runtime->buffer_size,
 296				runtime->period_size);
 297		}
 298		pos = 0;
 299	}
 300	pos -= pos % runtime->min_align;
 301	trace_hwptr(substream, pos, in_interrupt);
 302	hw_base = runtime->hw_ptr_base;
 303	new_hw_ptr = hw_base + pos;
 304	if (in_interrupt) {
 305		/* we know that one period was processed */
 306		/* delta = "expected next hw_ptr" for in_interrupt != 0 */
 307		delta = runtime->hw_ptr_interrupt + runtime->period_size;
 308		if (delta > new_hw_ptr) {
 309			/* check for double acknowledged interrupts */
 310			hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 311			if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
 312				hw_base += runtime->buffer_size;
 313				if (hw_base >= runtime->boundary) {
 314					hw_base = 0;
 315					crossed_boundary++;
 316				}
 317				new_hw_ptr = hw_base + pos;
 318				goto __delta;
 319			}
 320		}
 321	}
  322	/* new_hw_ptr might be lower than old_hw_ptr when the */
  323	/* pointer crosses the end of the ring buffer */
 324	if (new_hw_ptr < old_hw_ptr) {
 325		hw_base += runtime->buffer_size;
 326		if (hw_base >= runtime->boundary) {
 327			hw_base = 0;
 328			crossed_boundary++;
 329		}
 330		new_hw_ptr = hw_base + pos;
 331	}
 332      __delta:
 333	delta = new_hw_ptr - old_hw_ptr;
 334	if (delta < 0)
 335		delta += runtime->boundary;
 336
 337	if (runtime->no_period_wakeup) {
 338		snd_pcm_sframes_t xrun_threshold;
 339		/*
 340		 * Without regular period interrupts, we have to check
 341		 * the elapsed time to detect xruns.
 342		 */
 343		jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 344		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
 345			goto no_delta_check;
 346		hdelta = jdelta - delta * HZ / runtime->rate;
 347		xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
 348		while (hdelta > xrun_threshold) {
 349			delta += runtime->buffer_size;
 350			hw_base += runtime->buffer_size;
 351			if (hw_base >= runtime->boundary) {
 352				hw_base = 0;
 353				crossed_boundary++;
 354			}
 355			new_hw_ptr = hw_base + pos;
 356			hdelta -= runtime->hw_ptr_buffer_jiffies;
 357		}
 358		goto no_delta_check;
 359	}
 360
 361	/* something must be really wrong */
 362	if (delta >= runtime->buffer_size + runtime->period_size) {
 363		hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
 364			     "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 365			     substream->stream, (long)pos,
 366			     (long)new_hw_ptr, (long)old_hw_ptr);
 367		return 0;
 368	}
 369
 370	/* Do jiffies check only in xrun_debug mode */
 371	if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
 372		goto no_jiffies_check;
 373
  374	/* Skip the jiffies check for hardware with the BATCH flag.
 375	 * Such hardware usually just increases the position at each IRQ,
 376	 * thus it can't give any strange position.
 377	 */
 378	if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
 379		goto no_jiffies_check;
 380	hdelta = delta;
 381	if (hdelta < runtime->delay)
 382		goto no_jiffies_check;
 383	hdelta -= runtime->delay;
 384	jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
 385	if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
 386		delta = jdelta /
 387			(((runtime->period_size * HZ) / runtime->rate)
 388								+ HZ/100);
  389		/* move new_hw_ptr according to jiffies, not the pos variable */
 390		new_hw_ptr = old_hw_ptr;
 391		hw_base = delta;
 392		/* use loop to avoid checks for delta overflows */
 393		/* the delta value is small or zero in most cases */
 394		while (delta > 0) {
 395			new_hw_ptr += runtime->period_size;
 396			if (new_hw_ptr >= runtime->boundary) {
 397				new_hw_ptr -= runtime->boundary;
 398				crossed_boundary--;
 399			}
 400			delta--;
 401		}
 402		/* align hw_base to buffer_size */
 403		hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
 404			     "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
 405			     (long)pos, (long)hdelta,
 406			     (long)runtime->period_size, jdelta,
 407			     ((hdelta * HZ) / runtime->rate), hw_base,
 408			     (unsigned long)old_hw_ptr,
 409			     (unsigned long)new_hw_ptr);
 410		/* reset values to proper state */
 411		delta = 0;
 412		hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
 413	}
 414 no_jiffies_check:
 415	if (delta > runtime->period_size + runtime->period_size / 2) {
 416		hw_ptr_error(substream, in_interrupt,
 417			     "Lost interrupts?",
 418			     "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
 419			     substream->stream, (long)delta,
 420			     (long)new_hw_ptr,
 421			     (long)old_hw_ptr);
 422	}
 423
 424 no_delta_check:
 425	if (runtime->status->hw_ptr == new_hw_ptr) {
 426		update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 427		return 0;
 428	}
 429
 430	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
 431	    runtime->silence_size > 0)
 432		snd_pcm_playback_silence(substream, new_hw_ptr);
 433
 434	if (in_interrupt) {
 435		delta = new_hw_ptr - runtime->hw_ptr_interrupt;
 436		if (delta < 0)
 437			delta += runtime->boundary;
 438		delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
 439		runtime->hw_ptr_interrupt += delta;
 440		if (runtime->hw_ptr_interrupt >= runtime->boundary)
 441			runtime->hw_ptr_interrupt -= runtime->boundary;
 442	}
 443	runtime->hw_ptr_base = hw_base;
 444	runtime->status->hw_ptr = new_hw_ptr;
 445	runtime->hw_ptr_jiffies = curr_jiffies;
 446	if (crossed_boundary) {
 447		snd_BUG_ON(crossed_boundary != 1);
 448		runtime->hw_ptr_wrap += runtime->boundary;
 449	}
 450
 451	update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
 452
 453	return snd_pcm_update_state(substream, runtime);
 454}
 455
 456/* CAUTION: call it with irq disabled */
 457int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
 458{
 459	return snd_pcm_update_hw_ptr0(substream, 0);
 460}
 461
 462/**
 463 * snd_pcm_set_ops - set the PCM operators
 464 * @pcm: the pcm instance
 465 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
 466 * @ops: the operator table
 467 *
 468 * Sets the given PCM operators to the pcm instance.
 469 */
 470void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
 471		     const struct snd_pcm_ops *ops)
 472{
 473	struct snd_pcm_str *stream = &pcm->streams[direction];
 474	struct snd_pcm_substream *substream;
 475	
 476	for (substream = stream->substream; substream != NULL; substream = substream->next)
 477		substream->ops = ops;
 478}
 479EXPORT_SYMBOL(snd_pcm_set_ops);
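/*
 * A sketch of the usual probe-time sequence: create the PCM device and
 * attach the operator tables with snd_pcm_set_ops().  The chip structure
 * and the example_*_ops tables are hypothetical and assumed to be defined
 * elsewhere in the driver.
 */
struct example_chip {
	struct snd_card *card;
};

extern const struct snd_pcm_ops example_playback_ops;	/* hypothetical */
extern const struct snd_pcm_ops example_capture_ops;	/* hypothetical */

static int example_new_pcm(struct example_chip *chip)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(chip->card, "Example PCM", 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	pcm->private_data = chip;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &example_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &example_capture_ops);
	return 0;
}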
 480
 481/**
  482 * snd_pcm_set_sync - set the PCM sync id
 483 * @substream: the pcm substream
 484 *
 485 * Sets the PCM sync identifier for the card.
 486 */
 487void snd_pcm_set_sync(struct snd_pcm_substream *substream)
 488{
 489	struct snd_pcm_runtime *runtime = substream->runtime;
 490	
 491	runtime->sync.id32[0] = substream->pcm->card->number;
 492	runtime->sync.id32[1] = -1;
 493	runtime->sync.id32[2] = -1;
 494	runtime->sync.id32[3] = -1;
 495}
 496EXPORT_SYMBOL(snd_pcm_set_sync);
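/*
 * A sketch of a driver's .open callback: describe the hardware and call
 * snd_pcm_set_sync() so that substreams of this card expose a matching
 * sync ID.  example_pcm_hardware is a hypothetical struct snd_pcm_hardware
 * defined elsewhere in the driver.
 */
extern const struct snd_pcm_hardware example_pcm_hardware;

static int example_open(struct snd_pcm_substream *substream)
{
	substream->runtime->hw = example_pcm_hardware;
	snd_pcm_set_sync(substream);
	return 0;
}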
 497
 498/*
 499 *  Standard ioctl routine
 500 */
 501
 502static inline unsigned int div32(unsigned int a, unsigned int b, 
 503				 unsigned int *r)
 504{
 505	if (b == 0) {
 506		*r = 0;
 507		return UINT_MAX;
 508	}
 509	*r = a % b;
 510	return a / b;
 511}
 512
 513static inline unsigned int div_down(unsigned int a, unsigned int b)
 514{
 515	if (b == 0)
 516		return UINT_MAX;
 517	return a / b;
 518}
 519
 520static inline unsigned int div_up(unsigned int a, unsigned int b)
 521{
 522	unsigned int r;
 523	unsigned int q;
 524	if (b == 0)
 525		return UINT_MAX;
 526	q = div32(a, b, &r);
 527	if (r)
 528		++q;
 529	return q;
 530}
 531
 532static inline unsigned int mul(unsigned int a, unsigned int b)
 533{
 534	if (a == 0)
 535		return 0;
 536	if (div_down(UINT_MAX, a) < b)
 537		return UINT_MAX;
 538	return a * b;
 539}
 540
 541static inline unsigned int muldiv32(unsigned int a, unsigned int b,
 542				    unsigned int c, unsigned int *r)
 543{
 544	u_int64_t n = (u_int64_t) a * b;
 545	if (c == 0) {
 546		*r = 0;
 547		return UINT_MAX;
 548	}
 549	n = div_u64_rem(n, c, r);
 550	if (n >= UINT_MAX) {
 551		*r = 0;
 552		return UINT_MAX;
 553	}
 554	return n;
 555}
 556
 557/**
 558 * snd_interval_refine - refine the interval value of configurator
 559 * @i: the interval value to refine
 560 * @v: the interval value to refer to
 561 *
 562 * Refines the interval value with the reference value.
 563 * The interval is changed to the range satisfying both intervals.
  564 * The interval status (min, max, integer, etc.) is evaluated.
 565 *
 566 * Return: Positive if the value is changed, zero if it's not changed, or a
 567 * negative error code.
 568 */
 569int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
 570{
 571	int changed = 0;
 572	if (snd_BUG_ON(snd_interval_empty(i)))
 573		return -EINVAL;
 574	if (i->min < v->min) {
 575		i->min = v->min;
 576		i->openmin = v->openmin;
 577		changed = 1;
 578	} else if (i->min == v->min && !i->openmin && v->openmin) {
 579		i->openmin = 1;
 580		changed = 1;
 581	}
 582	if (i->max > v->max) {
 583		i->max = v->max;
 584		i->openmax = v->openmax;
 585		changed = 1;
 586	} else if (i->max == v->max && !i->openmax && v->openmax) {
 587		i->openmax = 1;
 588		changed = 1;
 589	}
 590	if (!i->integer && v->integer) {
 591		i->integer = 1;
 592		changed = 1;
 593	}
 594	if (i->integer) {
 595		if (i->openmin) {
 596			i->min++;
 597			i->openmin = 0;
 598		}
 599		if (i->openmax) {
 600			i->max--;
 601			i->openmax = 0;
 602		}
 603	} else if (!i->openmin && !i->openmax && i->min == i->max)
 604		i->integer = 1;
 605	if (snd_interval_checkempty(i)) {
 606		snd_interval_none(i);
 607		return -EINVAL;
 608	}
 609	return changed;
 610}
 611EXPORT_SYMBOL(snd_interval_refine);
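/*
 * A worked example of snd_interval_refine(): refining a rate interval of
 * [8000, 192000] with a reference of [44100, 48000] narrows it to
 * [44100, 48000] and returns a positive value because the interval changed.
 * The helper name and the concrete numbers are hypothetical.
 */
static int example_refine_rate(struct snd_interval *rate)
{
	struct snd_interval ref = {
		.min = 44100,
		.max = 48000,
		.integer = 1,
	};

	return snd_interval_refine(rate, &ref);
}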
 612
 613static int snd_interval_refine_first(struct snd_interval *i)
 614{
 615	const unsigned int last_max = i->max;
 616
 617	if (snd_BUG_ON(snd_interval_empty(i)))
 618		return -EINVAL;
 619	if (snd_interval_single(i))
 620		return 0;
 621	i->max = i->min;
 622	if (i->openmin)
 623		i->max++;
 624	/* only exclude max value if also excluded before refine */
 625	i->openmax = (i->openmax && i->max >= last_max);
 626	return 1;
 627}
 628
 629static int snd_interval_refine_last(struct snd_interval *i)
 630{
 631	const unsigned int last_min = i->min;
 632
 633	if (snd_BUG_ON(snd_interval_empty(i)))
 634		return -EINVAL;
 635	if (snd_interval_single(i))
 636		return 0;
 637	i->min = i->max;
 638	if (i->openmax)
 639		i->min--;
 640	/* only exclude min value if also excluded before refine */
 641	i->openmin = (i->openmin && i->min <= last_min);
 642	return 1;
 643}
 644
 645void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 646{
 647	if (a->empty || b->empty) {
 648		snd_interval_none(c);
 649		return;
 650	}
 651	c->empty = 0;
 652	c->min = mul(a->min, b->min);
 653	c->openmin = (a->openmin || b->openmin);
 654	c->max = mul(a->max,  b->max);
 655	c->openmax = (a->openmax || b->openmax);
 656	c->integer = (a->integer && b->integer);
 657}
 658
 659/**
 660 * snd_interval_div - refine the interval value with division
 661 * @a: dividend
 662 * @b: divisor
 663 * @c: quotient
 664 *
 665 * c = a / b
 666 *
  667 * The result is stored in @c.
 668 */
 669void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
 670{
 671	unsigned int r;
 672	if (a->empty || b->empty) {
 673		snd_interval_none(c);
 674		return;
 675	}
 676	c->empty = 0;
 677	c->min = div32(a->min, b->max, &r);
 678	c->openmin = (r || a->openmin || b->openmax);
 679	if (b->min > 0) {
 680		c->max = div32(a->max, b->min, &r);
 681		if (r) {
 682			c->max++;
 683			c->openmax = 1;
 684		} else
 685			c->openmax = (a->openmax || b->openmin);
 686	} else {
 687		c->max = UINT_MAX;
 688		c->openmax = 0;
 689	}
 690	c->integer = 0;
 691}
 692
 693/**
 694 * snd_interval_muldivk - refine the interval value
 695 * @a: dividend 1
 696 * @b: dividend 2
 697 * @k: divisor (as integer)
 698 * @c: result
  699 *
 700 * c = a * b / k
 701 *
  702 * The result is stored in @c.
 703 */
 704void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
 705		      unsigned int k, struct snd_interval *c)
 706{
 707	unsigned int r;
 708	if (a->empty || b->empty) {
 709		snd_interval_none(c);
 710		return;
 711	}
 712	c->empty = 0;
 713	c->min = muldiv32(a->min, b->min, k, &r);
 714	c->openmin = (r || a->openmin || b->openmin);
 715	c->max = muldiv32(a->max, b->max, k, &r);
 716	if (r) {
 717		c->max++;
 718		c->openmax = 1;
 719	} else
 720		c->openmax = (a->openmax || b->openmax);
 721	c->integer = 0;
 722}
 723
 724/**
 725 * snd_interval_mulkdiv - refine the interval value
 726 * @a: dividend 1
 727 * @k: dividend 2 (as integer)
 728 * @b: divisor
 729 * @c: result
 730 *
 731 * c = a * k / b
 732 *
  733 * The result is stored in @c.
 734 */
 735void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
 736		      const struct snd_interval *b, struct snd_interval *c)
 737{
 738	unsigned int r;
 739	if (a->empty || b->empty) {
 740		snd_interval_none(c);
 741		return;
 742	}
 743	c->empty = 0;
 744	c->min = muldiv32(a->min, k, b->max, &r);
 745	c->openmin = (r || a->openmin || b->openmax);
 746	if (b->min > 0) {
 747		c->max = muldiv32(a->max, k, b->min, &r);
 748		if (r) {
 749			c->max++;
 750			c->openmax = 1;
 751		} else
 752			c->openmax = (a->openmax || b->openmin);
 753	} else {
 754		c->max = UINT_MAX;
 755		c->openmax = 0;
 756	}
 757	c->integer = 0;
 758}
 759
 760/* ---- */
 761
 762
 763/**
 764 * snd_interval_ratnum - refine the interval value
 765 * @i: interval to refine
 766 * @rats_count: number of ratnum_t 
 767 * @rats: ratnum_t array
 768 * @nump: pointer to store the resultant numerator
 769 * @denp: pointer to store the resultant denominator
 770 *
 771 * Return: Positive if the value is changed, zero if it's not changed, or a
 772 * negative error code.
 773 */
 774int snd_interval_ratnum(struct snd_interval *i,
 775			unsigned int rats_count, const struct snd_ratnum *rats,
 776			unsigned int *nump, unsigned int *denp)
 777{
 778	unsigned int best_num, best_den;
 779	int best_diff;
 780	unsigned int k;
 781	struct snd_interval t;
 782	int err;
 783	unsigned int result_num, result_den;
 784	int result_diff;
 785
 786	best_num = best_den = best_diff = 0;
 787	for (k = 0; k < rats_count; ++k) {
 788		unsigned int num = rats[k].num;
 789		unsigned int den;
 790		unsigned int q = i->min;
 791		int diff;
 792		if (q == 0)
 793			q = 1;
 794		den = div_up(num, q);
 795		if (den < rats[k].den_min)
 796			continue;
 797		if (den > rats[k].den_max)
 798			den = rats[k].den_max;
 799		else {
 800			unsigned int r;
 801			r = (den - rats[k].den_min) % rats[k].den_step;
 802			if (r != 0)
 803				den -= r;
 804		}
 805		diff = num - q * den;
 806		if (diff < 0)
 807			diff = -diff;
 808		if (best_num == 0 ||
 809		    diff * best_den < best_diff * den) {
 810			best_diff = diff;
 811			best_den = den;
 812			best_num = num;
 813		}
 814	}
 815	if (best_den == 0) {
 816		i->empty = 1;
 817		return -EINVAL;
 818	}
 819	t.min = div_down(best_num, best_den);
 820	t.openmin = !!(best_num % best_den);
 821	
 822	result_num = best_num;
 823	result_diff = best_diff;
 824	result_den = best_den;
 825	best_num = best_den = best_diff = 0;
 826	for (k = 0; k < rats_count; ++k) {
 827		unsigned int num = rats[k].num;
 828		unsigned int den;
 829		unsigned int q = i->max;
 830		int diff;
 831		if (q == 0) {
 832			i->empty = 1;
 833			return -EINVAL;
 834		}
 835		den = div_down(num, q);
 836		if (den > rats[k].den_max)
 837			continue;
 838		if (den < rats[k].den_min)
 839			den = rats[k].den_min;
 840		else {
 841			unsigned int r;
 842			r = (den - rats[k].den_min) % rats[k].den_step;
 843			if (r != 0)
 844				den += rats[k].den_step - r;
 845		}
 846		diff = q * den - num;
 847		if (diff < 0)
 848			diff = -diff;
 849		if (best_num == 0 ||
 850		    diff * best_den < best_diff * den) {
 851			best_diff = diff;
 852			best_den = den;
 853			best_num = num;
 854		}
 855	}
 856	if (best_den == 0) {
 857		i->empty = 1;
 858		return -EINVAL;
 859	}
 860	t.max = div_up(best_num, best_den);
 861	t.openmax = !!(best_num % best_den);
 862	t.integer = 0;
 863	err = snd_interval_refine(i, &t);
 864	if (err < 0)
 865		return err;
 866
 867	if (snd_interval_single(i)) {
 868		if (best_diff * result_den < result_diff * best_den) {
 869			result_num = best_num;
 870			result_den = best_den;
 871		}
 872		if (nump)
 873			*nump = result_num;
 874		if (denp)
 875			*denp = result_den;
 876	}
 877	return err;
 878}
 879EXPORT_SYMBOL(snd_interval_ratnum);
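/*
 * A sketch of the typical use of snd_interval_ratnum(): a hw_params rule
 * that restricts the rate to what a fixed master clock can produce.  The
 * 24.576 MHz clock and the divider range are hypothetical; such a rule
 * would be installed from the .open callback with snd_pcm_hw_rule_add().
 */
static int example_rule_rate(struct snd_pcm_hw_params *params,
			     struct snd_pcm_hw_rule *rule)
{
	static const struct snd_ratnum clock = {
		.num = 24576000,	/* master clock in Hz */
		.den_min = 256,		/* 24576000/256 = 96 kHz */
		.den_max = 3072,	/* 24576000/3072 = 8 kHz */
		.den_step = 256,
	};

	return snd_interval_ratnum(hw_param_interval(params,
						     SNDRV_PCM_HW_PARAM_RATE),
				   1, &clock, NULL, NULL);
}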
 880
 881/**
 882 * snd_interval_ratden - refine the interval value
 883 * @i: interval to refine
 884 * @rats_count: number of struct ratden
 885 * @rats: struct ratden array
 886 * @nump: pointer to store the resultant numerator
 887 * @denp: pointer to store the resultant denominator
 888 *
 889 * Return: Positive if the value is changed, zero if it's not changed, or a
 890 * negative error code.
 891 */
 892static int snd_interval_ratden(struct snd_interval *i,
 893			       unsigned int rats_count,
 894			       const struct snd_ratden *rats,
 895			       unsigned int *nump, unsigned int *denp)
 896{
 897	unsigned int best_num, best_diff, best_den;
 898	unsigned int k;
 899	struct snd_interval t;
 900	int err;
 901
 902	best_num = best_den = best_diff = 0;
 903	for (k = 0; k < rats_count; ++k) {
 904		unsigned int num;
 905		unsigned int den = rats[k].den;
 906		unsigned int q = i->min;
 907		int diff;
 908		num = mul(q, den);
 909		if (num > rats[k].num_max)
 910			continue;
 911		if (num < rats[k].num_min)
 912			num = rats[k].num_max;
 913		else {
 914			unsigned int r;
 915			r = (num - rats[k].num_min) % rats[k].num_step;
 916			if (r != 0)
 917				num += rats[k].num_step - r;
 918		}
 919		diff = num - q * den;
 920		if (best_num == 0 ||
 921		    diff * best_den < best_diff * den) {
 922			best_diff = diff;
 923			best_den = den;
 924			best_num = num;
 925		}
 926	}
 927	if (best_den == 0) {
 928		i->empty = 1;
 929		return -EINVAL;
 930	}
 931	t.min = div_down(best_num, best_den);
 932	t.openmin = !!(best_num % best_den);
 933	
 934	best_num = best_den = best_diff = 0;
 935	for (k = 0; k < rats_count; ++k) {
 936		unsigned int num;
 937		unsigned int den = rats[k].den;
 938		unsigned int q = i->max;
 939		int diff;
 940		num = mul(q, den);
 941		if (num < rats[k].num_min)
 942			continue;
 943		if (num > rats[k].num_max)
 944			num = rats[k].num_max;
 945		else {
 946			unsigned int r;
 947			r = (num - rats[k].num_min) % rats[k].num_step;
 948			if (r != 0)
 949				num -= r;
 950		}
 951		diff = q * den - num;
 952		if (best_num == 0 ||
 953		    diff * best_den < best_diff * den) {
 954			best_diff = diff;
 955			best_den = den;
 956			best_num = num;
 957		}
 958	}
 959	if (best_den == 0) {
 960		i->empty = 1;
 961		return -EINVAL;
 962	}
 963	t.max = div_up(best_num, best_den);
 964	t.openmax = !!(best_num % best_den);
 965	t.integer = 0;
 966	err = snd_interval_refine(i, &t);
 967	if (err < 0)
 968		return err;
 969
 970	if (snd_interval_single(i)) {
 971		if (nump)
 972			*nump = best_num;
 973		if (denp)
 974			*denp = best_den;
 975	}
 976	return err;
 977}
 978
 979/**
 980 * snd_interval_list - refine the interval value from the list
 981 * @i: the interval value to refine
 982 * @count: the number of elements in the list
 983 * @list: the value list
 984 * @mask: the bit-mask to evaluate
 985 *
 986 * Refines the interval value from the list.
 987 * When mask is non-zero, only the elements whose corresponding mask bit is
 988 * set are evaluated.
 989 *
 990 * Return: Positive if the value is changed, zero if it's not changed, or a
 991 * negative error code.
 992 */
 993int snd_interval_list(struct snd_interval *i, unsigned int count,
 994		      const unsigned int *list, unsigned int mask)
 995{
 996	unsigned int k;
 997	struct snd_interval list_range;
 998
 999	if (!count) {
1000		i->empty = 1;
1001		return -EINVAL;
1002	}
1003	snd_interval_any(&list_range);
1004	list_range.min = UINT_MAX;
1005	list_range.max = 0;
1006	for (k = 0; k < count; k++) {
1007		if (mask && !(mask & (1 << k)))
1008			continue;
1009		if (!snd_interval_test(i, list[k]))
1010			continue;
1011		list_range.min = min(list_range.min, list[k]);
1012		list_range.max = max(list_range.max, list[k]);
1013	}
1014	return snd_interval_refine(i, &list_range);
1015}
1016EXPORT_SYMBOL(snd_interval_list);
1017
1018/**
1019 * snd_interval_ranges - refine the interval value from the list of ranges
1020 * @i: the interval value to refine
1021 * @count: the number of elements in the list of ranges
1022 * @ranges: the ranges list
1023 * @mask: the bit-mask to evaluate
1024 *
1025 * Refines the interval value from the list of ranges.
1026 * When mask is non-zero, only the elements whose corresponding mask bit is
1027 * set are evaluated.
1028 *
1029 * Return: Positive if the value is changed, zero if it's not changed, or a
1030 * negative error code.
1031 */
1032int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1033			const struct snd_interval *ranges, unsigned int mask)
1034{
1035	unsigned int k;
1036	struct snd_interval range_union;
1037	struct snd_interval range;
1038
1039	if (!count) {
1040		snd_interval_none(i);
1041		return -EINVAL;
1042	}
1043	snd_interval_any(&range_union);
1044	range_union.min = UINT_MAX;
1045	range_union.max = 0;
1046	for (k = 0; k < count; k++) {
1047		if (mask && !(mask & (1 << k)))
1048			continue;
1049		snd_interval_copy(&range, &ranges[k]);
1050		if (snd_interval_refine(&range, i) < 0)
1051			continue;
1052		if (snd_interval_empty(&range))
1053			continue;
1054
1055		if (range.min < range_union.min) {
1056			range_union.min = range.min;
1057			range_union.openmin = 1;
1058		}
1059		if (range.min == range_union.min && !range.openmin)
1060			range_union.openmin = 0;
1061		if (range.max > range_union.max) {
1062			range_union.max = range.max;
1063			range_union.openmax = 1;
1064		}
1065		if (range.max == range_union.max && !range.openmax)
1066			range_union.openmax = 0;
1067	}
1068	return snd_interval_refine(i, &range_union);
1069}
1070EXPORT_SYMBOL(snd_interval_ranges);
1071
1072static int snd_interval_step(struct snd_interval *i, unsigned int step)
1073{
1074	unsigned int n;
1075	int changed = 0;
1076	n = i->min % step;
1077	if (n != 0 || i->openmin) {
1078		i->min += step - n;
1079		i->openmin = 0;
1080		changed = 1;
1081	}
1082	n = i->max % step;
1083	if (n != 0 || i->openmax) {
1084		i->max -= n;
1085		i->openmax = 0;
1086		changed = 1;
1087	}
1088	if (snd_interval_checkempty(i)) {
1089		i->empty = 1;
1090		return -EINVAL;
1091	}
1092	return changed;
1093}
1094
1095/* Info constraints helpers */
1096
1097/**
1098 * snd_pcm_hw_rule_add - add the hw-constraint rule
1099 * @runtime: the pcm runtime instance
1100 * @cond: condition bits
1101 * @var: the variable to evaluate
1102 * @func: the evaluation function
1103 * @private: the private data pointer passed to function
1104 * @dep: the dependent variables
1105 *
1106 * Return: Zero if successful, or a negative error code on failure.
1107 */
1108int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1109			int var,
1110			snd_pcm_hw_rule_func_t func, void *private,
1111			int dep, ...)
1112{
1113	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1114	struct snd_pcm_hw_rule *c;
1115	unsigned int k;
1116	va_list args;
1117	va_start(args, dep);
1118	if (constrs->rules_num >= constrs->rules_all) {
1119		struct snd_pcm_hw_rule *new;
1120		unsigned int new_rules = constrs->rules_all + 16;
1121		new = krealloc(constrs->rules, new_rules * sizeof(*c),
1122			       GFP_KERNEL);
1123		if (!new) {
1124			va_end(args);
1125			return -ENOMEM;
1126		}
1127		constrs->rules = new;
1128		constrs->rules_all = new_rules;
1129	}
1130	c = &constrs->rules[constrs->rules_num];
1131	c->cond = cond;
1132	c->func = func;
1133	c->var = var;
1134	c->private = private;
1135	k = 0;
1136	while (1) {
1137		if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1138			va_end(args);
1139			return -EINVAL;
1140		}
1141		c->deps[k++] = dep;
1142		if (dep < 0)
1143			break;
1144		dep = va_arg(args, int);
1145	}
1146	constrs->rules_num++;
1147	va_end(args);
1148	return 0;
1149}
1150EXPORT_SYMBOL(snd_pcm_hw_rule_add);
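
/*
 * Editor's note: a minimal usage sketch (not part of this file) showing how a
 * driver could register a custom dependency rule with snd_pcm_hw_rule_add().
 * The foo_* names and the 48 kHz limit are hypothetical; the rule restricts
 * the channel count to at most two whenever the rate can exceed 48000 Hz.
 */
#if 0	/* illustrative only */
static int foo_rule_channels_by_rate(struct snd_pcm_hw_params *params,
				     struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *rate =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval *channels =
		hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };

	if (rate->min > 48000)
		return snd_interval_refine(channels, &t);
	return 0;
}

static int foo_pcm_open(struct snd_pcm_substream *substream)
{
	/* re-evaluate the channel interval whenever the rate changes */
	return snd_pcm_hw_rule_add(substream->runtime, 0,
				   SNDRV_PCM_HW_PARAM_CHANNELS,
				   foo_rule_channels_by_rate, NULL,
				   SNDRV_PCM_HW_PARAM_RATE, -1);
}
#endif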
1151
1152/**
1153 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1154 * @runtime: PCM runtime instance
1155 * @var: hw_params variable to apply the mask
1156 * @mask: the bitmap mask
1157 *
1158 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1159 *
1160 * Return: Zero if successful, or a negative error code on failure.
1161 */
1162int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1163			       u_int32_t mask)
1164{
1165	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1166	struct snd_mask *maskp = constrs_mask(constrs, var);
1167	*maskp->bits &= mask;
1168	memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1169	if (*maskp->bits == 0)
1170		return -EINVAL;
1171	return 0;
1172}
1173
1174/**
1175 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1176 * @runtime: PCM runtime instance
1177 * @var: hw_params variable to apply the mask
1178 * @mask: the 64bit bitmap mask
1179 *
1180 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1181 *
1182 * Return: Zero if successful, or a negative error code on failure.
1183 */
1184int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1185				 u_int64_t mask)
1186{
1187	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1188	struct snd_mask *maskp = constrs_mask(constrs, var);
1189	maskp->bits[0] &= (u_int32_t)mask;
1190	maskp->bits[1] &= (u_int32_t)(mask >> 32);
1191	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1192	if (! maskp->bits[0] && ! maskp->bits[1])
1193		return -EINVAL;
1194	return 0;
1195}
1196EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
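
/*
 * Editor's note: illustrative sketch (not part of this file).  A driver that
 * only supports 16-bit and 32-bit little-endian samples could reduce the
 * FORMAT mask like this from its open callback; foo_limit_formats() is a
 * hypothetical name.
 */
#if 0	/* illustrative only */
static int foo_limit_formats(struct snd_pcm_runtime *runtime)
{
	return snd_pcm_hw_constraint_mask64(runtime,
					    SNDRV_PCM_HW_PARAM_FORMAT,
					    SNDRV_PCM_FMTBIT_S16_LE |
					    SNDRV_PCM_FMTBIT_S32_LE);
}
#endif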
1197
1198/**
1199 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1200 * @runtime: PCM runtime instance
1201 * @var: hw_params variable to apply the integer constraint
1202 *
1203 * Apply an integer constraint to an interval parameter.
1204 *
1205 * Return: Positive if the value is changed, zero if it's not changed, or a
1206 * negative error code.
1207 */
1208int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1209{
1210	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1211	return snd_interval_setinteger(constrs_interval(constrs, var));
1212}
1213EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
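
/*
 * Editor's note: illustrative sketch (not part of this file).  The most common
 * use of snd_pcm_hw_constraint_integer() is to force a whole number of periods
 * per buffer, typically from the driver's open callback; foo_open() is a
 * hypothetical name.
 */
#if 0	/* illustrative only */
static int foo_open(struct snd_pcm_substream *substream)
{
	int err;

	/* require the buffer size to be an integer multiple of the period size */
	err = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (err < 0)
		return err;
	return 0;
}
#endif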
1214
1215/**
1216 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1217 * @runtime: PCM runtime instance
1218 * @var: hw_params variable to apply the range
1219 * @min: the minimal value
1220 * @max: the maximal value
1221 * 
1222 * Apply the min/max range constraint to an interval parameter.
1223 *
1224 * Return: Positive if the value is changed, zero if it's not changed, or a
1225 * negative error code.
1226 */
1227int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1228				 unsigned int min, unsigned int max)
1229{
1230	struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1231	struct snd_interval t;
1232	t.min = min;
1233	t.max = max;
1234	t.openmin = t.openmax = 0;
1235	t.integer = 0;
1236	return snd_interval_refine(constrs_interval(constrs, var), &t);
1237}
1238EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
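
/*
 * Editor's note: illustrative sketch (not part of this file).  A driver whose
 * DMA engine handles between 4 KiB and 64 KiB per buffer could clamp
 * BUFFER_BYTES accordingly; the limits and the foo_* name are hypothetical.
 */
#if 0	/* illustrative only */
static int foo_limit_buffer_bytes(struct snd_pcm_runtime *runtime)
{
	int err;

	/* allow buffers between 4 KiB and 64 KiB (hypothetical DMA limits) */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					   4096, 64 * 1024);
	return err < 0 ? err : 0;
}
#endif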
1239
1240static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1241				struct snd_pcm_hw_rule *rule)
1242{
1243	struct snd_pcm_hw_constraint_list *list = rule->private;
1244	return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1245}		
1246
1247
1248/**
1249 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1250 * @runtime: PCM runtime instance
1251 * @cond: condition bits
1252 * @var: hw_params variable to apply the list constraint
1253 * @l: list
1254 * 
1255 * Apply the list of constraints to an interval parameter.
1256 *
1257 * Return: Zero if successful, or a negative error code on failure.
1258 */
1259int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1260			       unsigned int cond,
1261			       snd_pcm_hw_param_t var,
1262			       const struct snd_pcm_hw_constraint_list *l)
1263{
1264	return snd_pcm_hw_rule_add(runtime, cond, var,
1265				   snd_pcm_hw_rule_list, (void *)l,
1266				   var, -1);
1267}
1268EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
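
/*
 * Editor's note: illustrative sketch (not part of this file), following the
 * pattern described in the ALSA driver documentation.  The hardware here is
 * assumed to support only four discrete rates; all foo_* names are
 * hypothetical.
 */
#if 0	/* illustrative only */
static const unsigned int foo_rates[] = { 8000, 16000, 44100, 48000 };

static const struct snd_pcm_hw_constraint_list foo_rate_list = {
	.count = ARRAY_SIZE(foo_rates),
	.list = foo_rates,
	.mask = 0,
};

static int foo_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_list(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &foo_rate_list);
}
#endif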
1269
1270static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1271				  struct snd_pcm_hw_rule *rule)
1272{
1273	struct snd_pcm_hw_constraint_ranges *r = rule->private;
1274	return snd_interval_ranges(hw_param_interval(params, rule->var),
1275				   r->count, r->ranges, r->mask);
1276}
1277
1278
1279/**
1280 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1281 * @runtime: PCM runtime instance
1282 * @cond: condition bits
1283 * @var: hw_params variable to apply the list of range constraints
1284 * @r: ranges
1285 *
1286 * Apply the list of range constraints to an interval parameter.
1287 *
1288 * Return: Zero if successful, or a negative error code on failure.
1289 */
1290int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1291				 unsigned int cond,
1292				 snd_pcm_hw_param_t var,
1293				 const struct snd_pcm_hw_constraint_ranges *r)
1294{
1295	return snd_pcm_hw_rule_add(runtime, cond, var,
1296				   snd_pcm_hw_rule_ranges, (void *)r,
1297				   var, -1);
1298}
1299EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1300
1301static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1302				   struct snd_pcm_hw_rule *rule)
1303{
1304	const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1305	unsigned int num = 0, den = 0;
1306	int err;
1307	err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1308				  r->nrats, r->rats, &num, &den);
1309	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1310		params->rate_num = num;
1311		params->rate_den = den;
1312	}
1313	return err;
1314}
1315
1316/**
1317 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1318 * @runtime: PCM runtime instance
1319 * @cond: condition bits
1320 * @var: hw_params variable to apply the ratnums constraint
1321 * @r: struct snd_ratnums constraints
1322 *
1323 * Return: Zero if successful, or a negative error code on failure.
1324 */
1325int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, 
1326				  unsigned int cond,
1327				  snd_pcm_hw_param_t var,
1328				  const struct snd_pcm_hw_constraint_ratnums *r)
1329{
1330	return snd_pcm_hw_rule_add(runtime, cond, var,
1331				   snd_pcm_hw_rule_ratnums, (void *)r,
1332				   var, -1);
1333}
1334EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
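
/*
 * Editor's note: illustrative sketch (not part of this file).  Rates derived
 * from a fixed master clock divided by a programmable divider map naturally
 * onto a ratnum constraint; the 24.576 MHz clock, the divider range and the
 * foo_* names are hypothetical.
 */
#if 0	/* illustrative only */
static const struct snd_ratnum foo_clock = {
	.num = 24576000,	/* master clock in Hz */
	.den_min = 256,		/* smallest divider */
	.den_max = 4096,	/* largest divider */
	.den_step = 256,	/* divider granularity */
};

static const struct snd_pcm_hw_constraint_ratnums foo_rate_ratnums = {
	.nrats = 1,
	.rats = &foo_clock,
};

static int foo_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_ratnums(substream->runtime, 0,
					     SNDRV_PCM_HW_PARAM_RATE,
					     &foo_rate_ratnums);
}
#endif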
1335
1336static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1337				   struct snd_pcm_hw_rule *rule)
1338{
1339	const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1340	unsigned int num = 0, den = 0;
1341	int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1342				  r->nrats, r->rats, &num, &den);
1343	if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1344		params->rate_num = num;
1345		params->rate_den = den;
1346	}
1347	return err;
1348}
1349
1350/**
1351 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1352 * @runtime: PCM runtime instance
1353 * @cond: condition bits
1354 * @var: hw_params variable to apply the ratdens constraint
1355 * @r: struct snd_ratdens constraints
1356 *
1357 * Return: Zero if successful, or a negative error code on failure.
1358 */
1359int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, 
1360				  unsigned int cond,
1361				  snd_pcm_hw_param_t var,
1362				  const struct snd_pcm_hw_constraint_ratdens *r)
1363{
1364	return snd_pcm_hw_rule_add(runtime, cond, var,
1365				   snd_pcm_hw_rule_ratdens, (void *)r,
1366				   var, -1);
1367}
1368EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1369
1370static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1371				  struct snd_pcm_hw_rule *rule)
1372{
1373	unsigned int l = (unsigned long) rule->private;
1374	int width = l & 0xffff;
1375	unsigned int msbits = l >> 16;
1376	const struct snd_interval *i =
1377		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1378
1379	if (!snd_interval_single(i))
1380		return 0;
1381
1382	if ((snd_interval_value(i) == width) ||
1383	    (width == 0 && snd_interval_value(i) > msbits))
1384		params->msbits = min_not_zero(params->msbits, msbits);
1385
1386	return 0;
1387}
1388
1389/**
1390 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1391 * @runtime: PCM runtime instance
1392 * @cond: condition bits
1393 * @width: sample bits width
1394 * @msbits: msbits width
1395 *
1396 * This constraint will set the number of most significant bits (msbits) if a
1397 * sample format with the specified width has been selected. If width is set to 0,
1398 * the msbits will be set for any sample format with a width larger than the
1399 * specified msbits.
1400 *
1401 * Return: Zero if successful, or a negative error code on failure.
1402 */
1403int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, 
1404				 unsigned int cond,
1405				 unsigned int width,
1406				 unsigned int msbits)
1407{
1408	unsigned long l = (msbits << 16) | width;
1409	return snd_pcm_hw_rule_add(runtime, cond, -1,
1410				    snd_pcm_hw_rule_msbits,
1411				    (void*) l,
1412				    SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1413}
1414EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
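
/*
 * Editor's note: illustrative sketch (not part of this file).  Codecs that
 * accept 32-bit containers but only resolve 24 bits typically advertise that
 * with this constraint; foo_open() is a hypothetical caller.
 */
#if 0	/* illustrative only */
static int foo_open(struct snd_pcm_substream *substream)
{
	/* 32-bit samples carry only 24 significant bits on this hardware */
	return snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
}
#endif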
1415
1416static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1417				struct snd_pcm_hw_rule *rule)
1418{
1419	unsigned long step = (unsigned long) rule->private;
1420	return snd_interval_step(hw_param_interval(params, rule->var), step);
1421}
1422
1423/**
1424 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1425 * @runtime: PCM runtime instance
1426 * @cond: condition bits
1427 * @var: hw_params variable to apply the step constraint
1428 * @step: step size
1429 *
1430 * Return: Zero if successful, or a negative error code on failure.
1431 */
1432int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1433			       unsigned int cond,
1434			       snd_pcm_hw_param_t var,
1435			       unsigned long step)
1436{
1437	return snd_pcm_hw_rule_add(runtime, cond, var, 
1438				   snd_pcm_hw_rule_step, (void *) step,
1439				   var, -1);
1440}
1441EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
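
/*
 * Editor's note: illustrative sketch (not part of this file).  A DMA engine
 * that transfers in 32-byte bursts could require period and buffer sizes to
 * be multiples of that burst; the names and the 32-byte step are hypothetical.
 */
#if 0	/* illustrative only */
static int foo_align_to_burst(struct snd_pcm_runtime *runtime)
{
	int err;

	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
	if (err < 0)
		return err;
	return snd_pcm_hw_constraint_step(runtime, 0,
					  SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
}
#endif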
1442
1443static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1444{
1445	static unsigned int pow2_sizes[] = {
1446		1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1447		1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1448		1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1449		1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1450	};
1451	return snd_interval_list(hw_param_interval(params, rule->var),
1452				 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1453}		
1454
1455/**
1456 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1457 * @runtime: PCM runtime instance
1458 * @cond: condition bits
1459 * @var: hw_params variable to apply the power-of-2 constraint
1460 *
1461 * Return: Zero if successful, or a negative error code on failure.
1462 */
1463int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1464			       unsigned int cond,
1465			       snd_pcm_hw_param_t var)
1466{
1467	return snd_pcm_hw_rule_add(runtime, cond, var, 
1468				   snd_pcm_hw_rule_pow2, NULL,
1469				   var, -1);
1470}
1471EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
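
/*
 * Editor's note: illustrative sketch (not part of this file).  Hardware whose
 * ring-buffer length register only encodes powers of two can restrict
 * BUFFER_BYTES with this helper; foo_open() is a hypothetical caller.
 */
#if 0	/* illustrative only */
static int foo_open(struct snd_pcm_substream *substream)
{
	return snd_pcm_hw_constraint_pow2(substream->runtime, 0,
					  SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
}
#endif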
1472
1473static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1474					   struct snd_pcm_hw_rule *rule)
1475{
1476	unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1477	struct snd_interval *rate;
1478
1479	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1480	return snd_interval_list(rate, 1, &base_rate, 0);
1481}
1482
1483/**
1484 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1485 * @runtime: PCM runtime instance
1486 * @base_rate: the rate at which the hardware does not resample
1487 *
1488 * Return: Zero if successful, or a negative error code on failure.
1489 */
1490int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1491			       unsigned int base_rate)
1492{
1493	return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1494				   SNDRV_PCM_HW_PARAM_RATE,
1495				   snd_pcm_hw_rule_noresample_func,
1496				   (void *)(uintptr_t)base_rate,
1497				   SNDRV_PCM_HW_PARAM_RATE, -1);
1498}
1499EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
1500
1501static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1502				  snd_pcm_hw_param_t var)
1503{
1504	if (hw_is_mask(var)) {
1505		snd_mask_any(hw_param_mask(params, var));
1506		params->cmask |= 1 << var;
1507		params->rmask |= 1 << var;
1508		return;
1509	}
1510	if (hw_is_interval(var)) {
1511		snd_interval_any(hw_param_interval(params, var));
1512		params->cmask |= 1 << var;
1513		params->rmask |= 1 << var;
1514		return;
1515	}
1516	snd_BUG();
1517}
1518
1519void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1520{
1521	unsigned int k;
1522	memset(params, 0, sizeof(*params));
1523	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1524		_snd_pcm_hw_param_any(params, k);
1525	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1526		_snd_pcm_hw_param_any(params, k);
1527	params->info = ~0U;
1528}
1529EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1530
1531/**
1532 * snd_pcm_hw_param_value - return @params field @var value
1533 * @params: the hw_params instance
1534 * @var: parameter to retrieve
1535 * @dir: pointer to the direction (-1,0,1) or %NULL
1536 *
1537 * Return: The value for field @var if it's fixed in configuration space
1538 * defined by @params. -%EINVAL otherwise.
1539 */
1540int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1541			   snd_pcm_hw_param_t var, int *dir)
1542{
1543	if (hw_is_mask(var)) {
1544		const struct snd_mask *mask = hw_param_mask_c(params, var);
1545		if (!snd_mask_single(mask))
1546			return -EINVAL;
1547		if (dir)
1548			*dir = 0;
1549		return snd_mask_value(mask);
1550	}
1551	if (hw_is_interval(var)) {
1552		const struct snd_interval *i = hw_param_interval_c(params, var);
1553		if (!snd_interval_single(i))
1554			return -EINVAL;
1555		if (dir)
1556			*dir = i->openmin;
1557		return snd_interval_value(i);
1558	}
1559	return -EINVAL;
1560}
1561EXPORT_SYMBOL(snd_pcm_hw_param_value);
1562
1563void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1564				snd_pcm_hw_param_t var)
1565{
1566	if (hw_is_mask(var)) {
1567		snd_mask_none(hw_param_mask(params, var));
1568		params->cmask |= 1 << var;
1569		params->rmask |= 1 << var;
1570	} else if (hw_is_interval(var)) {
1571		snd_interval_none(hw_param_interval(params, var));
1572		params->cmask |= 1 << var;
1573		params->rmask |= 1 << var;
1574	} else {
1575		snd_BUG();
1576	}
1577}
1578EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1579
1580static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1581				   snd_pcm_hw_param_t var)
1582{
1583	int changed;
1584	if (hw_is_mask(var))
1585		changed = snd_mask_refine_first(hw_param_mask(params, var));
1586	else if (hw_is_interval(var))
1587		changed = snd_interval_refine_first(hw_param_interval(params, var));
1588	else
1589		return -EINVAL;
1590	if (changed > 0) {
1591		params->cmask |= 1 << var;
1592		params->rmask |= 1 << var;
1593	}
1594	return changed;
1595}
1596
1597
1598/**
1599 * snd_pcm_hw_param_first - refine config space and return minimum value
1600 * @pcm: PCM instance
1601 * @params: the hw_params instance
1602 * @var: parameter to retrieve
1603 * @dir: pointer to the direction (-1,0,1) or %NULL
1604 *
1605 * Inside configuration space defined by @params remove from @var all
1606 * values > minimum. Reduce configuration space accordingly.
1607 *
1608 * Return: The minimum, or a negative error code on failure.
1609 */
1610int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, 
1611			   struct snd_pcm_hw_params *params, 
1612			   snd_pcm_hw_param_t var, int *dir)
1613{
1614	int changed = _snd_pcm_hw_param_first(params, var);
1615	if (changed < 0)
1616		return changed;
1617	if (params->rmask) {
1618		int err = snd_pcm_hw_refine(pcm, params);
1619		if (err < 0)
1620			return err;
1621	}
1622	return snd_pcm_hw_param_value(params, var, dir);
1623}
1624EXPORT_SYMBOL(snd_pcm_hw_param_first);
1625
1626static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1627				  snd_pcm_hw_param_t var)
1628{
1629	int changed;
1630	if (hw_is_mask(var))
1631		changed = snd_mask_refine_last(hw_param_mask(params, var));
1632	else if (hw_is_interval(var))
1633		changed = snd_interval_refine_last(hw_param_interval(params, var));
1634	else
1635		return -EINVAL;
1636	if (changed > 0) {
1637		params->cmask |= 1 << var;
1638		params->rmask |= 1 << var;
1639	}
1640	return changed;
1641}
1642
1643
1644/**
1645 * snd_pcm_hw_param_last - refine config space and return maximum value
1646 * @pcm: PCM instance
1647 * @params: the hw_params instance
1648 * @var: parameter to retrieve
1649 * @dir: pointer to the direction (-1,0,1) or %NULL
1650 *
1651 * Inside configuration space defined by @params remove from @var all
1652 * values < maximum. Reduce configuration space accordingly.
1653 *
1654 * Return: The maximum, or a negative error code on failure.
1655 */
1656int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, 
1657			  struct snd_pcm_hw_params *params,
1658			  snd_pcm_hw_param_t var, int *dir)
1659{
1660	int changed = _snd_pcm_hw_param_last(params, var);
1661	if (changed < 0)
1662		return changed;
1663	if (params->rmask) {
1664		int err = snd_pcm_hw_refine(pcm, params);
1665		if (err < 0)
1666			return err;
1667	}
1668	return snd_pcm_hw_param_value(params, var, dir);
1669}
1670EXPORT_SYMBOL(snd_pcm_hw_param_last);
1671
1672static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1673				   void *arg)
1674{
1675	struct snd_pcm_runtime *runtime = substream->runtime;
1676	unsigned long flags;
1677	snd_pcm_stream_lock_irqsave(substream, flags);
1678	if (snd_pcm_running(substream) &&
1679	    snd_pcm_update_hw_ptr(substream) >= 0)
1680		runtime->status->hw_ptr %= runtime->buffer_size;
1681	else {
1682		runtime->status->hw_ptr = 0;
1683		runtime->hw_ptr_wrap = 0;
1684	}
1685	snd_pcm_stream_unlock_irqrestore(substream, flags);
1686	return 0;
1687}
1688
1689static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1690					  void *arg)
1691{
1692	struct snd_pcm_channel_info *info = arg;
1693	struct snd_pcm_runtime *runtime = substream->runtime;
1694	int width;
1695	if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1696		info->offset = -1;
1697		return 0;
1698	}
1699	width = snd_pcm_format_physical_width(runtime->format);
1700	if (width < 0)
1701		return width;
1702	info->offset = 0;
1703	switch (runtime->access) {
1704	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1705	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1706		info->first = info->channel * width;
1707		info->step = runtime->channels * width;
1708		break;
1709	case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1710	case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1711	{
1712		size_t size = runtime->dma_bytes / runtime->channels;
1713		info->first = info->channel * size * 8;
1714		info->step = width;
1715		break;
1716	}
1717	default:
1718		snd_BUG();
1719		break;
1720	}
1721	return 0;
1722}
1723
1724static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1725				       void *arg)
1726{
1727	struct snd_pcm_hw_params *params = arg;
1728	snd_pcm_format_t format;
1729	int channels;
1730	ssize_t frame_size;
1731
1732	params->fifo_size = substream->runtime->hw.fifo_size;
1733	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1734		format = params_format(params);
1735		channels = params_channels(params);
1736		frame_size = snd_pcm_format_size(format, channels);
1737		if (frame_size > 0)
1738			params->fifo_size /= (unsigned)frame_size;
1739	}
1740	return 0;
1741}
1742
1743/**
1744 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1745 * @substream: the pcm substream instance
1746 * @cmd: ioctl command
1747 * @arg: ioctl argument
1748 *
1749 * Processes the generic ioctl commands for PCM.
1750 * Can be passed as the ioctl callback for PCM ops.
1751 *
1752 * Return: Zero if successful, or a negative error code on failure.
1753 */
1754int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1755		      unsigned int cmd, void *arg)
1756{
1757	switch (cmd) {
1758	case SNDRV_PCM_IOCTL1_RESET:
1759		return snd_pcm_lib_ioctl_reset(substream, arg);
1760	case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1761		return snd_pcm_lib_ioctl_channel_info(substream, arg);
1762	case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1763		return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1764	}
1765	return -ENXIO;
1766}
1767EXPORT_SYMBOL(snd_pcm_lib_ioctl);
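
/*
 * Editor's note: illustrative sketch (not part of this file).  The classic way
 * to use this helper is to plug it straight into a driver's snd_pcm_ops as the
 * .ioctl callback; all foo_* callbacks below are hypothetical.
 */
#if 0	/* illustrative only */
static const struct snd_pcm_ops foo_playback_ops = {
	.open		= foo_open,
	.close		= foo_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= foo_hw_params,
	.hw_free	= foo_hw_free,
	.prepare	= foo_prepare,
	.trigger	= foo_trigger,
	.pointer	= foo_pointer,
};
#endif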
1768
1769/**
1770 * snd_pcm_period_elapsed - update the pcm status for the next period
1771 * @substream: the pcm substream instance
1772 *
1773 * This function is called from the interrupt handler when the
1774 * PCM has processed the period size.  It will update the current
1775 * pointer, wake up sleepers, etc.
1776 *
1777 * Even if more than one period has elapsed since the last call, you
1778 * need to call this function only once.
1779 */
1780void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1781{
1782	struct snd_pcm_runtime *runtime;
1783	unsigned long flags;
1784
1785	if (snd_BUG_ON(!substream))
1786		return;
1787
1788	snd_pcm_stream_lock_irqsave(substream, flags);
1789	if (PCM_RUNTIME_CHECK(substream))
1790		goto _unlock;
1791	runtime = substream->runtime;
1792
1793	if (!snd_pcm_running(substream) ||
1794	    snd_pcm_update_hw_ptr0(substream, 1) < 0)
1795		goto _end;
1796
1797#ifdef CONFIG_SND_PCM_TIMER
1798	if (substream->timer_running)
1799		snd_timer_interrupt(substream->timer, 1);
1800#endif
1801 _end:
1802	kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1803 _unlock:
1804	snd_pcm_stream_unlock_irqrestore(substream, flags);
1805}
1806EXPORT_SYMBOL(snd_pcm_period_elapsed);
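
/*
 * Editor's note: illustrative sketch (not part of this file).  A typical
 * driver calls snd_pcm_period_elapsed() from its interrupt handler once the
 * hardware signals that a period has completed.  struct foo_chip, its fields
 * and foo_interrupt() are hypothetical; <linux/interrupt.h> is assumed to be
 * included by the driver.
 */
#if 0	/* illustrative only */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_chip *chip = dev_id;

	/* acknowledge/clear the hardware interrupt status here */

	if (chip->running)
		snd_pcm_period_elapsed(chip->substream);
	return IRQ_HANDLED;
}
#endif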
1807
1808/*
1809 * Wait until avail_min data becomes available
1810 * Returns a negative error code if any error occurs during operation.
1811 * The available space is stored in availp.  When err = 0 and avail = 0
1812 * on the capture stream, it indicates the stream is in DRAINING state.
1813 */
1814static int wait_for_avail(struct snd_pcm_substream *substream,
1815			      snd_pcm_uframes_t *availp)
1816{
1817	struct snd_pcm_runtime *runtime = substream->runtime;
1818	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1819	wait_queue_entry_t wait;
1820	int err = 0;
1821	snd_pcm_uframes_t avail = 0;
1822	long wait_time, tout;
1823
1824	init_waitqueue_entry(&wait, current);
1825	set_current_state(TASK_INTERRUPTIBLE);
1826	add_wait_queue(&runtime->tsleep, &wait);
1827
1828	if (runtime->no_period_wakeup)
1829		wait_time = MAX_SCHEDULE_TIMEOUT;
1830	else {
1831		/* use wait time from substream if available */
1832		if (substream->wait_time) {
1833			wait_time = substream->wait_time;
1834		} else {
1835			wait_time = 10;
1836
1837			if (runtime->rate) {
1838				long t = runtime->period_size * 2 /
1839					 runtime->rate;
1840				wait_time = max(t, wait_time);
1841			}
1842			wait_time = msecs_to_jiffies(wait_time * 1000);
1843		}
1844	}
1845
1846	for (;;) {
1847		if (signal_pending(current)) {
1848			err = -ERESTARTSYS;
1849			break;
1850		}
1851
1852		/*
1853		 * We need to check if space became available already
1854		 * (and thus the wakeup happened already) first to close
1855		 * the race of space already having become available.
1856		 * This check must happen after we have been added to the waitqueue
1857		 * and the current state has been set to INTERRUPTIBLE.
1858		 */
1859		avail = snd_pcm_avail(substream);
1860		if (avail >= runtime->twake)
1861			break;
1862		snd_pcm_stream_unlock_irq(substream);
1863
1864		tout = schedule_timeout(wait_time);
1865
1866		snd_pcm_stream_lock_irq(substream);
1867		set_current_state(TASK_INTERRUPTIBLE);
1868		switch (runtime->status->state) {
1869		case SNDRV_PCM_STATE_SUSPENDED:
1870			err = -ESTRPIPE;
1871			goto _endloop;
1872		case SNDRV_PCM_STATE_XRUN:
1873			err = -EPIPE;
1874			goto _endloop;
1875		case SNDRV_PCM_STATE_DRAINING:
1876			if (is_playback)
1877				err = -EPIPE;
1878			else 
1879				avail = 0; /* indicate draining */
1880			goto _endloop;
1881		case SNDRV_PCM_STATE_OPEN:
1882		case SNDRV_PCM_STATE_SETUP:
1883		case SNDRV_PCM_STATE_DISCONNECTED:
1884			err = -EBADFD;
1885			goto _endloop;
1886		case SNDRV_PCM_STATE_PAUSED:
1887			continue;
1888		}
1889		if (!tout) {
1890			pcm_dbg(substream->pcm,
1891				"%s write error (DMA or IRQ trouble?)\n",
1892				is_playback ? "playback" : "capture");
1893			err = -EIO;
1894			break;
1895		}
1896	}
1897 _endloop:
1898	set_current_state(TASK_RUNNING);
1899	remove_wait_queue(&runtime->tsleep, &wait);
1900	*availp = avail;
1901	return err;
1902}
1903	
1904typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1905			      int channel, unsigned long hwoff,
1906			      void *buf, unsigned long bytes);
1907
1908typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1909			  snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1910
1911/* calculate the target DMA-buffer position to be written/read */
1912static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1913			   int channel, unsigned long hwoff)
1914{
1915	return runtime->dma_area + hwoff +
1916		channel * (runtime->dma_bytes / runtime->channels);
1917}
1918
1919/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1920static int default_write_copy(struct snd_pcm_substream *substream,
1921			      int channel, unsigned long hwoff,
1922			      void *buf, unsigned long bytes)
1923{
1924	if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1925			   (void __user *)buf, bytes))
1926		return -EFAULT;
1927	return 0;
1928}
1929
1930/* default copy_kernel ops for write */
1931static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1932				     int channel, unsigned long hwoff,
1933				     void *buf, unsigned long bytes)
1934{
1935	memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1936	return 0;
1937}
1938
1939/* fill silence instead of copying data; called as a transfer helper
1940 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
1941 * a NULL buffer is passed
1942 */
1943static int fill_silence(struct snd_pcm_substream *substream, int channel,
1944			unsigned long hwoff, void *buf, unsigned long bytes)
1945{
1946	struct snd_pcm_runtime *runtime = substream->runtime;
1947
1948	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1949		return 0;
1950	if (substream->ops->fill_silence)
1951		return substream->ops->fill_silence(substream, channel,
1952						    hwoff, bytes);
1953
1954	snd_pcm_format_set_silence(runtime->format,
1955				   get_dma_ptr(runtime, channel, hwoff),
1956				   bytes_to_samples(runtime, bytes));
1957	return 0;
1958}
1959
1960/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
1961static int default_read_copy(struct snd_pcm_substream *substream,
1962			     int channel, unsigned long hwoff,
1963			     void *buf, unsigned long bytes)
1964{
1965	if (copy_to_user((void __user *)buf,
1966			 get_dma_ptr(substream->runtime, channel, hwoff),
1967			 bytes))
1968		return -EFAULT;
1969	return 0;
1970}
1971
1972/* default copy_kernel ops for read */
1973static int default_read_copy_kernel(struct snd_pcm_substream *substream,
1974				    int channel, unsigned long hwoff,
1975				    void *buf, unsigned long bytes)
1976{
1977	memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
1978	return 0;
1979}
1980
1981/* call transfer function with the converted pointers and sizes;
1982 * for interleaved mode, it's one shot for all samples
1983 */
1984static int interleaved_copy(struct snd_pcm_substream *substream,
1985			    snd_pcm_uframes_t hwoff, void *data,
1986			    snd_pcm_uframes_t off,
1987			    snd_pcm_uframes_t frames,
1988			    pcm_transfer_f transfer)
1989{
1990	struct snd_pcm_runtime *runtime = substream->runtime;
1991
1992	/* convert to bytes */
1993	hwoff = frames_to_bytes(runtime, hwoff);
1994	off = frames_to_bytes(runtime, off);
1995	frames = frames_to_bytes(runtime, frames);
1996	return transfer(substream, 0, hwoff, data + off, frames);
1997}
1998
1999/* call transfer function with the converted pointers and sizes for each
2000 * non-interleaved channel; when the buffer is NULL, fill silence instead of copying
2001 */
2002static int noninterleaved_copy(struct snd_pcm_substream *substream,
2003			       snd_pcm_uframes_t hwoff, void *data,
2004			       snd_pcm_uframes_t off,
2005			       snd_pcm_uframes_t frames,
2006			       pcm_transfer_f transfer)
2007{
2008	struct snd_pcm_runtime *runtime = substream->runtime;
2009	int channels = runtime->channels;
2010	void **bufs = data;
2011	int c, err;
2012
2013	/* convert to bytes; note that it's not frames_to_bytes() here.
2014	 * in non-interleaved mode, we copy for each channel, thus
2015	 * each copy is n_samples bytes x channels = whole frames.
2016	 */
2017	off = samples_to_bytes(runtime, off);
2018	frames = samples_to_bytes(runtime, frames);
2019	hwoff = samples_to_bytes(runtime, hwoff);
2020	for (c = 0; c < channels; ++c, ++bufs) {
2021		if (!data || !*bufs)
2022			err = fill_silence(substream, c, hwoff, NULL, frames);
2023		else
2024			err = transfer(substream, c, hwoff, *bufs + off,
2025				       frames);
2026		if (err < 0)
2027			return err;
2028	}
2029	return 0;
2030}
2031
2032/* fill silence on the given buffer position;
2033 * called from snd_pcm_playback_silence()
2034 */
2035static int fill_silence_frames(struct snd_pcm_substream *substream,
2036			       snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2037{
2038	if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2039	    substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2040		return interleaved_copy(substream, off, NULL, 0, frames,
2041					fill_silence);
2042	else
2043		return noninterleaved_copy(substream, off, NULL, 0, frames,
2044					   fill_silence);
2045}
2046
2047/* sanity-check for read/write methods */
2048static int pcm_sanity_check(struct snd_pcm_substream *substream)
2049{
2050	struct snd_pcm_runtime *runtime;
2051	if (PCM_RUNTIME_CHECK(substream))
2052		return -ENXIO;
2053	runtime = substream->runtime;
2054	if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2055		return -EINVAL;
2056	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2057		return -EBADFD;
2058	return 0;
2059}
2060
2061static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2062{
2063	switch (runtime->status->state) {
2064	case SNDRV_PCM_STATE_PREPARED:
2065	case SNDRV_PCM_STATE_RUNNING:
2066	case SNDRV_PCM_STATE_PAUSED:
2067		return 0;
2068	case SNDRV_PCM_STATE_XRUN:
2069		return -EPIPE;
2070	case SNDRV_PCM_STATE_SUSPENDED:
2071		return -ESTRPIPE;
2072	default:
2073		return -EBADFD;
2074	}
2075}
2076
2077/* update to the given appl_ptr and call the ack callback if needed;
2078 * when an error is returned, roll back to the original value
2079 */
2080int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2081			   snd_pcm_uframes_t appl_ptr)
2082{
2083	struct snd_pcm_runtime *runtime = substream->runtime;
2084	snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2085	int ret;
2086
2087	if (old_appl_ptr == appl_ptr)
2088		return 0;
2089
2090	runtime->control->appl_ptr = appl_ptr;
2091	if (substream->ops->ack) {
2092		ret = substream->ops->ack(substream);
2093		if (ret < 0) {
2094			runtime->control->appl_ptr = old_appl_ptr;
2095			return ret;
2096		}
2097	}
2098
2099	trace_applptr(substream, old_appl_ptr, appl_ptr);
2100
2101	return 0;
2102}
2103
2104/* the common loop for read/write data */
2105snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2106				     void *data, bool interleaved,
2107				     snd_pcm_uframes_t size, bool in_kernel)
2108{
2109	struct snd_pcm_runtime *runtime = substream->runtime;
2110	snd_pcm_uframes_t xfer = 0;
2111	snd_pcm_uframes_t offset = 0;
2112	snd_pcm_uframes_t avail;
2113	pcm_copy_f writer;
2114	pcm_transfer_f transfer;
2115	bool nonblock;
2116	bool is_playback;
2117	int err;
2118
2119	err = pcm_sanity_check(substream);
2120	if (err < 0)
2121		return err;
2122
2123	is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2124	if (interleaved) {
2125		if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2126		    runtime->channels > 1)
2127			return -EINVAL;
2128		writer = interleaved_copy;
2129	} else {
2130		if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2131			return -EINVAL;
2132		writer = noninterleaved_copy;
2133	}
2134
2135	if (!data) {
2136		if (is_playback)
2137			transfer = fill_silence;
2138		else
2139			return -EINVAL;
2140	} else if (in_kernel) {
2141		if (substream->ops->copy_kernel)
2142			transfer = substream->ops->copy_kernel;
2143		else
2144			transfer = is_playback ?
2145				default_write_copy_kernel : default_read_copy_kernel;
2146	} else {
2147		if (substream->ops->copy_user)
2148			transfer = (pcm_transfer_f)substream->ops->copy_user;
2149		else
2150			transfer = is_playback ?
2151				default_write_copy : default_read_copy;
2152	}
2153
2154	if (size == 0)
2155		return 0;
2156
2157	nonblock = !!(substream->f_flags & O_NONBLOCK);
2158
2159	snd_pcm_stream_lock_irq(substream);
2160	err = pcm_accessible_state(runtime);
2161	if (err < 0)
2162		goto _end_unlock;
2163
2164	runtime->twake = runtime->control->avail_min ? : 1;
2165	if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2166		snd_pcm_update_hw_ptr(substream);
2167
2168	/*
2169	 * If size < start_threshold, wait indefinitely. Another
2170	 * thread may start capture
2171	 */
2172	if (!is_playback &&
2173	    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2174	    size >= runtime->start_threshold) {
2175		err = snd_pcm_start(substream);
2176		if (err < 0)
2177			goto _end_unlock;
2178	}
2179
2180	avail = snd_pcm_avail(substream);
2181
2182	while (size > 0) {
2183		snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2184		snd_pcm_uframes_t cont;
2185		if (!avail) {
2186			if (!is_playback &&
2187			    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2188				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2189				goto _end_unlock;
2190			}
2191			if (nonblock) {
2192				err = -EAGAIN;
2193				goto _end_unlock;
2194			}
2195			runtime->twake = min_t(snd_pcm_uframes_t, size,
2196					runtime->control->avail_min ? : 1);
2197			err = wait_for_avail(substream, &avail);
2198			if (err < 0)
2199				goto _end_unlock;
2200			if (!avail)
2201				continue; /* draining */
2202		}
2203		frames = size > avail ? avail : size;
2204		appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2205		appl_ofs = appl_ptr % runtime->buffer_size;
2206		cont = runtime->buffer_size - appl_ofs;
2207		if (frames > cont)
2208			frames = cont;
2209		if (snd_BUG_ON(!frames)) {
2210			err = -EINVAL;
2211			goto _end_unlock;
2212		}
2213		snd_pcm_stream_unlock_irq(substream);
2214		err = writer(substream, appl_ofs, data, offset, frames,
2215			     transfer);
2216		snd_pcm_stream_lock_irq(substream);
2217		if (err < 0)
2218			goto _end_unlock;
2219		err = pcm_accessible_state(runtime);
2220		if (err < 0)
2221			goto _end_unlock;
2222		appl_ptr += frames;
2223		if (appl_ptr >= runtime->boundary)
2224			appl_ptr -= runtime->boundary;
2225		err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2226		if (err < 0)
2227			goto _end_unlock;
2228
2229		offset += frames;
2230		size -= frames;
2231		xfer += frames;
2232		avail -= frames;
2233		if (is_playback &&
2234		    runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2235		    snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2236			err = snd_pcm_start(substream);
2237			if (err < 0)
2238				goto _end_unlock;
2239		}
2240	}
2241 _end_unlock:
2242	runtime->twake = 0;
2243	if (xfer > 0 && err >= 0)
2244		snd_pcm_update_state(substream, runtime);
2245	snd_pcm_stream_unlock_irq(substream);
2246	return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2247}
2248EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2249
2250/*
2251 * standard channel mapping helpers
2252 */
2253
2254/* default channel maps for multi-channel playbacks, up to 8 channels */
2255const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2256	{ .channels = 1,
2257	  .map = { SNDRV_CHMAP_MONO } },
2258	{ .channels = 2,
2259	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2260	{ .channels = 4,
2261	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2262		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2263	{ .channels = 6,
2264	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2265		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2266		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2267	{ .channels = 8,
2268	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2269		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2270		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2271		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2272	{ }
2273};
2274EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2275
2276/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2277const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2278	{ .channels = 1,
2279	  .map = { SNDRV_CHMAP_MONO } },
2280	{ .channels = 2,
2281	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2282	{ .channels = 4,
2283	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2284		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2285	{ .channels = 6,
2286	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2287		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2288		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2289	{ .channels = 8,
2290	  .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2291		   SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2292		   SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2293		   SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2294	{ }
2295};
2296EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2297
2298static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2299{
2300	if (ch > info->max_channels)
2301		return false;
2302	return !info->channel_mask || (info->channel_mask & (1U << ch));
2303}
2304
2305static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2306			      struct snd_ctl_elem_info *uinfo)
2307{
2308	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2309
2310	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2312	uinfo->count = info->max_channels;
2313	uinfo->value.integer.min = 0;
2314	uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2315	return 0;
2316}
2317
2318/* get callback for channel map ctl element
2319 * stores the first channel map entry that matches the current channel count
2320 */
2321static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2322			     struct snd_ctl_elem_value *ucontrol)
2323{
2324	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2325	unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2326	struct snd_pcm_substream *substream;
2327	const struct snd_pcm_chmap_elem *map;
2328
2329	if (!info->chmap)
2330		return -EINVAL;
2331	substream = snd_pcm_chmap_substream(info, idx);
2332	if (!substream)
2333		return -ENODEV;
2334	memset(ucontrol->value.integer.value, 0,
2335	       sizeof(ucontrol->value.integer.value));
2336	if (!substream->runtime)
2337		return 0; /* no channels set */
2338	for (map = info->chmap; map->channels; map++) {
2339		int i;
2340		if (map->channels == substream->runtime->channels &&
2341		    valid_chmap_channels(info, map->channels)) {
2342			for (i = 0; i < map->channels; i++)
2343				ucontrol->value.integer.value[i] = map->map[i];
2344			return 0;
2345		}
2346	}
2347	return -EINVAL;
2348}
2349
2350/* tlv callback for channel map ctl element
2351 * expands the pre-defined channel maps in a form of TLV
2352 */
2353static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2354			     unsigned int size, unsigned int __user *tlv)
2355{
2356	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2357	const struct snd_pcm_chmap_elem *map;
2358	unsigned int __user *dst;
2359	int c, count = 0;
2360
2361	if (!info->chmap)
2362		return -EINVAL;
2363	if (size < 8)
2364		return -ENOMEM;
2365	if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2366		return -EFAULT;
2367	size -= 8;
2368	dst = tlv + 2;
2369	for (map = info->chmap; map->channels; map++) {
2370		int chs_bytes = map->channels * 4;
2371		if (!valid_chmap_channels(info, map->channels))
2372			continue;
2373		if (size < 8)
2374			return -ENOMEM;
2375		if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2376		    put_user(chs_bytes, dst + 1))
2377			return -EFAULT;
2378		dst += 2;
2379		size -= 8;
2380		count += 8;
2381		if (size < chs_bytes)
2382			return -ENOMEM;
2383		size -= chs_bytes;
2384		count += chs_bytes;
2385		for (c = 0; c < map->channels; c++) {
2386			if (put_user(map->map[c], dst))
2387				return -EFAULT;
2388			dst++;
2389		}
2390	}
2391	if (put_user(count, tlv + 1))
2392		return -EFAULT;
2393	return 0;
2394}
2395
2396static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2397{
2398	struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2399	info->pcm->streams[info->stream].chmap_kctl = NULL;
2400	kfree(info);
2401}
2402
2403/**
2404 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2405 * @pcm: the assigned PCM instance
2406 * @stream: stream direction
2407 * @chmap: channel map elements (for query)
2408 * @max_channels: the max number of channels for the stream
2409 * @private_value: the value passed to each kcontrol's private_value field
2410 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2411 *
2412 * Create channel-mapping control elements assigned to the given PCM stream(s).
2413 * Return: Zero if successful, or a negative error value.
2414 */
2415int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2416			   const struct snd_pcm_chmap_elem *chmap,
2417			   int max_channels,
2418			   unsigned long private_value,
2419			   struct snd_pcm_chmap **info_ret)
2420{
2421	struct snd_pcm_chmap *info;
2422	struct snd_kcontrol_new knew = {
2423		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
2424		.access = SNDRV_CTL_ELEM_ACCESS_READ |
2425			SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2426			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2427		.info = pcm_chmap_ctl_info,
2428		.get = pcm_chmap_ctl_get,
2429		.tlv.c = pcm_chmap_ctl_tlv,
2430	};
2431	int err;
2432
2433	if (WARN_ON(pcm->streams[stream].chmap_kctl))
2434		return -EBUSY;
2435	info = kzalloc(sizeof(*info), GFP_KERNEL);
2436	if (!info)
2437		return -ENOMEM;
2438	info->pcm = pcm;
2439	info->stream = stream;
2440	info->chmap = chmap;
2441	info->max_channels = max_channels;
2442	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2443		knew.name = "Playback Channel Map";
2444	else
2445		knew.name = "Capture Channel Map";
2446	knew.device = pcm->device;
2447	knew.count = pcm->streams[stream].substream_count;
2448	knew.private_value = private_value;
2449	info->kctl = snd_ctl_new1(&knew, info);
2450	if (!info->kctl) {
2451		kfree(info);
2452		return -ENOMEM;
2453	}
2454	info->kctl->private_free = pcm_chmap_ctl_private_free;
2455	err = snd_ctl_add(pcm->card, info->kctl);
2456	if (err < 0)
2457		return err;
2458	pcm->streams[stream].chmap_kctl = info->kctl;
2459	if (info_ret)
2460		*info_ret = info;
2461	return 0;
2462}
2463EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
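
/*
 * Editor's note: illustrative sketch (not part of this file).  A driver with
 * an 8-channel playback stream can expose the standard channel maps right
 * after creating the PCM; foo_create_chmap() and the surrounding error
 * handling are hypothetical.
 */
#if 0	/* illustrative only */
static int foo_create_chmap(struct snd_pcm *pcm)
{
	struct snd_pcm_chmap *chmap;
	int err;

	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
				     snd_pcm_std_chmaps, 8,
				     0 /* private_value */, &chmap);
	if (err < 0)
		return err;
	/* the returned chmap info can be stashed in driver data if needed */
	return 0;
}
#endif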