1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 * Abramo Bagnara <abramo@alsa-project.org>
6 */
7
8#include <linux/slab.h>
9#include <linux/sched/signal.h>
10#include <linux/time.h>
11#include <linux/math64.h>
12#include <linux/export.h>
13#include <sound/core.h>
14#include <sound/control.h>
15#include <sound/tlv.h>
16#include <sound/info.h>
17#include <sound/pcm.h>
18#include <sound/pcm_params.h>
19#include <sound/timer.h>
20
21#include "pcm_local.h"
22
23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
24#define CREATE_TRACE_POINTS
25#include "pcm_trace.h"
26#else
27#define trace_hwptr(substream, pos, in_interrupt)
28#define trace_xrun(substream)
29#define trace_hw_ptr_error(substream, reason)
30#define trace_applptr(substream, prev, curr)
31#endif
32
33static int fill_silence_frames(struct snd_pcm_substream *substream,
34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35
36/*
37 * fill ring buffer with silence
38 * runtime->silence_start: starting pointer to silence area
39 * runtime->silence_filled: size filled with silence
40 * runtime->silence_threshold: threshold from application
41 * runtime->silence_size: maximal size from application
42 *
43 * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
44 */
45void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
46{
47 struct snd_pcm_runtime *runtime = substream->runtime;
48 snd_pcm_uframes_t frames, ofs, transfer;
49 int err;
50
51 if (runtime->silence_size < runtime->boundary) {
52 snd_pcm_sframes_t noise_dist, n;
53 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
54 if (runtime->silence_start != appl_ptr) {
55 n = appl_ptr - runtime->silence_start;
56 if (n < 0)
57 n += runtime->boundary;
58 if ((snd_pcm_uframes_t)n < runtime->silence_filled)
59 runtime->silence_filled -= n;
60 else
61 runtime->silence_filled = 0;
62 runtime->silence_start = appl_ptr;
63 }
64 if (runtime->silence_filled >= runtime->buffer_size)
65 return;
66 noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
67 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
68 return;
69 frames = runtime->silence_threshold - noise_dist;
70 if (frames > runtime->silence_size)
71 frames = runtime->silence_size;
72 } else {
73 if (new_hw_ptr == ULONG_MAX) { /* initialization */
74 snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
75 if (avail > runtime->buffer_size)
76 avail = runtime->buffer_size;
77 runtime->silence_filled = avail > 0 ? avail : 0;
78 runtime->silence_start = (runtime->status->hw_ptr +
79 runtime->silence_filled) %
80 runtime->boundary;
81 } else {
82 ofs = runtime->status->hw_ptr;
83 frames = new_hw_ptr - ofs;
84 if ((snd_pcm_sframes_t)frames < 0)
85 frames += runtime->boundary;
86 runtime->silence_filled -= frames;
87 if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
88 runtime->silence_filled = 0;
89 runtime->silence_start = new_hw_ptr;
90 } else {
91 runtime->silence_start = ofs;
92 }
93 }
94 frames = runtime->buffer_size - runtime->silence_filled;
95 }
96 if (snd_BUG_ON(frames > runtime->buffer_size))
97 return;
98 if (frames == 0)
99 return;
100 ofs = runtime->silence_start % runtime->buffer_size;
101 while (frames > 0) {
102 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
103 err = fill_silence_frames(substream, ofs, transfer);
104 snd_BUG_ON(err < 0);
105 runtime->silence_filled += transfer;
106 frames -= transfer;
107 ofs = 0;
108 }
109 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
110}
111
112#ifdef CONFIG_SND_DEBUG
113void snd_pcm_debug_name(struct snd_pcm_substream *substream,
114 char *name, size_t len)
115{
116 snprintf(name, len, "pcmC%dD%d%c:%d",
117 substream->pcm->card->number,
118 substream->pcm->device,
119 substream->stream ? 'c' : 'p',
120 substream->number);
121}
122EXPORT_SYMBOL(snd_pcm_debug_name);
123#endif
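/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * With CONFIG_SND_DEBUG enabled, a driver or debug path can build a short
 * stream identifier like this; for card 0, device 0, playback substream 0
 * the result is "pcmC0D0p:0".
 *
 *	char name[16];
 *
 *	snd_pcm_debug_name(substream, name, sizeof(name));
 *	pr_debug("stream %s\n", name);
 */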
124
125#define XRUN_DEBUG_BASIC (1<<0)
126#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
127#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
128
129#ifdef CONFIG_SND_PCM_XRUN_DEBUG
130
131#define xrun_debug(substream, mask) \
132 ((substream)->pstr->xrun_debug & (mask))
133#else
134#define xrun_debug(substream, mask) 0
135#endif
136
137#define dump_stack_on_xrun(substream) do { \
138 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
139 dump_stack(); \
140 } while (0)
141
142/* call with stream lock held */
143void __snd_pcm_xrun(struct snd_pcm_substream *substream)
144{
145 struct snd_pcm_runtime *runtime = substream->runtime;
146
147 trace_xrun(substream);
148 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
149 struct timespec64 tstamp;
150
151 snd_pcm_gettime(runtime, &tstamp);
152 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
153 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
154 }
155 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
156 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
157 char name[16];
158 snd_pcm_debug_name(substream, name, sizeof(name));
159 pcm_warn(substream->pcm, "XRUN: %s\n", name);
160 dump_stack_on_xrun(substream);
161 }
162}
163
164#ifdef CONFIG_SND_PCM_XRUN_DEBUG
165#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
166 do { \
167 trace_hw_ptr_error(substream, reason); \
168 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
169 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
170 (in_interrupt) ? 'Q' : 'P', ##args); \
171 dump_stack_on_xrun(substream); \
172 } \
173 } while (0)
174
175#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
176
177#define hw_ptr_error(substream, fmt, args...) do { } while (0)
178
179#endif
180
181int snd_pcm_update_state(struct snd_pcm_substream *substream,
182 struct snd_pcm_runtime *runtime)
183{
184 snd_pcm_uframes_t avail;
185
186 avail = snd_pcm_avail(substream);
187 if (avail > runtime->avail_max)
188 runtime->avail_max = avail;
189 if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
190 if (avail >= runtime->buffer_size) {
191 snd_pcm_drain_done(substream);
192 return -EPIPE;
193 }
194 } else {
195 if (avail >= runtime->stop_threshold) {
196 __snd_pcm_xrun(substream);
197 return -EPIPE;
198 }
199 }
200 if (runtime->twake) {
201 if (avail >= runtime->twake)
202 wake_up(&runtime->tsleep);
203 } else if (avail >= runtime->control->avail_min)
204 wake_up(&runtime->sleep);
205 return 0;
206}
207
208static void update_audio_tstamp(struct snd_pcm_substream *substream,
209 struct timespec64 *curr_tstamp,
210 struct timespec64 *audio_tstamp)
211{
212 struct snd_pcm_runtime *runtime = substream->runtime;
213 u64 audio_frames, audio_nsecs;
214 struct timespec64 driver_tstamp;
215
216 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
217 return;
218
219 if (!(substream->ops->get_time_info) ||
220 (runtime->audio_tstamp_report.actual_type ==
221 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
222
223 /*
224 * provide audio timestamp derived from pointer position
225 * add delay only if requested
226 */
227
228 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
229
230 if (runtime->audio_tstamp_config.report_delay) {
231 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
232 audio_frames -= runtime->delay;
233 else
234 audio_frames += runtime->delay;
235 }
236 audio_nsecs = div_u64(audio_frames * 1000000000LL,
237 runtime->rate);
238 *audio_tstamp = ns_to_timespec64(audio_nsecs);
239 }
240
241 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
242 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
243 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
244 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
245 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
246 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
247 }
248
249
250 /*
251 * re-take a driver timestamp to let apps detect if the reference tstamp
252 * read by low-level hardware was provided with a delay
253 */
254 snd_pcm_gettime(substream->runtime, &driver_tstamp);
255 runtime->driver_tstamp = driver_tstamp;
256}
257
258static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
259 unsigned int in_interrupt)
260{
261 struct snd_pcm_runtime *runtime = substream->runtime;
262 snd_pcm_uframes_t pos;
263 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
264 snd_pcm_sframes_t hdelta, delta;
265 unsigned long jdelta;
266 unsigned long curr_jiffies;
267 struct timespec64 curr_tstamp;
268 struct timespec64 audio_tstamp;
269 int crossed_boundary = 0;
270
271 old_hw_ptr = runtime->status->hw_ptr;
272
273 /*
274 * group pointer, time and jiffies reads to allow for more
275 * accurate correlations/corrections.
276 * The values are stored at the end of this routine after
277 * corrections for hw_ptr position
278 */
279 pos = substream->ops->pointer(substream);
280 curr_jiffies = jiffies;
281 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
282 if ((substream->ops->get_time_info) &&
283 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
284 substream->ops->get_time_info(substream, &curr_tstamp,
285 &audio_tstamp,
286 &runtime->audio_tstamp_config,
287 &runtime->audio_tstamp_report);
288
289 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
290 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
291 snd_pcm_gettime(runtime, &curr_tstamp);
292 } else
293 snd_pcm_gettime(runtime, &curr_tstamp);
294 }
295
296 if (pos == SNDRV_PCM_POS_XRUN) {
297 __snd_pcm_xrun(substream);
298 return -EPIPE;
299 }
300 if (pos >= runtime->buffer_size) {
301 if (printk_ratelimit()) {
302 char name[16];
303 snd_pcm_debug_name(substream, name, sizeof(name));
304 pcm_err(substream->pcm,
305 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
306 name, pos, runtime->buffer_size,
307 runtime->period_size);
308 }
309 pos = 0;
310 }
311 pos -= pos % runtime->min_align;
312 trace_hwptr(substream, pos, in_interrupt);
313 hw_base = runtime->hw_ptr_base;
314 new_hw_ptr = hw_base + pos;
315 if (in_interrupt) {
316 /* we know that one period was processed */
317 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
318 delta = runtime->hw_ptr_interrupt + runtime->period_size;
319 if (delta > new_hw_ptr) {
320 /* check for double acknowledged interrupts */
321 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
322 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
323 hw_base += runtime->buffer_size;
324 if (hw_base >= runtime->boundary) {
325 hw_base = 0;
326 crossed_boundary++;
327 }
328 new_hw_ptr = hw_base + pos;
329 goto __delta;
330 }
331 }
332 }
333 /* new_hw_ptr might be lower than old_hw_ptr in case when */
334 /* pointer crosses the end of the ring buffer */
335 if (new_hw_ptr < old_hw_ptr) {
336 hw_base += runtime->buffer_size;
337 if (hw_base >= runtime->boundary) {
338 hw_base = 0;
339 crossed_boundary++;
340 }
341 new_hw_ptr = hw_base + pos;
342 }
343 __delta:
344 delta = new_hw_ptr - old_hw_ptr;
345 if (delta < 0)
346 delta += runtime->boundary;
347
348 if (runtime->no_period_wakeup) {
349 snd_pcm_sframes_t xrun_threshold;
350 /*
351 * Without regular period interrupts, we have to check
352 * the elapsed time to detect xruns.
353 */
354 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
355 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
356 goto no_delta_check;
357 hdelta = jdelta - delta * HZ / runtime->rate;
358 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
359 while (hdelta > xrun_threshold) {
360 delta += runtime->buffer_size;
361 hw_base += runtime->buffer_size;
362 if (hw_base >= runtime->boundary) {
363 hw_base = 0;
364 crossed_boundary++;
365 }
366 new_hw_ptr = hw_base + pos;
367 hdelta -= runtime->hw_ptr_buffer_jiffies;
368 }
369 goto no_delta_check;
370 }
371
372 /* something must be really wrong */
373 if (delta >= runtime->buffer_size + runtime->period_size) {
374 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
375 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
376 substream->stream, (long)pos,
377 (long)new_hw_ptr, (long)old_hw_ptr);
378 return 0;
379 }
380
381 /* Do jiffies check only in xrun_debug mode */
382 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
383 goto no_jiffies_check;
384
385 /* Skip the jiffies check for hardware with the BATCH flag.
386 * Such hardware usually just increases the position at each IRQ,
387 * thus it can't give any strange position.
388 */
389 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
390 goto no_jiffies_check;
391 hdelta = delta;
392 if (hdelta < runtime->delay)
393 goto no_jiffies_check;
394 hdelta -= runtime->delay;
395 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
396 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
397 delta = jdelta /
398 (((runtime->period_size * HZ) / runtime->rate)
399 + HZ/100);
400 /* move new_hw_ptr according to jiffies, not the pos variable */
401 new_hw_ptr = old_hw_ptr;
402 hw_base = delta;
403 /* use loop to avoid checks for delta overflows */
404 /* the delta value is small or zero in most cases */
405 while (delta > 0) {
406 new_hw_ptr += runtime->period_size;
407 if (new_hw_ptr >= runtime->boundary) {
408 new_hw_ptr -= runtime->boundary;
409 crossed_boundary--;
410 }
411 delta--;
412 }
413 /* align hw_base to buffer_size */
414 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
415 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
416 (long)pos, (long)hdelta,
417 (long)runtime->period_size, jdelta,
418 ((hdelta * HZ) / runtime->rate), hw_base,
419 (unsigned long)old_hw_ptr,
420 (unsigned long)new_hw_ptr);
421 /* reset values to proper state */
422 delta = 0;
423 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
424 }
425 no_jiffies_check:
426 if (delta > runtime->period_size + runtime->period_size / 2) {
427 hw_ptr_error(substream, in_interrupt,
428 "Lost interrupts?",
429 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
430 substream->stream, (long)delta,
431 (long)new_hw_ptr,
432 (long)old_hw_ptr);
433 }
434
435 no_delta_check:
436 if (runtime->status->hw_ptr == new_hw_ptr) {
437 runtime->hw_ptr_jiffies = curr_jiffies;
438 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
439 return 0;
440 }
441
442 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
443 runtime->silence_size > 0)
444 snd_pcm_playback_silence(substream, new_hw_ptr);
445
446 if (in_interrupt) {
447 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
448 if (delta < 0)
449 delta += runtime->boundary;
450 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
451 runtime->hw_ptr_interrupt += delta;
452 if (runtime->hw_ptr_interrupt >= runtime->boundary)
453 runtime->hw_ptr_interrupt -= runtime->boundary;
454 }
455 runtime->hw_ptr_base = hw_base;
456 runtime->status->hw_ptr = new_hw_ptr;
457 runtime->hw_ptr_jiffies = curr_jiffies;
458 if (crossed_boundary) {
459 snd_BUG_ON(crossed_boundary != 1);
460 runtime->hw_ptr_wrap += runtime->boundary;
461 }
462
463 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
464
465 return snd_pcm_update_state(substream, runtime);
466}
467
468/* CAUTION: call it with irq disabled */
469int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
470{
471 return snd_pcm_update_hw_ptr0(substream, 0);
472}
473
474/**
475 * snd_pcm_set_ops - set the PCM operators
476 * @pcm: the pcm instance
477 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
478 * @ops: the operator table
479 *
480 * Sets the given PCM operators to the pcm instance.
481 */
482void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
483 const struct snd_pcm_ops *ops)
484{
485 struct snd_pcm_str *stream = &pcm->streams[direction];
486 struct snd_pcm_substream *substream;
487
488 for (substream = stream->substream; substream != NULL; substream = substream->next)
489 substream->ops = ops;
490}
491EXPORT_SYMBOL(snd_pcm_set_ops);
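/*
 * Editor's note: illustrative sketch, not from this file.  A driver typically
 * attaches its operator tables right after creating the PCM device with
 * snd_pcm_new(); "my_playback_ops"/"my_capture_ops" are hypothetical names:
 *
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 */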
492
493/**
494 * snd_pcm_set_sync - set the PCM sync id
495 * @substream: the pcm substream
496 *
497 * Sets the PCM sync identifier for the card.
498 */
499void snd_pcm_set_sync(struct snd_pcm_substream *substream)
500{
501 struct snd_pcm_runtime *runtime = substream->runtime;
502
503 runtime->sync.id32[0] = substream->pcm->card->number;
504 runtime->sync.id32[1] = -1;
505 runtime->sync.id32[2] = -1;
506 runtime->sync.id32[3] = -1;
507}
508EXPORT_SYMBOL(snd_pcm_set_sync);
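/*
 * Editor's note: illustrative sketch, not from this file.  Drivers usually
 * call this from their .open callback so that substreams of the same card
 * report a matching sync id; "my_pcm_open" is a hypothetical callback:
 *
 *	static int my_pcm_open(struct snd_pcm_substream *substream)
 *	{
 *		...
 *		snd_pcm_set_sync(substream);
 *		return 0;
 *	}
 */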
509
510/*
511 * Standard ioctl routine
512 */
513
514static inline unsigned int div32(unsigned int a, unsigned int b,
515 unsigned int *r)
516{
517 if (b == 0) {
518 *r = 0;
519 return UINT_MAX;
520 }
521 *r = a % b;
522 return a / b;
523}
524
525static inline unsigned int div_down(unsigned int a, unsigned int b)
526{
527 if (b == 0)
528 return UINT_MAX;
529 return a / b;
530}
531
532static inline unsigned int div_up(unsigned int a, unsigned int b)
533{
534 unsigned int r;
535 unsigned int q;
536 if (b == 0)
537 return UINT_MAX;
538 q = div32(a, b, &r);
539 if (r)
540 ++q;
541 return q;
542}
543
544static inline unsigned int mul(unsigned int a, unsigned int b)
545{
546 if (a == 0)
547 return 0;
548 if (div_down(UINT_MAX, a) < b)
549 return UINT_MAX;
550 return a * b;
551}
552
553static inline unsigned int muldiv32(unsigned int a, unsigned int b,
554 unsigned int c, unsigned int *r)
555{
556 u_int64_t n = (u_int64_t) a * b;
557 if (c == 0) {
558 *r = 0;
559 return UINT_MAX;
560 }
561 n = div_u64_rem(n, c, r);
562 if (n >= UINT_MAX) {
563 *r = 0;
564 return UINT_MAX;
565 }
566 return n;
567}
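/*
 * Editor's note: a small worked example of the saturating helpers above,
 * added for clarity (not from the original file).  div_up(7, 2) == 4,
 * mul(0x10000, 0x10000) == UINT_MAX (clamped), and
 * muldiv32(44100, 4, 3, &r) == 58800 with r == 0; whenever the exact result
 * does not fit in 32 bits, UINT_MAX is returned and *r is cleared.
 */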
568
569/**
570 * snd_interval_refine - refine the interval value of configurator
571 * @i: the interval value to refine
572 * @v: the interval value to refer to
573 *
574 * Refines the interval value with the reference value.
575 * The interval is changed to the range satisfying both intervals.
576 * The interval status (min, max, integer, etc.) is evaluated.
577 *
578 * Return: Positive if the value is changed, zero if it's not changed, or a
579 * negative error code.
580 */
581int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
582{
583 int changed = 0;
584 if (snd_BUG_ON(snd_interval_empty(i)))
585 return -EINVAL;
586 if (i->min < v->min) {
587 i->min = v->min;
588 i->openmin = v->openmin;
589 changed = 1;
590 } else if (i->min == v->min && !i->openmin && v->openmin) {
591 i->openmin = 1;
592 changed = 1;
593 }
594 if (i->max > v->max) {
595 i->max = v->max;
596 i->openmax = v->openmax;
597 changed = 1;
598 } else if (i->max == v->max && !i->openmax && v->openmax) {
599 i->openmax = 1;
600 changed = 1;
601 }
602 if (!i->integer && v->integer) {
603 i->integer = 1;
604 changed = 1;
605 }
606 if (i->integer) {
607 if (i->openmin) {
608 i->min++;
609 i->openmin = 0;
610 }
611 if (i->openmax) {
612 i->max--;
613 i->openmax = 0;
614 }
615 } else if (!i->openmin && !i->openmax && i->min == i->max)
616 i->integer = 1;
617 if (snd_interval_checkempty(i)) {
618 snd_interval_none(i);
619 return -EINVAL;
620 }
621 return changed;
622}
623EXPORT_SYMBOL(snd_interval_refine);
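/*
 * Editor's note: worked example, added for clarity (not from the original
 * file).  Refining i = [8000, 48000] against v = [11025, 44100] narrows i to
 * [11025, 44100] and returns 1; refining the result against the same v again
 * returns 0 since nothing changes.  If the two intervals do not overlap, i is
 * emptied and -EINVAL is returned.
 */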
624
625static int snd_interval_refine_first(struct snd_interval *i)
626{
627 const unsigned int last_max = i->max;
628
629 if (snd_BUG_ON(snd_interval_empty(i)))
630 return -EINVAL;
631 if (snd_interval_single(i))
632 return 0;
633 i->max = i->min;
634 if (i->openmin)
635 i->max++;
636 /* only exclude max value if also excluded before refine */
637 i->openmax = (i->openmax && i->max >= last_max);
638 return 1;
639}
640
641static int snd_interval_refine_last(struct snd_interval *i)
642{
643 const unsigned int last_min = i->min;
644
645 if (snd_BUG_ON(snd_interval_empty(i)))
646 return -EINVAL;
647 if (snd_interval_single(i))
648 return 0;
649 i->min = i->max;
650 if (i->openmax)
651 i->min--;
652 /* only exclude min value if also excluded before refine */
653 i->openmin = (i->openmin && i->min <= last_min);
654 return 1;
655}
656
657void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
658{
659 if (a->empty || b->empty) {
660 snd_interval_none(c);
661 return;
662 }
663 c->empty = 0;
664 c->min = mul(a->min, b->min);
665 c->openmin = (a->openmin || b->openmin);
666 c->max = mul(a->max, b->max);
667 c->openmax = (a->openmax || b->openmax);
668 c->integer = (a->integer && b->integer);
669}
670
671/**
672 * snd_interval_div - refine the interval value with division
673 * @a: dividend
674 * @b: divisor
675 * @c: quotient
676 *
677 * c = a / b
678 *
679 * The result is stored in @c.
680 */
681void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
682{
683 unsigned int r;
684 if (a->empty || b->empty) {
685 snd_interval_none(c);
686 return;
687 }
688 c->empty = 0;
689 c->min = div32(a->min, b->max, &r);
690 c->openmin = (r || a->openmin || b->openmax);
691 if (b->min > 0) {
692 c->max = div32(a->max, b->min, &r);
693 if (r) {
694 c->max++;
695 c->openmax = 1;
696 } else
697 c->openmax = (a->openmax || b->openmin);
698 } else {
699 c->max = UINT_MAX;
700 c->openmax = 0;
701 }
702 c->integer = 0;
703}
704
705/**
706 * snd_interval_muldivk - refine the interval value
707 * @a: dividend 1
708 * @b: dividend 2
709 * @k: divisor (as integer)
710 * @c: result
711 *
712 * c = a * b / k
713 *
714 * The result is stored in @c.
715 */
716void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
717 unsigned int k, struct snd_interval *c)
718{
719 unsigned int r;
720 if (a->empty || b->empty) {
721 snd_interval_none(c);
722 return;
723 }
724 c->empty = 0;
725 c->min = muldiv32(a->min, b->min, k, &r);
726 c->openmin = (r || a->openmin || b->openmin);
727 c->max = muldiv32(a->max, b->max, k, &r);
728 if (r) {
729 c->max++;
730 c->openmax = 1;
731 } else
732 c->openmax = (a->openmax || b->openmax);
733 c->integer = 0;
734}
735
736/**
737 * snd_interval_mulkdiv - refine the interval value
738 * @a: dividend 1
739 * @k: dividend 2 (as integer)
740 * @b: divisor
741 * @c: result
742 *
743 * c = a * k / b
744 *
745 * The result is stored in @c.
746 */
747void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
748 const struct snd_interval *b, struct snd_interval *c)
749{
750 unsigned int r;
751 if (a->empty || b->empty) {
752 snd_interval_none(c);
753 return;
754 }
755 c->empty = 0;
756 c->min = muldiv32(a->min, k, b->max, &r);
757 c->openmin = (r || a->openmin || b->openmax);
758 if (b->min > 0) {
759 c->max = muldiv32(a->max, k, b->min, &r);
760 if (r) {
761 c->max++;
762 c->openmax = 1;
763 } else
764 c->openmax = (a->openmax || b->openmin);
765 } else {
766 c->max = UINT_MAX;
767 c->openmax = 0;
768 }
769 c->integer = 0;
770}
771
772/* ---- */
773
774
775/**
776 * snd_interval_ratnum - refine the interval value
777 * @i: interval to refine
778 * @rats_count: number of ratnum_t
779 * @rats: ratnum_t array
780 * @nump: pointer to store the resultant numerator
781 * @denp: pointer to store the resultant denominator
782 *
783 * Return: Positive if the value is changed, zero if it's not changed, or a
784 * negative error code.
785 */
786int snd_interval_ratnum(struct snd_interval *i,
787 unsigned int rats_count, const struct snd_ratnum *rats,
788 unsigned int *nump, unsigned int *denp)
789{
790 unsigned int best_num, best_den;
791 int best_diff;
792 unsigned int k;
793 struct snd_interval t;
794 int err;
795 unsigned int result_num, result_den;
796 int result_diff;
797
798 best_num = best_den = best_diff = 0;
799 for (k = 0; k < rats_count; ++k) {
800 unsigned int num = rats[k].num;
801 unsigned int den;
802 unsigned int q = i->min;
803 int diff;
804 if (q == 0)
805 q = 1;
806 den = div_up(num, q);
807 if (den < rats[k].den_min)
808 continue;
809 if (den > rats[k].den_max)
810 den = rats[k].den_max;
811 else {
812 unsigned int r;
813 r = (den - rats[k].den_min) % rats[k].den_step;
814 if (r != 0)
815 den -= r;
816 }
817 diff = num - q * den;
818 if (diff < 0)
819 diff = -diff;
820 if (best_num == 0 ||
821 diff * best_den < best_diff * den) {
822 best_diff = diff;
823 best_den = den;
824 best_num = num;
825 }
826 }
827 if (best_den == 0) {
828 i->empty = 1;
829 return -EINVAL;
830 }
831 t.min = div_down(best_num, best_den);
832 t.openmin = !!(best_num % best_den);
833
834 result_num = best_num;
835 result_diff = best_diff;
836 result_den = best_den;
837 best_num = best_den = best_diff = 0;
838 for (k = 0; k < rats_count; ++k) {
839 unsigned int num = rats[k].num;
840 unsigned int den;
841 unsigned int q = i->max;
842 int diff;
843 if (q == 0) {
844 i->empty = 1;
845 return -EINVAL;
846 }
847 den = div_down(num, q);
848 if (den > rats[k].den_max)
849 continue;
850 if (den < rats[k].den_min)
851 den = rats[k].den_min;
852 else {
853 unsigned int r;
854 r = (den - rats[k].den_min) % rats[k].den_step;
855 if (r != 0)
856 den += rats[k].den_step - r;
857 }
858 diff = q * den - num;
859 if (diff < 0)
860 diff = -diff;
861 if (best_num == 0 ||
862 diff * best_den < best_diff * den) {
863 best_diff = diff;
864 best_den = den;
865 best_num = num;
866 }
867 }
868 if (best_den == 0) {
869 i->empty = 1;
870 return -EINVAL;
871 }
872 t.max = div_up(best_num, best_den);
873 t.openmax = !!(best_num % best_den);
874 t.integer = 0;
875 err = snd_interval_refine(i, &t);
876 if (err < 0)
877 return err;
878
879 if (snd_interval_single(i)) {
880 if (best_diff * result_den < result_diff * best_den) {
881 result_num = best_num;
882 result_den = best_den;
883 }
884 if (nump)
885 *nump = result_num;
886 if (denp)
887 *denp = result_den;
888 }
889 return err;
890}
891EXPORT_SYMBOL(snd_interval_ratnum);
892
893/**
894 * snd_interval_ratden - refine the interval value
895 * @i: interval to refine
896 * @rats_count: number of struct ratden
897 * @rats: struct ratden array
898 * @nump: pointer to store the resultant numerator
899 * @denp: pointer to store the resultant denominator
900 *
901 * Return: Positive if the value is changed, zero if it's not changed, or a
902 * negative error code.
903 */
904static int snd_interval_ratden(struct snd_interval *i,
905 unsigned int rats_count,
906 const struct snd_ratden *rats,
907 unsigned int *nump, unsigned int *denp)
908{
909 unsigned int best_num, best_diff, best_den;
910 unsigned int k;
911 struct snd_interval t;
912 int err;
913
914 best_num = best_den = best_diff = 0;
915 for (k = 0; k < rats_count; ++k) {
916 unsigned int num;
917 unsigned int den = rats[k].den;
918 unsigned int q = i->min;
919 int diff;
920 num = mul(q, den);
921 if (num > rats[k].num_max)
922 continue;
923 if (num < rats[k].num_min)
924 num = rats[k].num_max;
925 else {
926 unsigned int r;
927 r = (num - rats[k].num_min) % rats[k].num_step;
928 if (r != 0)
929 num += rats[k].num_step - r;
930 }
931 diff = num - q * den;
932 if (best_num == 0 ||
933 diff * best_den < best_diff * den) {
934 best_diff = diff;
935 best_den = den;
936 best_num = num;
937 }
938 }
939 if (best_den == 0) {
940 i->empty = 1;
941 return -EINVAL;
942 }
943 t.min = div_down(best_num, best_den);
944 t.openmin = !!(best_num % best_den);
945
946 best_num = best_den = best_diff = 0;
947 for (k = 0; k < rats_count; ++k) {
948 unsigned int num;
949 unsigned int den = rats[k].den;
950 unsigned int q = i->max;
951 int diff;
952 num = mul(q, den);
953 if (num < rats[k].num_min)
954 continue;
955 if (num > rats[k].num_max)
956 num = rats[k].num_max;
957 else {
958 unsigned int r;
959 r = (num - rats[k].num_min) % rats[k].num_step;
960 if (r != 0)
961 num -= r;
962 }
963 diff = q * den - num;
964 if (best_num == 0 ||
965 diff * best_den < best_diff * den) {
966 best_diff = diff;
967 best_den = den;
968 best_num = num;
969 }
970 }
971 if (best_den == 0) {
972 i->empty = 1;
973 return -EINVAL;
974 }
975 t.max = div_up(best_num, best_den);
976 t.openmax = !!(best_num % best_den);
977 t.integer = 0;
978 err = snd_interval_refine(i, &t);
979 if (err < 0)
980 return err;
981
982 if (snd_interval_single(i)) {
983 if (nump)
984 *nump = best_num;
985 if (denp)
986 *denp = best_den;
987 }
988 return err;
989}
990
991/**
992 * snd_interval_list - refine the interval value from the list
993 * @i: the interval value to refine
994 * @count: the number of elements in the list
995 * @list: the value list
996 * @mask: the bit-mask to evaluate
997 *
998 * Refines the interval value from the list.
999 * When mask is non-zero, only the elements whose corresponding bits are
 1000 * set in the mask are evaluated.
1001 *
1002 * Return: Positive if the value is changed, zero if it's not changed, or a
1003 * negative error code.
1004 */
1005int snd_interval_list(struct snd_interval *i, unsigned int count,
1006 const unsigned int *list, unsigned int mask)
1007{
1008 unsigned int k;
1009 struct snd_interval list_range;
1010
1011 if (!count) {
1012 i->empty = 1;
1013 return -EINVAL;
1014 }
1015 snd_interval_any(&list_range);
1016 list_range.min = UINT_MAX;
1017 list_range.max = 0;
1018 for (k = 0; k < count; k++) {
1019 if (mask && !(mask & (1 << k)))
1020 continue;
1021 if (!snd_interval_test(i, list[k]))
1022 continue;
1023 list_range.min = min(list_range.min, list[k]);
1024 list_range.max = max(list_range.max, list[k]);
1025 }
1026 return snd_interval_refine(i, &list_range);
1027}
1028EXPORT_SYMBOL(snd_interval_list);
1029
1030/**
1031 * snd_interval_ranges - refine the interval value from the list of ranges
1032 * @i: the interval value to refine
1033 * @count: the number of elements in the list of ranges
1034 * @ranges: the ranges list
1035 * @mask: the bit-mask to evaluate
1036 *
1037 * Refines the interval value from the list of ranges.
1038 * When mask is non-zero, only the elements corresponding to bit 1 are
1039 * evaluated.
1040 *
1041 * Return: Positive if the value is changed, zero if it's not changed, or a
1042 * negative error code.
1043 */
1044int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1045 const struct snd_interval *ranges, unsigned int mask)
1046{
1047 unsigned int k;
1048 struct snd_interval range_union;
1049 struct snd_interval range;
1050
1051 if (!count) {
1052 snd_interval_none(i);
1053 return -EINVAL;
1054 }
1055 snd_interval_any(&range_union);
1056 range_union.min = UINT_MAX;
1057 range_union.max = 0;
1058 for (k = 0; k < count; k++) {
1059 if (mask && !(mask & (1 << k)))
1060 continue;
1061 snd_interval_copy(&range, &ranges[k]);
1062 if (snd_interval_refine(&range, i) < 0)
1063 continue;
1064 if (snd_interval_empty(&range))
1065 continue;
1066
1067 if (range.min < range_union.min) {
1068 range_union.min = range.min;
1069 range_union.openmin = 1;
1070 }
1071 if (range.min == range_union.min && !range.openmin)
1072 range_union.openmin = 0;
1073 if (range.max > range_union.max) {
1074 range_union.max = range.max;
1075 range_union.openmax = 1;
1076 }
1077 if (range.max == range_union.max && !range.openmax)
1078 range_union.openmax = 0;
1079 }
1080 return snd_interval_refine(i, &range_union);
1081}
1082EXPORT_SYMBOL(snd_interval_ranges);
1083
1084static int snd_interval_step(struct snd_interval *i, unsigned int step)
1085{
1086 unsigned int n;
1087 int changed = 0;
1088 n = i->min % step;
1089 if (n != 0 || i->openmin) {
1090 i->min += step - n;
1091 i->openmin = 0;
1092 changed = 1;
1093 }
1094 n = i->max % step;
1095 if (n != 0 || i->openmax) {
1096 i->max -= n;
1097 i->openmax = 0;
1098 changed = 1;
1099 }
1100 if (snd_interval_checkempty(i)) {
1101 i->empty = 1;
1102 return -EINVAL;
1103 }
1104 return changed;
1105}
1106
1107/* Info constraints helpers */
1108
1109/**
1110 * snd_pcm_hw_rule_add - add the hw-constraint rule
1111 * @runtime: the pcm runtime instance
1112 * @cond: condition bits
1113 * @var: the variable to evaluate
1114 * @func: the evaluation function
1115 * @private: the private data pointer passed to function
1116 * @dep: the dependent variables
1117 *
1118 * Return: Zero if successful, or a negative error code on failure.
1119 */
1120int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1121 int var,
1122 snd_pcm_hw_rule_func_t func, void *private,
1123 int dep, ...)
1124{
1125 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1126 struct snd_pcm_hw_rule *c;
1127 unsigned int k;
1128 va_list args;
1129 va_start(args, dep);
1130 if (constrs->rules_num >= constrs->rules_all) {
1131 struct snd_pcm_hw_rule *new;
1132 unsigned int new_rules = constrs->rules_all + 16;
1133 new = krealloc_array(constrs->rules, new_rules,
1134 sizeof(*c), GFP_KERNEL);
1135 if (!new) {
1136 va_end(args);
1137 return -ENOMEM;
1138 }
1139 constrs->rules = new;
1140 constrs->rules_all = new_rules;
1141 }
1142 c = &constrs->rules[constrs->rules_num];
1143 c->cond = cond;
1144 c->func = func;
1145 c->var = var;
1146 c->private = private;
1147 k = 0;
1148 while (1) {
1149 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1150 va_end(args);
1151 return -EINVAL;
1152 }
1153 c->deps[k++] = dep;
1154 if (dep < 0)
1155 break;
1156 dep = va_arg(args, int);
1157 }
1158 constrs->rules_num++;
1159 va_end(args);
1160 return 0;
1161}
1162EXPORT_SYMBOL(snd_pcm_hw_rule_add);
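/*
 * Editor's note: illustrative sketch of a custom dependency rule (not from
 * this file; the rule and the "my_" names are hypothetical and deliberately
 * simplified).  The rule forces mono whenever the rate range still allows
 * rates below 44.1 kHz:
 *
 *	static int my_hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *					       struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c = hw_param_interval(params,
 *					SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r = hw_param_interval_c(params,
 *					SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 1, .integer = 1 };
 *
 *		if (r->min >= 44100)
 *			return 0;	// no extra restriction
 *		return snd_interval_refine(c, &t);
 *	}
 *
 *	// in the driver's .open callback:
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *				  my_hw_rule_channels_by_rate, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */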
1163
1164/**
1165 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1166 * @runtime: PCM runtime instance
1167 * @var: hw_params variable to apply the mask
1168 * @mask: the bitmap mask
1169 *
1170 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1171 *
1172 * Return: Zero if successful, or a negative error code on failure.
1173 */
1174int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1175 u_int32_t mask)
1176{
1177 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1178 struct snd_mask *maskp = constrs_mask(constrs, var);
1179 *maskp->bits &= mask;
1180 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1181 if (*maskp->bits == 0)
1182 return -EINVAL;
1183 return 0;
1184}
1185
1186/**
1187 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1188 * @runtime: PCM runtime instance
1189 * @var: hw_params variable to apply the mask
1190 * @mask: the 64bit bitmap mask
1191 *
1192 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1193 *
1194 * Return: Zero if successful, or a negative error code on failure.
1195 */
1196int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1197 u_int64_t mask)
1198{
1199 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1200 struct snd_mask *maskp = constrs_mask(constrs, var);
1201 maskp->bits[0] &= (u_int32_t)mask;
1202 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1203 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1204 if (! maskp->bits[0] && ! maskp->bits[1])
1205 return -EINVAL;
1206 return 0;
1207}
1208EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1209
1210/**
1211 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1212 * @runtime: PCM runtime instance
1213 * @var: hw_params variable to apply the integer constraint
1214 *
1215 * Apply the constraint of integer to an interval parameter.
1216 *
1217 * Return: Positive if the value is changed, zero if it's not changed, or a
1218 * negative error code.
1219 */
1220int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1221{
1222 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 return snd_interval_setinteger(constrs_interval(constrs, var));
1224}
1225EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
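/*
 * Editor's note: illustrative sketch, not from this file.  The most common
 * use is to request a whole number of periods per buffer from a driver's
 * .open callback:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 */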
1226
1227/**
1228 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1229 * @runtime: PCM runtime instance
1230 * @var: hw_params variable to apply the range
1231 * @min: the minimal value
1232 * @max: the maximal value
1233 *
1234 * Apply the min/max range constraint to an interval parameter.
1235 *
1236 * Return: Positive if the value is changed, zero if it's not changed, or a
1237 * negative error code.
1238 */
1239int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1240 unsigned int min, unsigned int max)
1241{
1242 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1243 struct snd_interval t;
1244 t.min = min;
1245 t.max = max;
1246 t.openmin = t.openmax = 0;
1247 t.integer = 0;
1248 return snd_interval_refine(constrs_interval(constrs, var), &t);
1249}
1250EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
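/*
 * Editor's note: illustrative sketch, not from this file; the limits are
 * hypothetical.  Bounding the total buffer size from .open:
 *
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 65536);
 *	if (err < 0)
 *		return err;
 */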
1251
1252static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1253 struct snd_pcm_hw_rule *rule)
1254{
1255 struct snd_pcm_hw_constraint_list *list = rule->private;
1256 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1257}
1258
1259
1260/**
1261 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1262 * @runtime: PCM runtime instance
1263 * @cond: condition bits
1264 * @var: hw_params variable to apply the list constraint
1265 * @l: list
1266 *
1267 * Apply the list of constraints to an interval parameter.
1268 *
1269 * Return: Zero if successful, or a negative error code on failure.
1270 */
1271int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1272 unsigned int cond,
1273 snd_pcm_hw_param_t var,
1274 const struct snd_pcm_hw_constraint_list *l)
1275{
1276 return snd_pcm_hw_rule_add(runtime, cond, var,
1277 snd_pcm_hw_rule_list, (void *)l,
1278 var, -1);
1279}
1280EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
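/*
 * Editor's note: illustrative sketch, not from this file; the rate values are
 * hypothetical.  Restricting the rate to a discrete set of values:
 *
 *	static const unsigned int my_rates[] = { 8000, 16000, 32000, 48000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_constraints = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *		.mask = 0,
 *	};
 *
 *	// in .open:
 *	err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_constraints);
 */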
1281
1282static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1283 struct snd_pcm_hw_rule *rule)
1284{
1285 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1286 return snd_interval_ranges(hw_param_interval(params, rule->var),
1287 r->count, r->ranges, r->mask);
1288}
1289
1290
1291/**
1292 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1293 * @runtime: PCM runtime instance
1294 * @cond: condition bits
1295 * @var: hw_params variable to apply the list of range constraints
1296 * @r: ranges
1297 *
1298 * Apply the list of range constraints to an interval parameter.
1299 *
1300 * Return: Zero if successful, or a negative error code on failure.
1301 */
1302int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1303 unsigned int cond,
1304 snd_pcm_hw_param_t var,
1305 const struct snd_pcm_hw_constraint_ranges *r)
1306{
1307 return snd_pcm_hw_rule_add(runtime, cond, var,
1308 snd_pcm_hw_rule_ranges, (void *)r,
1309 var, -1);
1310}
1311EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1312
1313static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1314 struct snd_pcm_hw_rule *rule)
1315{
1316 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1317 unsigned int num = 0, den = 0;
1318 int err;
1319 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1320 r->nrats, r->rats, &num, &den);
1321 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1322 params->rate_num = num;
1323 params->rate_den = den;
1324 }
1325 return err;
1326}
1327
1328/**
1329 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1330 * @runtime: PCM runtime instance
1331 * @cond: condition bits
1332 * @var: hw_params variable to apply the ratnums constraint
1333 * @r: struct snd_ratnums constraints
1334 *
1335 * Return: Zero if successful, or a negative error code on failure.
1336 */
1337int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1338 unsigned int cond,
1339 snd_pcm_hw_param_t var,
1340 const struct snd_pcm_hw_constraint_ratnums *r)
1341{
1342 return snd_pcm_hw_rule_add(runtime, cond, var,
1343 snd_pcm_hw_rule_ratnums, (void *)r,
1344 var, -1);
1345}
1346EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
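/*
 * Editor's note: illustrative sketch, not from this file; the clock value is
 * hypothetical.  For hardware that derives the sample rate by dividing a
 * fixed master clock, the reachable rates are num/den for den in
 * [den_min, den_max] stepped by den_step:
 *
 *	static const struct snd_ratnum my_clock = {
 *		.num = 24576000,
 *		.den_min = 256,
 *		.den_max = 3072,
 *		.den_step = 1,
 *	};
 *	static const struct snd_pcm_hw_constraint_ratnums my_rate_ratnums = {
 *		.nrats = 1,
 *		.rats = &my_clock,
 *	};
 *
 *	// in .open:
 *	err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *					    SNDRV_PCM_HW_PARAM_RATE,
 *					    &my_rate_ratnums);
 */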
1347
1348static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1349 struct snd_pcm_hw_rule *rule)
1350{
1351 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1352 unsigned int num = 0, den = 0;
1353 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1354 r->nrats, r->rats, &num, &den);
1355 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1356 params->rate_num = num;
1357 params->rate_den = den;
1358 }
1359 return err;
1360}
1361
1362/**
1363 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1364 * @runtime: PCM runtime instance
1365 * @cond: condition bits
1366 * @var: hw_params variable to apply the ratdens constraint
1367 * @r: struct snd_ratdens constraints
1368 *
1369 * Return: Zero if successful, or a negative error code on failure.
1370 */
1371int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1372 unsigned int cond,
1373 snd_pcm_hw_param_t var,
1374 const struct snd_pcm_hw_constraint_ratdens *r)
1375{
1376 return snd_pcm_hw_rule_add(runtime, cond, var,
1377 snd_pcm_hw_rule_ratdens, (void *)r,
1378 var, -1);
1379}
1380EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1381
1382static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1383 struct snd_pcm_hw_rule *rule)
1384{
1385 unsigned int l = (unsigned long) rule->private;
1386 int width = l & 0xffff;
1387 unsigned int msbits = l >> 16;
1388 const struct snd_interval *i =
1389 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1390
1391 if (!snd_interval_single(i))
1392 return 0;
1393
1394 if ((snd_interval_value(i) == width) ||
1395 (width == 0 && snd_interval_value(i) > msbits))
1396 params->msbits = min_not_zero(params->msbits, msbits);
1397
1398 return 0;
1399}
1400
1401/**
1402 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1403 * @runtime: PCM runtime instance
1404 * @cond: condition bits
1405 * @width: sample bits width
1406 * @msbits: msbits width
1407 *
1408 * This constraint will set the number of most significant bits (msbits) if a
1409 * sample format with the specified width has been selected. If width is set to 0,
1410 * the msbits will be set for any sample format with a width larger than the
1411 * specified msbits.
1412 *
1413 * Return: Zero if successful, or a negative error code on failure.
1414 */
1415int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1416 unsigned int cond,
1417 unsigned int width,
1418 unsigned int msbits)
1419{
1420 unsigned long l = (msbits << 16) | width;
1421 return snd_pcm_hw_rule_add(runtime, cond, -1,
1422 snd_pcm_hw_rule_msbits,
1423 (void*) l,
1424 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1425}
1426EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
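/*
 * Editor's note: illustrative sketch, not from this file.  A typical use is
 * hardware that transfers 32-bit samples but only resolves 24 bits:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */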
1427
1428static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1429 struct snd_pcm_hw_rule *rule)
1430{
1431 unsigned long step = (unsigned long) rule->private;
1432 return snd_interval_step(hw_param_interval(params, rule->var), step);
1433}
1434
1435/**
1436 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1437 * @runtime: PCM runtime instance
1438 * @cond: condition bits
1439 * @var: hw_params variable to apply the step constraint
1440 * @step: step size
1441 *
1442 * Return: Zero if successful, or a negative error code on failure.
1443 */
1444int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1445 unsigned int cond,
1446 snd_pcm_hw_param_t var,
1447 unsigned long step)
1448{
1449 return snd_pcm_hw_rule_add(runtime, cond, var,
1450 snd_pcm_hw_rule_step, (void *) step,
1451 var, -1);
1452}
1453EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
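/*
 * Editor's note: illustrative sketch, not from this file; the alignment is
 * hypothetical.  Forcing the period size to a DMA-friendly multiple:
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
 *	if (err < 0)
 *		return err;
 */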
1454
1455static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1456{
1457 static const unsigned int pow2_sizes[] = {
1458 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1459 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1460 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1461 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1462 };
1463 return snd_interval_list(hw_param_interval(params, rule->var),
1464 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1465}
1466
1467/**
1468 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1469 * @runtime: PCM runtime instance
1470 * @cond: condition bits
1471 * @var: hw_params variable to apply the power-of-2 constraint
1472 *
1473 * Return: Zero if successful, or a negative error code on failure.
1474 */
1475int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1476 unsigned int cond,
1477 snd_pcm_hw_param_t var)
1478{
1479 return snd_pcm_hw_rule_add(runtime, cond, var,
1480 snd_pcm_hw_rule_pow2, NULL,
1481 var, -1);
1482}
1483EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1484
1485static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1486 struct snd_pcm_hw_rule *rule)
1487{
1488 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1489 struct snd_interval *rate;
1490
1491 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1492 return snd_interval_list(rate, 1, &base_rate, 0);
1493}
1494
1495/**
1496 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1497 * @runtime: PCM runtime instance
1498 * @base_rate: the rate at which the hardware does not resample
1499 *
1500 * Return: Zero if successful, or a negative error code on failure.
1501 */
1502int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1503 unsigned int base_rate)
1504{
1505 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1506 SNDRV_PCM_HW_PARAM_RATE,
1507 snd_pcm_hw_rule_noresample_func,
1508 (void *)(uintptr_t)base_rate,
1509 SNDRV_PCM_HW_PARAM_RATE, -1);
1510}
1511EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
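/*
 * Editor's note: illustrative sketch, not from this file; 48000 is a
 * hypothetical native rate.  Called from .open, this lets applications opt
 * out of hardware resampling:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *	if (err < 0)
 *		return err;
 */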
1512
1513static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1514 snd_pcm_hw_param_t var)
1515{
1516 if (hw_is_mask(var)) {
1517 snd_mask_any(hw_param_mask(params, var));
1518 params->cmask |= 1 << var;
1519 params->rmask |= 1 << var;
1520 return;
1521 }
1522 if (hw_is_interval(var)) {
1523 snd_interval_any(hw_param_interval(params, var));
1524 params->cmask |= 1 << var;
1525 params->rmask |= 1 << var;
1526 return;
1527 }
1528 snd_BUG();
1529}
1530
1531void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1532{
1533 unsigned int k;
1534 memset(params, 0, sizeof(*params));
1535 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1536 _snd_pcm_hw_param_any(params, k);
1537 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1538 _snd_pcm_hw_param_any(params, k);
1539 params->info = ~0U;
1540}
1541EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1542
1543/**
1544 * snd_pcm_hw_param_value - return @params field @var value
1545 * @params: the hw_params instance
1546 * @var: parameter to retrieve
1547 * @dir: pointer to the direction (-1,0,1) or %NULL
1548 *
1549 * Return: The value for field @var if it's fixed in configuration space
1550 * defined by @params. -%EINVAL otherwise.
1551 */
1552int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1553 snd_pcm_hw_param_t var, int *dir)
1554{
1555 if (hw_is_mask(var)) {
1556 const struct snd_mask *mask = hw_param_mask_c(params, var);
1557 if (!snd_mask_single(mask))
1558 return -EINVAL;
1559 if (dir)
1560 *dir = 0;
1561 return snd_mask_value(mask);
1562 }
1563 if (hw_is_interval(var)) {
1564 const struct snd_interval *i = hw_param_interval_c(params, var);
1565 if (!snd_interval_single(i))
1566 return -EINVAL;
1567 if (dir)
1568 *dir = i->openmin;
1569 return snd_interval_value(i);
1570 }
1571 return -EINVAL;
1572}
1573EXPORT_SYMBOL(snd_pcm_hw_param_value);
1574
1575void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1576 snd_pcm_hw_param_t var)
1577{
1578 if (hw_is_mask(var)) {
1579 snd_mask_none(hw_param_mask(params, var));
1580 params->cmask |= 1 << var;
1581 params->rmask |= 1 << var;
1582 } else if (hw_is_interval(var)) {
1583 snd_interval_none(hw_param_interval(params, var));
1584 params->cmask |= 1 << var;
1585 params->rmask |= 1 << var;
1586 } else {
1587 snd_BUG();
1588 }
1589}
1590EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1591
1592static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1593 snd_pcm_hw_param_t var)
1594{
1595 int changed;
1596 if (hw_is_mask(var))
1597 changed = snd_mask_refine_first(hw_param_mask(params, var));
1598 else if (hw_is_interval(var))
1599 changed = snd_interval_refine_first(hw_param_interval(params, var));
1600 else
1601 return -EINVAL;
1602 if (changed > 0) {
1603 params->cmask |= 1 << var;
1604 params->rmask |= 1 << var;
1605 }
1606 return changed;
1607}
1608
1609
1610/**
1611 * snd_pcm_hw_param_first - refine config space and return minimum value
1612 * @pcm: PCM instance
1613 * @params: the hw_params instance
1614 * @var: parameter to retrieve
1615 * @dir: pointer to the direction (-1,0,1) or %NULL
1616 *
1617 * Inside configuration space defined by @params remove from @var all
1618 * values > minimum. Reduce configuration space accordingly.
1619 *
1620 * Return: The minimum, or a negative error code on failure.
1621 */
1622int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1623 struct snd_pcm_hw_params *params,
1624 snd_pcm_hw_param_t var, int *dir)
1625{
1626 int changed = _snd_pcm_hw_param_first(params, var);
1627 if (changed < 0)
1628 return changed;
1629 if (params->rmask) {
1630 int err = snd_pcm_hw_refine(pcm, params);
1631 if (err < 0)
1632 return err;
1633 }
1634 return snd_pcm_hw_param_value(params, var, dir);
1635}
1636EXPORT_SYMBOL(snd_pcm_hw_param_first);
1637
1638static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1639 snd_pcm_hw_param_t var)
1640{
1641 int changed;
1642 if (hw_is_mask(var))
1643 changed = snd_mask_refine_last(hw_param_mask(params, var));
1644 else if (hw_is_interval(var))
1645 changed = snd_interval_refine_last(hw_param_interval(params, var));
1646 else
1647 return -EINVAL;
1648 if (changed > 0) {
1649 params->cmask |= 1 << var;
1650 params->rmask |= 1 << var;
1651 }
1652 return changed;
1653}
1654
1655
1656/**
1657 * snd_pcm_hw_param_last - refine config space and return maximum value
1658 * @pcm: PCM instance
1659 * @params: the hw_params instance
1660 * @var: parameter to retrieve
1661 * @dir: pointer to the direction (-1,0,1) or %NULL
1662 *
1663 * Inside configuration space defined by @params remove from @var all
1664 * values < maximum. Reduce configuration space accordingly.
1665 *
1666 * Return: The maximum, or a negative error code on failure.
1667 */
1668int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1669 struct snd_pcm_hw_params *params,
1670 snd_pcm_hw_param_t var, int *dir)
1671{
1672 int changed = _snd_pcm_hw_param_last(params, var);
1673 if (changed < 0)
1674 return changed;
1675 if (params->rmask) {
1676 int err = snd_pcm_hw_refine(pcm, params);
1677 if (err < 0)
1678 return err;
1679 }
1680 return snd_pcm_hw_param_value(params, var, dir);
1681}
1682EXPORT_SYMBOL(snd_pcm_hw_param_last);
1683
1684static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1685 void *arg)
1686{
1687 struct snd_pcm_runtime *runtime = substream->runtime;
1688 unsigned long flags;
1689 snd_pcm_stream_lock_irqsave(substream, flags);
1690 if (snd_pcm_running(substream) &&
1691 snd_pcm_update_hw_ptr(substream) >= 0)
1692 runtime->status->hw_ptr %= runtime->buffer_size;
1693 else {
1694 runtime->status->hw_ptr = 0;
1695 runtime->hw_ptr_wrap = 0;
1696 }
1697 snd_pcm_stream_unlock_irqrestore(substream, flags);
1698 return 0;
1699}
1700
1701static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1702 void *arg)
1703{
1704 struct snd_pcm_channel_info *info = arg;
1705 struct snd_pcm_runtime *runtime = substream->runtime;
1706 int width;
1707 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1708 info->offset = -1;
1709 return 0;
1710 }
1711 width = snd_pcm_format_physical_width(runtime->format);
1712 if (width < 0)
1713 return width;
1714 info->offset = 0;
1715 switch (runtime->access) {
1716 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1717 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1718 info->first = info->channel * width;
1719 info->step = runtime->channels * width;
1720 break;
1721 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1722 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1723 {
1724 size_t size = runtime->dma_bytes / runtime->channels;
1725 info->first = info->channel * size * 8;
1726 info->step = width;
1727 break;
1728 }
1729 default:
1730 snd_BUG();
1731 break;
1732 }
1733 return 0;
1734}
1735
1736static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1737 void *arg)
1738{
1739 struct snd_pcm_hw_params *params = arg;
1740 snd_pcm_format_t format;
1741 int channels;
1742 ssize_t frame_size;
1743
1744 params->fifo_size = substream->runtime->hw.fifo_size;
1745 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1746 format = params_format(params);
1747 channels = params_channels(params);
1748 frame_size = snd_pcm_format_size(format, channels);
1749 if (frame_size > 0)
1750 params->fifo_size /= frame_size;
1751 }
1752 return 0;
1753}
1754
1755/**
1756 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1757 * @substream: the pcm substream instance
1758 * @cmd: ioctl command
1759 * @arg: ioctl argument
1760 *
1761 * Processes the generic ioctl commands for PCM.
1762 * Can be passed as the ioctl callback for PCM ops.
1763 *
1764 * Return: Zero if successful, or a negative error code on failure.
1765 */
1766int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1767 unsigned int cmd, void *arg)
1768{
1769 switch (cmd) {
1770 case SNDRV_PCM_IOCTL1_RESET:
1771 return snd_pcm_lib_ioctl_reset(substream, arg);
1772 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1773 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1774 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1775 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1776 }
1777 return -ENXIO;
1778}
1779EXPORT_SYMBOL(snd_pcm_lib_ioctl);
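/*
 * Editor's note: illustrative sketch, not from this file; the "my_" callbacks
 * are hypothetical.  The helper is simply wired into the operator table:
 *
 *	static const struct snd_pcm_ops my_playback_ops = {
 *		.open		= my_pcm_open,
 *		.close		= my_pcm_close,
 *		.ioctl		= snd_pcm_lib_ioctl,
 *		.hw_params	= my_pcm_hw_params,
 *		.hw_free	= my_pcm_hw_free,
 *		.prepare	= my_pcm_prepare,
 *		.trigger	= my_pcm_trigger,
 *		.pointer	= my_pcm_pointer,
 *	};
 */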
1780
1781/**
1782 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1783 * under acquired lock of PCM substream.
1784 * @substream: the instance of pcm substream.
1785 *
1786 * This function is called when a batch of audio data frames of the same size as the period of the
 1787 * buffer has been processed in the audio data transfer.
1788 *
1789 * The call of this function updates the runtime status with the latest position of the audio
 1790 * data transfer, checks for buffer overrun and underrun, wakes up user processes waiting for
 1791 * available audio data frames, samples the audio timestamp, and stops or drains the PCM
 1792 * substream according to the configured thresholds.
1793 *
1794 * The function is intended for the case that the PCM driver operates on audio data frames under
 1795 * the acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops in
 1796 * process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
 1797 * instead, since the lock of the PCM substream should be acquired in advance.
1798 *
1799 * Developers should pay attention that some callbacks in &snd_pcm_ops may be invoked by this
 1800 * function:
1801 *
1802 * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
1803 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1804 * - .get_time_info - to retrieve audio time stamp if needed.
1805 *
1806 * Even if more than one periods have elapsed since the last call, you have to call this only once.
1807 */
1808void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1809{
1810 struct snd_pcm_runtime *runtime;
1811
1812 if (PCM_RUNTIME_CHECK(substream))
1813 return;
1814 runtime = substream->runtime;
1815
1816 if (!snd_pcm_running(substream) ||
1817 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1818 goto _end;
1819
1820#ifdef CONFIG_SND_PCM_TIMER
1821 if (substream->timer_running)
1822 snd_timer_interrupt(substream->timer, 1);
1823#endif
1824 _end:
1825 snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1826}
1827EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1828
1829/**
1830 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1831 * PCM substream.
1832 * @substream: the instance of PCM substream.
1833 *
1834 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
 1835 * it acquires the lock of the PCM substream by itself.
1836 *
1837 * It's typically called from an IRQ handler when a hardware IRQ occurs to notify that a batch of
 1838 * audio data frames of the same size as the period of the buffer has been processed in the audio
 1839 * data transfer.
1840 */
1841void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1842{
1843 unsigned long flags;
1844
1845 if (snd_BUG_ON(!substream))
1846 return;
1847
1848 snd_pcm_stream_lock_irqsave(substream, flags);
1849 snd_pcm_period_elapsed_under_stream_lock(substream);
1850 snd_pcm_stream_unlock_irqrestore(substream, flags);
1851}
1852EXPORT_SYMBOL(snd_pcm_period_elapsed);
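/*
 * Editor's note: illustrative sketch, not from this file; "struct my_chip"
 * and my_period_done() are hypothetical.  The usual caller is the driver's
 * interrupt handler, once per elapsed period:
 *
 *	static irqreturn_t my_pcm_irq(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *
 *		if (!my_period_done(chip))
 *			return IRQ_NONE;
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */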
1853
/*
 * Wait until avail_min data becomes available.
 * Returns a negative error code if any error occurs during the operation.
 * The available space is stored in availp. When err = 0 and avail = 0
 * on a capture stream, it indicates the stream is in DRAINING state.
 */
1860static int wait_for_avail(struct snd_pcm_substream *substream,
1861 snd_pcm_uframes_t *availp)
1862{
1863 struct snd_pcm_runtime *runtime = substream->runtime;
1864 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1865 wait_queue_entry_t wait;
1866 int err = 0;
1867 snd_pcm_uframes_t avail = 0;
1868 long wait_time, tout;
1869
1870 init_waitqueue_entry(&wait, current);
1871 set_current_state(TASK_INTERRUPTIBLE);
1872 add_wait_queue(&runtime->tsleep, &wait);
1873
1874 if (runtime->no_period_wakeup)
1875 wait_time = MAX_SCHEDULE_TIMEOUT;
1876 else {
1877 /* use wait time from substream if available */
1878 if (substream->wait_time) {
1879 wait_time = substream->wait_time;
1880 } else {
1881 wait_time = 10;
1882
1883 if (runtime->rate) {
1884 long t = runtime->period_size * 2 /
1885 runtime->rate;
1886 wait_time = max(t, wait_time);
1887 }
1888 wait_time = msecs_to_jiffies(wait_time * 1000);
1889 }
1890 }
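	/*
	 * Worked example for the computation above (illustrative values):
	 * at rate 48000 with a period of 1024 frames, period_size * 2 / rate
	 * is 2048 / 48000 = 0 in integer arithmetic, so the 10 second default
	 * wins and the timeout becomes msecs_to_jiffies(10 * 1000).  Only
	 * periods holding several seconds of audio raise it above the default.
	 */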
1891
1892 for (;;) {
1893 if (signal_pending(current)) {
1894 err = -ERESTARTSYS;
1895 break;
1896 }
1897
		/*
		 * We need to check first whether space already became
		 * available (and thus the wakeup already happened) to close
		 * the race of space having become available before we sleep.
		 * This check must happen after we have been added to the
		 * waitqueue and after the current state has been set to
		 * TASK_INTERRUPTIBLE.
		 */
1905 avail = snd_pcm_avail(substream);
1906 if (avail >= runtime->twake)
1907 break;
1908 snd_pcm_stream_unlock_irq(substream);
1909
1910 tout = schedule_timeout(wait_time);
1911
1912 snd_pcm_stream_lock_irq(substream);
1913 set_current_state(TASK_INTERRUPTIBLE);
1914 switch (runtime->state) {
1915 case SNDRV_PCM_STATE_SUSPENDED:
1916 err = -ESTRPIPE;
1917 goto _endloop;
1918 case SNDRV_PCM_STATE_XRUN:
1919 err = -EPIPE;
1920 goto _endloop;
1921 case SNDRV_PCM_STATE_DRAINING:
1922 if (is_playback)
1923 err = -EPIPE;
1924 else
1925 avail = 0; /* indicate draining */
1926 goto _endloop;
1927 case SNDRV_PCM_STATE_OPEN:
1928 case SNDRV_PCM_STATE_SETUP:
1929 case SNDRV_PCM_STATE_DISCONNECTED:
1930 err = -EBADFD;
1931 goto _endloop;
1932 case SNDRV_PCM_STATE_PAUSED:
1933 continue;
1934 }
1935 if (!tout) {
1936 pcm_dbg(substream->pcm,
1937 "%s write error (DMA or IRQ trouble?)\n",
1938 is_playback ? "playback" : "capture");
1939 err = -EIO;
1940 break;
1941 }
1942 }
1943 _endloop:
1944 set_current_state(TASK_RUNNING);
1945 remove_wait_queue(&runtime->tsleep, &wait);
1946 *availp = avail;
1947 return err;
1948}
1949
1950typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1951 int channel, unsigned long hwoff,
1952 void *buf, unsigned long bytes);
1953
1954typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1955 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1956
1957/* calculate the target DMA-buffer position to be written/read */
1958static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1959 int channel, unsigned long hwoff)
1960{
1961 return runtime->dma_area + hwoff +
1962 channel * (runtime->dma_bytes / runtime->channels);
1963}
1964
/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1966static int default_write_copy(struct snd_pcm_substream *substream,
1967 int channel, unsigned long hwoff,
1968 void *buf, unsigned long bytes)
1969{
1970 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1971 (void __user *)buf, bytes))
1972 return -EFAULT;
1973 return 0;
1974}
1975
1976/* default copy_kernel ops for write */
1977static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1978 int channel, unsigned long hwoff,
1979 void *buf, unsigned long bytes)
1980{
1981 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1982 return 0;
1983}
1984
/* fill silence instead of copying data; called as a transfer helper
 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
 * a NULL buffer is passed
 */
1989static int fill_silence(struct snd_pcm_substream *substream, int channel,
1990 unsigned long hwoff, void *buf, unsigned long bytes)
1991{
1992 struct snd_pcm_runtime *runtime = substream->runtime;
1993
1994 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1995 return 0;
1996 if (substream->ops->fill_silence)
1997 return substream->ops->fill_silence(substream, channel,
1998 hwoff, bytes);
1999
2000 snd_pcm_format_set_silence(runtime->format,
2001 get_dma_ptr(runtime, channel, hwoff),
2002 bytes_to_samples(runtime, bytes));
2003 return 0;
2004}
2005
/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
2007static int default_read_copy(struct snd_pcm_substream *substream,
2008 int channel, unsigned long hwoff,
2009 void *buf, unsigned long bytes)
2010{
2011 if (copy_to_user((void __user *)buf,
2012 get_dma_ptr(substream->runtime, channel, hwoff),
2013 bytes))
2014 return -EFAULT;
2015 return 0;
2016}
2017
2018/* default copy_kernel ops for read */
2019static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2020 int channel, unsigned long hwoff,
2021 void *buf, unsigned long bytes)
2022{
2023 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2024 return 0;
2025}
2026
2027/* call transfer function with the converted pointers and sizes;
2028 * for interleaved mode, it's one shot for all samples
2029 */
2030static int interleaved_copy(struct snd_pcm_substream *substream,
2031 snd_pcm_uframes_t hwoff, void *data,
2032 snd_pcm_uframes_t off,
2033 snd_pcm_uframes_t frames,
2034 pcm_transfer_f transfer)
2035{
2036 struct snd_pcm_runtime *runtime = substream->runtime;
2037
2038 /* convert to bytes */
2039 hwoff = frames_to_bytes(runtime, hwoff);
2040 off = frames_to_bytes(runtime, off);
2041 frames = frames_to_bytes(runtime, frames);
2042 return transfer(substream, 0, hwoff, data + off, frames);
2043}
2044
2045/* call transfer function with the converted pointers and sizes for each
2046 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2047 */
2048static int noninterleaved_copy(struct snd_pcm_substream *substream,
2049 snd_pcm_uframes_t hwoff, void *data,
2050 snd_pcm_uframes_t off,
2051 snd_pcm_uframes_t frames,
2052 pcm_transfer_f transfer)
2053{
2054 struct snd_pcm_runtime *runtime = substream->runtime;
2055 int channels = runtime->channels;
2056 void **bufs = data;
2057 int c, err;
2058
2059 /* convert to bytes; note that it's not frames_to_bytes() here.
2060 * in non-interleaved mode, we copy for each channel, thus
2061 * each copy is n_samples bytes x channels = whole frames.
2062 */
2063 off = samples_to_bytes(runtime, off);
2064 frames = samples_to_bytes(runtime, frames);
2065 hwoff = samples_to_bytes(runtime, hwoff);
2066 for (c = 0; c < channels; ++c, ++bufs) {
2067 if (!data || !*bufs)
2068 err = fill_silence(substream, c, hwoff, NULL, frames);
2069 else
2070 err = transfer(substream, c, hwoff, *bufs + off,
2071 frames);
2072 if (err < 0)
2073 return err;
2074 }
2075 return 0;
2076}
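/*
 * Worked example for the byte conversion in noninterleaved_copy() above
 * (illustrative values): with SNDRV_PCM_FORMAT_S16_LE and 2 channels, one
 * frame is 4 bytes interleaved, but each per-channel copy moves only
 * 2 bytes per frame.  Hence samples_to_bytes() is used: copying 256 frames
 * means 256 * 2 = 512 bytes per channel, and 512 bytes * 2 channels adds up
 * to the same 1024 bytes that frames_to_bytes(runtime, 256) would give for
 * the whole interleaved buffer.
 */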
2077
2078/* fill silence on the given buffer position;
2079 * called from snd_pcm_playback_silence()
2080 */
2081static int fill_silence_frames(struct snd_pcm_substream *substream,
2082 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2083{
2084 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2085 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2086 return interleaved_copy(substream, off, NULL, 0, frames,
2087 fill_silence);
2088 else
2089 return noninterleaved_copy(substream, off, NULL, 0, frames,
2090 fill_silence);
2091}
2092
2093/* sanity-check for read/write methods */
2094static int pcm_sanity_check(struct snd_pcm_substream *substream)
2095{
2096 struct snd_pcm_runtime *runtime;
2097 if (PCM_RUNTIME_CHECK(substream))
2098 return -ENXIO;
2099 runtime = substream->runtime;
2100 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2101 return -EINVAL;
2102 if (runtime->state == SNDRV_PCM_STATE_OPEN)
2103 return -EBADFD;
2104 return 0;
2105}
2106
2107static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2108{
2109 switch (runtime->state) {
2110 case SNDRV_PCM_STATE_PREPARED:
2111 case SNDRV_PCM_STATE_RUNNING:
2112 case SNDRV_PCM_STATE_PAUSED:
2113 return 0;
2114 case SNDRV_PCM_STATE_XRUN:
2115 return -EPIPE;
2116 case SNDRV_PCM_STATE_SUSPENDED:
2117 return -ESTRPIPE;
2118 default:
2119 return -EBADFD;
2120 }
2121}
2122
/* update to the given appl_ptr and call the ack callback if needed;
 * when an error is returned, roll back to the original value
 */
2126int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2127 snd_pcm_uframes_t appl_ptr)
2128{
2129 struct snd_pcm_runtime *runtime = substream->runtime;
2130 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2131 snd_pcm_sframes_t diff;
2132 int ret;
2133
2134 if (old_appl_ptr == appl_ptr)
2135 return 0;
2136
2137 if (appl_ptr >= runtime->boundary)
2138 return -EINVAL;
2139 /*
2140 * check if a rewind is requested by the application
2141 */
2142 if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2143 diff = appl_ptr - old_appl_ptr;
2144 if (diff >= 0) {
2145 if (diff > runtime->buffer_size)
2146 return -EINVAL;
2147 } else {
2148 if (runtime->boundary + diff > runtime->buffer_size)
2149 return -EINVAL;
2150 }
2151 }
2152
2153 runtime->control->appl_ptr = appl_ptr;
2154 if (substream->ops->ack) {
2155 ret = substream->ops->ack(substream);
2156 if (ret < 0) {
2157 runtime->control->appl_ptr = old_appl_ptr;
2158 return ret;
2159 }
2160 }
2161
2162 trace_applptr(substream, old_appl_ptr, appl_ptr);
2163
2164 return 0;
2165}
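/*
 * Worked example for the SNDRV_PCM_INFO_NO_REWINDS check above
 * (illustrative values, assuming buffer_size = 1024 and boundary =
 * 0x40000000): a forward move of appl_ptr from 100 to 600 gives
 * diff = 500 <= buffer_size and is accepted; moving back from 100 to 90
 * gives diff = -10, so boundary + diff is far larger than buffer_size and
 * the rewind is rejected.  The negative branch only accepts the case where
 * appl_ptr legitimately wrapped around the boundary.
 */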
2166
2167/* the common loop for read/write data */
2168snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2169 void *data, bool interleaved,
2170 snd_pcm_uframes_t size, bool in_kernel)
2171{
2172 struct snd_pcm_runtime *runtime = substream->runtime;
2173 snd_pcm_uframes_t xfer = 0;
2174 snd_pcm_uframes_t offset = 0;
2175 snd_pcm_uframes_t avail;
2176 pcm_copy_f writer;
2177 pcm_transfer_f transfer;
2178 bool nonblock;
2179 bool is_playback;
2180 int err;
2181
2182 err = pcm_sanity_check(substream);
2183 if (err < 0)
2184 return err;
2185
2186 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2187 if (interleaved) {
2188 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2189 runtime->channels > 1)
2190 return -EINVAL;
2191 writer = interleaved_copy;
2192 } else {
2193 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2194 return -EINVAL;
2195 writer = noninterleaved_copy;
2196 }
2197
2198 if (!data) {
2199 if (is_playback)
2200 transfer = fill_silence;
2201 else
2202 return -EINVAL;
2203 } else if (in_kernel) {
2204 if (substream->ops->copy_kernel)
2205 transfer = substream->ops->copy_kernel;
2206 else
2207 transfer = is_playback ?
2208 default_write_copy_kernel : default_read_copy_kernel;
2209 } else {
2210 if (substream->ops->copy_user)
2211 transfer = (pcm_transfer_f)substream->ops->copy_user;
2212 else
2213 transfer = is_playback ?
2214 default_write_copy : default_read_copy;
2215 }
2216
2217 if (size == 0)
2218 return 0;
2219
2220 nonblock = !!(substream->f_flags & O_NONBLOCK);
2221
2222 snd_pcm_stream_lock_irq(substream);
2223 err = pcm_accessible_state(runtime);
2224 if (err < 0)
2225 goto _end_unlock;
2226
2227 runtime->twake = runtime->control->avail_min ? : 1;
2228 if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2229 snd_pcm_update_hw_ptr(substream);
2230
	/*
	 * Auto-start capture only when the requested size reaches
	 * start_threshold; with a smaller size we may wait indefinitely
	 * below until another thread starts the capture stream.
	 */
2235 if (!is_playback &&
2236 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2237 size >= runtime->start_threshold) {
2238 err = snd_pcm_start(substream);
2239 if (err < 0)
2240 goto _end_unlock;
2241 }
2242
2243 avail = snd_pcm_avail(substream);
2244
2245 while (size > 0) {
2246 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2247 snd_pcm_uframes_t cont;
2248 if (!avail) {
2249 if (!is_playback &&
2250 runtime->state == SNDRV_PCM_STATE_DRAINING) {
2251 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2252 goto _end_unlock;
2253 }
2254 if (nonblock) {
2255 err = -EAGAIN;
2256 goto _end_unlock;
2257 }
2258 runtime->twake = min_t(snd_pcm_uframes_t, size,
2259 runtime->control->avail_min ? : 1);
2260 err = wait_for_avail(substream, &avail);
2261 if (err < 0)
2262 goto _end_unlock;
2263 if (!avail)
2264 continue; /* draining */
2265 }
2266 frames = size > avail ? avail : size;
2267 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2268 appl_ofs = appl_ptr % runtime->buffer_size;
2269 cont = runtime->buffer_size - appl_ofs;
2270 if (frames > cont)
2271 frames = cont;
2272 if (snd_BUG_ON(!frames)) {
2273 err = -EINVAL;
2274 goto _end_unlock;
2275 }
2276 if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2277 err = -EBUSY;
2278 goto _end_unlock;
2279 }
2280 snd_pcm_stream_unlock_irq(substream);
2281 if (!is_playback)
2282 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2283 err = writer(substream, appl_ofs, data, offset, frames,
2284 transfer);
2285 if (is_playback)
2286 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2287 snd_pcm_stream_lock_irq(substream);
2288 atomic_dec(&runtime->buffer_accessing);
2289 if (err < 0)
2290 goto _end_unlock;
2291 err = pcm_accessible_state(runtime);
2292 if (err < 0)
2293 goto _end_unlock;
2294 appl_ptr += frames;
2295 if (appl_ptr >= runtime->boundary)
2296 appl_ptr -= runtime->boundary;
2297 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2298 if (err < 0)
2299 goto _end_unlock;
2300
2301 offset += frames;
2302 size -= frames;
2303 xfer += frames;
2304 avail -= frames;
2305 if (is_playback &&
2306 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2307 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2308 err = snd_pcm_start(substream);
2309 if (err < 0)
2310 goto _end_unlock;
2311 }
2312 }
2313 _end_unlock:
2314 runtime->twake = 0;
2315 if (xfer > 0 && err >= 0)
2316 snd_pcm_update_state(substream, runtime);
2317 snd_pcm_stream_unlock_irq(substream);
2318 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2319}
2320EXPORT_SYMBOL(__snd_pcm_lib_xfer);
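/*
 * Rough sketch of how this entry point is typically reached (for
 * illustration only; the actual inline wrappers live in include/sound/pcm.h
 * and may differ between kernel versions):
 *
 *	// write of interleaved frames from a user-space buffer
 *	static inline snd_pcm_sframes_t
 *	snd_pcm_lib_write(struct snd_pcm_substream *substream,
 *			  const void __user *buf, snd_pcm_uframes_t frames)
 *	{
 *		return __snd_pcm_lib_xfer(substream, (void __force *)buf,
 *					  true, frames, false);
 *	}
 *
 *	// write of interleaved frames from a kernel buffer
 *	static inline snd_pcm_sframes_t
 *	snd_pcm_kernel_write(struct snd_pcm_substream *substream,
 *			     const void *buf, snd_pcm_uframes_t frames)
 *	{
 *		return __snd_pcm_lib_xfer(substream, (void *)buf,
 *					  true, frames, true);
 *	}
 */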
2321
2322/*
2323 * standard channel mapping helpers
2324 */
2325
2326/* default channel maps for multi-channel playbacks, up to 8 channels */
2327const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2328 { .channels = 1,
2329 .map = { SNDRV_CHMAP_MONO } },
2330 { .channels = 2,
2331 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2332 { .channels = 4,
2333 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2334 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2335 { .channels = 6,
2336 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2337 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2338 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2339 { .channels = 8,
2340 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2341 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2342 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2343 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2344 { }
2345};
2346EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2347
2348/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2349const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2350 { .channels = 1,
2351 .map = { SNDRV_CHMAP_MONO } },
2352 { .channels = 2,
2353 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2354 { .channels = 4,
2355 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2356 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2357 { .channels = 6,
2358 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2359 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2360 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2361 { .channels = 8,
2362 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2363 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2364 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2365 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2366 { }
2367};
2368EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2369
2370static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2371{
2372 if (ch > info->max_channels)
2373 return false;
2374 return !info->channel_mask || (info->channel_mask & (1U << ch));
2375}
2376
2377static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2378 struct snd_ctl_elem_info *uinfo)
2379{
2380 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2381
2382 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2383 uinfo->count = info->max_channels;
2384 uinfo->value.integer.min = 0;
2385 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2386 return 0;
2387}
2388
/* get callback for channel map ctl element
 * stores the first channel map that matches the current channel count
 */
2392static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2393 struct snd_ctl_elem_value *ucontrol)
2394{
2395 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2396 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2397 struct snd_pcm_substream *substream;
2398 const struct snd_pcm_chmap_elem *map;
2399
2400 if (!info->chmap)
2401 return -EINVAL;
2402 substream = snd_pcm_chmap_substream(info, idx);
2403 if (!substream)
2404 return -ENODEV;
2405 memset(ucontrol->value.integer.value, 0,
2406 sizeof(long) * info->max_channels);
2407 if (!substream->runtime)
2408 return 0; /* no channels set */
2409 for (map = info->chmap; map->channels; map++) {
2410 int i;
2411 if (map->channels == substream->runtime->channels &&
2412 valid_chmap_channels(info, map->channels)) {
2413 for (i = 0; i < map->channels; i++)
2414 ucontrol->value.integer.value[i] = map->map[i];
2415 return 0;
2416 }
2417 }
2418 return -EINVAL;
2419}
2420
/* tlv callback for channel map ctl element
 * expands the pre-defined channel maps in the form of TLV data
 */
2424static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2425 unsigned int size, unsigned int __user *tlv)
2426{
2427 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2428 const struct snd_pcm_chmap_elem *map;
2429 unsigned int __user *dst;
2430 int c, count = 0;
2431
2432 if (!info->chmap)
2433 return -EINVAL;
2434 if (size < 8)
2435 return -ENOMEM;
2436 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2437 return -EFAULT;
2438 size -= 8;
2439 dst = tlv + 2;
2440 for (map = info->chmap; map->channels; map++) {
2441 int chs_bytes = map->channels * 4;
2442 if (!valid_chmap_channels(info, map->channels))
2443 continue;
2444 if (size < 8)
2445 return -ENOMEM;
2446 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2447 put_user(chs_bytes, dst + 1))
2448 return -EFAULT;
2449 dst += 2;
2450 size -= 8;
2451 count += 8;
2452 if (size < chs_bytes)
2453 return -ENOMEM;
2454 size -= chs_bytes;
2455 count += chs_bytes;
2456 for (c = 0; c < map->channels; c++) {
2457 if (put_user(map->map[c], dst))
2458 return -EFAULT;
2459 dst++;
2460 }
2461 }
2462 if (put_user(count, tlv + 1))
2463 return -EFAULT;
2464 return 0;
2465}
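/*
 * Resulting TLV layout, shown for a hypothetical chmap list with a single
 * stereo entry (each cell is one 32-bit word; the lengths are in bytes and
 * follow the count accumulation in the loop above):
 *
 *	[ SNDRV_CTL_TLVT_CONTAINER ][ 16 ]		<- tlv[0], tlv[1]
 *	[ SNDRV_CTL_TLVT_CHMAP_FIXED ][ 8 ]		<- per-map header
 *	[ SNDRV_CHMAP_FL ][ SNDRV_CHMAP_FR ]		<- 2 channels * 4 bytes
 *
 * The container length (16) covers the per-map header (8 bytes) plus the
 * channel positions (2 * 4 bytes).
 */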
2466
2467static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2468{
2469 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2470 info->pcm->streams[info->stream].chmap_kctl = NULL;
2471 kfree(info);
2472}
2473
2474/**
2475 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2476 * @pcm: the assigned PCM instance
2477 * @stream: stream direction
2478 * @chmap: channel map elements (for query)
2479 * @max_channels: the max number of channels for the stream
2480 * @private_value: the value passed to each kcontrol's private_value field
2481 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2482 *
2483 * Create channel-mapping control elements assigned to the given PCM stream(s).
2484 * Return: Zero if successful, or a negative error value.
2485 */
2486int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2487 const struct snd_pcm_chmap_elem *chmap,
2488 int max_channels,
2489 unsigned long private_value,
2490 struct snd_pcm_chmap **info_ret)
2491{
2492 struct snd_pcm_chmap *info;
2493 struct snd_kcontrol_new knew = {
2494 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2495 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2496 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2497 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2498 .info = pcm_chmap_ctl_info,
2499 .get = pcm_chmap_ctl_get,
2500 .tlv.c = pcm_chmap_ctl_tlv,
2501 };
2502 int err;
2503
2504 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2505 return -EBUSY;
2506 info = kzalloc(sizeof(*info), GFP_KERNEL);
2507 if (!info)
2508 return -ENOMEM;
2509 info->pcm = pcm;
2510 info->stream = stream;
2511 info->chmap = chmap;
2512 info->max_channels = max_channels;
2513 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2514 knew.name = "Playback Channel Map";
2515 else
2516 knew.name = "Capture Channel Map";
2517 knew.device = pcm->device;
2518 knew.count = pcm->streams[stream].substream_count;
2519 knew.private_value = private_value;
2520 info->kctl = snd_ctl_new1(&knew, info);
2521 if (!info->kctl) {
2522 kfree(info);
2523 return -ENOMEM;
2524 }
2525 info->kctl->private_free = pcm_chmap_ctl_private_free;
2526 err = snd_ctl_add(pcm->card, info->kctl);
2527 if (err < 0)
2528 return err;
2529 pcm->streams[stream].chmap_kctl = info->kctl;
2530 if (info_ret)
2531 *info_ret = info;
2532 return 0;
2533}
2534EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
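/*
 * Typical usage from a driver's PCM construction code (illustrative only;
 * the pcm variable and the desired channel count are assumptions of the
 * example):
 *
 *	struct snd_pcm_chmap *chmap;
 *	int err;
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 2, 0, &chmap);
 *	if (err < 0)
 *		return err;
 *	// optionally restrict the reported maps to 2-channel entries only
 *	chmap->channel_mask = 1U << 2;
 *
 * The chmap argument is usually snd_pcm_std_chmaps or snd_pcm_alt_chmaps
 * defined above, or a driver-specific list terminated by an empty entry.
 */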
126#ifdef CONFIG_SND_DEBUG
127void snd_pcm_debug_name(struct snd_pcm_substream *substream,
128 char *name, size_t len)
129{
130 snprintf(name, len, "pcmC%dD%d%c:%d",
131 substream->pcm->card->number,
132 substream->pcm->device,
133 substream->stream ? 'c' : 'p',
134 substream->number);
135}
136EXPORT_SYMBOL(snd_pcm_debug_name);
137#endif
138
139#define XRUN_DEBUG_BASIC (1<<0)
140#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
141#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
142
143#ifdef CONFIG_SND_PCM_XRUN_DEBUG
144
145#define xrun_debug(substream, mask) \
146 ((substream)->pstr->xrun_debug & (mask))
147#else
148#define xrun_debug(substream, mask) 0
149#endif
150
151#define dump_stack_on_xrun(substream) do { \
152 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
153 dump_stack(); \
154 } while (0)
155
156static void xrun(struct snd_pcm_substream *substream)
157{
158 struct snd_pcm_runtime *runtime = substream->runtime;
159
160 trace_xrun(substream);
161 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
162 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
163 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
164 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
165 char name[16];
166 snd_pcm_debug_name(substream, name, sizeof(name));
167 pcm_warn(substream->pcm, "XRUN: %s\n", name);
168 dump_stack_on_xrun(substream);
169 }
170}
171
172#ifdef CONFIG_SND_PCM_XRUN_DEBUG
173#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
174 do { \
175 trace_hw_ptr_error(substream, reason); \
176 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
177 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
178 (in_interrupt) ? 'Q' : 'P', ##args); \
179 dump_stack_on_xrun(substream); \
180 } \
181 } while (0)
182
183#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
184
185#define hw_ptr_error(substream, fmt, args...) do { } while (0)
186
187#endif
188
189int snd_pcm_update_state(struct snd_pcm_substream *substream,
190 struct snd_pcm_runtime *runtime)
191{
192 snd_pcm_uframes_t avail;
193
194 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
195 avail = snd_pcm_playback_avail(runtime);
196 else
197 avail = snd_pcm_capture_avail(runtime);
198 if (avail > runtime->avail_max)
199 runtime->avail_max = avail;
200 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
201 if (avail >= runtime->buffer_size) {
202 snd_pcm_drain_done(substream);
203 return -EPIPE;
204 }
205 } else {
206 if (avail >= runtime->stop_threshold) {
207 xrun(substream);
208 return -EPIPE;
209 }
210 }
211 if (runtime->twake) {
212 if (avail >= runtime->twake)
213 wake_up(&runtime->tsleep);
214 } else if (avail >= runtime->control->avail_min)
215 wake_up(&runtime->sleep);
216 return 0;
217}
218
219static void update_audio_tstamp(struct snd_pcm_substream *substream,
220 struct timespec *curr_tstamp,
221 struct timespec *audio_tstamp)
222{
223 struct snd_pcm_runtime *runtime = substream->runtime;
224 u64 audio_frames, audio_nsecs;
225 struct timespec driver_tstamp;
226
227 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
228 return;
229
230 if (!(substream->ops->get_time_info) ||
231 (runtime->audio_tstamp_report.actual_type ==
232 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
233
234 /*
235 * provide audio timestamp derived from pointer position
236 * add delay only if requested
237 */
238
239 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
240
241 if (runtime->audio_tstamp_config.report_delay) {
242 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
243 audio_frames -= runtime->delay;
244 else
245 audio_frames += runtime->delay;
246 }
247 audio_nsecs = div_u64(audio_frames * 1000000000LL,
248 runtime->rate);
249 *audio_tstamp = ns_to_timespec(audio_nsecs);
250 }
251 if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
252 runtime->status->audio_tstamp = *audio_tstamp;
253 runtime->status->tstamp = *curr_tstamp;
254 }
255
256 /*
257 * re-take a driver timestamp to let apps detect if the reference tstamp
258 * read by low-level hardware was provided with a delay
259 */
260 snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
261 runtime->driver_tstamp = driver_tstamp;
262}
263
264static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
265 unsigned int in_interrupt)
266{
267 struct snd_pcm_runtime *runtime = substream->runtime;
268 snd_pcm_uframes_t pos;
269 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
270 snd_pcm_sframes_t hdelta, delta;
271 unsigned long jdelta;
272 unsigned long curr_jiffies;
273 struct timespec curr_tstamp;
274 struct timespec audio_tstamp;
275 int crossed_boundary = 0;
276
277 old_hw_ptr = runtime->status->hw_ptr;
278
279 /*
280 * group pointer, time and jiffies reads to allow for more
281 * accurate correlations/corrections.
282 * The values are stored at the end of this routine after
283 * corrections for hw_ptr position
284 */
285 pos = substream->ops->pointer(substream);
286 curr_jiffies = jiffies;
287 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
288 if ((substream->ops->get_time_info) &&
289 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
290 substream->ops->get_time_info(substream, &curr_tstamp,
291 &audio_tstamp,
292 &runtime->audio_tstamp_config,
293 &runtime->audio_tstamp_report);
294
295 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
296 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
297 snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
298 } else
299 snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
300 }
301
302 if (pos == SNDRV_PCM_POS_XRUN) {
303 xrun(substream);
304 return -EPIPE;
305 }
306 if (pos >= runtime->buffer_size) {
307 if (printk_ratelimit()) {
308 char name[16];
309 snd_pcm_debug_name(substream, name, sizeof(name));
310 pcm_err(substream->pcm,
311 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
312 name, pos, runtime->buffer_size,
313 runtime->period_size);
314 }
315 pos = 0;
316 }
317 pos -= pos % runtime->min_align;
318 trace_hwptr(substream, pos, in_interrupt);
319 hw_base = runtime->hw_ptr_base;
320 new_hw_ptr = hw_base + pos;
321 if (in_interrupt) {
322 /* we know that one period was processed */
323 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
324 delta = runtime->hw_ptr_interrupt + runtime->period_size;
325 if (delta > new_hw_ptr) {
326 /* check for double acknowledged interrupts */
327 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
328 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
329 hw_base += runtime->buffer_size;
330 if (hw_base >= runtime->boundary) {
331 hw_base = 0;
332 crossed_boundary++;
333 }
334 new_hw_ptr = hw_base + pos;
335 goto __delta;
336 }
337 }
338 }
339 /* new_hw_ptr might be lower than old_hw_ptr in case when */
340 /* pointer crosses the end of the ring buffer */
341 if (new_hw_ptr < old_hw_ptr) {
342 hw_base += runtime->buffer_size;
343 if (hw_base >= runtime->boundary) {
344 hw_base = 0;
345 crossed_boundary++;
346 }
347 new_hw_ptr = hw_base + pos;
348 }
349 __delta:
350 delta = new_hw_ptr - old_hw_ptr;
351 if (delta < 0)
352 delta += runtime->boundary;
353
354 if (runtime->no_period_wakeup) {
355 snd_pcm_sframes_t xrun_threshold;
356 /*
357 * Without regular period interrupts, we have to check
358 * the elapsed time to detect xruns.
359 */
360 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
361 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
362 goto no_delta_check;
363 hdelta = jdelta - delta * HZ / runtime->rate;
364 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
365 while (hdelta > xrun_threshold) {
366 delta += runtime->buffer_size;
367 hw_base += runtime->buffer_size;
368 if (hw_base >= runtime->boundary) {
369 hw_base = 0;
370 crossed_boundary++;
371 }
372 new_hw_ptr = hw_base + pos;
373 hdelta -= runtime->hw_ptr_buffer_jiffies;
374 }
375 goto no_delta_check;
376 }
377
378 /* something must be really wrong */
379 if (delta >= runtime->buffer_size + runtime->period_size) {
380 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
381 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
382 substream->stream, (long)pos,
383 (long)new_hw_ptr, (long)old_hw_ptr);
384 return 0;
385 }
386
387 /* Do jiffies check only in xrun_debug mode */
388 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
389 goto no_jiffies_check;
390
391 /* Skip the jiffies check for hardwares with BATCH flag.
392 * Such hardware usually just increases the position at each IRQ,
393 * thus it can't give any strange position.
394 */
395 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
396 goto no_jiffies_check;
397 hdelta = delta;
398 if (hdelta < runtime->delay)
399 goto no_jiffies_check;
400 hdelta -= runtime->delay;
401 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
402 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
403 delta = jdelta /
404 (((runtime->period_size * HZ) / runtime->rate)
405 + HZ/100);
406 /* move new_hw_ptr according jiffies not pos variable */
407 new_hw_ptr = old_hw_ptr;
408 hw_base = delta;
409 /* use loop to avoid checks for delta overflows */
410 /* the delta value is small or zero in most cases */
411 while (delta > 0) {
412 new_hw_ptr += runtime->period_size;
413 if (new_hw_ptr >= runtime->boundary) {
414 new_hw_ptr -= runtime->boundary;
415 crossed_boundary--;
416 }
417 delta--;
418 }
419 /* align hw_base to buffer_size */
420 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
421 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
422 (long)pos, (long)hdelta,
423 (long)runtime->period_size, jdelta,
424 ((hdelta * HZ) / runtime->rate), hw_base,
425 (unsigned long)old_hw_ptr,
426 (unsigned long)new_hw_ptr);
427 /* reset values to proper state */
428 delta = 0;
429 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
430 }
431 no_jiffies_check:
432 if (delta > runtime->period_size + runtime->period_size / 2) {
433 hw_ptr_error(substream, in_interrupt,
434 "Lost interrupts?",
435 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
436 substream->stream, (long)delta,
437 (long)new_hw_ptr,
438 (long)old_hw_ptr);
439 }
440
441 no_delta_check:
442 if (runtime->status->hw_ptr == new_hw_ptr) {
443 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
444 return 0;
445 }
446
447 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
448 runtime->silence_size > 0)
449 snd_pcm_playback_silence(substream, new_hw_ptr);
450
451 if (in_interrupt) {
452 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
453 if (delta < 0)
454 delta += runtime->boundary;
455 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
456 runtime->hw_ptr_interrupt += delta;
457 if (runtime->hw_ptr_interrupt >= runtime->boundary)
458 runtime->hw_ptr_interrupt -= runtime->boundary;
459 }
460 runtime->hw_ptr_base = hw_base;
461 runtime->status->hw_ptr = new_hw_ptr;
462 runtime->hw_ptr_jiffies = curr_jiffies;
463 if (crossed_boundary) {
464 snd_BUG_ON(crossed_boundary != 1);
465 runtime->hw_ptr_wrap += runtime->boundary;
466 }
467
468 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
469
470 return snd_pcm_update_state(substream, runtime);
471}
472
473/* CAUTION: call it with irq disabled */
474int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
475{
476 return snd_pcm_update_hw_ptr0(substream, 0);
477}
478
479/**
480 * snd_pcm_set_ops - set the PCM operators
481 * @pcm: the pcm instance
482 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
483 * @ops: the operator table
484 *
485 * Sets the given PCM operators to the pcm instance.
486 */
487void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
488 const struct snd_pcm_ops *ops)
489{
490 struct snd_pcm_str *stream = &pcm->streams[direction];
491 struct snd_pcm_substream *substream;
492
493 for (substream = stream->substream; substream != NULL; substream = substream->next)
494 substream->ops = ops;
495}
496EXPORT_SYMBOL(snd_pcm_set_ops);
497
498/**
499 * snd_pcm_sync - set the PCM sync id
500 * @substream: the pcm substream
501 *
502 * Sets the PCM sync identifier for the card.
503 */
504void snd_pcm_set_sync(struct snd_pcm_substream *substream)
505{
506 struct snd_pcm_runtime *runtime = substream->runtime;
507
508 runtime->sync.id32[0] = substream->pcm->card->number;
509 runtime->sync.id32[1] = -1;
510 runtime->sync.id32[2] = -1;
511 runtime->sync.id32[3] = -1;
512}
513EXPORT_SYMBOL(snd_pcm_set_sync);
514
515/*
516 * Standard ioctl routine
517 */
518
519static inline unsigned int div32(unsigned int a, unsigned int b,
520 unsigned int *r)
521{
522 if (b == 0) {
523 *r = 0;
524 return UINT_MAX;
525 }
526 *r = a % b;
527 return a / b;
528}
529
530static inline unsigned int div_down(unsigned int a, unsigned int b)
531{
532 if (b == 0)
533 return UINT_MAX;
534 return a / b;
535}
536
537static inline unsigned int div_up(unsigned int a, unsigned int b)
538{
539 unsigned int r;
540 unsigned int q;
541 if (b == 0)
542 return UINT_MAX;
543 q = div32(a, b, &r);
544 if (r)
545 ++q;
546 return q;
547}
548
549static inline unsigned int mul(unsigned int a, unsigned int b)
550{
551 if (a == 0)
552 return 0;
553 if (div_down(UINT_MAX, a) < b)
554 return UINT_MAX;
555 return a * b;
556}
557
558static inline unsigned int muldiv32(unsigned int a, unsigned int b,
559 unsigned int c, unsigned int *r)
560{
561 u_int64_t n = (u_int64_t) a * b;
562 if (c == 0) {
563 *r = 0;
564 return UINT_MAX;
565 }
566 n = div_u64_rem(n, c, r);
567 if (n >= UINT_MAX) {
568 *r = 0;
569 return UINT_MAX;
570 }
571 return n;
572}
573
574/**
575 * snd_interval_refine - refine the interval value of configurator
576 * @i: the interval value to refine
577 * @v: the interval value to refer to
578 *
579 * Refines the interval value with the reference value.
580 * The interval is changed to the range satisfying both intervals.
581 * The interval status (min, max, integer, etc.) are evaluated.
582 *
583 * Return: Positive if the value is changed, zero if it's not changed, or a
584 * negative error code.
585 */
586int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
587{
588 int changed = 0;
589 if (snd_BUG_ON(snd_interval_empty(i)))
590 return -EINVAL;
591 if (i->min < v->min) {
592 i->min = v->min;
593 i->openmin = v->openmin;
594 changed = 1;
595 } else if (i->min == v->min && !i->openmin && v->openmin) {
596 i->openmin = 1;
597 changed = 1;
598 }
599 if (i->max > v->max) {
600 i->max = v->max;
601 i->openmax = v->openmax;
602 changed = 1;
603 } else if (i->max == v->max && !i->openmax && v->openmax) {
604 i->openmax = 1;
605 changed = 1;
606 }
607 if (!i->integer && v->integer) {
608 i->integer = 1;
609 changed = 1;
610 }
611 if (i->integer) {
612 if (i->openmin) {
613 i->min++;
614 i->openmin = 0;
615 }
616 if (i->openmax) {
617 i->max--;
618 i->openmax = 0;
619 }
620 } else if (!i->openmin && !i->openmax && i->min == i->max)
621 i->integer = 1;
622 if (snd_interval_checkempty(i)) {
623 snd_interval_none(i);
624 return -EINVAL;
625 }
626 return changed;
627}
628EXPORT_SYMBOL(snd_interval_refine);
629
630static int snd_interval_refine_first(struct snd_interval *i)
631{
632 if (snd_BUG_ON(snd_interval_empty(i)))
633 return -EINVAL;
634 if (snd_interval_single(i))
635 return 0;
636 i->max = i->min;
637 i->openmax = i->openmin;
638 if (i->openmax)
639 i->max++;
640 return 1;
641}
642
643static int snd_interval_refine_last(struct snd_interval *i)
644{
645 if (snd_BUG_ON(snd_interval_empty(i)))
646 return -EINVAL;
647 if (snd_interval_single(i))
648 return 0;
649 i->min = i->max;
650 i->openmin = i->openmax;
651 if (i->openmin)
652 i->min--;
653 return 1;
654}
655
656void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
657{
658 if (a->empty || b->empty) {
659 snd_interval_none(c);
660 return;
661 }
662 c->empty = 0;
663 c->min = mul(a->min, b->min);
664 c->openmin = (a->openmin || b->openmin);
665 c->max = mul(a->max, b->max);
666 c->openmax = (a->openmax || b->openmax);
667 c->integer = (a->integer && b->integer);
668}
669
670/**
671 * snd_interval_div - refine the interval value with division
672 * @a: dividend
673 * @b: divisor
674 * @c: quotient
675 *
676 * c = a / b
677 *
678 * Returns non-zero if the value is changed, zero if not changed.
679 */
680void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
681{
682 unsigned int r;
683 if (a->empty || b->empty) {
684 snd_interval_none(c);
685 return;
686 }
687 c->empty = 0;
688 c->min = div32(a->min, b->max, &r);
689 c->openmin = (r || a->openmin || b->openmax);
690 if (b->min > 0) {
691 c->max = div32(a->max, b->min, &r);
692 if (r) {
693 c->max++;
694 c->openmax = 1;
695 } else
696 c->openmax = (a->openmax || b->openmin);
697 } else {
698 c->max = UINT_MAX;
699 c->openmax = 0;
700 }
701 c->integer = 0;
702}
703
704/**
705 * snd_interval_muldivk - refine the interval value
706 * @a: dividend 1
707 * @b: dividend 2
708 * @k: divisor (as integer)
709 * @c: result
710 *
711 * c = a * b / k
712 *
713 * Returns non-zero if the value is changed, zero if not changed.
714 */
715void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
716 unsigned int k, struct snd_interval *c)
717{
718 unsigned int r;
719 if (a->empty || b->empty) {
720 snd_interval_none(c);
721 return;
722 }
723 c->empty = 0;
724 c->min = muldiv32(a->min, b->min, k, &r);
725 c->openmin = (r || a->openmin || b->openmin);
726 c->max = muldiv32(a->max, b->max, k, &r);
727 if (r) {
728 c->max++;
729 c->openmax = 1;
730 } else
731 c->openmax = (a->openmax || b->openmax);
732 c->integer = 0;
733}
734
735/**
736 * snd_interval_mulkdiv - refine the interval value
737 * @a: dividend 1
738 * @k: dividend 2 (as integer)
739 * @b: divisor
740 * @c: result
741 *
742 * c = a * k / b
743 *
744 * Returns non-zero if the value is changed, zero if not changed.
745 */
746void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
747 const struct snd_interval *b, struct snd_interval *c)
748{
749 unsigned int r;
750 if (a->empty || b->empty) {
751 snd_interval_none(c);
752 return;
753 }
754 c->empty = 0;
755 c->min = muldiv32(a->min, k, b->max, &r);
756 c->openmin = (r || a->openmin || b->openmax);
757 if (b->min > 0) {
758 c->max = muldiv32(a->max, k, b->min, &r);
759 if (r) {
760 c->max++;
761 c->openmax = 1;
762 } else
763 c->openmax = (a->openmax || b->openmin);
764 } else {
765 c->max = UINT_MAX;
766 c->openmax = 0;
767 }
768 c->integer = 0;
769}
770
771/* ---- */
772
773
774/**
775 * snd_interval_ratnum - refine the interval value
776 * @i: interval to refine
777 * @rats_count: number of ratnum_t
778 * @rats: ratnum_t array
779 * @nump: pointer to store the resultant numerator
780 * @denp: pointer to store the resultant denominator
781 *
782 * Return: Positive if the value is changed, zero if it's not changed, or a
783 * negative error code.
784 */
785int snd_interval_ratnum(struct snd_interval *i,
786 unsigned int rats_count, const struct snd_ratnum *rats,
787 unsigned int *nump, unsigned int *denp)
788{
789 unsigned int best_num, best_den;
790 int best_diff;
791 unsigned int k;
792 struct snd_interval t;
793 int err;
794 unsigned int result_num, result_den;
795 int result_diff;
796
797 best_num = best_den = best_diff = 0;
798 for (k = 0; k < rats_count; ++k) {
799 unsigned int num = rats[k].num;
800 unsigned int den;
801 unsigned int q = i->min;
802 int diff;
803 if (q == 0)
804 q = 1;
805 den = div_up(num, q);
806 if (den < rats[k].den_min)
807 continue;
808 if (den > rats[k].den_max)
809 den = rats[k].den_max;
810 else {
811 unsigned int r;
812 r = (den - rats[k].den_min) % rats[k].den_step;
813 if (r != 0)
814 den -= r;
815 }
816 diff = num - q * den;
817 if (diff < 0)
818 diff = -diff;
819 if (best_num == 0 ||
820 diff * best_den < best_diff * den) {
821 best_diff = diff;
822 best_den = den;
823 best_num = num;
824 }
825 }
826 if (best_den == 0) {
827 i->empty = 1;
828 return -EINVAL;
829 }
830 t.min = div_down(best_num, best_den);
831 t.openmin = !!(best_num % best_den);
832
833 result_num = best_num;
834 result_diff = best_diff;
835 result_den = best_den;
836 best_num = best_den = best_diff = 0;
837 for (k = 0; k < rats_count; ++k) {
838 unsigned int num = rats[k].num;
839 unsigned int den;
840 unsigned int q = i->max;
841 int diff;
842 if (q == 0) {
843 i->empty = 1;
844 return -EINVAL;
845 }
846 den = div_down(num, q);
847 if (den > rats[k].den_max)
848 continue;
849 if (den < rats[k].den_min)
850 den = rats[k].den_min;
851 else {
852 unsigned int r;
853 r = (den - rats[k].den_min) % rats[k].den_step;
854 if (r != 0)
855 den += rats[k].den_step - r;
856 }
857 diff = q * den - num;
858 if (diff < 0)
859 diff = -diff;
860 if (best_num == 0 ||
861 diff * best_den < best_diff * den) {
862 best_diff = diff;
863 best_den = den;
864 best_num = num;
865 }
866 }
867 if (best_den == 0) {
868 i->empty = 1;
869 return -EINVAL;
870 }
871 t.max = div_up(best_num, best_den);
872 t.openmax = !!(best_num % best_den);
873 t.integer = 0;
874 err = snd_interval_refine(i, &t);
875 if (err < 0)
876 return err;
877
878 if (snd_interval_single(i)) {
879 if (best_diff * result_den < result_diff * best_den) {
880 result_num = best_num;
881 result_den = best_den;
882 }
883 if (nump)
884 *nump = result_num;
885 if (denp)
886 *denp = result_den;
887 }
888 return err;
889}
890EXPORT_SYMBOL(snd_interval_ratnum);
891
892/**
893 * snd_interval_ratden - refine the interval value
894 * @i: interval to refine
895 * @rats_count: number of struct ratden
896 * @rats: struct ratden array
897 * @nump: pointer to store the resultant numerator
898 * @denp: pointer to store the resultant denominator
899 *
900 * Return: Positive if the value is changed, zero if it's not changed, or a
901 * negative error code.
902 */
903static int snd_interval_ratden(struct snd_interval *i,
904 unsigned int rats_count,
905 const struct snd_ratden *rats,
906 unsigned int *nump, unsigned int *denp)
907{
908 unsigned int best_num, best_diff, best_den;
909 unsigned int k;
910 struct snd_interval t;
911 int err;
912
913 best_num = best_den = best_diff = 0;
914 for (k = 0; k < rats_count; ++k) {
915 unsigned int num;
916 unsigned int den = rats[k].den;
917 unsigned int q = i->min;
918 int diff;
919 num = mul(q, den);
920 if (num > rats[k].num_max)
921 continue;
922 if (num < rats[k].num_min)
923 num = rats[k].num_max;
924 else {
925 unsigned int r;
926 r = (num - rats[k].num_min) % rats[k].num_step;
927 if (r != 0)
928 num += rats[k].num_step - r;
929 }
930 diff = num - q * den;
931 if (best_num == 0 ||
932 diff * best_den < best_diff * den) {
933 best_diff = diff;
934 best_den = den;
935 best_num = num;
936 }
937 }
938 if (best_den == 0) {
939 i->empty = 1;
940 return -EINVAL;
941 }
942 t.min = div_down(best_num, best_den);
943 t.openmin = !!(best_num % best_den);
944
945 best_num = best_den = best_diff = 0;
946 for (k = 0; k < rats_count; ++k) {
947 unsigned int num;
948 unsigned int den = rats[k].den;
949 unsigned int q = i->max;
950 int diff;
951 num = mul(q, den);
952 if (num < rats[k].num_min)
953 continue;
954 if (num > rats[k].num_max)
955 num = rats[k].num_max;
956 else {
957 unsigned int r;
958 r = (num - rats[k].num_min) % rats[k].num_step;
959 if (r != 0)
960 num -= r;
961 }
962 diff = q * den - num;
963 if (best_num == 0 ||
964 diff * best_den < best_diff * den) {
965 best_diff = diff;
966 best_den = den;
967 best_num = num;
968 }
969 }
970 if (best_den == 0) {
971 i->empty = 1;
972 return -EINVAL;
973 }
974 t.max = div_up(best_num, best_den);
975 t.openmax = !!(best_num % best_den);
976 t.integer = 0;
977 err = snd_interval_refine(i, &t);
978 if (err < 0)
979 return err;
980
981 if (snd_interval_single(i)) {
982 if (nump)
983 *nump = best_num;
984 if (denp)
985 *denp = best_den;
986 }
987 return err;
988}
989
990/**
991 * snd_interval_list - refine the interval value from the list
992 * @i: the interval value to refine
993 * @count: the number of elements in the list
994 * @list: the value list
995 * @mask: the bit-mask to evaluate
996 *
997 * Refines the interval value from the list.
998 * When mask is non-zero, only the elements corresponding to bit 1 are
999 * evaluated.
1000 *
1001 * Return: Positive if the value is changed, zero if it's not changed, or a
1002 * negative error code.
1003 */
1004int snd_interval_list(struct snd_interval *i, unsigned int count,
1005 const unsigned int *list, unsigned int mask)
1006{
1007 unsigned int k;
1008 struct snd_interval list_range;
1009
1010 if (!count) {
1011 i->empty = 1;
1012 return -EINVAL;
1013 }
1014 snd_interval_any(&list_range);
1015 list_range.min = UINT_MAX;
1016 list_range.max = 0;
1017 for (k = 0; k < count; k++) {
1018 if (mask && !(mask & (1 << k)))
1019 continue;
1020 if (!snd_interval_test(i, list[k]))
1021 continue;
1022 list_range.min = min(list_range.min, list[k]);
1023 list_range.max = max(list_range.max, list[k]);
1024 }
1025 return snd_interval_refine(i, &list_range);
1026}
1027EXPORT_SYMBOL(snd_interval_list);
1028
1029/**
1030 * snd_interval_ranges - refine the interval value from the list of ranges
1031 * @i: the interval value to refine
1032 * @count: the number of elements in the list of ranges
1033 * @ranges: the ranges list
1034 * @mask: the bit-mask to evaluate
1035 *
1036 * Refines the interval value from the list of ranges.
1037 * When mask is non-zero, only the elements corresponding to bit 1 are
1038 * evaluated.
1039 *
1040 * Return: Positive if the value is changed, zero if it's not changed, or a
1041 * negative error code.
1042 */
1043int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1044 const struct snd_interval *ranges, unsigned int mask)
1045{
1046 unsigned int k;
1047 struct snd_interval range_union;
1048 struct snd_interval range;
1049
1050 if (!count) {
1051 snd_interval_none(i);
1052 return -EINVAL;
1053 }
1054 snd_interval_any(&range_union);
1055 range_union.min = UINT_MAX;
1056 range_union.max = 0;
1057 for (k = 0; k < count; k++) {
1058 if (mask && !(mask & (1 << k)))
1059 continue;
1060 snd_interval_copy(&range, &ranges[k]);
1061 if (snd_interval_refine(&range, i) < 0)
1062 continue;
1063 if (snd_interval_empty(&range))
1064 continue;
1065
1066 if (range.min < range_union.min) {
1067 range_union.min = range.min;
1068 range_union.openmin = 1;
1069 }
1070 if (range.min == range_union.min && !range.openmin)
1071 range_union.openmin = 0;
1072 if (range.max > range_union.max) {
1073 range_union.max = range.max;
1074 range_union.openmax = 1;
1075 }
1076 if (range.max == range_union.max && !range.openmax)
1077 range_union.openmax = 0;
1078 }
1079 return snd_interval_refine(i, &range_union);
1080}
1081EXPORT_SYMBOL(snd_interval_ranges);
1082
1083static int snd_interval_step(struct snd_interval *i, unsigned int step)
1084{
1085 unsigned int n;
1086 int changed = 0;
1087 n = i->min % step;
1088 if (n != 0 || i->openmin) {
1089 i->min += step - n;
1090 i->openmin = 0;
1091 changed = 1;
1092 }
1093 n = i->max % step;
1094 if (n != 0 || i->openmax) {
1095 i->max -= n;
1096 i->openmax = 0;
1097 changed = 1;
1098 }
1099 if (snd_interval_checkempty(i)) {
1100 i->empty = 1;
1101 return -EINVAL;
1102 }
1103 return changed;
1104}
1105
1106/* Info constraints helpers */
1107
1108/**
1109 * snd_pcm_hw_rule_add - add the hw-constraint rule
1110 * @runtime: the pcm runtime instance
1111 * @cond: condition bits
1112 * @var: the variable to evaluate
1113 * @func: the evaluation function
1114 * @private: the private data pointer passed to function
1115 * @dep: the dependent variables
1116 *
1117 * Return: Zero if successful, or a negative error code on failure.
1118 */
1119int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1120 int var,
1121 snd_pcm_hw_rule_func_t func, void *private,
1122 int dep, ...)
1123{
1124 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1125 struct snd_pcm_hw_rule *c;
1126 unsigned int k;
1127 va_list args;
1128 va_start(args, dep);
1129 if (constrs->rules_num >= constrs->rules_all) {
1130 struct snd_pcm_hw_rule *new;
1131 unsigned int new_rules = constrs->rules_all + 16;
1132 new = krealloc(constrs->rules, new_rules * sizeof(*c),
1133 GFP_KERNEL);
1134 if (!new) {
1135 va_end(args);
1136 return -ENOMEM;
1137 }
1138 constrs->rules = new;
1139 constrs->rules_all = new_rules;
1140 }
1141 c = &constrs->rules[constrs->rules_num];
1142 c->cond = cond;
1143 c->func = func;
1144 c->var = var;
1145 c->private = private;
1146 k = 0;
1147 while (1) {
1148 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1149 va_end(args);
1150 return -EINVAL;
1151 }
1152 c->deps[k++] = dep;
1153 if (dep < 0)
1154 break;
1155 dep = va_arg(args, int);
1156 }
1157 constrs->rules_num++;
1158 va_end(args);
1159 return 0;
1160}
1161EXPORT_SYMBOL(snd_pcm_hw_rule_add);
1162
1163/**
1164 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1165 * @runtime: PCM runtime instance
1166 * @var: hw_params variable to apply the mask
1167 * @mask: the bitmap mask
1168 *
1169 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1170 *
1171 * Return: Zero if successful, or a negative error code on failure.
1172 */
1173int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1174 u_int32_t mask)
1175{
1176 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1177 struct snd_mask *maskp = constrs_mask(constrs, var);
1178 *maskp->bits &= mask;
1179 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1180 if (*maskp->bits == 0)
1181 return -EINVAL;
1182 return 0;
1183}
1184
1185/**
1186 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1187 * @runtime: PCM runtime instance
1188 * @var: hw_params variable to apply the mask
1189 * @mask: the 64bit bitmap mask
1190 *
1191 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1192 *
1193 * Return: Zero if successful, or a negative error code on failure.
1194 */
1195int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1196 u_int64_t mask)
1197{
1198 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1199 struct snd_mask *maskp = constrs_mask(constrs, var);
1200 maskp->bits[0] &= (u_int32_t)mask;
1201 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1202 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1203 if (! maskp->bits[0] && ! maskp->bits[1])
1204 return -EINVAL;
1205 return 0;
1206}
1207EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1208
1209/**
1210 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the integer constraint
1213 *
1214 * Apply the constraint of integer to an interval parameter.
1215 *
1216 * Return: Positive if the value is changed, zero if it's not changed, or a
1217 * negative error code.
1218 */
1219int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1220{
1221 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1222 return snd_interval_setinteger(constrs_interval(constrs, var));
1223}
1224EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1225
1226/**
1227 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1228 * @runtime: PCM runtime instance
1229 * @var: hw_params variable to apply the range
1230 * @min: the minimal value
1231 * @max: the maximal value
1232 *
1233 * Apply the min/max range constraint to an interval parameter.
1234 *
1235 * Return: Positive if the value is changed, zero if it's not changed, or a
1236 * negative error code.
1237 */
1238int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1239 unsigned int min, unsigned int max)
1240{
1241 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1242 struct snd_interval t;
1243 t.min = min;
1244 t.max = max;
1245 t.openmin = t.openmax = 0;
1246 t.integer = 0;
1247 return snd_interval_refine(constrs_interval(constrs, var), &t);
1248}
1249EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
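
/*
 * Example (illustrative sketch, not part of the original file): restricting
 * the buffer size to a range that a hypothetical DMA engine can handle,
 * again from a driver's .open callback:
 *
 *	err = snd_pcm_hw_constraint_minmax(substream->runtime,
 *					   SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *					   4096, 64 * 1024);
 *	if (err < 0)
 *		return err;
 */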
1250
1251static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1252 struct snd_pcm_hw_rule *rule)
1253{
1254 struct snd_pcm_hw_constraint_list *list = rule->private;
1255 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1256}
1257
1258
1259/**
1260 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1261 * @runtime: PCM runtime instance
1262 * @cond: condition bits
1263 * @var: hw_params variable to apply the list constraint
1264 * @l: list
1265 *
1266 * Apply the list of constraints to an interval parameter.
1267 *
1268 * Return: Zero if successful, or a negative error code on failure.
1269 */
1270int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1271 unsigned int cond,
1272 snd_pcm_hw_param_t var,
1273 const struct snd_pcm_hw_constraint_list *l)
1274{
1275 return snd_pcm_hw_rule_add(runtime, cond, var,
1276 snd_pcm_hw_rule_list, (void *)l,
1277 var, -1);
1278}
1279EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
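
/*
 * Example (illustrative sketch, not part of the original file): hardware that
 * supports only a discrete set of rates can register a list constraint.  The
 * table names are hypothetical:
 *
 *	static const unsigned int my_rates[] = { 8000, 16000, 44100, 48000 };
 *	static const struct snd_pcm_hw_constraint_list my_rate_constraints = {
 *		.count = ARRAY_SIZE(my_rates),
 *		.list = my_rates,
 *		.mask = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &my_rate_constraints);
 *	if (err < 0)
 *		return err;
 */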
1280
1281static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1282 struct snd_pcm_hw_rule *rule)
1283{
1284 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1285 return snd_interval_ranges(hw_param_interval(params, rule->var),
1286 r->count, r->ranges, r->mask);
1287}
1288
1289
1290/**
1291 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1292 * @runtime: PCM runtime instance
1293 * @cond: condition bits
1294 * @var: hw_params variable to apply the list of range constraints
1295 * @r: ranges
1296 *
1297 * Apply the list of range constraints to an interval parameter.
1298 *
1299 * Return: Zero if successful, or a negative error code on failure.
1300 */
1301int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1302 unsigned int cond,
1303 snd_pcm_hw_param_t var,
1304 const struct snd_pcm_hw_constraint_ranges *r)
1305{
1306 return snd_pcm_hw_rule_add(runtime, cond, var,
1307 snd_pcm_hw_rule_ranges, (void *)r,
1308 var, -1);
1309}
1310EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1311
1312static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1313 struct snd_pcm_hw_rule *rule)
1314{
1315 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1316 unsigned int num = 0, den = 0;
1317 int err;
1318 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1319 r->nrats, r->rats, &num, &den);
1320 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1321 params->rate_num = num;
1322 params->rate_den = den;
1323 }
1324 return err;
1325}
1326
1327/**
1328 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1329 * @runtime: PCM runtime instance
1330 * @cond: condition bits
1331 * @var: hw_params variable to apply the ratnums constraint
1332 * @r: struct snd_ratnums constraints
1333 *
1334 * Return: Zero if successful, or a negative error code on failure.
1335 */
1336int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1337 unsigned int cond,
1338 snd_pcm_hw_param_t var,
1339 const struct snd_pcm_hw_constraint_ratnums *r)
1340{
1341 return snd_pcm_hw_rule_add(runtime, cond, var,
1342 snd_pcm_hw_rule_ratnums, (void *)r,
1343 var, -1);
1344}
1345EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1346
1347static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1348 struct snd_pcm_hw_rule *rule)
1349{
1350 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1351 unsigned int num = 0, den = 0;
1352 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1353 r->nrats, r->rats, &num, &den);
1354 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1355 params->rate_num = num;
1356 params->rate_den = den;
1357 }
1358 return err;
1359}
1360
1361/**
1362 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1363 * @runtime: PCM runtime instance
1364 * @cond: condition bits
1365 * @var: hw_params variable to apply the ratdens constraint
1366 * @r: struct snd_ratdens constraints
1367 *
1368 * Return: Zero if successful, or a negative error code on failure.
1369 */
1370int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1371 unsigned int cond,
1372 snd_pcm_hw_param_t var,
1373 const struct snd_pcm_hw_constraint_ratdens *r)
1374{
1375 return snd_pcm_hw_rule_add(runtime, cond, var,
1376 snd_pcm_hw_rule_ratdens, (void *)r,
1377 var, -1);
1378}
1379EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1380
1381static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1382 struct snd_pcm_hw_rule *rule)
1383{
1384 unsigned int l = (unsigned long) rule->private;
1385 int width = l & 0xffff;
1386 unsigned int msbits = l >> 16;
1387 const struct snd_interval *i =
1388 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1389
1390 if (!snd_interval_single(i))
1391 return 0;
1392
1393 if ((snd_interval_value(i) == width) ||
1394 (width == 0 && snd_interval_value(i) > msbits))
1395 params->msbits = min_not_zero(params->msbits, msbits);
1396
1397 return 0;
1398}
1399
1400/**
1401 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1402 * @runtime: PCM runtime instance
1403 * @cond: condition bits
1404 * @width: sample bits width
1405 * @msbits: msbits width
1406 *
1407 * This constraint will set the number of most significant bits (msbits) if a
1408 * sample format with the specified width has been selected. If width is set to 0,
1409 * the msbits will be set for any sample format with a width larger than the
1410 * specified msbits.
1411 *
1412 * Return: Zero if successful, or a negative error code on failure.
1413 */
1414int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1415 unsigned int cond,
1416 unsigned int width,
1417 unsigned int msbits)
1418{
1419 unsigned long l = (msbits << 16) | width;
1420 return snd_pcm_hw_rule_add(runtime, cond, -1,
1421 snd_pcm_hw_rule_msbits,
1422 (void*) l,
1423 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1424}
1425EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
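
/*
 * Example (illustrative sketch, not part of the original file): a codec that
 * transfers 32-bit samples but resolves only 24 valid bits could declare this
 * from its .open callback:
 *
 *	err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */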
1426
1427static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1428 struct snd_pcm_hw_rule *rule)
1429{
1430 unsigned long step = (unsigned long) rule->private;
1431 return snd_interval_step(hw_param_interval(params, rule->var), step);
1432}
1433
1434/**
1435 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1436 * @runtime: PCM runtime instance
1437 * @cond: condition bits
1438 * @var: hw_params variable to apply the step constraint
1439 * @step: step size
1440 *
1441 * Return: Zero if successful, or a negative error code on failure.
1442 */
1443int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1444 unsigned int cond,
1445 snd_pcm_hw_param_t var,
1446 unsigned long step)
1447{
1448 return snd_pcm_hw_rule_add(runtime, cond, var,
1449 snd_pcm_hw_rule_step, (void *) step,
1450 var, -1);
1451}
1452EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
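
/*
 * Example (illustrative sketch, not part of the original file): forcing the
 * period size to a multiple of 64 frames, e.g. for a hypothetical DMA burst
 * requirement:
 *
 *	err = snd_pcm_hw_constraint_step(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 64);
 *	if (err < 0)
 *		return err;
 */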
1453
1454static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1455{
1456 static unsigned int pow2_sizes[] = {
1457 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1458 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1459 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1460 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1461 };
1462 return snd_interval_list(hw_param_interval(params, rule->var),
1463 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1464}
1465
1466/**
1467 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1468 * @runtime: PCM runtime instance
1469 * @cond: condition bits
1470 * @var: hw_params variable to apply the power-of-2 constraint
1471 *
1472 * Return: Zero if successful, or a negative error code on failure.
1473 */
1474int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1475 unsigned int cond,
1476 snd_pcm_hw_param_t var)
1477{
1478 return snd_pcm_hw_rule_add(runtime, cond, var,
1479 snd_pcm_hw_rule_pow2, NULL,
1480 var, -1);
1481}
1482EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
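
/*
 * Example (illustrative sketch, not part of the original file): some
 * controllers require power-of-two buffer sizes:
 *
 *	err = snd_pcm_hw_constraint_pow2(substream->runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 *	if (err < 0)
 *		return err;
 */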
1483
1484static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1485 struct snd_pcm_hw_rule *rule)
1486{
1487 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1488 struct snd_interval *rate;
1489
1490 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1491 return snd_interval_list(rate, 1, &base_rate, 0);
1492}
1493
1494/**
1495 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1496 * @runtime: PCM runtime instance
1497 * @base_rate: the rate at which the hardware does not resample
1498 *
1499 * Return: Zero if successful, or a negative error code on failure.
1500 */
1501int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1502 unsigned int base_rate)
1503{
1504 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1505 SNDRV_PCM_HW_PARAM_RATE,
1506 snd_pcm_hw_rule_noresample_func,
1507 (void *)(uintptr_t)base_rate,
1508 SNDRV_PCM_HW_PARAM_RATE, -1);
1509}
1510EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
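
/*
 * Example (illustrative sketch, not part of the original file): a driver whose
 * hardware natively runs at 48 kHz can let applications opt out of hardware
 * resampling via the NORESAMPLE flag:
 *
 *	err = snd_pcm_hw_rule_noresample(substream->runtime, 48000);
 *	if (err < 0)
 *		return err;
 */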
1511
1512static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1513 snd_pcm_hw_param_t var)
1514{
1515 if (hw_is_mask(var)) {
1516 snd_mask_any(hw_param_mask(params, var));
1517 params->cmask |= 1 << var;
1518 params->rmask |= 1 << var;
1519 return;
1520 }
1521 if (hw_is_interval(var)) {
1522 snd_interval_any(hw_param_interval(params, var));
1523 params->cmask |= 1 << var;
1524 params->rmask |= 1 << var;
1525 return;
1526 }
1527 snd_BUG();
1528}
1529
1530void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1531{
1532 unsigned int k;
1533 memset(params, 0, sizeof(*params));
1534 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1535 _snd_pcm_hw_param_any(params, k);
1536 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1537 _snd_pcm_hw_param_any(params, k);
1538 params->info = ~0U;
1539}
1540EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1541
1542/**
1543 * snd_pcm_hw_param_value - return @params field @var value
1544 * @params: the hw_params instance
1545 * @var: parameter to retrieve
1546 * @dir: pointer to the direction (-1,0,1) or %NULL
1547 *
1548 * Return: The value for field @var if it's fixed in configuration space
1549 * defined by @params. -%EINVAL otherwise.
1550 */
1551int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1552 snd_pcm_hw_param_t var, int *dir)
1553{
1554 if (hw_is_mask(var)) {
1555 const struct snd_mask *mask = hw_param_mask_c(params, var);
1556 if (!snd_mask_single(mask))
1557 return -EINVAL;
1558 if (dir)
1559 *dir = 0;
1560 return snd_mask_value(mask);
1561 }
1562 if (hw_is_interval(var)) {
1563 const struct snd_interval *i = hw_param_interval_c(params, var);
1564 if (!snd_interval_single(i))
1565 return -EINVAL;
1566 if (dir)
1567 *dir = i->openmin;
1568 return snd_interval_value(i);
1569 }
1570 return -EINVAL;
1571}
1572EXPORT_SYMBOL(snd_pcm_hw_param_value);
1573
1574void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1575 snd_pcm_hw_param_t var)
1576{
1577 if (hw_is_mask(var)) {
1578 snd_mask_none(hw_param_mask(params, var));
1579 params->cmask |= 1 << var;
1580 params->rmask |= 1 << var;
1581 } else if (hw_is_interval(var)) {
1582 snd_interval_none(hw_param_interval(params, var));
1583 params->cmask |= 1 << var;
1584 params->rmask |= 1 << var;
1585 } else {
1586 snd_BUG();
1587 }
1588}
1589EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1590
1591static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1592 snd_pcm_hw_param_t var)
1593{
1594 int changed;
1595 if (hw_is_mask(var))
1596 changed = snd_mask_refine_first(hw_param_mask(params, var));
1597 else if (hw_is_interval(var))
1598 changed = snd_interval_refine_first(hw_param_interval(params, var));
1599 else
1600 return -EINVAL;
1601 if (changed > 0) {
1602 params->cmask |= 1 << var;
1603 params->rmask |= 1 << var;
1604 }
1605 return changed;
1606}
1607
1608
1609/**
1610 * snd_pcm_hw_param_first - refine config space and return minimum value
1611 * @pcm: PCM instance
1612 * @params: the hw_params instance
1613 * @var: parameter to retrieve
1614 * @dir: pointer to the direction (-1,0,1) or %NULL
1615 *
1616 * Inside configuration space defined by @params remove from @var all
1617 * values > minimum. Reduce configuration space accordingly.
1618 *
1619 * Return: The minimum, or a negative error code on failure.
1620 */
1621int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1622 struct snd_pcm_hw_params *params,
1623 snd_pcm_hw_param_t var, int *dir)
1624{
1625 int changed = _snd_pcm_hw_param_first(params, var);
1626 if (changed < 0)
1627 return changed;
1628 if (params->rmask) {
1629 int err = snd_pcm_hw_refine(pcm, params);
1630 if (err < 0)
1631 return err;
1632 }
1633 return snd_pcm_hw_param_value(params, var, dir);
1634}
1635EXPORT_SYMBOL(snd_pcm_hw_param_first);
1636
1637static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1638 snd_pcm_hw_param_t var)
1639{
1640 int changed;
1641 if (hw_is_mask(var))
1642 changed = snd_mask_refine_last(hw_param_mask(params, var));
1643 else if (hw_is_interval(var))
1644 changed = snd_interval_refine_last(hw_param_interval(params, var));
1645 else
1646 return -EINVAL;
1647 if (changed > 0) {
1648 params->cmask |= 1 << var;
1649 params->rmask |= 1 << var;
1650 }
1651 return changed;
1652}
1653
1654
1655/**
1656 * snd_pcm_hw_param_last - refine config space and return maximum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1661 *
1662 * Inside configuration space defined by @params remove from @var all
1663 * values < maximum. Reduce configuration space accordingly.
1664 *
1665 * Return: The maximum, or a negative error code on failure.
1666 */
1667int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1668 struct snd_pcm_hw_params *params,
1669 snd_pcm_hw_param_t var, int *dir)
1670{
1671 int changed = _snd_pcm_hw_param_last(params, var);
1672 if (changed < 0)
1673 return changed;
1674 if (params->rmask) {
1675 int err = snd_pcm_hw_refine(pcm, params);
1676 if (err < 0)
1677 return err;
1678 }
1679 return snd_pcm_hw_param_value(params, var, dir);
1680}
1681EXPORT_SYMBOL(snd_pcm_hw_param_last);
1682
1683static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1684 void *arg)
1685{
1686 struct snd_pcm_runtime *runtime = substream->runtime;
1687 unsigned long flags;
1688 snd_pcm_stream_lock_irqsave(substream, flags);
1689 if (snd_pcm_running(substream) &&
1690 snd_pcm_update_hw_ptr(substream) >= 0)
1691 runtime->status->hw_ptr %= runtime->buffer_size;
1692 else {
1693 runtime->status->hw_ptr = 0;
1694 runtime->hw_ptr_wrap = 0;
1695 }
1696 snd_pcm_stream_unlock_irqrestore(substream, flags);
1697 return 0;
1698}
1699
1700static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1701 void *arg)
1702{
1703 struct snd_pcm_channel_info *info = arg;
1704 struct snd_pcm_runtime *runtime = substream->runtime;
1705 int width;
1706 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1707 info->offset = -1;
1708 return 0;
1709 }
1710 width = snd_pcm_format_physical_width(runtime->format);
1711 if (width < 0)
1712 return width;
1713 info->offset = 0;
1714 switch (runtime->access) {
1715 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1716 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1717 info->first = info->channel * width;
1718 info->step = runtime->channels * width;
1719 break;
1720 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1721 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1722 {
1723 size_t size = runtime->dma_bytes / runtime->channels;
1724 info->first = info->channel * size * 8;
1725 info->step = width;
1726 break;
1727 }
1728 default:
1729 snd_BUG();
1730 break;
1731 }
1732 return 0;
1733}
1734
1735static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1736 void *arg)
1737{
1738 struct snd_pcm_hw_params *params = arg;
1739 snd_pcm_format_t format;
1740 int channels;
1741 ssize_t frame_size;
1742
1743 params->fifo_size = substream->runtime->hw.fifo_size;
1744 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1745 format = params_format(params);
1746 channels = params_channels(params);
1747 frame_size = snd_pcm_format_size(format, channels);
1748 if (frame_size > 0)
1749 params->fifo_size /= (unsigned)frame_size;
1750 }
1751 return 0;
1752}
1753
1754/**
1755 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1756 * @substream: the pcm substream instance
1757 * @cmd: ioctl command
1758 * @arg: ioctl argument
1759 *
1760 * Processes the generic ioctl commands for PCM.
1761 * Can be passed as the ioctl callback for PCM ops.
1762 *
1763 * Return: Zero if successful, or a negative error code on failure.
1764 */
1765int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1766 unsigned int cmd, void *arg)
1767{
1768 switch (cmd) {
1769 case SNDRV_PCM_IOCTL1_RESET:
1770 return snd_pcm_lib_ioctl_reset(substream, arg);
1771 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1772 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1773 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1774 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1775 }
1776 return -ENXIO;
1777}
1778EXPORT_SYMBOL(snd_pcm_lib_ioctl);
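
/*
 * Example (illustrative sketch, not part of the original file): drivers
 * without special ioctl needs can plug this helper directly into their
 * snd_pcm_ops; the other callbacks shown are hypothetical driver functions:
 *
 *	static const struct snd_pcm_ops my_pcm_ops = {
 *		.open      = my_pcm_open,
 *		.close     = my_pcm_close,
 *		.ioctl     = snd_pcm_lib_ioctl,
 *		.hw_params = my_pcm_hw_params,
 *		.hw_free   = my_pcm_hw_free,
 *		.prepare   = my_pcm_prepare,
 *		.trigger   = my_pcm_trigger,
 *		.pointer   = my_pcm_pointer,
 *	};
 */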
1779
1780/**
1781 * snd_pcm_period_elapsed - update the pcm status for the next period
1782 * @substream: the pcm substream instance
1783 *
1784 * This function is called from the interrupt handler when the
1785 * PCM has processed the period size. It will update the current
1786 * pointer, wake up sleepers, etc.
1787 *
1788 * Even if more than one period has elapsed since the last call, you
1789 * need to call this only once.
1790 */
1791void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1792{
1793 struct snd_pcm_runtime *runtime;
1794 unsigned long flags;
1795
1796 if (PCM_RUNTIME_CHECK(substream))
1797 return;
1798 runtime = substream->runtime;
1799
1800 snd_pcm_stream_lock_irqsave(substream, flags);
1801 if (!snd_pcm_running(substream) ||
1802 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1803 goto _end;
1804
1805#ifdef CONFIG_SND_PCM_TIMER
1806 if (substream->timer_running)
1807 snd_timer_interrupt(substream->timer, 1);
1808#endif
1809 _end:
1810 kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1811 snd_pcm_stream_unlock_irqrestore(substream, flags);
1812}
1813EXPORT_SYMBOL(snd_pcm_period_elapsed);
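
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * interrupt handler acknowledges the hardware and then notifies the PCM core.
 * The my_chip structure and my_chip_* helpers are hypothetical; the handler
 * assumes <linux/interrupt.h>:
 *
 *	static irqreturn_t my_pcm_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_chip *chip = dev_id;
 *
 *		if (!my_chip_period_irq_pending(chip))
 *			return IRQ_NONE;
 *		my_chip_ack_period_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */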
1814
1815/*
1816 * Wait until avail_min data becomes available
1817 * Returns a negative error code if any error occurs during the operation.
1818 * The available space is stored in availp. When err = 0 and avail = 0
1819 * on a capture stream, it indicates that the stream is in the DRAINING state.
1820 */
1821static int wait_for_avail(struct snd_pcm_substream *substream,
1822 snd_pcm_uframes_t *availp)
1823{
1824 struct snd_pcm_runtime *runtime = substream->runtime;
1825 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1826 wait_queue_entry_t wait;
1827 int err = 0;
1828 snd_pcm_uframes_t avail = 0;
1829 long wait_time, tout;
1830
1831 init_waitqueue_entry(&wait, current);
1832 set_current_state(TASK_INTERRUPTIBLE);
1833 add_wait_queue(&runtime->tsleep, &wait);
1834
1835 if (runtime->no_period_wakeup)
1836 wait_time = MAX_SCHEDULE_TIMEOUT;
1837 else {
1838 wait_time = 10;
1839 if (runtime->rate) {
1840 long t = runtime->period_size * 2 / runtime->rate;
1841 wait_time = max(t, wait_time);
1842 }
1843 wait_time = msecs_to_jiffies(wait_time * 1000);
1844 }
1845
1846 for (;;) {
1847 if (signal_pending(current)) {
1848 err = -ERESTARTSYS;
1849 break;
1850 }
1851
1852 /*
1853 * We need to check first whether space has already become
1854 * available (and thus the wakeup has already happened), to
1855 * close the race of missing that wakeup.
1856 * This check must happen after we have been added to the
1857 * waitqueue and the current state is set to TASK_INTERRUPTIBLE.
1858 */
1859 if (is_playback)
1860 avail = snd_pcm_playback_avail(runtime);
1861 else
1862 avail = snd_pcm_capture_avail(runtime);
1863 if (avail >= runtime->twake)
1864 break;
1865 snd_pcm_stream_unlock_irq(substream);
1866
1867 tout = schedule_timeout(wait_time);
1868
1869 snd_pcm_stream_lock_irq(substream);
1870 set_current_state(TASK_INTERRUPTIBLE);
1871 switch (runtime->status->state) {
1872 case SNDRV_PCM_STATE_SUSPENDED:
1873 err = -ESTRPIPE;
1874 goto _endloop;
1875 case SNDRV_PCM_STATE_XRUN:
1876 err = -EPIPE;
1877 goto _endloop;
1878 case SNDRV_PCM_STATE_DRAINING:
1879 if (is_playback)
1880 err = -EPIPE;
1881 else
1882 avail = 0; /* indicate draining */
1883 goto _endloop;
1884 case SNDRV_PCM_STATE_OPEN:
1885 case SNDRV_PCM_STATE_SETUP:
1886 case SNDRV_PCM_STATE_DISCONNECTED:
1887 err = -EBADFD;
1888 goto _endloop;
1889 case SNDRV_PCM_STATE_PAUSED:
1890 continue;
1891 }
1892 if (!tout) {
1893 pcm_dbg(substream->pcm,
1894 "%s write error (DMA or IRQ trouble?)\n",
1895 is_playback ? "playback" : "capture");
1896 err = -EIO;
1897 break;
1898 }
1899 }
1900 _endloop:
1901 set_current_state(TASK_RUNNING);
1902 remove_wait_queue(&runtime->tsleep, &wait);
1903 *availp = avail;
1904 return err;
1905}
1906
1907typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1908 int channel, unsigned long hwoff,
1909 void *buf, unsigned long bytes);
1910
1911typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1912 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1913
1914/* calculate the target DMA-buffer position to be written/read */
1915static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1916 int channel, unsigned long hwoff)
1917{
1918 return runtime->dma_area + hwoff +
1919 channel * (runtime->dma_bytes / runtime->channels);
1920}
1921
1922/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1923static int default_write_copy(struct snd_pcm_substream *substream,
1924 int channel, unsigned long hwoff,
1925 void *buf, unsigned long bytes)
1926{
1927 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1928 (void __user *)buf, bytes))
1929 return -EFAULT;
1930 return 0;
1931}
1932
1933/* default copy_kernel ops for write */
1934static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1935 int channel, unsigned long hwoff,
1936 void *buf, unsigned long bytes)
1937{
1938 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1939 return 0;
1940}
1941
1942/* fill silence instead of copying data; called as a transfer helper
1943 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
1944 * a NULL buffer is passed
1945 */
1946static int fill_silence(struct snd_pcm_substream *substream, int channel,
1947 unsigned long hwoff, void *buf, unsigned long bytes)
1948{
1949 struct snd_pcm_runtime *runtime = substream->runtime;
1950
1951 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1952 return 0;
1953 if (substream->ops->fill_silence)
1954 return substream->ops->fill_silence(substream, channel,
1955 hwoff, bytes);
1956
1957 snd_pcm_format_set_silence(runtime->format,
1958 get_dma_ptr(runtime, channel, hwoff),
1959 bytes_to_samples(runtime, bytes));
1960 return 0;
1961}
1962
1963/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
1964static int default_read_copy(struct snd_pcm_substream *substream,
1965 int channel, unsigned long hwoff,
1966 void *buf, unsigned long bytes)
1967{
1968 if (copy_to_user((void __user *)buf,
1969 get_dma_ptr(substream->runtime, channel, hwoff),
1970 bytes))
1971 return -EFAULT;
1972 return 0;
1973}
1974
1975/* default copy_kernel ops for read */
1976static int default_read_copy_kernel(struct snd_pcm_substream *substream,
1977 int channel, unsigned long hwoff,
1978 void *buf, unsigned long bytes)
1979{
1980 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
1981 return 0;
1982}
1983
1984/* call transfer function with the converted pointers and sizes;
1985 * for interleaved mode, it's one shot for all samples
1986 */
1987static int interleaved_copy(struct snd_pcm_substream *substream,
1988 snd_pcm_uframes_t hwoff, void *data,
1989 snd_pcm_uframes_t off,
1990 snd_pcm_uframes_t frames,
1991 pcm_transfer_f transfer)
1992{
1993 struct snd_pcm_runtime *runtime = substream->runtime;
1994
1995 /* convert to bytes */
1996 hwoff = frames_to_bytes(runtime, hwoff);
1997 off = frames_to_bytes(runtime, off);
1998 frames = frames_to_bytes(runtime, frames);
1999 return transfer(substream, 0, hwoff, data + off, frames);
2000}
2001
2002/* call transfer function with the converted pointers and sizes for each
2003 * non-interleaved channel; when the buffer is NULL, silence is filled instead of copying
2004 */
2005static int noninterleaved_copy(struct snd_pcm_substream *substream,
2006 snd_pcm_uframes_t hwoff, void *data,
2007 snd_pcm_uframes_t off,
2008 snd_pcm_uframes_t frames,
2009 pcm_transfer_f transfer)
2010{
2011 struct snd_pcm_runtime *runtime = substream->runtime;
2012 int channels = runtime->channels;
2013 void **bufs = data;
2014 int c, err;
2015
2016 /* convert to bytes; note that it's not frames_to_bytes() here.
2017 * in non-interleaved mode, we copy for each channel, thus
2018 * each copy is n_samples bytes x channels = whole frames.
2019 */
2020 off = samples_to_bytes(runtime, off);
2021 frames = samples_to_bytes(runtime, frames);
2022 hwoff = samples_to_bytes(runtime, hwoff);
2023 for (c = 0; c < channels; ++c, ++bufs) {
2024 if (!data || !*bufs)
2025 err = fill_silence(substream, c, hwoff, NULL, frames);
2026 else
2027 err = transfer(substream, c, hwoff, *bufs + off,
2028 frames);
2029 if (err < 0)
2030 return err;
2031 }
2032 return 0;
2033}
2034
2035/* fill silence on the given buffer position;
2036 * called from snd_pcm_playback_silence()
2037 */
2038static int fill_silence_frames(struct snd_pcm_substream *substream,
2039 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2040{
2041 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2042 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2043 return interleaved_copy(substream, off, NULL, 0, frames,
2044 fill_silence);
2045 else
2046 return noninterleaved_copy(substream, off, NULL, 0, frames,
2047 fill_silence);
2048}
2049
2050/* sanity-check for read/write methods */
2051static int pcm_sanity_check(struct snd_pcm_substream *substream)
2052{
2053 struct snd_pcm_runtime *runtime;
2054 if (PCM_RUNTIME_CHECK(substream))
2055 return -ENXIO;
2056 runtime = substream->runtime;
2057 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2058 return -EINVAL;
2059 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2060 return -EBADFD;
2061 return 0;
2062}
2063
2064static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2065{
2066 switch (runtime->status->state) {
2067 case SNDRV_PCM_STATE_PREPARED:
2068 case SNDRV_PCM_STATE_RUNNING:
2069 case SNDRV_PCM_STATE_PAUSED:
2070 return 0;
2071 case SNDRV_PCM_STATE_XRUN:
2072 return -EPIPE;
2073 case SNDRV_PCM_STATE_SUSPENDED:
2074 return -ESTRPIPE;
2075 default:
2076 return -EBADFD;
2077 }
2078}
2079
2080/* update to the given appl_ptr and call the ack callback if needed;
2081 * when an error is returned, roll back to the original value
2082 */
2083int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2084 snd_pcm_uframes_t appl_ptr)
2085{
2086 struct snd_pcm_runtime *runtime = substream->runtime;
2087 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2088 int ret;
2089
2090 if (old_appl_ptr == appl_ptr)
2091 return 0;
2092
2093 runtime->control->appl_ptr = appl_ptr;
2094 if (substream->ops->ack) {
2095 ret = substream->ops->ack(substream);
2096 if (ret < 0) {
2097 runtime->control->appl_ptr = old_appl_ptr;
2098 return ret;
2099 }
2100 }
2101
2102 trace_applptr(substream, old_appl_ptr, appl_ptr);
2103
2104 return 0;
2105}
2106
2107/* the common loop for read/write data */
2108snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2109 void *data, bool interleaved,
2110 snd_pcm_uframes_t size, bool in_kernel)
2111{
2112 struct snd_pcm_runtime *runtime = substream->runtime;
2113 snd_pcm_uframes_t xfer = 0;
2114 snd_pcm_uframes_t offset = 0;
2115 snd_pcm_uframes_t avail;
2116 pcm_copy_f writer;
2117 pcm_transfer_f transfer;
2118 bool nonblock;
2119 bool is_playback;
2120 int err;
2121
2122 err = pcm_sanity_check(substream);
2123 if (err < 0)
2124 return err;
2125
2126 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2127 if (interleaved) {
2128 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2129 runtime->channels > 1)
2130 return -EINVAL;
2131 writer = interleaved_copy;
2132 } else {
2133 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2134 return -EINVAL;
2135 writer = noninterleaved_copy;
2136 }
2137
2138 if (!data) {
2139 if (is_playback)
2140 transfer = fill_silence;
2141 else
2142 return -EINVAL;
2143 } else if (in_kernel) {
2144 if (substream->ops->copy_kernel)
2145 transfer = substream->ops->copy_kernel;
2146 else
2147 transfer = is_playback ?
2148 default_write_copy_kernel : default_read_copy_kernel;
2149 } else {
2150 if (substream->ops->copy_user)
2151 transfer = (pcm_transfer_f)substream->ops->copy_user;
2152 else
2153 transfer = is_playback ?
2154 default_write_copy : default_read_copy;
2155 }
2156
2157 if (size == 0)
2158 return 0;
2159
2160 nonblock = !!(substream->f_flags & O_NONBLOCK);
2161
2162 snd_pcm_stream_lock_irq(substream);
2163 err = pcm_accessible_state(runtime);
2164 if (err < 0)
2165 goto _end_unlock;
2166
2167 if (!is_playback &&
2168 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2169 size >= runtime->start_threshold) {
2170 err = snd_pcm_start(substream);
2171 if (err < 0)
2172 goto _end_unlock;
2173 }
2174
2175 runtime->twake = runtime->control->avail_min ? : 1;
2176 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2177 snd_pcm_update_hw_ptr(substream);
2178 if (is_playback)
2179 avail = snd_pcm_playback_avail(runtime);
2180 else
2181 avail = snd_pcm_capture_avail(runtime);
2182 while (size > 0) {
2183 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2184 snd_pcm_uframes_t cont;
2185 if (!avail) {
2186 if (!is_playback &&
2187 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2188 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2189 goto _end_unlock;
2190 }
2191 if (nonblock) {
2192 err = -EAGAIN;
2193 goto _end_unlock;
2194 }
2195 runtime->twake = min_t(snd_pcm_uframes_t, size,
2196 runtime->control->avail_min ? : 1);
2197 err = wait_for_avail(substream, &avail);
2198 if (err < 0)
2199 goto _end_unlock;
2200 if (!avail)
2201 continue; /* draining */
2202 }
2203 frames = size > avail ? avail : size;
2204 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2205 appl_ofs = appl_ptr % runtime->buffer_size;
2206 cont = runtime->buffer_size - appl_ofs;
2207 if (frames > cont)
2208 frames = cont;
2209 if (snd_BUG_ON(!frames)) {
2210 runtime->twake = 0;
2211 snd_pcm_stream_unlock_irq(substream);
2212 return -EINVAL;
2213 }
2214 snd_pcm_stream_unlock_irq(substream);
2215 err = writer(substream, appl_ofs, data, offset, frames,
2216 transfer);
2217 snd_pcm_stream_lock_irq(substream);
2218 if (err < 0)
2219 goto _end_unlock;
2220 err = pcm_accessible_state(runtime);
2221 if (err < 0)
2222 goto _end_unlock;
2223 appl_ptr += frames;
2224 if (appl_ptr >= runtime->boundary)
2225 appl_ptr -= runtime->boundary;
2226 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2227 if (err < 0)
2228 goto _end_unlock;
2229
2230 offset += frames;
2231 size -= frames;
2232 xfer += frames;
2233 avail -= frames;
2234 if (is_playback &&
2235 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2236 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2237 err = snd_pcm_start(substream);
2238 if (err < 0)
2239 goto _end_unlock;
2240 }
2241 }
2242 _end_unlock:
2243 runtime->twake = 0;
2244 if (xfer > 0 && err >= 0)
2245 snd_pcm_update_state(substream, runtime);
2246 snd_pcm_stream_unlock_irq(substream);
2247 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2248}
2249EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2250
2251/*
2252 * standard channel mapping helpers
2253 */
2254
2255/* default channel maps for multi-channel playbacks, up to 8 channels */
2256const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2257 { .channels = 1,
2258 .map = { SNDRV_CHMAP_MONO } },
2259 { .channels = 2,
2260 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2261 { .channels = 4,
2262 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2263 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2264 { .channels = 6,
2265 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2266 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2267 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2268 { .channels = 8,
2269 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2270 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2271 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2272 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2273 { }
2274};
2275EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2276
2277/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2278const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2279 { .channels = 1,
2280 .map = { SNDRV_CHMAP_MONO } },
2281 { .channels = 2,
2282 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2283 { .channels = 4,
2284 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2285 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2286 { .channels = 6,
2287 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2288 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2289 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2290 { .channels = 8,
2291 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2292 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2293 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2294 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2295 { }
2296};
2297EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2298
2299static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2300{
2301 if (ch > info->max_channels)
2302 return false;
2303 return !info->channel_mask || (info->channel_mask & (1U << ch));
2304}
2305
2306static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2307 struct snd_ctl_elem_info *uinfo)
2308{
2309 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2310
2311 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2313 uinfo->count = info->max_channels;
2314 uinfo->value.integer.min = 0;
2315 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2316 return 0;
2317}
2318
2319/* get callback for channel map ctl element
2320 * stores the channel positions of the first map entry matching the current channel count
2321 */
2322static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2323 struct snd_ctl_elem_value *ucontrol)
2324{
2325 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2326 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2327 struct snd_pcm_substream *substream;
2328 const struct snd_pcm_chmap_elem *map;
2329
2330 if (!info->chmap)
2331 return -EINVAL;
2332 substream = snd_pcm_chmap_substream(info, idx);
2333 if (!substream)
2334 return -ENODEV;
2335 memset(ucontrol->value.integer.value, 0,
2336 sizeof(ucontrol->value.integer.value));
2337 if (!substream->runtime)
2338 return 0; /* no channels set */
2339 for (map = info->chmap; map->channels; map++) {
2340 int i;
2341 if (map->channels == substream->runtime->channels &&
2342 valid_chmap_channels(info, map->channels)) {
2343 for (i = 0; i < map->channels; i++)
2344 ucontrol->value.integer.value[i] = map->map[i];
2345 return 0;
2346 }
2347 }
2348 return -EINVAL;
2349}
2350
2351/* tlv callback for channel map ctl element
2352 * expands the pre-defined channel maps in the form of a TLV container
2353 */
2354static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2355 unsigned int size, unsigned int __user *tlv)
2356{
2357 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2358 const struct snd_pcm_chmap_elem *map;
2359 unsigned int __user *dst;
2360 int c, count = 0;
2361
2362 if (!info->chmap)
2363 return -EINVAL;
2364 if (size < 8)
2365 return -ENOMEM;
2366 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2367 return -EFAULT;
2368 size -= 8;
2369 dst = tlv + 2;
2370 for (map = info->chmap; map->channels; map++) {
2371 int chs_bytes = map->channels * 4;
2372 if (!valid_chmap_channels(info, map->channels))
2373 continue;
2374 if (size < 8)
2375 return -ENOMEM;
2376 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2377 put_user(chs_bytes, dst + 1))
2378 return -EFAULT;
2379 dst += 2;
2380 size -= 8;
2381 count += 8;
2382 if (size < chs_bytes)
2383 return -ENOMEM;
2384 size -= chs_bytes;
2385 count += chs_bytes;
2386 for (c = 0; c < map->channels; c++) {
2387 if (put_user(map->map[c], dst))
2388 return -EFAULT;
2389 dst++;
2390 }
2391 }
2392 if (put_user(count, tlv + 1))
2393 return -EFAULT;
2394 return 0;
2395}
2396
2397static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2398{
2399 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2400 info->pcm->streams[info->stream].chmap_kctl = NULL;
2401 kfree(info);
2402}
2403
2404/**
2405 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2406 * @pcm: the assigned PCM instance
2407 * @stream: stream direction
2408 * @chmap: channel map elements (for query)
2409 * @max_channels: the max number of channels for the stream
2410 * @private_value: the value passed to each kcontrol's private_value field
2411 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2412 *
2413 * Create channel-mapping control elements assigned to the given PCM stream(s).
2414 * Return: Zero if successful, or a negative error value.
2415 */
2416int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2417 const struct snd_pcm_chmap_elem *chmap,
2418 int max_channels,
2419 unsigned long private_value,
2420 struct snd_pcm_chmap **info_ret)
2421{
2422 struct snd_pcm_chmap *info;
2423 struct snd_kcontrol_new knew = {
2424 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2425 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2426 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2427 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2428 .info = pcm_chmap_ctl_info,
2429 .get = pcm_chmap_ctl_get,
2430 .tlv.c = pcm_chmap_ctl_tlv,
2431 };
2432 int err;
2433
2434 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2435 return -EBUSY;
2436 info = kzalloc(sizeof(*info), GFP_KERNEL);
2437 if (!info)
2438 return -ENOMEM;
2439 info->pcm = pcm;
2440 info->stream = stream;
2441 info->chmap = chmap;
2442 info->max_channels = max_channels;
2443 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2444 knew.name = "Playback Channel Map";
2445 else
2446 knew.name = "Capture Channel Map";
2447 knew.device = pcm->device;
2448 knew.count = pcm->streams[stream].substream_count;
2449 knew.private_value = private_value;
2450 info->kctl = snd_ctl_new1(&knew, info);
2451 if (!info->kctl) {
2452 kfree(info);
2453 return -ENOMEM;
2454 }
2455 info->kctl->private_free = pcm_chmap_ctl_private_free;
2456 err = snd_ctl_add(pcm->card, info->kctl);
2457 if (err < 0)
2458 return err;
2459 pcm->streams[stream].chmap_kctl = info->kctl;
2460 if (info_ret)
2461 *info_ret = info;
2462 return 0;
2463}
2464EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
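
/*
 * Example (illustrative sketch, not part of the original file): a stereo
 * playback device could register the standard channel maps right after
 * creating its PCM; error handling is abbreviated:
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 2, 0, NULL);
 *	if (err < 0)
 *		return err;
 */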