1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 * Abramo Bagnara <abramo@alsa-project.org>
6 */
7
8#include <linux/slab.h>
9#include <linux/sched/signal.h>
10#include <linux/time.h>
11#include <linux/math64.h>
12#include <linux/export.h>
13#include <sound/core.h>
14#include <sound/control.h>
15#include <sound/tlv.h>
16#include <sound/info.h>
17#include <sound/pcm.h>
18#include <sound/pcm_params.h>
19#include <sound/timer.h>
20
21#include "pcm_local.h"
22
23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
24#define CREATE_TRACE_POINTS
25#include "pcm_trace.h"
26#else
27#define trace_hwptr(substream, pos, in_interrupt)
28#define trace_xrun(substream)
29#define trace_hw_ptr_error(substream, reason)
30#define trace_applptr(substream, prev, curr)
31#endif
32
33static int fill_silence_frames(struct snd_pcm_substream *substream,
34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35
36/*
37 * fill ring buffer with silence
38 * runtime->silence_start: starting pointer to silence area
39 * runtime->silence_filled: size filled with silence
40 * runtime->silence_threshold: threshold from application
41 * runtime->silence_size: maximal size from application
42 *
43 * when runtime->silence_size >= runtime->boundary, fill the processed area with silence immediately
44 */
45void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
46{
47 struct snd_pcm_runtime *runtime = substream->runtime;
48 snd_pcm_uframes_t frames, ofs, transfer;
49 int err;
50
51 if (runtime->silence_size < runtime->boundary) {
52 snd_pcm_sframes_t noise_dist, n;
53 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
54 if (runtime->silence_start != appl_ptr) {
55 n = appl_ptr - runtime->silence_start;
56 if (n < 0)
57 n += runtime->boundary;
58 if ((snd_pcm_uframes_t)n < runtime->silence_filled)
59 runtime->silence_filled -= n;
60 else
61 runtime->silence_filled = 0;
62 runtime->silence_start = appl_ptr;
63 }
64 if (runtime->silence_filled >= runtime->buffer_size)
65 return;
66 noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
67 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
68 return;
69 frames = runtime->silence_threshold - noise_dist;
70 if (frames > runtime->silence_size)
71 frames = runtime->silence_size;
72 } else {
73 if (new_hw_ptr == ULONG_MAX) { /* initialization */
74 snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
75 if (avail > runtime->buffer_size)
76 avail = runtime->buffer_size;
77 runtime->silence_filled = avail > 0 ? avail : 0;
78 runtime->silence_start = (runtime->status->hw_ptr +
79 runtime->silence_filled) %
80 runtime->boundary;
81 } else {
82 ofs = runtime->status->hw_ptr;
83 frames = new_hw_ptr - ofs;
84 if ((snd_pcm_sframes_t)frames < 0)
85 frames += runtime->boundary;
86 runtime->silence_filled -= frames;
87 if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
88 runtime->silence_filled = 0;
89 runtime->silence_start = new_hw_ptr;
90 } else {
91 runtime->silence_start = ofs;
92 }
93 }
94 frames = runtime->buffer_size - runtime->silence_filled;
95 }
96 if (snd_BUG_ON(frames > runtime->buffer_size))
97 return;
98 if (frames == 0)
99 return;
100 ofs = runtime->silence_start % runtime->buffer_size;
101 while (frames > 0) {
102 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
103 err = fill_silence_frames(substream, ofs, transfer);
104 snd_BUG_ON(err < 0);
105 runtime->silence_filled += transfer;
106 frames -= transfer;
107 ofs = 0;
108 }
109}
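
/*
 * Example (illustrative sketch): both knobs are normally configured from user
 * space through sw_params; with alsa-lib this typically looks like the calls
 * below ("pcm" and "sw" are hypothetical handles set up elsewhere). Requesting
 * silence_size >= boundary instead asks for the whole processed area to be
 * silenced immediately, as described above.
 *
 *     snd_pcm_sw_params_set_silence_threshold(pcm, sw, 4096);
 *     snd_pcm_sw_params_set_silence_size(pcm, sw, 1024);
 *     snd_pcm_sw_params(pcm, sw);
 */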
110
111#ifdef CONFIG_SND_DEBUG
112void snd_pcm_debug_name(struct snd_pcm_substream *substream,
113 char *name, size_t len)
114{
115 snprintf(name, len, "pcmC%dD%d%c:%d",
116 substream->pcm->card->number,
117 substream->pcm->device,
118 substream->stream ? 'c' : 'p',
119 substream->number);
120}
121EXPORT_SYMBOL(snd_pcm_debug_name);
122#endif
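
/*
 * Worked example: for card 0, device 3, a playback stream and substream 1,
 * snd_pcm_debug_name() above yields the string "pcmC0D3p:1"; a capture
 * stream would give 'c' instead of 'p'.
 */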
123
124#define XRUN_DEBUG_BASIC (1<<0)
125#define XRUN_DEBUG_STACK (1<<1) /* also dump the stack */
126#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
127
128#ifdef CONFIG_SND_PCM_XRUN_DEBUG
129
130#define xrun_debug(substream, mask) \
131 ((substream)->pstr->xrun_debug & (mask))
132#else
133#define xrun_debug(substream, mask) 0
134#endif
135
136#define dump_stack_on_xrun(substream) do { \
137 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
138 dump_stack(); \
139 } while (0)
140
141/* call with stream lock held */
142void __snd_pcm_xrun(struct snd_pcm_substream *substream)
143{
144 struct snd_pcm_runtime *runtime = substream->runtime;
145
146 trace_xrun(substream);
147 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
148 struct timespec64 tstamp;
149
150 snd_pcm_gettime(runtime, &tstamp);
151 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
152 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
153 }
154 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
155 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
156 char name[16];
157 snd_pcm_debug_name(substream, name, sizeof(name));
158 pcm_warn(substream->pcm, "XRUN: %s\n", name);
159 dump_stack_on_xrun(substream);
160 }
161}
162
163#ifdef CONFIG_SND_PCM_XRUN_DEBUG
164#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
165 do { \
166 trace_hw_ptr_error(substream, reason); \
167 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
168 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
169 (in_interrupt) ? 'Q' : 'P', ##args); \
170 dump_stack_on_xrun(substream); \
171 } \
172 } while (0)
173
174#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
175
176#define hw_ptr_error(substream, fmt, args...) do { } while (0)
177
178#endif
179
180int snd_pcm_update_state(struct snd_pcm_substream *substream,
181 struct snd_pcm_runtime *runtime)
182{
183 snd_pcm_uframes_t avail;
184
185 avail = snd_pcm_avail(substream);
186 if (avail > runtime->avail_max)
187 runtime->avail_max = avail;
188 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
189 if (avail >= runtime->buffer_size) {
190 snd_pcm_drain_done(substream);
191 return -EPIPE;
192 }
193 } else {
194 if (avail >= runtime->stop_threshold) {
195 __snd_pcm_xrun(substream);
196 return -EPIPE;
197 }
198 }
199 if (runtime->twake) {
200 if (avail >= runtime->twake)
201 wake_up(&runtime->tsleep);
202 } else if (avail >= runtime->control->avail_min)
203 wake_up(&runtime->sleep);
204 return 0;
205}
206
207static void update_audio_tstamp(struct snd_pcm_substream *substream,
208 struct timespec64 *curr_tstamp,
209 struct timespec64 *audio_tstamp)
210{
211 struct snd_pcm_runtime *runtime = substream->runtime;
212 u64 audio_frames, audio_nsecs;
213 struct timespec64 driver_tstamp;
214
215 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
216 return;
217
218 if (!(substream->ops->get_time_info) ||
219 (runtime->audio_tstamp_report.actual_type ==
220 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
221
222 /*
223 * provide audio timestamp derived from pointer position
224 * add delay only if requested
225 */
226
227 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
228
229 if (runtime->audio_tstamp_config.report_delay) {
230 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
231 audio_frames -= runtime->delay;
232 else
233 audio_frames += runtime->delay;
234 }
235 audio_nsecs = div_u64(audio_frames * 1000000000LL,
236 runtime->rate);
237 *audio_tstamp = ns_to_timespec64(audio_nsecs);
238 }
239
240 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
241 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
242 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
243 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
244 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
245 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
246 }
247
248
249 /*
250 * re-take a driver timestamp to let apps detect if the reference tstamp
251 * read by low-level hardware was provided with a delay
252 */
253 snd_pcm_gettime(substream->runtime, &driver_tstamp);
254 runtime->driver_tstamp = driver_tstamp;
255}
256
257static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
258 unsigned int in_interrupt)
259{
260 struct snd_pcm_runtime *runtime = substream->runtime;
261 snd_pcm_uframes_t pos;
262 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
263 snd_pcm_sframes_t hdelta, delta;
264 unsigned long jdelta;
265 unsigned long curr_jiffies;
266 struct timespec64 curr_tstamp;
267 struct timespec64 audio_tstamp;
268 int crossed_boundary = 0;
269
270 old_hw_ptr = runtime->status->hw_ptr;
271
272 /*
273 * group pointer, time and jiffies reads to allow for more
274 * accurate correlations/corrections.
275 * The values are stored at the end of this routine after
276 * corrections for hw_ptr position
277 */
278 pos = substream->ops->pointer(substream);
279 curr_jiffies = jiffies;
280 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
281 if ((substream->ops->get_time_info) &&
282 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
283 substream->ops->get_time_info(substream, &curr_tstamp,
284 &audio_tstamp,
285 &runtime->audio_tstamp_config,
286 &runtime->audio_tstamp_report);
287
288 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
289 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
290 snd_pcm_gettime(runtime, &curr_tstamp);
291 } else
292 snd_pcm_gettime(runtime, &curr_tstamp);
293 }
294
295 if (pos == SNDRV_PCM_POS_XRUN) {
296 __snd_pcm_xrun(substream);
297 return -EPIPE;
298 }
299 if (pos >= runtime->buffer_size) {
300 if (printk_ratelimit()) {
301 char name[16];
302 snd_pcm_debug_name(substream, name, sizeof(name));
303 pcm_err(substream->pcm,
304 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
305 name, pos, runtime->buffer_size,
306 runtime->period_size);
307 }
308 pos = 0;
309 }
310 pos -= pos % runtime->min_align;
311 trace_hwptr(substream, pos, in_interrupt);
312 hw_base = runtime->hw_ptr_base;
313 new_hw_ptr = hw_base + pos;
314 if (in_interrupt) {
315 /* we know that one period was processed */
316 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
317 delta = runtime->hw_ptr_interrupt + runtime->period_size;
318 if (delta > new_hw_ptr) {
319 /* check for double acknowledged interrupts */
320 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
321 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
322 hw_base += runtime->buffer_size;
323 if (hw_base >= runtime->boundary) {
324 hw_base = 0;
325 crossed_boundary++;
326 }
327 new_hw_ptr = hw_base + pos;
328 goto __delta;
329 }
330 }
331 }
332 /* new_hw_ptr might be lower than old_hw_ptr when the */
333 /* pointer crosses the end of the ring buffer */
334 if (new_hw_ptr < old_hw_ptr) {
335 hw_base += runtime->buffer_size;
336 if (hw_base >= runtime->boundary) {
337 hw_base = 0;
338 crossed_boundary++;
339 }
340 new_hw_ptr = hw_base + pos;
341 }
342 __delta:
343 delta = new_hw_ptr - old_hw_ptr;
344 if (delta < 0)
345 delta += runtime->boundary;
346
347 if (runtime->no_period_wakeup) {
348 snd_pcm_sframes_t xrun_threshold;
349 /*
350 * Without regular period interrupts, we have to check
351 * the elapsed time to detect xruns.
352 */
353 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
354 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
355 goto no_delta_check;
356 hdelta = jdelta - delta * HZ / runtime->rate;
357 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
358 while (hdelta > xrun_threshold) {
359 delta += runtime->buffer_size;
360 hw_base += runtime->buffer_size;
361 if (hw_base >= runtime->boundary) {
362 hw_base = 0;
363 crossed_boundary++;
364 }
365 new_hw_ptr = hw_base + pos;
366 hdelta -= runtime->hw_ptr_buffer_jiffies;
367 }
368 goto no_delta_check;
369 }
370
371 /* something must be really wrong */
372 if (delta >= runtime->buffer_size + runtime->period_size) {
373 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
374 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
375 substream->stream, (long)pos,
376 (long)new_hw_ptr, (long)old_hw_ptr);
377 return 0;
378 }
379
380 /* Do jiffies check only in xrun_debug mode */
381 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
382 goto no_jiffies_check;
383
384 /* Skip the jiffies check for hardware with the BATCH flag.
385 * Such hardware usually just increases the position at each IRQ,
386 * thus it can't give any strange position.
387 */
388 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
389 goto no_jiffies_check;
390 hdelta = delta;
391 if (hdelta < runtime->delay)
392 goto no_jiffies_check;
393 hdelta -= runtime->delay;
394 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
395 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
396 delta = jdelta /
397 (((runtime->period_size * HZ) / runtime->rate)
398 + HZ/100);
399 /* move new_hw_ptr according to jiffies, not the pos variable */
400 new_hw_ptr = old_hw_ptr;
401 hw_base = delta;
402 /* use loop to avoid checks for delta overflows */
403 /* the delta value is small or zero in most cases */
404 while (delta > 0) {
405 new_hw_ptr += runtime->period_size;
406 if (new_hw_ptr >= runtime->boundary) {
407 new_hw_ptr -= runtime->boundary;
408 crossed_boundary--;
409 }
410 delta--;
411 }
412 /* align hw_base to buffer_size */
413 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
414 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
415 (long)pos, (long)hdelta,
416 (long)runtime->period_size, jdelta,
417 ((hdelta * HZ) / runtime->rate), hw_base,
418 (unsigned long)old_hw_ptr,
419 (unsigned long)new_hw_ptr);
420 /* reset values to proper state */
421 delta = 0;
422 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
423 }
424 no_jiffies_check:
425 if (delta > runtime->period_size + runtime->period_size / 2) {
426 hw_ptr_error(substream, in_interrupt,
427 "Lost interrupts?",
428 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
429 substream->stream, (long)delta,
430 (long)new_hw_ptr,
431 (long)old_hw_ptr);
432 }
433
434 no_delta_check:
435 if (runtime->status->hw_ptr == new_hw_ptr) {
436 runtime->hw_ptr_jiffies = curr_jiffies;
437 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
438 return 0;
439 }
440
441 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
442 runtime->silence_size > 0)
443 snd_pcm_playback_silence(substream, new_hw_ptr);
444
445 if (in_interrupt) {
446 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
447 if (delta < 0)
448 delta += runtime->boundary;
449 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
450 runtime->hw_ptr_interrupt += delta;
451 if (runtime->hw_ptr_interrupt >= runtime->boundary)
452 runtime->hw_ptr_interrupt -= runtime->boundary;
453 }
454 runtime->hw_ptr_base = hw_base;
455 runtime->status->hw_ptr = new_hw_ptr;
456 runtime->hw_ptr_jiffies = curr_jiffies;
457 if (crossed_boundary) {
458 snd_BUG_ON(crossed_boundary != 1);
459 runtime->hw_ptr_wrap += runtime->boundary;
460 }
461
462 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
463
464 return snd_pcm_update_state(substream, runtime);
465}
466
467/* CAUTION: call it with irq disabled */
468int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
469{
470 return snd_pcm_update_hw_ptr0(substream, 0);
471}
472
473/**
474 * snd_pcm_set_ops - set the PCM operators
475 * @pcm: the pcm instance
476 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
477 * @ops: the operator table
478 *
479 * Sets the given PCM operators to the pcm instance.
480 */
481void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
482 const struct snd_pcm_ops *ops)
483{
484 struct snd_pcm_str *stream = &pcm->streams[direction];
485 struct snd_pcm_substream *substream;
486
487 for (substream = stream->substream; substream != NULL; substream = substream->next)
488 substream->ops = ops;
489}
490EXPORT_SYMBOL(snd_pcm_set_ops);
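
/*
 * Example (illustrative sketch): a typical PCM constructor wires the ops
 * tables up right after snd_pcm_new(); "my_chip", "my_playback_ops" and
 * "my_capture_ops" are hypothetical driver-side names.
 *
 *     static int my_pcm_new(struct my_chip *chip)
 *     {
 *             struct snd_pcm *pcm;
 *             int err;
 *
 *             err = snd_pcm_new(chip->card, "My PCM", 0, 1, 1, &pcm);
 *             if (err < 0)
 *                     return err;
 *             pcm->private_data = chip;
 *             snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *             snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 *             return 0;
 *     }
 */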
491
492/**
493 * snd_pcm_set_sync - set the PCM sync id
494 * @substream: the pcm substream
495 *
496 * Sets the PCM sync identifier for the card.
497 */
498void snd_pcm_set_sync(struct snd_pcm_substream *substream)
499{
500 struct snd_pcm_runtime *runtime = substream->runtime;
501
502 runtime->sync.id32[0] = substream->pcm->card->number;
503 runtime->sync.id32[1] = -1;
504 runtime->sync.id32[2] = -1;
505 runtime->sync.id32[3] = -1;
506}
507EXPORT_SYMBOL(snd_pcm_set_sync);
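
/*
 * Example (illustrative sketch): drivers usually call this from their .open
 * callback once the runtime is set up, so that substreams which may be linked
 * and started together report the same sync id ("my_pcm_hardware" is a
 * hypothetical struct snd_pcm_hardware):
 *
 *     static int my_pcm_open(struct snd_pcm_substream *substream)
 *     {
 *             substream->runtime->hw = my_pcm_hardware;
 *             snd_pcm_set_sync(substream);
 *             return 0;
 *     }
 */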
508
509/*
510 * Standard ioctl routine
511 */
512
513static inline unsigned int div32(unsigned int a, unsigned int b,
514 unsigned int *r)
515{
516 if (b == 0) {
517 *r = 0;
518 return UINT_MAX;
519 }
520 *r = a % b;
521 return a / b;
522}
523
524static inline unsigned int div_down(unsigned int a, unsigned int b)
525{
526 if (b == 0)
527 return UINT_MAX;
528 return a / b;
529}
530
531static inline unsigned int div_up(unsigned int a, unsigned int b)
532{
533 unsigned int r;
534 unsigned int q;
535 if (b == 0)
536 return UINT_MAX;
537 q = div32(a, b, &r);
538 if (r)
539 ++q;
540 return q;
541}
542
543static inline unsigned int mul(unsigned int a, unsigned int b)
544{
545 if (a == 0)
546 return 0;
547 if (div_down(UINT_MAX, a) < b)
548 return UINT_MAX;
549 return a * b;
550}
551
552static inline unsigned int muldiv32(unsigned int a, unsigned int b,
553 unsigned int c, unsigned int *r)
554{
555 u_int64_t n = (u_int64_t) a * b;
556 if (c == 0) {
557 *r = 0;
558 return UINT_MAX;
559 }
560 n = div_u64_rem(n, c, r);
561 if (n >= UINT_MAX) {
562 *r = 0;
563 return UINT_MAX;
564 }
565 return n;
566}
567
568/**
569 * snd_interval_refine - refine the interval value of configurator
570 * @i: the interval value to refine
571 * @v: the interval value to refer to
572 *
573 * Refines the interval value with the reference value.
574 * The interval is changed to the range satisfying both intervals.
575 * The interval status (min, max, integer, etc.) is evaluated.
576 *
577 * Return: Positive if the value is changed, zero if it's not changed, or a
578 * negative error code.
579 */
580int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
581{
582 int changed = 0;
583 if (snd_BUG_ON(snd_interval_empty(i)))
584 return -EINVAL;
585 if (i->min < v->min) {
586 i->min = v->min;
587 i->openmin = v->openmin;
588 changed = 1;
589 } else if (i->min == v->min && !i->openmin && v->openmin) {
590 i->openmin = 1;
591 changed = 1;
592 }
593 if (i->max > v->max) {
594 i->max = v->max;
595 i->openmax = v->openmax;
596 changed = 1;
597 } else if (i->max == v->max && !i->openmax && v->openmax) {
598 i->openmax = 1;
599 changed = 1;
600 }
601 if (!i->integer && v->integer) {
602 i->integer = 1;
603 changed = 1;
604 }
605 if (i->integer) {
606 if (i->openmin) {
607 i->min++;
608 i->openmin = 0;
609 }
610 if (i->openmax) {
611 i->max--;
612 i->openmax = 0;
613 }
614 } else if (!i->openmin && !i->openmax && i->min == i->max)
615 i->integer = 1;
616 if (snd_interval_checkempty(i)) {
617 snd_interval_none(i);
618 return -EINVAL;
619 }
620 return changed;
621}
622EXPORT_SYMBOL(snd_interval_refine);
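
/*
 * Worked example: refining i = [8000, 48000] against v = [44100, 96000]
 * narrows i to [44100, 48000] and returns 1. Refining the result against the
 * single value [48000, 48000] reduces it to that value and marks it integer,
 * while refining against a disjoint interval such as [96000, 192000] empties
 * i and returns -EINVAL.
 */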
623
624static int snd_interval_refine_first(struct snd_interval *i)
625{
626 const unsigned int last_max = i->max;
627
628 if (snd_BUG_ON(snd_interval_empty(i)))
629 return -EINVAL;
630 if (snd_interval_single(i))
631 return 0;
632 i->max = i->min;
633 if (i->openmin)
634 i->max++;
635 /* only exclude max value if also excluded before refine */
636 i->openmax = (i->openmax && i->max >= last_max);
637 return 1;
638}
639
640static int snd_interval_refine_last(struct snd_interval *i)
641{
642 const unsigned int last_min = i->min;
643
644 if (snd_BUG_ON(snd_interval_empty(i)))
645 return -EINVAL;
646 if (snd_interval_single(i))
647 return 0;
648 i->min = i->max;
649 if (i->openmax)
650 i->min--;
651 /* only exclude min value if also excluded before refine */
652 i->openmin = (i->openmin && i->min <= last_min);
653 return 1;
654}
655
656void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
657{
658 if (a->empty || b->empty) {
659 snd_interval_none(c);
660 return;
661 }
662 c->empty = 0;
663 c->min = mul(a->min, b->min);
664 c->openmin = (a->openmin || b->openmin);
665 c->max = mul(a->max, b->max);
666 c->openmax = (a->openmax || b->openmax);
667 c->integer = (a->integer && b->integer);
668}
669
670/**
671 * snd_interval_div - refine the interval value with division
672 * @a: dividend
673 * @b: divisor
674 * @c: quotient
675 *
676 * c = a / b
679 */
680void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
681{
682 unsigned int r;
683 if (a->empty || b->empty) {
684 snd_interval_none(c);
685 return;
686 }
687 c->empty = 0;
688 c->min = div32(a->min, b->max, &r);
689 c->openmin = (r || a->openmin || b->openmax);
690 if (b->min > 0) {
691 c->max = div32(a->max, b->min, &r);
692 if (r) {
693 c->max++;
694 c->openmax = 1;
695 } else
696 c->openmax = (a->openmax || b->openmin);
697 } else {
698 c->max = UINT_MAX;
699 c->openmax = 0;
700 }
701 c->integer = 0;
702}
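
/*
 * Worked example: with a = [100, 200] and b = [4, 5] (all bounds closed),
 * c->min = 100 / 5 = 20 and c->max = 200 / 4 = 50; both divisions are exact,
 * so neither end is forced open and c becomes [20, 50]. If b->min were 0,
 * c->max would saturate to UINT_MAX.
 */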
703
704/**
705 * snd_interval_muldivk - refine the interval value
706 * @a: dividend 1
707 * @b: dividend 2
708 * @k: divisor (as integer)
709 * @c: result
710 *
711 * c = a * b / k
714 */
715void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
716 unsigned int k, struct snd_interval *c)
717{
718 unsigned int r;
719 if (a->empty || b->empty) {
720 snd_interval_none(c);
721 return;
722 }
723 c->empty = 0;
724 c->min = muldiv32(a->min, b->min, k, &r);
725 c->openmin = (r || a->openmin || b->openmin);
726 c->max = muldiv32(a->max, b->max, k, &r);
727 if (r) {
728 c->max++;
729 c->openmax = 1;
730 } else
731 c->openmax = (a->openmax || b->openmax);
732 c->integer = 0;
733}
734
735/**
736 * snd_interval_mulkdiv - refine the interval value
737 * @a: dividend 1
738 * @k: dividend 2 (as integer)
739 * @b: divisor
740 * @c: result
741 *
742 * c = a * k / b
745 */
746void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
747 const struct snd_interval *b, struct snd_interval *c)
748{
749 unsigned int r;
750 if (a->empty || b->empty) {
751 snd_interval_none(c);
752 return;
753 }
754 c->empty = 0;
755 c->min = muldiv32(a->min, k, b->max, &r);
756 c->openmin = (r || a->openmin || b->openmax);
757 if (b->min > 0) {
758 c->max = muldiv32(a->max, k, b->min, &r);
759 if (r) {
760 c->max++;
761 c->openmax = 1;
762 } else
763 c->openmax = (a->openmax || b->openmin);
764 } else {
765 c->max = UINT_MAX;
766 c->openmax = 0;
767 }
768 c->integer = 0;
769}
770
771/* ---- */
772
773
774/**
775 * snd_interval_ratnum - refine the interval value
776 * @i: interval to refine
777 * @rats_count: number of ratnum_t
778 * @rats: ratnum_t array
779 * @nump: pointer to store the resultant numerator
780 * @denp: pointer to store the resultant denominator
781 *
782 * Return: Positive if the value is changed, zero if it's not changed, or a
783 * negative error code.
784 */
785int snd_interval_ratnum(struct snd_interval *i,
786 unsigned int rats_count, const struct snd_ratnum *rats,
787 unsigned int *nump, unsigned int *denp)
788{
789 unsigned int best_num, best_den;
790 int best_diff;
791 unsigned int k;
792 struct snd_interval t;
793 int err;
794 unsigned int result_num, result_den;
795 int result_diff;
796
797 best_num = best_den = best_diff = 0;
798 for (k = 0; k < rats_count; ++k) {
799 unsigned int num = rats[k].num;
800 unsigned int den;
801 unsigned int q = i->min;
802 int diff;
803 if (q == 0)
804 q = 1;
805 den = div_up(num, q);
806 if (den < rats[k].den_min)
807 continue;
808 if (den > rats[k].den_max)
809 den = rats[k].den_max;
810 else {
811 unsigned int r;
812 r = (den - rats[k].den_min) % rats[k].den_step;
813 if (r != 0)
814 den -= r;
815 }
816 diff = num - q * den;
817 if (diff < 0)
818 diff = -diff;
819 if (best_num == 0 ||
820 diff * best_den < best_diff * den) {
821 best_diff = diff;
822 best_den = den;
823 best_num = num;
824 }
825 }
826 if (best_den == 0) {
827 i->empty = 1;
828 return -EINVAL;
829 }
830 t.min = div_down(best_num, best_den);
831 t.openmin = !!(best_num % best_den);
832
833 result_num = best_num;
834 result_diff = best_diff;
835 result_den = best_den;
836 best_num = best_den = best_diff = 0;
837 for (k = 0; k < rats_count; ++k) {
838 unsigned int num = rats[k].num;
839 unsigned int den;
840 unsigned int q = i->max;
841 int diff;
842 if (q == 0) {
843 i->empty = 1;
844 return -EINVAL;
845 }
846 den = div_down(num, q);
847 if (den > rats[k].den_max)
848 continue;
849 if (den < rats[k].den_min)
850 den = rats[k].den_min;
851 else {
852 unsigned int r;
853 r = (den - rats[k].den_min) % rats[k].den_step;
854 if (r != 0)
855 den += rats[k].den_step - r;
856 }
857 diff = q * den - num;
858 if (diff < 0)
859 diff = -diff;
860 if (best_num == 0 ||
861 diff * best_den < best_diff * den) {
862 best_diff = diff;
863 best_den = den;
864 best_num = num;
865 }
866 }
867 if (best_den == 0) {
868 i->empty = 1;
869 return -EINVAL;
870 }
871 t.max = div_up(best_num, best_den);
872 t.openmax = !!(best_num % best_den);
873 t.integer = 0;
874 err = snd_interval_refine(i, &t);
875 if (err < 0)
876 return err;
877
878 if (snd_interval_single(i)) {
879 if (best_diff * result_den < result_diff * best_den) {
880 result_num = best_num;
881 result_den = best_den;
882 }
883 if (nump)
884 *nump = result_num;
885 if (denp)
886 *denp = result_den;
887 }
888 return err;
889}
890EXPORT_SYMBOL(snd_interval_ratnum);
891
892/**
893 * snd_interval_ratden - refine the interval value
894 * @i: interval to refine
895 * @rats_count: number of struct ratden
896 * @rats: struct ratden array
897 * @nump: pointer to store the resultant numerator
898 * @denp: pointer to store the resultant denominator
899 *
900 * Return: Positive if the value is changed, zero if it's not changed, or a
901 * negative error code.
902 */
903static int snd_interval_ratden(struct snd_interval *i,
904 unsigned int rats_count,
905 const struct snd_ratden *rats,
906 unsigned int *nump, unsigned int *denp)
907{
908 unsigned int best_num, best_diff, best_den;
909 unsigned int k;
910 struct snd_interval t;
911 int err;
912
913 best_num = best_den = best_diff = 0;
914 for (k = 0; k < rats_count; ++k) {
915 unsigned int num;
916 unsigned int den = rats[k].den;
917 unsigned int q = i->min;
918 int diff;
919 num = mul(q, den);
920 if (num > rats[k].num_max)
921 continue;
922 if (num < rats[k].num_min)
923 num = rats[k].num_min;
924 else {
925 unsigned int r;
926 r = (num - rats[k].num_min) % rats[k].num_step;
927 if (r != 0)
928 num += rats[k].num_step - r;
929 }
930 diff = num - q * den;
931 if (best_num == 0 ||
932 diff * best_den < best_diff * den) {
933 best_diff = diff;
934 best_den = den;
935 best_num = num;
936 }
937 }
938 if (best_den == 0) {
939 i->empty = 1;
940 return -EINVAL;
941 }
942 t.min = div_down(best_num, best_den);
943 t.openmin = !!(best_num % best_den);
944
945 best_num = best_den = best_diff = 0;
946 for (k = 0; k < rats_count; ++k) {
947 unsigned int num;
948 unsigned int den = rats[k].den;
949 unsigned int q = i->max;
950 int diff;
951 num = mul(q, den);
952 if (num < rats[k].num_min)
953 continue;
954 if (num > rats[k].num_max)
955 num = rats[k].num_max;
956 else {
957 unsigned int r;
958 r = (num - rats[k].num_min) % rats[k].num_step;
959 if (r != 0)
960 num -= r;
961 }
962 diff = q * den - num;
963 if (best_num == 0 ||
964 diff * best_den < best_diff * den) {
965 best_diff = diff;
966 best_den = den;
967 best_num = num;
968 }
969 }
970 if (best_den == 0) {
971 i->empty = 1;
972 return -EINVAL;
973 }
974 t.max = div_up(best_num, best_den);
975 t.openmax = !!(best_num % best_den);
976 t.integer = 0;
977 err = snd_interval_refine(i, &t);
978 if (err < 0)
979 return err;
980
981 if (snd_interval_single(i)) {
982 if (nump)
983 *nump = best_num;
984 if (denp)
985 *denp = best_den;
986 }
987 return err;
988}
989
990/**
991 * snd_interval_list - refine the interval value from the list
992 * @i: the interval value to refine
993 * @count: the number of elements in the list
994 * @list: the value list
995 * @mask: the bit-mask to evaluate
996 *
997 * Refines the interval value from the list.
998 * When mask is non-zero, only the elements corresponding to bit 1 are
999 * evaluated.
1000 *
1001 * Return: Positive if the value is changed, zero if it's not changed, or a
1002 * negative error code.
1003 */
1004int snd_interval_list(struct snd_interval *i, unsigned int count,
1005 const unsigned int *list, unsigned int mask)
1006{
1007 unsigned int k;
1008 struct snd_interval list_range;
1009
1010 if (!count) {
1011 i->empty = 1;
1012 return -EINVAL;
1013 }
1014 snd_interval_any(&list_range);
1015 list_range.min = UINT_MAX;
1016 list_range.max = 0;
1017 for (k = 0; k < count; k++) {
1018 if (mask && !(mask & (1 << k)))
1019 continue;
1020 if (!snd_interval_test(i, list[k]))
1021 continue;
1022 list_range.min = min(list_range.min, list[k]);
1023 list_range.max = max(list_range.max, list[k]);
1024 }
1025 return snd_interval_refine(i, &list_range);
1026}
1027EXPORT_SYMBOL(snd_interval_list);
1028
1029/**
1030 * snd_interval_ranges - refine the interval value from the list of ranges
1031 * @i: the interval value to refine
1032 * @count: the number of elements in the list of ranges
1033 * @ranges: the ranges list
1034 * @mask: the bit-mask to evaluate
1035 *
1036 * Refines the interval value from the list of ranges.
1037 * When mask is non-zero, only the elements corresponding to bit 1 are
1038 * evaluated.
1039 *
1040 * Return: Positive if the value is changed, zero if it's not changed, or a
1041 * negative error code.
1042 */
1043int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1044 const struct snd_interval *ranges, unsigned int mask)
1045{
1046 unsigned int k;
1047 struct snd_interval range_union;
1048 struct snd_interval range;
1049
1050 if (!count) {
1051 snd_interval_none(i);
1052 return -EINVAL;
1053 }
1054 snd_interval_any(&range_union);
1055 range_union.min = UINT_MAX;
1056 range_union.max = 0;
1057 for (k = 0; k < count; k++) {
1058 if (mask && !(mask & (1 << k)))
1059 continue;
1060 snd_interval_copy(&range, &ranges[k]);
1061 if (snd_interval_refine(&range, i) < 0)
1062 continue;
1063 if (snd_interval_empty(&range))
1064 continue;
1065
1066 if (range.min < range_union.min) {
1067 range_union.min = range.min;
1068 range_union.openmin = 1;
1069 }
1070 if (range.min == range_union.min && !range.openmin)
1071 range_union.openmin = 0;
1072 if (range.max > range_union.max) {
1073 range_union.max = range.max;
1074 range_union.openmax = 1;
1075 }
1076 if (range.max == range_union.max && !range.openmax)
1077 range_union.openmax = 0;
1078 }
1079 return snd_interval_refine(i, &range_union);
1080}
1081EXPORT_SYMBOL(snd_interval_ranges);
1082
1083static int snd_interval_step(struct snd_interval *i, unsigned int step)
1084{
1085 unsigned int n;
1086 int changed = 0;
1087 n = i->min % step;
1088 if (n != 0 || i->openmin) {
1089 i->min += step - n;
1090 i->openmin = 0;
1091 changed = 1;
1092 }
1093 n = i->max % step;
1094 if (n != 0 || i->openmax) {
1095 i->max -= n;
1096 i->openmax = 0;
1097 changed = 1;
1098 }
1099 if (snd_interval_checkempty(i)) {
1100 i->empty = 1;
1101 return -EINVAL;
1102 }
1103 return changed;
1104}
1105
1106/* Info constraints helpers */
1107
1108/**
1109 * snd_pcm_hw_rule_add - add the hw-constraint rule
1110 * @runtime: the pcm runtime instance
1111 * @cond: condition bits
1112 * @var: the variable to evaluate
1113 * @func: the evaluation function
1114 * @private: the private data pointer passed to function
1115 * @dep: the dependent variables
1116 *
1117 * Return: Zero if successful, or a negative error code on failure.
1118 */
1119int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1120 int var,
1121 snd_pcm_hw_rule_func_t func, void *private,
1122 int dep, ...)
1123{
1124 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1125 struct snd_pcm_hw_rule *c;
1126 unsigned int k;
1127 va_list args;
1128 va_start(args, dep);
1129 if (constrs->rules_num >= constrs->rules_all) {
1130 struct snd_pcm_hw_rule *new;
1131 unsigned int new_rules = constrs->rules_all + 16;
1132 new = krealloc_array(constrs->rules, new_rules,
1133 sizeof(*c), GFP_KERNEL);
1134 if (!new) {
1135 va_end(args);
1136 return -ENOMEM;
1137 }
1138 constrs->rules = new;
1139 constrs->rules_all = new_rules;
1140 }
1141 c = &constrs->rules[constrs->rules_num];
1142 c->cond = cond;
1143 c->func = func;
1144 c->var = var;
1145 c->private = private;
1146 k = 0;
1147 while (1) {
1148 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1149 va_end(args);
1150 return -EINVAL;
1151 }
1152 c->deps[k++] = dep;
1153 if (dep < 0)
1154 break;
1155 dep = va_arg(args, int);
1156 }
1157 constrs->rules_num++;
1158 va_end(args);
1159 return 0;
1160}
1161EXPORT_SYMBOL(snd_pcm_hw_rule_add);
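
/*
 * Example (illustrative sketch): a device that is stereo-only above 48 kHz
 * could couple the channel count to the rate with a custom rule;
 * "hw_rule_channels_by_rate" is a hypothetical callback following the same
 * pattern as the built-in rule helpers below.
 *
 *     static int hw_rule_channels_by_rate(struct snd_pcm_hw_params *params,
 *                                         struct snd_pcm_hw_rule *rule)
 *     {
 *             struct snd_interval *c = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *             const struct snd_interval *r = hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *             struct snd_interval t = { .min = 2, .max = 2, .integer = 1 };
 *
 *             if (r->min > 48000)
 *                     return snd_interval_refine(c, &t);
 *             return 0;
 *     }
 *
 *     ... and in the .open callback:
 *     err = snd_pcm_hw_rule_add(substream->runtime, 0,
 *                               SNDRV_PCM_HW_PARAM_CHANNELS,
 *                               hw_rule_channels_by_rate, NULL,
 *                               SNDRV_PCM_HW_PARAM_RATE, -1);
 */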
1162
1163/**
1164 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1165 * @runtime: PCM runtime instance
1166 * @var: hw_params variable to apply the mask
1167 * @mask: the bitmap mask
1168 *
1169 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1170 *
1171 * Return: Zero if successful, or a negative error code on failure.
1172 */
1173int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1174 u_int32_t mask)
1175{
1176 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1177 struct snd_mask *maskp = constrs_mask(constrs, var);
1178 *maskp->bits &= mask;
1179 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1180 if (*maskp->bits == 0)
1181 return -EINVAL;
1182 return 0;
1183}
1184
1185/**
1186 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1187 * @runtime: PCM runtime instance
1188 * @var: hw_params variable to apply the mask
1189 * @mask: the 64bit bitmap mask
1190 *
1191 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1192 *
1193 * Return: Zero if successful, or a negative error code on failure.
1194 */
1195int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1196 u_int64_t mask)
1197{
1198 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1199 struct snd_mask *maskp = constrs_mask(constrs, var);
1200 maskp->bits[0] &= (u_int32_t)mask;
1201 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1202 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1203 if (! maskp->bits[0] && ! maskp->bits[1])
1204 return -EINVAL;
1205 return 0;
1206}
1207EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
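
/*
 * Example (illustrative sketch): restrict the FORMAT parameter to the sample
 * formats a codec can actually handle, typically from the .open callback:
 *
 *     err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT,
 *                                        SNDRV_PCM_FMTBIT_S16_LE |
 *                                        SNDRV_PCM_FMTBIT_S32_LE);
 */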
1208
1209/**
1210 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the integer constraint
1213 *
1214 * Apply the constraint of integer to an interval parameter.
1215 *
1216 * Return: Positive if the value is changed, zero if it's not changed, or a
1217 * negative error code.
1218 */
1219int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1220{
1221 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1222 return snd_interval_setinteger(constrs_interval(constrs, var));
1223}
1224EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1225
1226/**
1227 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1228 * @runtime: PCM runtime instance
1229 * @var: hw_params variable to apply the range
1230 * @min: the minimal value
1231 * @max: the maximal value
1232 *
1233 * Apply the min/max range constraint to an interval parameter.
1234 *
1235 * Return: Positive if the value is changed, zero if it's not changed, or a
1236 * negative error code.
1237 */
1238int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1239 unsigned int min, unsigned int max)
1240{
1241 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1242 struct snd_interval t;
1243 t.min = min;
1244 t.max = max;
1245 t.openmin = t.openmax = 0;
1246 t.integer = 0;
1247 return snd_interval_refine(constrs_interval(constrs, var), &t);
1248}
1249EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
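
/*
 * Example (illustrative sketch): two common uses from a driver's .open
 * callback: force a whole number of periods in the buffer, and bound the
 * buffer size to what the DMA engine can address.
 *
 *     err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *     if (err < 0)
 *             return err;
 *     err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *                                        4096, 128 * 1024);
 */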
1250
1251static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1252 struct snd_pcm_hw_rule *rule)
1253{
1254 struct snd_pcm_hw_constraint_list *list = rule->private;
1255 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1256}
1257
1258
1259/**
1260 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1261 * @runtime: PCM runtime instance
1262 * @cond: condition bits
1263 * @var: hw_params variable to apply the list constraint
1264 * @l: list
1265 *
1266 * Apply the list of constraints to an interval parameter.
1267 *
1268 * Return: Zero if successful, or a negative error code on failure.
1269 */
1270int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1271 unsigned int cond,
1272 snd_pcm_hw_param_t var,
1273 const struct snd_pcm_hw_constraint_list *l)
1274{
1275 return snd_pcm_hw_rule_add(runtime, cond, var,
1276 snd_pcm_hw_rule_list, (void *)l,
1277 var, -1);
1278}
1279EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
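
/*
 * Example (illustrative sketch): limit the rate to a fixed set of values the
 * hardware supports; the list and its wrapper must stay valid for the life of
 * the substream, hence the statics ("my_" names are hypothetical).
 *
 *     static const unsigned int my_rates[] = { 8000, 16000, 32000, 48000 };
 *     static const struct snd_pcm_hw_constraint_list my_rate_constraints = {
 *             .count = ARRAY_SIZE(my_rates),
 *             .list = my_rates,
 *             .mask = 0,
 *     };
 *
 *     ... and in the .open callback:
 *     err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *                                      &my_rate_constraints);
 */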
1280
1281static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1282 struct snd_pcm_hw_rule *rule)
1283{
1284 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1285 return snd_interval_ranges(hw_param_interval(params, rule->var),
1286 r->count, r->ranges, r->mask);
1287}
1288
1289
1290/**
1291 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1292 * @runtime: PCM runtime instance
1293 * @cond: condition bits
1294 * @var: hw_params variable to apply the list of range constraints
1295 * @r: ranges
1296 *
1297 * Apply the list of range constraints to an interval parameter.
1298 *
1299 * Return: Zero if successful, or a negative error code on failure.
1300 */
1301int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1302 unsigned int cond,
1303 snd_pcm_hw_param_t var,
1304 const struct snd_pcm_hw_constraint_ranges *r)
1305{
1306 return snd_pcm_hw_rule_add(runtime, cond, var,
1307 snd_pcm_hw_rule_ranges, (void *)r,
1308 var, -1);
1309}
1310EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1311
1312static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1313 struct snd_pcm_hw_rule *rule)
1314{
1315 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1316 unsigned int num = 0, den = 0;
1317 int err;
1318 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1319 r->nrats, r->rats, &num, &den);
1320 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1321 params->rate_num = num;
1322 params->rate_den = den;
1323 }
1324 return err;
1325}
1326
1327/**
1328 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1329 * @runtime: PCM runtime instance
1330 * @cond: condition bits
1331 * @var: hw_params variable to apply the ratnums constraint
1332 * @r: struct snd_ratnums constriants
1333 *
1334 * Return: Zero if successful, or a negative error code on failure.
1335 */
1336int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1337 unsigned int cond,
1338 snd_pcm_hw_param_t var,
1339 const struct snd_pcm_hw_constraint_ratnums *r)
1340{
1341 return snd_pcm_hw_rule_add(runtime, cond, var,
1342 snd_pcm_hw_rule_ratnums, (void *)r,
1343 var, -1);
1344}
1345EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
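
/*
 * Example (illustrative sketch): hardware that derives the sample rate by
 * dividing a fixed master clock can describe the divider as a snd_ratnum and
 * let the core pick valid rates ("MY_CLOCK_HZ" and the "my_" names are
 * hypothetical).
 *
 *     static const struct snd_ratnum my_clock = {
 *             .num = MY_CLOCK_HZ,
 *             .den_min = 1,
 *             .den_max = 256,
 *             .den_step = 1,
 *     };
 *     static const struct snd_pcm_hw_constraint_ratnums my_clocks = {
 *             .nrats = 1,
 *             .rats = &my_clock,
 *     };
 *
 *     ... and in the .open callback:
 *     err = snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *                                         &my_clocks);
 */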
1346
1347static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1348 struct snd_pcm_hw_rule *rule)
1349{
1350 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1351 unsigned int num = 0, den = 0;
1352 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1353 r->nrats, r->rats, &num, &den);
1354 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1355 params->rate_num = num;
1356 params->rate_den = den;
1357 }
1358 return err;
1359}
1360
1361/**
1362 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1363 * @runtime: PCM runtime instance
1364 * @cond: condition bits
1365 * @var: hw_params variable to apply the ratdens constraint
1366 * @r: struct snd_ratdens constraints
1367 *
1368 * Return: Zero if successful, or a negative error code on failure.
1369 */
1370int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1371 unsigned int cond,
1372 snd_pcm_hw_param_t var,
1373 const struct snd_pcm_hw_constraint_ratdens *r)
1374{
1375 return snd_pcm_hw_rule_add(runtime, cond, var,
1376 snd_pcm_hw_rule_ratdens, (void *)r,
1377 var, -1);
1378}
1379EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1380
1381static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1382 struct snd_pcm_hw_rule *rule)
1383{
1384 unsigned int l = (unsigned long) rule->private;
1385 int width = l & 0xffff;
1386 unsigned int msbits = l >> 16;
1387 const struct snd_interval *i =
1388 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1389
1390 if (!snd_interval_single(i))
1391 return 0;
1392
1393 if ((snd_interval_value(i) == width) ||
1394 (width == 0 && snd_interval_value(i) > msbits))
1395 params->msbits = min_not_zero(params->msbits, msbits);
1396
1397 return 0;
1398}
1399
1400/**
1401 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1402 * @runtime: PCM runtime instance
1403 * @cond: condition bits
1404 * @width: sample bits width
1405 * @msbits: msbits width
1406 *
1407 * This constraint will set the number of most significant bits (msbits) if a
1408 * sample format with the specified width has been selected. If width is set to 0
1409 * the msbits will be set for any sample format with a width larger than the
1410 * specified msbits.
1411 *
1412 * Return: Zero if successful, or a negative error code on failure.
1413 */
1414int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1415 unsigned int cond,
1416 unsigned int width,
1417 unsigned int msbits)
1418{
1419 unsigned long l = (msbits << 16) | width;
1420 return snd_pcm_hw_rule_add(runtime, cond, -1,
1421 snd_pcm_hw_rule_msbits,
1422 (void*) l,
1423 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1424}
1425EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
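
/*
 * Example (illustrative sketch): a converter that carries 24 valid bits in a
 * 32-bit sample container would advertise that as:
 *
 *     err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */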
1426
1427static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1428 struct snd_pcm_hw_rule *rule)
1429{
1430 unsigned long step = (unsigned long) rule->private;
1431 return snd_interval_step(hw_param_interval(params, rule->var), step);
1432}
1433
1434/**
1435 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1436 * @runtime: PCM runtime instance
1437 * @cond: condition bits
1438 * @var: hw_params variable to apply the step constraint
1439 * @step: step size
1440 *
1441 * Return: Zero if successful, or a negative error code on failure.
1442 */
1443int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1444 unsigned int cond,
1445 snd_pcm_hw_param_t var,
1446 unsigned long step)
1447{
1448 return snd_pcm_hw_rule_add(runtime, cond, var,
1449 snd_pcm_hw_rule_step, (void *) step,
1450 var, -1);
1451}
1452EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
1453
1454static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1455{
1456 static const unsigned int pow2_sizes[] = {
1457 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1458 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1459 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1460 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1461 };
1462 return snd_interval_list(hw_param_interval(params, rule->var),
1463 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1464}
1465
1466/**
1467 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1468 * @runtime: PCM runtime instance
1469 * @cond: condition bits
1470 * @var: hw_params variable to apply the power-of-2 constraint
1471 *
1472 * Return: Zero if successful, or a negative error code on failure.
1473 */
1474int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1475 unsigned int cond,
1476 snd_pcm_hw_param_t var)
1477{
1478 return snd_pcm_hw_rule_add(runtime, cond, var,
1479 snd_pcm_hw_rule_pow2, NULL,
1480 var, -1);
1481}
1482EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1483
1484static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1485 struct snd_pcm_hw_rule *rule)
1486{
1487 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1488 struct snd_interval *rate;
1489
1490 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1491 return snd_interval_list(rate, 1, &base_rate, 0);
1492}
1493
1494/**
1495 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1496 * @runtime: PCM runtime instance
1497 * @base_rate: the rate at which the hardware does not resample
1498 *
1499 * Return: Zero if successful, or a negative error code on failure.
1500 */
1501int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1502 unsigned int base_rate)
1503{
1504 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1505 SNDRV_PCM_HW_PARAM_RATE,
1506 snd_pcm_hw_rule_noresample_func,
1507 (void *)(uintptr_t)base_rate,
1508 SNDRV_PCM_HW_PARAM_RATE, -1);
1509}
1510EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
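
/*
 * Example (illustrative sketch): a device whose internal resampler can be
 * bypassed at 48 kHz would register this rule from its .open callback; the
 * rate is then pinned to 48000 whenever the application requests the
 * NORESAMPLE behavior.
 *
 *     err = snd_pcm_hw_rule_noresample(runtime, 48000);
 */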
1511
1512static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1513 snd_pcm_hw_param_t var)
1514{
1515 if (hw_is_mask(var)) {
1516 snd_mask_any(hw_param_mask(params, var));
1517 params->cmask |= 1 << var;
1518 params->rmask |= 1 << var;
1519 return;
1520 }
1521 if (hw_is_interval(var)) {
1522 snd_interval_any(hw_param_interval(params, var));
1523 params->cmask |= 1 << var;
1524 params->rmask |= 1 << var;
1525 return;
1526 }
1527 snd_BUG();
1528}
1529
1530void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1531{
1532 unsigned int k;
1533 memset(params, 0, sizeof(*params));
1534 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1535 _snd_pcm_hw_param_any(params, k);
1536 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1537 _snd_pcm_hw_param_any(params, k);
1538 params->info = ~0U;
1539}
1540EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1541
1542/**
1543 * snd_pcm_hw_param_value - return @params field @var value
1544 * @params: the hw_params instance
1545 * @var: parameter to retrieve
1546 * @dir: pointer to the direction (-1,0,1) or %NULL
1547 *
1548 * Return: The value for field @var if it's fixed in configuration space
1549 * defined by @params. -%EINVAL otherwise.
1550 */
1551int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1552 snd_pcm_hw_param_t var, int *dir)
1553{
1554 if (hw_is_mask(var)) {
1555 const struct snd_mask *mask = hw_param_mask_c(params, var);
1556 if (!snd_mask_single(mask))
1557 return -EINVAL;
1558 if (dir)
1559 *dir = 0;
1560 return snd_mask_value(mask);
1561 }
1562 if (hw_is_interval(var)) {
1563 const struct snd_interval *i = hw_param_interval_c(params, var);
1564 if (!snd_interval_single(i))
1565 return -EINVAL;
1566 if (dir)
1567 *dir = i->openmin;
1568 return snd_interval_value(i);
1569 }
1570 return -EINVAL;
1571}
1572EXPORT_SYMBOL(snd_pcm_hw_param_value);
1573
1574void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1575 snd_pcm_hw_param_t var)
1576{
1577 if (hw_is_mask(var)) {
1578 snd_mask_none(hw_param_mask(params, var));
1579 params->cmask |= 1 << var;
1580 params->rmask |= 1 << var;
1581 } else if (hw_is_interval(var)) {
1582 snd_interval_none(hw_param_interval(params, var));
1583 params->cmask |= 1 << var;
1584 params->rmask |= 1 << var;
1585 } else {
1586 snd_BUG();
1587 }
1588}
1589EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1590
1591static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1592 snd_pcm_hw_param_t var)
1593{
1594 int changed;
1595 if (hw_is_mask(var))
1596 changed = snd_mask_refine_first(hw_param_mask(params, var));
1597 else if (hw_is_interval(var))
1598 changed = snd_interval_refine_first(hw_param_interval(params, var));
1599 else
1600 return -EINVAL;
1601 if (changed > 0) {
1602 params->cmask |= 1 << var;
1603 params->rmask |= 1 << var;
1604 }
1605 return changed;
1606}
1607
1608
1609/**
1610 * snd_pcm_hw_param_first - refine config space and return minimum value
1611 * @pcm: PCM instance
1612 * @params: the hw_params instance
1613 * @var: parameter to retrieve
1614 * @dir: pointer to the direction (-1,0,1) or %NULL
1615 *
1616 * Inside configuration space defined by @params remove from @var all
1617 * values > minimum. Reduce configuration space accordingly.
1618 *
1619 * Return: The minimum, or a negative error code on failure.
1620 */
1621int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1622 struct snd_pcm_hw_params *params,
1623 snd_pcm_hw_param_t var, int *dir)
1624{
1625 int changed = _snd_pcm_hw_param_first(params, var);
1626 if (changed < 0)
1627 return changed;
1628 if (params->rmask) {
1629 int err = snd_pcm_hw_refine(pcm, params);
1630 if (err < 0)
1631 return err;
1632 }
1633 return snd_pcm_hw_param_value(params, var, dir);
1634}
1635EXPORT_SYMBOL(snd_pcm_hw_param_first);
1636
1637static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1638 snd_pcm_hw_param_t var)
1639{
1640 int changed;
1641 if (hw_is_mask(var))
1642 changed = snd_mask_refine_last(hw_param_mask(params, var));
1643 else if (hw_is_interval(var))
1644 changed = snd_interval_refine_last(hw_param_interval(params, var));
1645 else
1646 return -EINVAL;
1647 if (changed > 0) {
1648 params->cmask |= 1 << var;
1649 params->rmask |= 1 << var;
1650 }
1651 return changed;
1652}
1653
1654
1655/**
1656 * snd_pcm_hw_param_last - refine config space and return maximum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1661 *
1662 * Inside configuration space defined by @params remove from @var all
1663 * values < maximum. Reduce configuration space accordingly.
1664 *
1665 * Return: The maximum, or a negative error code on failure.
1666 */
1667int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1668 struct snd_pcm_hw_params *params,
1669 snd_pcm_hw_param_t var, int *dir)
1670{
1671 int changed = _snd_pcm_hw_param_last(params, var);
1672 if (changed < 0)
1673 return changed;
1674 if (params->rmask) {
1675 int err = snd_pcm_hw_refine(pcm, params);
1676 if (err < 0)
1677 return err;
1678 }
1679 return snd_pcm_hw_param_value(params, var, dir);
1680}
1681EXPORT_SYMBOL(snd_pcm_hw_param_last);
1682
1683static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1684 void *arg)
1685{
1686 struct snd_pcm_runtime *runtime = substream->runtime;
1687 unsigned long flags;
1688 snd_pcm_stream_lock_irqsave(substream, flags);
1689 if (snd_pcm_running(substream) &&
1690 snd_pcm_update_hw_ptr(substream) >= 0)
1691 runtime->status->hw_ptr %= runtime->buffer_size;
1692 else {
1693 runtime->status->hw_ptr = 0;
1694 runtime->hw_ptr_wrap = 0;
1695 }
1696 snd_pcm_stream_unlock_irqrestore(substream, flags);
1697 return 0;
1698}
1699
1700static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1701 void *arg)
1702{
1703 struct snd_pcm_channel_info *info = arg;
1704 struct snd_pcm_runtime *runtime = substream->runtime;
1705 int width;
1706 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1707 info->offset = -1;
1708 return 0;
1709 }
1710 width = snd_pcm_format_physical_width(runtime->format);
1711 if (width < 0)
1712 return width;
1713 info->offset = 0;
1714 switch (runtime->access) {
1715 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1716 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1717 info->first = info->channel * width;
1718 info->step = runtime->channels * width;
1719 break;
1720 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1721 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1722 {
1723 size_t size = runtime->dma_bytes / runtime->channels;
1724 info->first = info->channel * size * 8;
1725 info->step = width;
1726 break;
1727 }
1728 default:
1729 snd_BUG();
1730 break;
1731 }
1732 return 0;
1733}
1734
1735static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1736 void *arg)
1737{
1738 struct snd_pcm_hw_params *params = arg;
1739 snd_pcm_format_t format;
1740 int channels;
1741 ssize_t frame_size;
1742
1743 params->fifo_size = substream->runtime->hw.fifo_size;
1744 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1745 format = params_format(params);
1746 channels = params_channels(params);
1747 frame_size = snd_pcm_format_size(format, channels);
1748 if (frame_size > 0)
1749 params->fifo_size /= frame_size;
1750 }
1751 return 0;
1752}
1753
1754/**
1755 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1756 * @substream: the pcm substream instance
1757 * @cmd: ioctl command
1758 * @arg: ioctl argument
1759 *
1760 * Processes the generic ioctl commands for PCM.
1761 * Can be passed as the ioctl callback for PCM ops.
1762 *
1763 * Return: Zero if successful, or a negative error code on failure.
1764 */
1765int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1766 unsigned int cmd, void *arg)
1767{
1768 switch (cmd) {
1769 case SNDRV_PCM_IOCTL1_RESET:
1770 return snd_pcm_lib_ioctl_reset(substream, arg);
1771 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1772 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1773 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1774 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1775 }
1776 return -ENXIO;
1777}
1778EXPORT_SYMBOL(snd_pcm_lib_ioctl);
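
/*
 * Example (illustrative sketch): drivers that need no special ioctl handling
 * simply plug this helper into their ops table ("my_" callbacks are
 * hypothetical):
 *
 *     static const struct snd_pcm_ops my_playback_ops = {
 *             .open = my_pcm_open,
 *             .close = my_pcm_close,
 *             .ioctl = snd_pcm_lib_ioctl,
 *             .hw_params = my_pcm_hw_params,
 *             .hw_free = my_pcm_hw_free,
 *             .prepare = my_pcm_prepare,
 *             .trigger = my_pcm_trigger,
 *             .pointer = my_pcm_pointer,
 *     };
 */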
1779
1780/**
1781 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1782 * under acquired lock of PCM substream.
1783 * @substream: the instance of pcm substream.
1784 *
1785 * This function is called when a batch of audio data frames, the same size as the period of the
1786 * buffer, has been processed in the audio data transmission.
1787 *
1788 * The call updates the runtime status with the latest position of the audio data transmission,
1789 * checks for overruns and underruns of the buffer, wakes up user processes waiting for available
1790 * audio data frames, samples the audio timestamp, and stops or drains the PCM substream according
1791 * to the configured thresholds.
1792 *
1793 * The function is intended for the case that the PCM driver operates on audio data frames under
1794 * the acquired lock of the PCM substream, e.g. in a callback of any operation of &snd_pcm_ops in
1795 * process context. In interrupt context it's preferable to use ``snd_pcm_period_elapsed()``
1796 * instead, since the lock of the PCM substream should be acquired in advance.
1797 *
1798 * Developers should keep in mind that some callbacks in &snd_pcm_ops are invoked by the call of
1799 * this function:
1800 *
1801 * - .pointer - to retrieve the current position of the audio data transmission by frame count or XRUN state.
1802 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1803 * - .get_time_info - to retrieve the audio timestamp if needed.
1804 *
1805 * Even if more than one period has elapsed since the last call, call this only once.
1806 */
1807void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1808{
1809 struct snd_pcm_runtime *runtime;
1810
1811 if (PCM_RUNTIME_CHECK(substream))
1812 return;
1813 runtime = substream->runtime;
1814
1815 if (!snd_pcm_running(substream) ||
1816 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1817 goto _end;
1818
1819#ifdef CONFIG_SND_PCM_TIMER
1820 if (substream->timer_running)
1821 snd_timer_interrupt(substream->timer, 1);
1822#endif
1823 _end:
1824 kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1825}
1826EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
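
/*
 * Example (illustrative sketch): a driver that advances the transfer from its
 * .ack callback (which already runs with the stream lock held) would report
 * the elapsed period like this ("my_advance_transfer" is a hypothetical
 * helper):
 *
 *     static int my_pcm_ack(struct snd_pcm_substream *substream)
 *     {
 *             int err = my_advance_transfer(substream);
 *
 *             if (err < 0)
 *                     return err;
 *             snd_pcm_period_elapsed_under_stream_lock(substream);
 *             return 0;
 *     }
 */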
1827
1828/**
1829 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1830 * PCM substream.
1831 * @substream: the instance of PCM substream.
1832 *
1833 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that
1834 * it acquires the lock of the PCM substream by itself.
1835 *
1836 * It's typically called from an IRQ handler when a hardware IRQ occurs, to signal that a batch of
1837 * audio data frames, the same size as the period of the buffer, has been processed in the audio
1838 * data transmission.
1839 */
1840void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1841{
1842 unsigned long flags;
1843
1844 if (snd_BUG_ON(!substream))
1845 return;
1846
1847 snd_pcm_stream_lock_irqsave(substream, flags);
1848 snd_pcm_period_elapsed_under_stream_lock(substream);
1849 snd_pcm_stream_unlock_irqrestore(substream, flags);
1850}
1851EXPORT_SYMBOL(snd_pcm_period_elapsed);
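
/*
 * Example (illustrative sketch): the typical call site is the interrupt
 * handler, once the hardware signals that a period has completed ("my_chip"
 * and its helpers are hypothetical):
 *
 *     static irqreturn_t my_interrupt(int irq, void *dev_id)
 *     {
 *             struct my_chip *chip = dev_id;
 *
 *             if (!my_irq_is_period_done(chip))
 *                     return IRQ_NONE;
 *             my_ack_irq(chip);
 *             snd_pcm_period_elapsed(chip->substream);
 *             return IRQ_HANDLED;
 *     }
 */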
1852
/*
 * Wait until avail_min data becomes available.
 * Returns a negative error code if any error occurs during the operation.
 * The available space is stored in availp.  When err = 0 and avail = 0
 * on the capture stream, it indicates the stream is in DRAINING state.
 */
1859static int wait_for_avail(struct snd_pcm_substream *substream,
1860 snd_pcm_uframes_t *availp)
1861{
1862 struct snd_pcm_runtime *runtime = substream->runtime;
1863 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1864 wait_queue_entry_t wait;
1865 int err = 0;
1866 snd_pcm_uframes_t avail = 0;
1867 long wait_time, tout;
1868
1869 init_waitqueue_entry(&wait, current);
1870 set_current_state(TASK_INTERRUPTIBLE);
1871 add_wait_queue(&runtime->tsleep, &wait);
1872
1873 if (runtime->no_period_wakeup)
1874 wait_time = MAX_SCHEDULE_TIMEOUT;
1875 else {
1876 /* use wait time from substream if available */
1877 if (substream->wait_time) {
1878 wait_time = substream->wait_time;
1879 } else {
1880 wait_time = 10;
1881
1882 if (runtime->rate) {
1883 long t = runtime->period_size * 2 /
1884 runtime->rate;
1885 wait_time = max(t, wait_time);
1886 }
1887 wait_time = msecs_to_jiffies(wait_time * 1000);
1888 }
1889 }
1890
1891 for (;;) {
1892 if (signal_pending(current)) {
1893 err = -ERESTARTSYS;
1894 break;
1895 }
1896
		/*
		 * We need to check whether space has already become available
		 * (and thus the wakeup has already happened) first, to close
		 * the race with space becoming available. This check must
		 * happen after we have been added to the waitqueue and the
		 * current state has been set to INTERRUPTIBLE.
		 */
1904 avail = snd_pcm_avail(substream);
1905 if (avail >= runtime->twake)
1906 break;
1907 snd_pcm_stream_unlock_irq(substream);
1908
1909 tout = schedule_timeout(wait_time);
1910
1911 snd_pcm_stream_lock_irq(substream);
1912 set_current_state(TASK_INTERRUPTIBLE);
1913 switch (runtime->status->state) {
1914 case SNDRV_PCM_STATE_SUSPENDED:
1915 err = -ESTRPIPE;
1916 goto _endloop;
1917 case SNDRV_PCM_STATE_XRUN:
1918 err = -EPIPE;
1919 goto _endloop;
1920 case SNDRV_PCM_STATE_DRAINING:
1921 if (is_playback)
1922 err = -EPIPE;
1923 else
1924 avail = 0; /* indicate draining */
1925 goto _endloop;
1926 case SNDRV_PCM_STATE_OPEN:
1927 case SNDRV_PCM_STATE_SETUP:
1928 case SNDRV_PCM_STATE_DISCONNECTED:
1929 err = -EBADFD;
1930 goto _endloop;
1931 case SNDRV_PCM_STATE_PAUSED:
1932 continue;
1933 }
1934 if (!tout) {
1935 pcm_dbg(substream->pcm,
1936 "%s write error (DMA or IRQ trouble?)\n",
1937 is_playback ? "playback" : "capture");
1938 err = -EIO;
1939 break;
1940 }
1941 }
1942 _endloop:
1943 set_current_state(TASK_RUNNING);
1944 remove_wait_queue(&runtime->tsleep, &wait);
1945 *availp = avail;
1946 return err;
1947}
1948
1949typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1950 int channel, unsigned long hwoff,
1951 void *buf, unsigned long bytes);
1952
1953typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1954 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1955
1956/* calculate the target DMA-buffer position to be written/read */
1957static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1958 int channel, unsigned long hwoff)
1959{
1960 return runtime->dma_area + hwoff +
1961 channel * (runtime->dma_bytes / runtime->channels);
1962}
1963
/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1965static int default_write_copy(struct snd_pcm_substream *substream,
1966 int channel, unsigned long hwoff,
1967 void *buf, unsigned long bytes)
1968{
1969 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1970 (void __user *)buf, bytes))
1971 return -EFAULT;
1972 return 0;
1973}
1974
1975/* default copy_kernel ops for write */
1976static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1977 int channel, unsigned long hwoff,
1978 void *buf, unsigned long bytes)
1979{
1980 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1981 return 0;
1982}
1983
/* fill silence instead of copying data; called as a transfer helper
 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
 * a NULL buffer is passed
 */
1988static int fill_silence(struct snd_pcm_substream *substream, int channel,
1989 unsigned long hwoff, void *buf, unsigned long bytes)
1990{
1991 struct snd_pcm_runtime *runtime = substream->runtime;
1992
1993 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1994 return 0;
1995 if (substream->ops->fill_silence)
1996 return substream->ops->fill_silence(substream, channel,
1997 hwoff, bytes);
1998
1999 snd_pcm_format_set_silence(runtime->format,
2000 get_dma_ptr(runtime, channel, hwoff),
2001 bytes_to_samples(runtime, bytes));
2002 return 0;
2003}
2004
/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
2006static int default_read_copy(struct snd_pcm_substream *substream,
2007 int channel, unsigned long hwoff,
2008 void *buf, unsigned long bytes)
2009{
2010 if (copy_to_user((void __user *)buf,
2011 get_dma_ptr(substream->runtime, channel, hwoff),
2012 bytes))
2013 return -EFAULT;
2014 return 0;
2015}
2016
2017/* default copy_kernel ops for read */
2018static int default_read_copy_kernel(struct snd_pcm_substream *substream,
2019 int channel, unsigned long hwoff,
2020 void *buf, unsigned long bytes)
2021{
2022 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
2023 return 0;
2024}
2025
2026/* call transfer function with the converted pointers and sizes;
2027 * for interleaved mode, it's one shot for all samples
2028 */
2029static int interleaved_copy(struct snd_pcm_substream *substream,
2030 snd_pcm_uframes_t hwoff, void *data,
2031 snd_pcm_uframes_t off,
2032 snd_pcm_uframes_t frames,
2033 pcm_transfer_f transfer)
2034{
2035 struct snd_pcm_runtime *runtime = substream->runtime;
2036
2037 /* convert to bytes */
2038 hwoff = frames_to_bytes(runtime, hwoff);
2039 off = frames_to_bytes(runtime, off);
2040 frames = frames_to_bytes(runtime, frames);
2041 return transfer(substream, 0, hwoff, data + off, frames);
2042}
2043
2044/* call transfer function with the converted pointers and sizes for each
2045 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2046 */
2047static int noninterleaved_copy(struct snd_pcm_substream *substream,
2048 snd_pcm_uframes_t hwoff, void *data,
2049 snd_pcm_uframes_t off,
2050 snd_pcm_uframes_t frames,
2051 pcm_transfer_f transfer)
2052{
2053 struct snd_pcm_runtime *runtime = substream->runtime;
2054 int channels = runtime->channels;
2055 void **bufs = data;
2056 int c, err;
2057
	/* convert to bytes; note that it's not frames_to_bytes() here.
	 * in non-interleaved mode we copy each channel separately, so each
	 * copy covers n_samples bytes, and the copies of all channels
	 * together make up whole frames.
	 */
2062 off = samples_to_bytes(runtime, off);
2063 frames = samples_to_bytes(runtime, frames);
2064 hwoff = samples_to_bytes(runtime, hwoff);
2065 for (c = 0; c < channels; ++c, ++bufs) {
2066 if (!data || !*bufs)
2067 err = fill_silence(substream, c, hwoff, NULL, frames);
2068 else
2069 err = transfer(substream, c, hwoff, *bufs + off,
2070 frames);
2071 if (err < 0)
2072 return err;
2073 }
2074 return 0;
2075}
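
/*
 * Worked example: with S16_LE and 2 channels, one frame is 4 bytes but one
 * sample is 2 bytes.  For frames = 64, interleaved_copy() issues a single
 * 256-byte transfer, while noninterleaved_copy() issues one 128-byte
 * transfer per channel (hence samples_to_bytes(), not frames_to_bytes()).
 */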
2076
2077/* fill silence on the given buffer position;
2078 * called from snd_pcm_playback_silence()
2079 */
2080static int fill_silence_frames(struct snd_pcm_substream *substream,
2081 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2082{
2083 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2084 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2085 return interleaved_copy(substream, off, NULL, 0, frames,
2086 fill_silence);
2087 else
2088 return noninterleaved_copy(substream, off, NULL, 0, frames,
2089 fill_silence);
2090}
2091
2092/* sanity-check for read/write methods */
2093static int pcm_sanity_check(struct snd_pcm_substream *substream)
2094{
2095 struct snd_pcm_runtime *runtime;
2096 if (PCM_RUNTIME_CHECK(substream))
2097 return -ENXIO;
2098 runtime = substream->runtime;
2099 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2100 return -EINVAL;
2101 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2102 return -EBADFD;
2103 return 0;
2104}
2105
2106static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2107{
2108 switch (runtime->status->state) {
2109 case SNDRV_PCM_STATE_PREPARED:
2110 case SNDRV_PCM_STATE_RUNNING:
2111 case SNDRV_PCM_STATE_PAUSED:
2112 return 0;
2113 case SNDRV_PCM_STATE_XRUN:
2114 return -EPIPE;
2115 case SNDRV_PCM_STATE_SUSPENDED:
2116 return -ESTRPIPE;
2117 default:
2118 return -EBADFD;
2119 }
2120}
2121
/* update to the given appl_ptr and call the ack callback if needed;
 * when an error is returned, roll back to the original value
 */
2125int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2126 snd_pcm_uframes_t appl_ptr)
2127{
2128 struct snd_pcm_runtime *runtime = substream->runtime;
2129 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2130 int ret;
2131
2132 if (old_appl_ptr == appl_ptr)
2133 return 0;
2134
2135 runtime->control->appl_ptr = appl_ptr;
2136 if (substream->ops->ack) {
2137 ret = substream->ops->ack(substream);
2138 if (ret < 0) {
2139 runtime->control->appl_ptr = old_appl_ptr;
2140 return ret;
2141 }
2142 }
2143
2144 trace_applptr(substream, old_appl_ptr, appl_ptr);
2145
2146 return 0;
2147}
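
/*
 * Example usage (illustrative sketch, not a verbatim driver): a driver's
 * .ack callback may reject an appl_ptr move, e.g. when an intermediate FIFO
 * has no room yet; pcm_lib_apply_appl_ptr() then restores the old pointer
 * and propagates the error.  my_fifo_has_room() is hypothetical.
 *
 *	static int my_indirect_ack(struct snd_pcm_substream *substream)
 *	{
 *		if (!my_fifo_has_room(substream))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */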
2148
2149/* the common loop for read/write data */
2150snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2151 void *data, bool interleaved,
2152 snd_pcm_uframes_t size, bool in_kernel)
2153{
2154 struct snd_pcm_runtime *runtime = substream->runtime;
2155 snd_pcm_uframes_t xfer = 0;
2156 snd_pcm_uframes_t offset = 0;
2157 snd_pcm_uframes_t avail;
2158 pcm_copy_f writer;
2159 pcm_transfer_f transfer;
2160 bool nonblock;
2161 bool is_playback;
2162 int err;
2163
2164 err = pcm_sanity_check(substream);
2165 if (err < 0)
2166 return err;
2167
2168 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2169 if (interleaved) {
2170 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2171 runtime->channels > 1)
2172 return -EINVAL;
2173 writer = interleaved_copy;
2174 } else {
2175 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2176 return -EINVAL;
2177 writer = noninterleaved_copy;
2178 }
2179
2180 if (!data) {
2181 if (is_playback)
2182 transfer = fill_silence;
2183 else
2184 return -EINVAL;
2185 } else if (in_kernel) {
2186 if (substream->ops->copy_kernel)
2187 transfer = substream->ops->copy_kernel;
2188 else
2189 transfer = is_playback ?
2190 default_write_copy_kernel : default_read_copy_kernel;
2191 } else {
2192 if (substream->ops->copy_user)
2193 transfer = (pcm_transfer_f)substream->ops->copy_user;
2194 else
2195 transfer = is_playback ?
2196 default_write_copy : default_read_copy;
2197 }
2198
2199 if (size == 0)
2200 return 0;
2201
2202 nonblock = !!(substream->f_flags & O_NONBLOCK);
2203
2204 snd_pcm_stream_lock_irq(substream);
2205 err = pcm_accessible_state(runtime);
2206 if (err < 0)
2207 goto _end_unlock;
2208
2209 runtime->twake = runtime->control->avail_min ? : 1;
2210 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2211 snd_pcm_update_hw_ptr(substream);
2212
	/*
	 * For capture, auto-start the stream only when the requested size
	 * reaches start_threshold; if size < start_threshold we may wait
	 * indefinitely, relying on another thread to start the capture.
	 */
2217 if (!is_playback &&
2218 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2219 size >= runtime->start_threshold) {
2220 err = snd_pcm_start(substream);
2221 if (err < 0)
2222 goto _end_unlock;
2223 }
2224
2225 avail = snd_pcm_avail(substream);
2226
2227 while (size > 0) {
2228 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2229 snd_pcm_uframes_t cont;
2230 if (!avail) {
2231 if (!is_playback &&
2232 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2233 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2234 goto _end_unlock;
2235 }
2236 if (nonblock) {
2237 err = -EAGAIN;
2238 goto _end_unlock;
2239 }
2240 runtime->twake = min_t(snd_pcm_uframes_t, size,
2241 runtime->control->avail_min ? : 1);
2242 err = wait_for_avail(substream, &avail);
2243 if (err < 0)
2244 goto _end_unlock;
2245 if (!avail)
2246 continue; /* draining */
2247 }
2248 frames = size > avail ? avail : size;
2249 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2250 appl_ofs = appl_ptr % runtime->buffer_size;
2251 cont = runtime->buffer_size - appl_ofs;
2252 if (frames > cont)
2253 frames = cont;
2254 if (snd_BUG_ON(!frames)) {
2255 err = -EINVAL;
2256 goto _end_unlock;
2257 }
2258 snd_pcm_stream_unlock_irq(substream);
2259 err = writer(substream, appl_ofs, data, offset, frames,
2260 transfer);
2261 snd_pcm_stream_lock_irq(substream);
2262 if (err < 0)
2263 goto _end_unlock;
2264 err = pcm_accessible_state(runtime);
2265 if (err < 0)
2266 goto _end_unlock;
2267 appl_ptr += frames;
2268 if (appl_ptr >= runtime->boundary)
2269 appl_ptr -= runtime->boundary;
2270 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2271 if (err < 0)
2272 goto _end_unlock;
2273
2274 offset += frames;
2275 size -= frames;
2276 xfer += frames;
2277 avail -= frames;
2278 if (is_playback &&
2279 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2280 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2281 err = snd_pcm_start(substream);
2282 if (err < 0)
2283 goto _end_unlock;
2284 }
2285 }
2286 _end_unlock:
2287 runtime->twake = 0;
2288 if (xfer > 0 && err >= 0)
2289 snd_pcm_update_state(substream, runtime);
2290 snd_pcm_stream_unlock_irq(substream);
2291 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2292}
2293EXPORT_SYMBOL(__snd_pcm_lib_xfer);
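
/*
 * Note: the thin wrappers declared in <sound/pcm.h>, such as
 * snd_pcm_lib_write(), snd_pcm_lib_read() and snd_pcm_kernel_write(), are
 * expected to reduce to calls of roughly this shape (sketch, not verbatim):
 *
 *	__snd_pcm_lib_xfer(substream, (void __force *)buf,
 *			   true,	// interleaved buffer layout
 *			   frames,	// transfer size in frames
 *			   false);	// in_kernel: false for user-space buffers
 */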
2294
2295/*
2296 * standard channel mapping helpers
2297 */
2298
/* default channel maps for multi-channel playback, up to 8 channels */
2300const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2301 { .channels = 1,
2302 .map = { SNDRV_CHMAP_MONO } },
2303 { .channels = 2,
2304 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2305 { .channels = 4,
2306 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2307 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2308 { .channels = 6,
2309 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2310 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2311 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2312 { .channels = 8,
2313 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2314 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2315 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2316 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2317 { }
2318};
2319EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2320
2321/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2322const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2323 { .channels = 1,
2324 .map = { SNDRV_CHMAP_MONO } },
2325 { .channels = 2,
2326 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2327 { .channels = 4,
2328 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2329 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2330 { .channels = 6,
2331 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2332 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2333 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2334 { .channels = 8,
2335 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2336 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2337 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2338 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2339 { }
2340};
2341EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2342
2343static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2344{
2345 if (ch > info->max_channels)
2346 return false;
2347 return !info->channel_mask || (info->channel_mask & (1U << ch));
2348}
2349
2350static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2351 struct snd_ctl_elem_info *uinfo)
2352{
2353 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2354
2355 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2356 uinfo->count = info->max_channels;
2357 uinfo->value.integer.min = 0;
2358 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2359 return 0;
2360}
2361
/* get callback for the channel map ctl element
 * stores the channel positions of the first map matching the current
 * channel count
 */
2365static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2366 struct snd_ctl_elem_value *ucontrol)
2367{
2368 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2369 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2370 struct snd_pcm_substream *substream;
2371 const struct snd_pcm_chmap_elem *map;
2372
2373 if (!info->chmap)
2374 return -EINVAL;
2375 substream = snd_pcm_chmap_substream(info, idx);
2376 if (!substream)
2377 return -ENODEV;
2378 memset(ucontrol->value.integer.value, 0,
2379 sizeof(long) * info->max_channels);
2380 if (!substream->runtime)
2381 return 0; /* no channels set */
2382 for (map = info->chmap; map->channels; map++) {
2383 int i;
2384 if (map->channels == substream->runtime->channels &&
2385 valid_chmap_channels(info, map->channels)) {
2386 for (i = 0; i < map->channels; i++)
2387 ucontrol->value.integer.value[i] = map->map[i];
2388 return 0;
2389 }
2390 }
2391 return -EINVAL;
2392}
2393
/* tlv callback for the channel map ctl element
 * expands the pre-defined channel maps in the form of TLV
 */
2397static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2398 unsigned int size, unsigned int __user *tlv)
2399{
2400 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2401 const struct snd_pcm_chmap_elem *map;
2402 unsigned int __user *dst;
2403 int c, count = 0;
2404
2405 if (!info->chmap)
2406 return -EINVAL;
2407 if (size < 8)
2408 return -ENOMEM;
2409 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2410 return -EFAULT;
2411 size -= 8;
2412 dst = tlv + 2;
2413 for (map = info->chmap; map->channels; map++) {
2414 int chs_bytes = map->channels * 4;
2415 if (!valid_chmap_channels(info, map->channels))
2416 continue;
2417 if (size < 8)
2418 return -ENOMEM;
2419 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2420 put_user(chs_bytes, dst + 1))
2421 return -EFAULT;
2422 dst += 2;
2423 size -= 8;
2424 count += 8;
2425 if (size < chs_bytes)
2426 return -ENOMEM;
2427 size -= chs_bytes;
2428 count += chs_bytes;
2429 for (c = 0; c < map->channels; c++) {
2430 if (put_user(map->map[c], dst))
2431 return -EFAULT;
2432 dst++;
2433 }
2434 }
2435 if (put_user(count, tlv + 1))
2436 return -EFAULT;
2437 return 0;
2438}
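
/*
 * Worked example: for a single stereo map entry, the TLV data written above
 * is laid out as the following 32-bit words:
 *
 *	[SNDRV_CTL_TLVT_CONTAINER] [16]
 *	[SNDRV_CTL_TLVT_CHMAP_FIXED] [8] [SNDRV_CHMAP_FL] [SNDRV_CHMAP_FR]
 *
 * i.e. the container length (16 bytes) covers the inner header (8 bytes)
 * plus the two channel positions (8 bytes).
 */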
2439
2440static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2441{
2442 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2443 info->pcm->streams[info->stream].chmap_kctl = NULL;
2444 kfree(info);
2445}
2446
2447/**
2448 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2449 * @pcm: the assigned PCM instance
2450 * @stream: stream direction
2451 * @chmap: channel map elements (for query)
2452 * @max_channels: the max number of channels for the stream
2453 * @private_value: the value passed to each kcontrol's private_value field
2454 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2455 *
2456 * Create channel-mapping control elements assigned to the given PCM stream(s).
2457 * Return: Zero if successful, or a negative error value.
2458 */
2459int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2460 const struct snd_pcm_chmap_elem *chmap,
2461 int max_channels,
2462 unsigned long private_value,
2463 struct snd_pcm_chmap **info_ret)
2464{
2465 struct snd_pcm_chmap *info;
2466 struct snd_kcontrol_new knew = {
2467 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2468 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2469 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2470 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2471 .info = pcm_chmap_ctl_info,
2472 .get = pcm_chmap_ctl_get,
2473 .tlv.c = pcm_chmap_ctl_tlv,
2474 };
2475 int err;
2476
2477 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2478 return -EBUSY;
2479 info = kzalloc(sizeof(*info), GFP_KERNEL);
2480 if (!info)
2481 return -ENOMEM;
2482 info->pcm = pcm;
2483 info->stream = stream;
2484 info->chmap = chmap;
2485 info->max_channels = max_channels;
2486 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2487 knew.name = "Playback Channel Map";
2488 else
2489 knew.name = "Capture Channel Map";
2490 knew.device = pcm->device;
2491 knew.count = pcm->streams[stream].substream_count;
2492 knew.private_value = private_value;
2493 info->kctl = snd_ctl_new1(&knew, info);
2494 if (!info->kctl) {
2495 kfree(info);
2496 return -ENOMEM;
2497 }
2498 info->kctl->private_free = pcm_chmap_ctl_private_free;
2499 err = snd_ctl_add(pcm->card, info->kctl);
2500 if (err < 0)
2501 return err;
2502 pcm->streams[stream].chmap_kctl = info->kctl;
2503 if (info_ret)
2504 *info_ret = info;
2505 return 0;
2506}
2507EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
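
/*
 * Example usage (illustrative sketch, not a verbatim driver): a typical
 * driver registers the standard playback channel maps right after creating
 * its PCM device; chip->pcm is hypothetical.
 *
 *	err = snd_pcm_add_chmap_ctls(chip->pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */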
36
37static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38 snd_pcm_uframes_t ptr,
39 snd_pcm_uframes_t new_ptr)
40{
41 snd_pcm_sframes_t delta;
42
43 delta = new_ptr - ptr;
44 if (delta == 0)
45 return;
46 if (delta < 0)
47 delta += runtime->boundary;
48 if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49 runtime->silence_filled -= delta;
50 else
51 runtime->silence_filled = 0;
52 runtime->silence_start = new_ptr;
53}
54
55/*
56 * fill ring buffer with silence
57 * runtime->silence_start: starting pointer to silence area
58 * runtime->silence_filled: size filled with silence
59 * runtime->silence_threshold: threshold from application
60 * runtime->silence_size: maximal size from application
61 *
62 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
63 */
64void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
65{
66 struct snd_pcm_runtime *runtime = substream->runtime;
67 snd_pcm_uframes_t frames, ofs, transfer;
68 int err;
69
70 if (runtime->silence_size < runtime->boundary) {
71 snd_pcm_sframes_t noise_dist;
72 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73 update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74 /* initialization outside pointer updates */
75 if (new_hw_ptr == ULONG_MAX)
76 new_hw_ptr = runtime->status->hw_ptr;
77 /* get hw_avail with the boundary crossing */
78 noise_dist = appl_ptr - new_hw_ptr;
79 if (noise_dist < 0)
80 noise_dist += runtime->boundary;
81 /* total noise distance */
82 noise_dist += runtime->silence_filled;
83 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
84 return;
85 frames = runtime->silence_threshold - noise_dist;
86 if (frames > runtime->silence_size)
87 frames = runtime->silence_size;
88 } else {
89 /*
90 * This filling mode aims at free-running mode (used for example by dmix),
91 * which doesn't update the application pointer.
92 */
93 snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94 if (new_hw_ptr == ULONG_MAX) {
95 /*
96 * Initialization, fill the whole unused buffer with silence.
97 *
98 * Usually, this is entered while stopped, before data is queued,
99 * so both pointers are expected to be zero.
100 */
101 snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
102 if (avail < 0)
103 avail += runtime->boundary;
104 /*
105 * In free-running mode, appl_ptr will be zero even while running,
106 * so we end up with a huge number. There is no useful way to
107 * handle this, so we just clear the whole buffer.
108 */
109 runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110 runtime->silence_start = hw_ptr;
111 } else {
112 /* Silence the just played area immediately */
113 update_silence_vars(runtime, hw_ptr, new_hw_ptr);
114 }
115 /*
116 * In this mode, silence_filled actually includes the valid
117 * sample data from the user.
118 */
119 frames = runtime->buffer_size - runtime->silence_filled;
120 }
121 if (snd_BUG_ON(frames > runtime->buffer_size))
122 return;
123 if (frames == 0)
124 return;
125 ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
126 do {
127 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128 err = fill_silence_frames(substream, ofs, transfer);
129 snd_BUG_ON(err < 0);
130 runtime->silence_filled += transfer;
131 frames -= transfer;
132 ofs = 0;
133 } while (frames > 0);
134 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
135}
136
137#ifdef CONFIG_SND_DEBUG
138void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139 char *name, size_t len)
140{
141 snprintf(name, len, "pcmC%dD%d%c:%d",
142 substream->pcm->card->number,
143 substream->pcm->device,
144 substream->stream ? 'c' : 'p',
145 substream->number);
146}
147EXPORT_SYMBOL(snd_pcm_debug_name);
148#endif
149
150#define XRUN_DEBUG_BASIC (1<<0)
151#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
152#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
153
154#ifdef CONFIG_SND_PCM_XRUN_DEBUG
155
156#define xrun_debug(substream, mask) \
157 ((substream)->pstr->xrun_debug & (mask))
158#else
159#define xrun_debug(substream, mask) 0
160#endif
161
162#define dump_stack_on_xrun(substream) do { \
163 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
164 dump_stack(); \
165 } while (0)
166
167/* call with stream lock held */
168void __snd_pcm_xrun(struct snd_pcm_substream *substream)
169{
170 struct snd_pcm_runtime *runtime = substream->runtime;
171
172 trace_xrun(substream);
173 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174 struct timespec64 tstamp;
175
176 snd_pcm_gettime(runtime, &tstamp);
177 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
179 }
180 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
182 char name[16];
183 snd_pcm_debug_name(substream, name, sizeof(name));
184 pcm_warn(substream->pcm, "XRUN: %s\n", name);
185 dump_stack_on_xrun(substream);
186 }
187#ifdef CONFIG_SND_PCM_XRUN_DEBUG
188 substream->xrun_counter++;
189#endif
190}
191
192#ifdef CONFIG_SND_PCM_XRUN_DEBUG
193#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
194 do { \
195 trace_hw_ptr_error(substream, reason); \
196 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
197 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
198 (in_interrupt) ? 'Q' : 'P', ##args); \
199 dump_stack_on_xrun(substream); \
200 } \
201 } while (0)
202
203#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
204
205#define hw_ptr_error(substream, fmt, args...) do { } while (0)
206
207#endif
208
209int snd_pcm_update_state(struct snd_pcm_substream *substream,
210 struct snd_pcm_runtime *runtime)
211{
212 snd_pcm_uframes_t avail;
213
214 avail = snd_pcm_avail(substream);
215 if (avail > runtime->avail_max)
216 runtime->avail_max = avail;
217 if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
218 if (avail >= runtime->buffer_size) {
219 snd_pcm_drain_done(substream);
220 return -EPIPE;
221 }
222 } else {
223 if (avail >= runtime->stop_threshold) {
224 __snd_pcm_xrun(substream);
225 return -EPIPE;
226 }
227 }
228 if (runtime->twake) {
229 if (avail >= runtime->twake)
230 wake_up(&runtime->tsleep);
231 } else if (avail >= runtime->control->avail_min)
232 wake_up(&runtime->sleep);
233 return 0;
234}
235
236static void update_audio_tstamp(struct snd_pcm_substream *substream,
237 struct timespec64 *curr_tstamp,
238 struct timespec64 *audio_tstamp)
239{
240 struct snd_pcm_runtime *runtime = substream->runtime;
241 u64 audio_frames, audio_nsecs;
242 struct timespec64 driver_tstamp;
243
244 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
245 return;
246
247 if (!(substream->ops->get_time_info) ||
248 (runtime->audio_tstamp_report.actual_type ==
249 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
250
251 /*
252 * provide audio timestamp derived from pointer position
253 * add delay only if requested
254 */
255
256 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
257
258 if (runtime->audio_tstamp_config.report_delay) {
259 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
260 audio_frames -= runtime->delay;
261 else
262 audio_frames += runtime->delay;
263 }
264 audio_nsecs = div_u64(audio_frames * 1000000000LL,
265 runtime->rate);
266 *audio_tstamp = ns_to_timespec64(audio_nsecs);
267 }
268
269 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
270 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
271 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
272 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
273 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
274 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
275 }
276
277
278 /*
279 * re-take a driver timestamp to let apps detect if the reference tstamp
280 * read by low-level hardware was provided with a delay
281 */
282 snd_pcm_gettime(substream->runtime, &driver_tstamp);
283 runtime->driver_tstamp = driver_tstamp;
284}
285
286static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
287 unsigned int in_interrupt)
288{
289 struct snd_pcm_runtime *runtime = substream->runtime;
290 snd_pcm_uframes_t pos;
291 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
292 snd_pcm_sframes_t hdelta, delta;
293 unsigned long jdelta;
294 unsigned long curr_jiffies;
295 struct timespec64 curr_tstamp;
296 struct timespec64 audio_tstamp;
297 int crossed_boundary = 0;
298
299 old_hw_ptr = runtime->status->hw_ptr;
300
301 /*
302 * group pointer, time and jiffies reads to allow for more
303 * accurate correlations/corrections.
304 * The values are stored at the end of this routine after
305 * corrections for hw_ptr position
306 */
307 pos = substream->ops->pointer(substream);
308 curr_jiffies = jiffies;
309 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
310 if ((substream->ops->get_time_info) &&
311 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
312 substream->ops->get_time_info(substream, &curr_tstamp,
313 &audio_tstamp,
314 &runtime->audio_tstamp_config,
315 &runtime->audio_tstamp_report);
316
317 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
318 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
319 snd_pcm_gettime(runtime, &curr_tstamp);
320 } else
321 snd_pcm_gettime(runtime, &curr_tstamp);
322 }
323
324 if (pos == SNDRV_PCM_POS_XRUN) {
325 __snd_pcm_xrun(substream);
326 return -EPIPE;
327 }
328 if (pos >= runtime->buffer_size) {
329 if (printk_ratelimit()) {
330 char name[16];
331 snd_pcm_debug_name(substream, name, sizeof(name));
332 pcm_err(substream->pcm,
333 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
334 name, pos, runtime->buffer_size,
335 runtime->period_size);
336 }
337 pos = 0;
338 }
339 pos -= pos % runtime->min_align;
340 trace_hwptr(substream, pos, in_interrupt);
341 hw_base = runtime->hw_ptr_base;
342 new_hw_ptr = hw_base + pos;
343 if (in_interrupt) {
344 /* we know that one period was processed */
345 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
346 delta = runtime->hw_ptr_interrupt + runtime->period_size;
347 if (delta > new_hw_ptr) {
348 /* check for double acknowledged interrupts */
349 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
350 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
351 hw_base += runtime->buffer_size;
352 if (hw_base >= runtime->boundary) {
353 hw_base = 0;
354 crossed_boundary++;
355 }
356 new_hw_ptr = hw_base + pos;
357 goto __delta;
358 }
359 }
360 }
361 /* new_hw_ptr might be lower than old_hw_ptr in case when */
362 /* pointer crosses the end of the ring buffer */
363 if (new_hw_ptr < old_hw_ptr) {
364 hw_base += runtime->buffer_size;
365 if (hw_base >= runtime->boundary) {
366 hw_base = 0;
367 crossed_boundary++;
368 }
369 new_hw_ptr = hw_base + pos;
370 }
371 __delta:
372 delta = new_hw_ptr - old_hw_ptr;
373 if (delta < 0)
374 delta += runtime->boundary;
375
376 if (runtime->no_period_wakeup) {
377 snd_pcm_sframes_t xrun_threshold;
378 /*
379 * Without regular period interrupts, we have to check
380 * the elapsed time to detect xruns.
381 */
382 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
383 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
384 goto no_delta_check;
385 hdelta = jdelta - delta * HZ / runtime->rate;
386 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
387 while (hdelta > xrun_threshold) {
388 delta += runtime->buffer_size;
389 hw_base += runtime->buffer_size;
390 if (hw_base >= runtime->boundary) {
391 hw_base = 0;
392 crossed_boundary++;
393 }
394 new_hw_ptr = hw_base + pos;
395 hdelta -= runtime->hw_ptr_buffer_jiffies;
396 }
397 goto no_delta_check;
398 }
399
400 /* something must be really wrong */
401 if (delta >= runtime->buffer_size + runtime->period_size) {
402 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
403 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
404 substream->stream, (long)pos,
405 (long)new_hw_ptr, (long)old_hw_ptr);
406 return 0;
407 }
408
409 /* Do jiffies check only in xrun_debug mode */
410 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
411 goto no_jiffies_check;
412
	/* Skip the jiffies check for hardware with the BATCH flag.
	 * Such hardware usually just increases the position at each IRQ,
	 * thus it can't give any strange position.
	 */
417 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
418 goto no_jiffies_check;
419 hdelta = delta;
420 if (hdelta < runtime->delay)
421 goto no_jiffies_check;
422 hdelta -= runtime->delay;
423 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
424 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
425 delta = jdelta /
426 (((runtime->period_size * HZ) / runtime->rate)
427 + HZ/100);
		/* move new_hw_ptr according to jiffies, not the pos variable */
429 new_hw_ptr = old_hw_ptr;
430 hw_base = delta;
431 /* use loop to avoid checks for delta overflows */
432 /* the delta value is small or zero in most cases */
433 while (delta > 0) {
434 new_hw_ptr += runtime->period_size;
435 if (new_hw_ptr >= runtime->boundary) {
436 new_hw_ptr -= runtime->boundary;
437 crossed_boundary--;
438 }
439 delta--;
440 }
441 /* align hw_base to buffer_size */
442 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
443 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
444 (long)pos, (long)hdelta,
445 (long)runtime->period_size, jdelta,
446 ((hdelta * HZ) / runtime->rate), hw_base,
447 (unsigned long)old_hw_ptr,
448 (unsigned long)new_hw_ptr);
449 /* reset values to proper state */
450 delta = 0;
451 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
452 }
453 no_jiffies_check:
454 if (delta > runtime->period_size + runtime->period_size / 2) {
455 hw_ptr_error(substream, in_interrupt,
456 "Lost interrupts?",
457 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
458 substream->stream, (long)delta,
459 (long)new_hw_ptr,
460 (long)old_hw_ptr);
461 }
462
463 no_delta_check:
464 if (runtime->status->hw_ptr == new_hw_ptr) {
465 runtime->hw_ptr_jiffies = curr_jiffies;
466 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
467 return 0;
468 }
469
470 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
471 runtime->silence_size > 0)
472 snd_pcm_playback_silence(substream, new_hw_ptr);
473
474 if (in_interrupt) {
475 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
476 if (delta < 0)
477 delta += runtime->boundary;
478 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
479 runtime->hw_ptr_interrupt += delta;
480 if (runtime->hw_ptr_interrupt >= runtime->boundary)
481 runtime->hw_ptr_interrupt -= runtime->boundary;
482 }
483 runtime->hw_ptr_base = hw_base;
484 runtime->status->hw_ptr = new_hw_ptr;
485 runtime->hw_ptr_jiffies = curr_jiffies;
486 if (crossed_boundary) {
487 snd_BUG_ON(crossed_boundary != 1);
488 runtime->hw_ptr_wrap += runtime->boundary;
489 }
490
491 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
492
493 return snd_pcm_update_state(substream, runtime);
494}
495
496/* CAUTION: call it with irq disabled */
497int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
498{
499 return snd_pcm_update_hw_ptr0(substream, 0);
500}
501
502/**
503 * snd_pcm_set_ops - set the PCM operators
504 * @pcm: the pcm instance
505 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
506 * @ops: the operator table
507 *
508 * Sets the given PCM operators to the pcm instance.
509 */
510void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
511 const struct snd_pcm_ops *ops)
512{
513 struct snd_pcm_str *stream = &pcm->streams[direction];
514 struct snd_pcm_substream *substream;
515
516 for (substream = stream->substream; substream != NULL; substream = substream->next)
517 substream->ops = ops;
518}
519EXPORT_SYMBOL(snd_pcm_set_ops);
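
/*
 * Example usage (illustrative sketch, not a verbatim driver): called once per
 * direction after snd_pcm_new(); my_playback_ops and my_capture_ops are
 * hypothetical struct snd_pcm_ops instances provided by the driver.
 *
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 */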
520
521/**
522 * snd_pcm_set_sync_per_card - set the PCM sync id with card number
523 * @substream: the pcm substream
524 * @params: modified hardware parameters
525 * @id: identifier (max 12 bytes)
526 * @len: identifier length (max 12 bytes)
527 *
528 * Sets the PCM sync identifier for the card with zero padding.
529 *
 * User space (or any other user) should use this 16-byte identifier only for
 * comparison, to check whether two IDs are equal or different. A special case
 * is an identifier containing only zeros, which means "empty (not set)". The
 * contents of the identifier should not be interpreted in any other way.
 *
 * The synchronization ID must be unique per clock source (usually one sound
 * card, but multiple sound cards may share one PCM word clock source, which
 * means that they are fully synchronized).
 *
 * This routine composes the ID from the card number in the first four bytes
 * and the 12-byte additional ID. When another composition scheme is used
 * (e.g. for multiple sound cards), make sure that it does not clash with
 * this one.
543 */
544void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream,
545 struct snd_pcm_hw_params *params,
546 const unsigned char *id, unsigned int len)
547{
548 *(__u32 *)params->sync = cpu_to_le32(substream->pcm->card->number);
549 len = min(12, len);
550 memcpy(params->sync + 4, id, len);
551 memset(params->sync + 4 + len, 0, 12 - len);
552}
553EXPORT_SYMBOL_GPL(snd_pcm_set_sync_per_card);
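
/*
 * Worked example: for card number 1 and id = "XYZ" (len = 3), params->sync
 * becomes, byte by byte:
 *
 *	01 00 00 00 'X' 'Y' 'Z' 00 00 00 00 00 00 00 00 00
 *
 * i.e. the little-endian card number in the first four bytes, the id in the
 * next three, and zero padding up to the 16-byte field size.
 */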
554
555/*
556 * Standard ioctl routine
557 */
558
559static inline unsigned int div32(unsigned int a, unsigned int b,
560 unsigned int *r)
561{
562 if (b == 0) {
563 *r = 0;
564 return UINT_MAX;
565 }
566 *r = a % b;
567 return a / b;
568}
569
570static inline unsigned int div_down(unsigned int a, unsigned int b)
571{
572 if (b == 0)
573 return UINT_MAX;
574 return a / b;
575}
576
577static inline unsigned int div_up(unsigned int a, unsigned int b)
578{
579 unsigned int r;
580 unsigned int q;
581 if (b == 0)
582 return UINT_MAX;
583 q = div32(a, b, &r);
584 if (r)
585 ++q;
586 return q;
587}
588
589static inline unsigned int mul(unsigned int a, unsigned int b)
590{
591 if (a == 0)
592 return 0;
593 if (div_down(UINT_MAX, a) < b)
594 return UINT_MAX;
595 return a * b;
596}
597
598static inline unsigned int muldiv32(unsigned int a, unsigned int b,
599 unsigned int c, unsigned int *r)
600{
601 u_int64_t n = (u_int64_t) a * b;
602 if (c == 0) {
603 *r = 0;
604 return UINT_MAX;
605 }
606 n = div_u64_rem(n, c, r);
607 if (n >= UINT_MAX) {
608 *r = 0;
609 return UINT_MAX;
610 }
611 return n;
612}
613
614/**
615 * snd_interval_refine - refine the interval value of configurator
616 * @i: the interval value to refine
617 * @v: the interval value to refer to
618 *
619 * Refines the interval value with the reference value.
620 * The interval is changed to the range satisfying both intervals.
 * The interval status (min, max, integer, etc.) is re-evaluated.
622 *
623 * Return: Positive if the value is changed, zero if it's not changed, or a
624 * negative error code.
625 */
626int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
627{
628 int changed = 0;
629 if (snd_BUG_ON(snd_interval_empty(i)))
630 return -EINVAL;
631 if (i->min < v->min) {
632 i->min = v->min;
633 i->openmin = v->openmin;
634 changed = 1;
635 } else if (i->min == v->min && !i->openmin && v->openmin) {
636 i->openmin = 1;
637 changed = 1;
638 }
639 if (i->max > v->max) {
640 i->max = v->max;
641 i->openmax = v->openmax;
642 changed = 1;
643 } else if (i->max == v->max && !i->openmax && v->openmax) {
644 i->openmax = 1;
645 changed = 1;
646 }
647 if (!i->integer && v->integer) {
648 i->integer = 1;
649 changed = 1;
650 }
651 if (i->integer) {
652 if (i->openmin) {
653 i->min++;
654 i->openmin = 0;
655 }
656 if (i->openmax) {
657 i->max--;
658 i->openmax = 0;
659 }
660 } else if (!i->openmin && !i->openmax && i->min == i->max)
661 i->integer = 1;
662 if (snd_interval_checkempty(i)) {
663 snd_interval_none(i);
664 return -EINVAL;
665 }
666 return changed;
667}
668EXPORT_SYMBOL(snd_interval_refine);
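
/*
 * Worked example: refining i = [8000, 96000] with v = [44100, 44100] narrows
 * i to the single value 44100 and returns 1 (changed); since the result is a
 * single closed value, the interval is also marked as integer.
 */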
669
670static int snd_interval_refine_first(struct snd_interval *i)
671{
672 const unsigned int last_max = i->max;
673
674 if (snd_BUG_ON(snd_interval_empty(i)))
675 return -EINVAL;
676 if (snd_interval_single(i))
677 return 0;
678 i->max = i->min;
679 if (i->openmin)
680 i->max++;
681 /* only exclude max value if also excluded before refine */
682 i->openmax = (i->openmax && i->max >= last_max);
683 return 1;
684}
685
686static int snd_interval_refine_last(struct snd_interval *i)
687{
688 const unsigned int last_min = i->min;
689
690 if (snd_BUG_ON(snd_interval_empty(i)))
691 return -EINVAL;
692 if (snd_interval_single(i))
693 return 0;
694 i->min = i->max;
695 if (i->openmax)
696 i->min--;
697 /* only exclude min value if also excluded before refine */
698 i->openmin = (i->openmin && i->min <= last_min);
699 return 1;
700}
701
702void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
703{
704 if (a->empty || b->empty) {
705 snd_interval_none(c);
706 return;
707 }
708 c->empty = 0;
709 c->min = mul(a->min, b->min);
710 c->openmin = (a->openmin || b->openmin);
711 c->max = mul(a->max, b->max);
712 c->openmax = (a->openmax || b->openmax);
713 c->integer = (a->integer && b->integer);
714}
715
716/**
717 * snd_interval_div - refine the interval value with division
718 * @a: dividend
719 * @b: divisor
720 * @c: quotient
721 *
722 * c = a / b
725 */
726void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
727{
728 unsigned int r;
729 if (a->empty || b->empty) {
730 snd_interval_none(c);
731 return;
732 }
733 c->empty = 0;
734 c->min = div32(a->min, b->max, &r);
735 c->openmin = (r || a->openmin || b->openmax);
736 if (b->min > 0) {
737 c->max = div32(a->max, b->min, &r);
738 if (r) {
739 c->max++;
740 c->openmax = 1;
741 } else
742 c->openmax = (a->openmax || b->openmin);
743 } else {
744 c->max = UINT_MAX;
745 c->openmax = 0;
746 }
747 c->integer = 0;
748}
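
/*
 * Worked example: with a = [16, 32] and b = [4, 8], both closed, the result
 * is c = [2, 8]: c->min = 16/8 = 2 and c->max = 32/4 = 8, with no open ends
 * since both divisions are exact.
 */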
749
750/**
751 * snd_interval_muldivk - refine the interval value
752 * @a: dividend 1
753 * @b: dividend 2
754 * @k: divisor (as integer)
755 * @c: result
756 *
757 * c = a * b / k
760 */
761void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
762 unsigned int k, struct snd_interval *c)
763{
764 unsigned int r;
765 if (a->empty || b->empty) {
766 snd_interval_none(c);
767 return;
768 }
769 c->empty = 0;
770 c->min = muldiv32(a->min, b->min, k, &r);
771 c->openmin = (r || a->openmin || b->openmin);
772 c->max = muldiv32(a->max, b->max, k, &r);
773 if (r) {
774 c->max++;
775 c->openmax = 1;
776 } else
777 c->openmax = (a->openmax || b->openmax);
778 c->integer = 0;
779}
780
781/**
782 * snd_interval_mulkdiv - refine the interval value
783 * @a: dividend 1
784 * @k: dividend 2 (as integer)
785 * @b: divisor
786 * @c: result
787 *
788 * c = a * k / b
791 */
792void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
793 const struct snd_interval *b, struct snd_interval *c)
794{
795 unsigned int r;
796 if (a->empty || b->empty) {
797 snd_interval_none(c);
798 return;
799 }
800 c->empty = 0;
801 c->min = muldiv32(a->min, k, b->max, &r);
802 c->openmin = (r || a->openmin || b->openmax);
803 if (b->min > 0) {
804 c->max = muldiv32(a->max, k, b->min, &r);
805 if (r) {
806 c->max++;
807 c->openmax = 1;
808 } else
809 c->openmax = (a->openmax || b->openmin);
810 } else {
811 c->max = UINT_MAX;
812 c->openmax = 0;
813 }
814 c->integer = 0;
815}
816
817/* ---- */
818
819
820/**
821 * snd_interval_ratnum - refine the interval value
822 * @i: interval to refine
 * @rats_count: number of struct snd_ratnum entries
 * @rats: struct snd_ratnum array
825 * @nump: pointer to store the resultant numerator
826 * @denp: pointer to store the resultant denominator
827 *
828 * Return: Positive if the value is changed, zero if it's not changed, or a
829 * negative error code.
830 */
831int snd_interval_ratnum(struct snd_interval *i,
832 unsigned int rats_count, const struct snd_ratnum *rats,
833 unsigned int *nump, unsigned int *denp)
834{
835 unsigned int best_num, best_den;
836 int best_diff;
837 unsigned int k;
838 struct snd_interval t;
839 int err;
840 unsigned int result_num, result_den;
841 int result_diff;
842
843 best_num = best_den = best_diff = 0;
844 for (k = 0; k < rats_count; ++k) {
845 unsigned int num = rats[k].num;
846 unsigned int den;
847 unsigned int q = i->min;
848 int diff;
849 if (q == 0)
850 q = 1;
851 den = div_up(num, q);
852 if (den < rats[k].den_min)
853 continue;
854 if (den > rats[k].den_max)
855 den = rats[k].den_max;
856 else {
857 unsigned int r;
858 r = (den - rats[k].den_min) % rats[k].den_step;
859 if (r != 0)
860 den -= r;
861 }
862 diff = num - q * den;
863 if (diff < 0)
864 diff = -diff;
865 if (best_num == 0 ||
866 diff * best_den < best_diff * den) {
867 best_diff = diff;
868 best_den = den;
869 best_num = num;
870 }
871 }
872 if (best_den == 0) {
873 i->empty = 1;
874 return -EINVAL;
875 }
876 t.min = div_down(best_num, best_den);
877 t.openmin = !!(best_num % best_den);
878
879 result_num = best_num;
880 result_diff = best_diff;
881 result_den = best_den;
882 best_num = best_den = best_diff = 0;
883 for (k = 0; k < rats_count; ++k) {
884 unsigned int num = rats[k].num;
885 unsigned int den;
886 unsigned int q = i->max;
887 int diff;
888 if (q == 0) {
889 i->empty = 1;
890 return -EINVAL;
891 }
892 den = div_down(num, q);
893 if (den > rats[k].den_max)
894 continue;
895 if (den < rats[k].den_min)
896 den = rats[k].den_min;
897 else {
898 unsigned int r;
899 r = (den - rats[k].den_min) % rats[k].den_step;
900 if (r != 0)
901 den += rats[k].den_step - r;
902 }
903 diff = q * den - num;
904 if (diff < 0)
905 diff = -diff;
906 if (best_num == 0 ||
907 diff * best_den < best_diff * den) {
908 best_diff = diff;
909 best_den = den;
910 best_num = num;
911 }
912 }
913 if (best_den == 0) {
914 i->empty = 1;
915 return -EINVAL;
916 }
917 t.max = div_up(best_num, best_den);
918 t.openmax = !!(best_num % best_den);
919 t.integer = 0;
920 err = snd_interval_refine(i, &t);
921 if (err < 0)
922 return err;
923
924 if (snd_interval_single(i)) {
925 if (best_diff * result_den < result_diff * best_den) {
926 result_num = best_num;
927 result_den = best_den;
928 }
929 if (nump)
930 *nump = result_num;
931 if (denp)
932 *denp = result_den;
933 }
934 return err;
935}
936EXPORT_SYMBOL(snd_interval_ratnum);
937
938/**
939 * snd_interval_ratden - refine the interval value
940 * @i: interval to refine
 * @rats_count: number of struct snd_ratden entries
 * @rats: struct snd_ratden array
943 * @nump: pointer to store the resultant numerator
944 * @denp: pointer to store the resultant denominator
945 *
946 * Return: Positive if the value is changed, zero if it's not changed, or a
947 * negative error code.
948 */
949static int snd_interval_ratden(struct snd_interval *i,
950 unsigned int rats_count,
951 const struct snd_ratden *rats,
952 unsigned int *nump, unsigned int *denp)
953{
954 unsigned int best_num, best_diff, best_den;
955 unsigned int k;
956 struct snd_interval t;
957 int err;
958
959 best_num = best_den = best_diff = 0;
960 for (k = 0; k < rats_count; ++k) {
961 unsigned int num;
962 unsigned int den = rats[k].den;
963 unsigned int q = i->min;
964 int diff;
965 num = mul(q, den);
966 if (num > rats[k].num_max)
967 continue;
968 if (num < rats[k].num_min)
969 num = rats[k].num_max;
970 else {
971 unsigned int r;
972 r = (num - rats[k].num_min) % rats[k].num_step;
973 if (r != 0)
974 num += rats[k].num_step - r;
975 }
976 diff = num - q * den;
977 if (best_num == 0 ||
978 diff * best_den < best_diff * den) {
979 best_diff = diff;
980 best_den = den;
981 best_num = num;
982 }
983 }
984 if (best_den == 0) {
985 i->empty = 1;
986 return -EINVAL;
987 }
988 t.min = div_down(best_num, best_den);
989 t.openmin = !!(best_num % best_den);
990
991 best_num = best_den = best_diff = 0;
992 for (k = 0; k < rats_count; ++k) {
993 unsigned int num;
994 unsigned int den = rats[k].den;
995 unsigned int q = i->max;
996 int diff;
997 num = mul(q, den);
998 if (num < rats[k].num_min)
999 continue;
1000 if (num > rats[k].num_max)
1001 num = rats[k].num_max;
1002 else {
1003 unsigned int r;
1004 r = (num - rats[k].num_min) % rats[k].num_step;
1005 if (r != 0)
1006 num -= r;
1007 }
1008 diff = q * den - num;
1009 if (best_num == 0 ||
1010 diff * best_den < best_diff * den) {
1011 best_diff = diff;
1012 best_den = den;
1013 best_num = num;
1014 }
1015 }
1016 if (best_den == 0) {
1017 i->empty = 1;
1018 return -EINVAL;
1019 }
1020 t.max = div_up(best_num, best_den);
1021 t.openmax = !!(best_num % best_den);
1022 t.integer = 0;
1023 err = snd_interval_refine(i, &t);
1024 if (err < 0)
1025 return err;
1026
1027 if (snd_interval_single(i)) {
1028 if (nump)
1029 *nump = best_num;
1030 if (denp)
1031 *denp = best_den;
1032 }
1033 return err;
1034}
1035
1036/**
1037 * snd_interval_list - refine the interval value from the list
1038 * @i: the interval value to refine
1039 * @count: the number of elements in the list
1040 * @list: the value list
1041 * @mask: the bit-mask to evaluate
1042 *
1043 * Refines the interval value from the list.
1044 * When mask is non-zero, only the elements corresponding to bit 1 are
1045 * evaluated.
1046 *
1047 * Return: Positive if the value is changed, zero if it's not changed, or a
1048 * negative error code.
1049 */
1050int snd_interval_list(struct snd_interval *i, unsigned int count,
1051 const unsigned int *list, unsigned int mask)
1052{
1053 unsigned int k;
1054 struct snd_interval list_range;
1055
1056 if (!count) {
1057 i->empty = 1;
1058 return -EINVAL;
1059 }
1060 snd_interval_any(&list_range);
1061 list_range.min = UINT_MAX;
1062 list_range.max = 0;
1063 for (k = 0; k < count; k++) {
1064 if (mask && !(mask & (1 << k)))
1065 continue;
1066 if (!snd_interval_test(i, list[k]))
1067 continue;
1068 list_range.min = min(list_range.min, list[k]);
1069 list_range.max = max(list_range.max, list[k]);
1070 }
1071 return snd_interval_refine(i, &list_range);
1072}
1073EXPORT_SYMBOL(snd_interval_list);
1074
1075/**
1076 * snd_interval_ranges - refine the interval value from the list of ranges
1077 * @i: the interval value to refine
1078 * @count: the number of elements in the list of ranges
1079 * @ranges: the ranges list
1080 * @mask: the bit-mask to evaluate
1081 *
1082 * Refines the interval value from the list of ranges.
1083 * When mask is non-zero, only the elements corresponding to bit 1 are
1084 * evaluated.
1085 *
1086 * Return: Positive if the value is changed, zero if it's not changed, or a
1087 * negative error code.
1088 */
1089int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1090 const struct snd_interval *ranges, unsigned int mask)
1091{
1092 unsigned int k;
1093 struct snd_interval range_union;
1094 struct snd_interval range;
1095
1096 if (!count) {
1097 snd_interval_none(i);
1098 return -EINVAL;
1099 }
1100 snd_interval_any(&range_union);
1101 range_union.min = UINT_MAX;
1102 range_union.max = 0;
1103 for (k = 0; k < count; k++) {
1104 if (mask && !(mask & (1 << k)))
1105 continue;
1106 snd_interval_copy(&range, &ranges[k]);
1107 if (snd_interval_refine(&range, i) < 0)
1108 continue;
1109 if (snd_interval_empty(&range))
1110 continue;
1111
1112 if (range.min < range_union.min) {
1113 range_union.min = range.min;
1114 range_union.openmin = 1;
1115 }
1116 if (range.min == range_union.min && !range.openmin)
1117 range_union.openmin = 0;
1118 if (range.max > range_union.max) {
1119 range_union.max = range.max;
1120 range_union.openmax = 1;
1121 }
1122 if (range.max == range_union.max && !range.openmax)
1123 range_union.openmax = 0;
1124 }
1125 return snd_interval_refine(i, &range_union);
1126}
1127EXPORT_SYMBOL(snd_interval_ranges);
1128
1129static int snd_interval_step(struct snd_interval *i, unsigned int step)
1130{
1131 unsigned int n;
1132 int changed = 0;
1133 n = i->min % step;
1134 if (n != 0 || i->openmin) {
1135 i->min += step - n;
1136 i->openmin = 0;
1137 changed = 1;
1138 }
1139 n = i->max % step;
1140 if (n != 0 || i->openmax) {
1141 i->max -= n;
1142 i->openmax = 0;
1143 changed = 1;
1144 }
1145 if (snd_interval_checkempty(i)) {
1146 i->empty = 1;
1147 return -EINVAL;
1148 }
1149 return changed;
1150}
1151
1152/* Info constraints helpers */
1153
1154/**
1155 * snd_pcm_hw_rule_add - add the hw-constraint rule
1156 * @runtime: the pcm runtime instance
1157 * @cond: condition bits
1158 * @var: the variable to evaluate
1159 * @func: the evaluation function
1160 * @private: the private data pointer passed to function
1161 * @dep: the dependent variables
1162 *
1163 * Return: Zero if successful, or a negative error code on failure.
1164 */
1165int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1166 int var,
1167 snd_pcm_hw_rule_func_t func, void *private,
1168 int dep, ...)
1169{
1170 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1171 struct snd_pcm_hw_rule *c;
1172 unsigned int k;
1173 va_list args;
1174 va_start(args, dep);
1175 if (constrs->rules_num >= constrs->rules_all) {
1176 struct snd_pcm_hw_rule *new;
1177 unsigned int new_rules = constrs->rules_all + 16;
1178 new = krealloc_array(constrs->rules, new_rules,
1179 sizeof(*c), GFP_KERNEL);
1180 if (!new) {
1181 va_end(args);
1182 return -ENOMEM;
1183 }
1184 constrs->rules = new;
1185 constrs->rules_all = new_rules;
1186 }
1187 c = &constrs->rules[constrs->rules_num];
1188 c->cond = cond;
1189 c->func = func;
1190 c->var = var;
1191 c->private = private;
1192 k = 0;
1193 while (1) {
1194 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1195 va_end(args);
1196 return -EINVAL;
1197 }
1198 c->deps[k++] = dep;
1199 if (dep < 0)
1200 break;
1201 dep = va_arg(args, int);
1202 }
1203 constrs->rules_num++;
1204 va_end(args);
1205 return 0;
1206}
1207EXPORT_SYMBOL(snd_pcm_hw_rule_add);
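
/*
 * Example usage (illustrative sketch, not a verbatim driver): a custom rule
 * keeping the buffer size at no more than one second of audio; the rule is
 * re-evaluated whenever the rate changes.  my_hw_rule_buffer_size() is
 * hypothetical; err and runtime come from the driver's .open callback.
 *
 *	static int my_hw_rule_buffer_size(struct snd_pcm_hw_params *params,
 *					  struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *rate =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = {
 *			.min = 0, .max = rate->max, .integer = 1,
 *		};
 *
 *		return snd_interval_refine(hw_param_interval(params, rule->var), &t);
 *	}
 *
 *	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
 *				  my_hw_rule_buffer_size, NULL,
 *				  SNDRV_PCM_HW_PARAM_RATE, -1);
 */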
1208
1209/**
1210 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1211 * @runtime: PCM runtime instance
1212 * @var: hw_params variable to apply the mask
1213 * @mask: the bitmap mask
1214 *
1215 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1216 *
1217 * Return: Zero if successful, or a negative error code on failure.
1218 */
1219int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1220 u_int32_t mask)
1221{
1222 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1223 struct snd_mask *maskp = constrs_mask(constrs, var);
1224 *maskp->bits &= mask;
1225 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1226 if (*maskp->bits == 0)
1227 return -EINVAL;
1228 return 0;
1229}
1230
1231/**
1232 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1233 * @runtime: PCM runtime instance
1234 * @var: hw_params variable to apply the mask
1235 * @mask: the 64bit bitmap mask
1236 *
1237 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1238 *
1239 * Return: Zero if successful, or a negative error code on failure.
1240 */
1241int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1242 u_int64_t mask)
1243{
1244 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1245 struct snd_mask *maskp = constrs_mask(constrs, var);
1246 maskp->bits[0] &= (u_int32_t)mask;
1247 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1248 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1249 if (! maskp->bits[0] && ! maskp->bits[1])
1250 return -EINVAL;
1251 return 0;
1252}
1253EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
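/*
 * Example (illustrative): restrict a stream to two sample formats; the mask
 * value is built with pcm_format_to_bits():
 *
 *   err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT,
 *                                      pcm_format_to_bits(SNDRV_PCM_FORMAT_S16_LE) |
 *                                      pcm_format_to_bits(SNDRV_PCM_FORMAT_S32_LE));
 */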
1254
1255/**
1256 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1257 * @runtime: PCM runtime instance
1258 * @var: hw_params variable to apply the integer constraint
1259 *
1260 * Apply an integer constraint to an interval parameter.
1261 *
1262 * Return: Positive if the value is changed, zero if it's not changed, or a
1263 * negative error code.
1264 */
1265int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1266{
1267 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268 return snd_interval_setinteger(constrs_interval(constrs, var));
1269}
1270EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
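/*
 * Example (illustrative; typically called from a driver's open callback):
 * force an integer number of periods, as many DMA engines require:
 *
 *   err = snd_pcm_hw_constraint_integer(substream->runtime,
 *                                       SNDRV_PCM_HW_PARAM_PERIODS);
 */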
1271
1272/**
1273 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1274 * @runtime: PCM runtime instance
1275 * @var: hw_params variable to apply the range
1276 * @min: the minimal value
1277 * @max: the maximal value
1278 *
1279 * Apply the min/max range constraint to an interval parameter.
1280 *
1281 * Return: Positive if the value is changed, zero if it's not changed, or a
1282 * negative error code.
1283 */
1284int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1285 unsigned int min, unsigned int max)
1286{
1287 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1288 struct snd_interval t;
1289 t.min = min;
1290 t.max = max;
1291 t.openmin = t.openmax = 0;
1292 t.integer = 0;
1293 return snd_interval_refine(constrs_interval(constrs, var), &t);
1294}
1295EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
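/*
 * Example (illustrative): clamp the buffer size in bytes to what a
 * hypothetical device's DMA controller can address:
 *
 *   err = snd_pcm_hw_constraint_minmax(runtime,
 *                                      SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *                                      4096, 256 * 1024);
 */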
1296
1297static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1298 struct snd_pcm_hw_rule *rule)
1299{
1300 struct snd_pcm_hw_constraint_list *list = rule->private;
1301 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1302}
1303
1304
1305/**
1306 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1307 * @runtime: PCM runtime instance
1308 * @cond: condition bits
1309 * @var: hw_params variable to apply the list constraint
1310 * @l: the constraint list to apply
1311 *
1312 * Apply the list of constraints to an interval parameter.
1313 *
1314 * Return: Zero if successful, or a negative error code on failure.
1315 */
1316int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1317 unsigned int cond,
1318 snd_pcm_hw_param_t var,
1319 const struct snd_pcm_hw_constraint_list *l)
1320{
1321 return snd_pcm_hw_rule_add(runtime, cond, var,
1322 snd_pcm_hw_rule_list, (void *)l,
1323 var, -1);
1324}
1325EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
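/*
 * Example (illustrative; the "foo" names are hypothetical): limit the rate
 * to the discrete set supported by a codec:
 *
 *   static const unsigned int foo_rates[] = { 32000, 44100, 48000 };
 *   static const struct snd_pcm_hw_constraint_list foo_rate_list = {
 *           .count = ARRAY_SIZE(foo_rates),
 *           .list = foo_rates,
 *           .mask = 0,
 *   };
 *
 *   err = snd_pcm_hw_constraint_list(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_RATE,
 *                                    &foo_rate_list);
 */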
1326
1327static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1328 struct snd_pcm_hw_rule *rule)
1329{
1330 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1331 return snd_interval_ranges(hw_param_interval(params, rule->var),
1332 r->count, r->ranges, r->mask);
1333}
1334
1335
1336/**
1337 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1338 * @runtime: PCM runtime instance
1339 * @cond: condition bits
1340 * @var: hw_params variable to apply the list of range constraints
1341 * @r: the list of ranges to apply
1342 *
1343 * Apply the list of range constraints to an interval parameter.
1344 *
1345 * Return: Zero if successful, or a negative error code on failure.
1346 */
1347int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1348 unsigned int cond,
1349 snd_pcm_hw_param_t var,
1350 const struct snd_pcm_hw_constraint_ranges *r)
1351{
1352 return snd_pcm_hw_rule_add(runtime, cond, var,
1353 snd_pcm_hw_rule_ranges, (void *)r,
1354 var, -1);
1355}
1356EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1357
1358static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1359 struct snd_pcm_hw_rule *rule)
1360{
1361 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1362 unsigned int num = 0, den = 0;
1363 int err;
1364 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1365 r->nrats, r->rats, &num, &den);
1366 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1367 params->rate_num = num;
1368 params->rate_den = den;
1369 }
1370 return err;
1371}
1372
1373/**
1374 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1375 * @runtime: PCM runtime instance
1376 * @cond: condition bits
1377 * @var: hw_params variable to apply the ratnums constraint
1378 * @r: struct snd_pcm_hw_constraint_ratnums constraints
1379 *
1380 * Return: Zero if successful, or a negative error code on failure.
1381 */
1382int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1383 unsigned int cond,
1384 snd_pcm_hw_param_t var,
1385 const struct snd_pcm_hw_constraint_ratnums *r)
1386{
1387 return snd_pcm_hw_rule_add(runtime, cond, var,
1388 snd_pcm_hw_rule_ratnums, (void *)r,
1389 var, -1);
1390}
1391EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
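/*
 * Example (illustrative; the "foo" names and clock values are hypothetical):
 * rates derived from a 24.576 MHz master clock through an integer divider:
 *
 *   static const struct snd_ratnum foo_clock = {
 *           .num = 24576000,
 *           .den_min = 256,
 *           .den_max = 768,
 *           .den_step = 256,
 *   };
 *   static const struct snd_pcm_hw_constraint_ratnums foo_ratnums = {
 *           .nrats = 1,
 *           .rats = &foo_clock,
 *   };
 *
 *   err = snd_pcm_hw_constraint_ratnums(runtime, 0,
 *                                       SNDRV_PCM_HW_PARAM_RATE,
 *                                       &foo_ratnums);
 */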
1392
1393static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1394 struct snd_pcm_hw_rule *rule)
1395{
1396 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1397 unsigned int num = 0, den = 0;
1398 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1399 r->nrats, r->rats, &num, &den);
1400 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1401 params->rate_num = num;
1402 params->rate_den = den;
1403 }
1404 return err;
1405}
1406
1407/**
1408 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1409 * @runtime: PCM runtime instance
1410 * @cond: condition bits
1411 * @var: hw_params variable to apply the ratdens constraint
1412 * @r: struct snd_pcm_hw_constraint_ratdens constraints
1413 *
1414 * Return: Zero if successful, or a negative error code on failure.
1415 */
1416int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1417 unsigned int cond,
1418 snd_pcm_hw_param_t var,
1419 const struct snd_pcm_hw_constraint_ratdens *r)
1420{
1421 return snd_pcm_hw_rule_add(runtime, cond, var,
1422 snd_pcm_hw_rule_ratdens, (void *)r,
1423 var, -1);
1424}
1425EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1426
1427static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1428 struct snd_pcm_hw_rule *rule)
1429{
1430 unsigned int l = (unsigned long) rule->private;
1431 int width = l & 0xffff;
1432 unsigned int msbits = l >> 16;
1433 const struct snd_interval *i =
1434 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1435
1436 if (!snd_interval_single(i))
1437 return 0;
1438
1439 if ((snd_interval_value(i) == width) ||
1440 (width == 0 && snd_interval_value(i) > msbits))
1441 params->msbits = min_not_zero(params->msbits, msbits);
1442
1443 return 0;
1444}
1445
1446/**
1447 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1448 * @runtime: PCM runtime instance
1449 * @cond: condition bits
1450 * @width: sample bits width
1451 * @msbits: msbits width
1452 *
1453 * This constraint will set the number of most significant bits (msbits) if a
1454 * sample format with the specified width has been selected. If width is set to 0,
1455 * the msbits will be set for any sample format with a width larger than the
1456 * specified msbits.
1457 *
1458 * Return: Zero if successful, or a negative error code on failure.
1459 */
1460int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1461 unsigned int cond,
1462 unsigned int width,
1463 unsigned int msbits)
1464{
1465 unsigned long l = (msbits << 16) | width;
1466 return snd_pcm_hw_rule_add(runtime, cond, -1,
1467 snd_pcm_hw_rule_msbits,
1468 (void*) l,
1469 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1470}
1471EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
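/*
 * Example (illustrative): a converter that stores samples in 32-bit words
 * but only resolves 24 significant bits:
 *
 *   err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */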
1472
1473static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1474 struct snd_pcm_hw_rule *rule)
1475{
1476 unsigned long step = (unsigned long) rule->private;
1477 return snd_interval_step(hw_param_interval(params, rule->var), step);
1478}
1479
1480/**
1481 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1482 * @runtime: PCM runtime instance
1483 * @cond: condition bits
1484 * @var: hw_params variable to apply the step constraint
1485 * @step: step size
1486 *
1487 * Return: Zero if successful, or a negative error code on failure.
1488 */
1489int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1490 unsigned int cond,
1491 snd_pcm_hw_param_t var,
1492 unsigned long step)
1493{
1494 return snd_pcm_hw_rule_add(runtime, cond, var,
1495 snd_pcm_hw_rule_step, (void *) step,
1496 var, -1);
1497}
1498EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
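/*
 * Example (illustrative): keep the period size a multiple of 64 frames to
 * match a hypothetical DMA burst length:
 *
 *   err = snd_pcm_hw_constraint_step(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 64);
 */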
1499
1500static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1501{
1502 static const unsigned int pow2_sizes[] = {
1503 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1504 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1505 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1506 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1507 };
1508 return snd_interval_list(hw_param_interval(params, rule->var),
1509 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1510}
1511
1512/**
1513 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1514 * @runtime: PCM runtime instance
1515 * @cond: condition bits
1516 * @var: hw_params variable to apply the power-of-2 constraint
1517 *
1518 * Return: Zero if successful, or a negative error code on failure.
1519 */
1520int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1521 unsigned int cond,
1522 snd_pcm_hw_param_t var)
1523{
1524 return snd_pcm_hw_rule_add(runtime, cond, var,
1525 snd_pcm_hw_rule_pow2, NULL,
1526 var, -1);
1527}
1528EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
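/*
 * Example (illustrative): hardware whose ring-buffer length register only
 * accepts powers of two:
 *
 *   err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_BUFFER_BYTES);
 */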
1529
1530static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1531 struct snd_pcm_hw_rule *rule)
1532{
1533 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1534 struct snd_interval *rate;
1535
1536 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1537 return snd_interval_list(rate, 1, &base_rate, 0);
1538}
1539
1540/**
1541 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1542 * @runtime: PCM runtime instance
1543 * @base_rate: the rate at which the hardware does not resample
1544 *
1545 * Return: Zero if successful, or a negative error code on failure.
1546 */
1547int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1548 unsigned int base_rate)
1549{
1550 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1551 SNDRV_PCM_HW_PARAM_RATE,
1552 snd_pcm_hw_rule_noresample_func,
1553 (void *)(uintptr_t)base_rate,
1554 SNDRV_PCM_HW_PARAM_RATE, -1);
1555}
1556EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
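/*
 * Example (illustrative): pin the rate to the hardware's native 48 kHz
 * whenever the application sets SNDRV_PCM_HW_PARAMS_NORESAMPLE:
 *
 *   err = snd_pcm_hw_rule_noresample(runtime, 48000);
 */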
1557
1558static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1559 snd_pcm_hw_param_t var)
1560{
1561 if (hw_is_mask(var)) {
1562 snd_mask_any(hw_param_mask(params, var));
1563 params->cmask |= 1 << var;
1564 params->rmask |= 1 << var;
1565 return;
1566 }
1567 if (hw_is_interval(var)) {
1568 snd_interval_any(hw_param_interval(params, var));
1569 params->cmask |= 1 << var;
1570 params->rmask |= 1 << var;
1571 return;
1572 }
1573 snd_BUG();
1574}
1575
1576void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1577{
1578 unsigned int k;
1579 memset(params, 0, sizeof(*params));
1580 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1581 _snd_pcm_hw_param_any(params, k);
1582 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1583 _snd_pcm_hw_param_any(params, k);
1584 params->info = ~0U;
1585}
1586EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1587
1588/**
1589 * snd_pcm_hw_param_value - return @params field @var value
1590 * @params: the hw_params instance
1591 * @var: parameter to retrieve
1592 * @dir: pointer to the direction (-1,0,1) or %NULL
1593 *
1594 * Return: The value for field @var if it's fixed in configuration space
1595 * defined by @params. -%EINVAL otherwise.
1596 */
1597int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1598 snd_pcm_hw_param_t var, int *dir)
1599{
1600 if (hw_is_mask(var)) {
1601 const struct snd_mask *mask = hw_param_mask_c(params, var);
1602 if (!snd_mask_single(mask))
1603 return -EINVAL;
1604 if (dir)
1605 *dir = 0;
1606 return snd_mask_value(mask);
1607 }
1608 if (hw_is_interval(var)) {
1609 const struct snd_interval *i = hw_param_interval_c(params, var);
1610 if (!snd_interval_single(i))
1611 return -EINVAL;
1612 if (dir)
1613 *dir = i->openmin;
1614 return snd_interval_value(i);
1615 }
1616 return -EINVAL;
1617}
1618EXPORT_SYMBOL(snd_pcm_hw_param_value);
1619
1620void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1621 snd_pcm_hw_param_t var)
1622{
1623 if (hw_is_mask(var)) {
1624 snd_mask_none(hw_param_mask(params, var));
1625 params->cmask |= 1 << var;
1626 params->rmask |= 1 << var;
1627 } else if (hw_is_interval(var)) {
1628 snd_interval_none(hw_param_interval(params, var));
1629 params->cmask |= 1 << var;
1630 params->rmask |= 1 << var;
1631 } else {
1632 snd_BUG();
1633 }
1634}
1635EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1636
1637static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1638 snd_pcm_hw_param_t var)
1639{
1640 int changed;
1641 if (hw_is_mask(var))
1642 changed = snd_mask_refine_first(hw_param_mask(params, var));
1643 else if (hw_is_interval(var))
1644 changed = snd_interval_refine_first(hw_param_interval(params, var));
1645 else
1646 return -EINVAL;
1647 if (changed > 0) {
1648 params->cmask |= 1 << var;
1649 params->rmask |= 1 << var;
1650 }
1651 return changed;
1652}
1653
1654
1655/**
1656 * snd_pcm_hw_param_first - refine config space and return minimum value
1657 * @pcm: PCM instance
1658 * @params: the hw_params instance
1659 * @var: parameter to retrieve
1660 * @dir: pointer to the direction (-1,0,1) or %NULL
1661 *
1662 * Inside configuration space defined by @params remove from @var all
1663 * values > minimum. Reduce configuration space accordingly.
1664 *
1665 * Return: The minimum, or a negative error code on failure.
1666 */
1667int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1668 struct snd_pcm_hw_params *params,
1669 snd_pcm_hw_param_t var, int *dir)
1670{
1671 int changed = _snd_pcm_hw_param_first(params, var);
1672 if (changed < 0)
1673 return changed;
1674 if (params->rmask) {
1675 int err = snd_pcm_hw_refine(pcm, params);
1676 if (err < 0)
1677 return err;
1678 }
1679 return snd_pcm_hw_param_value(params, var, dir);
1680}
1681EXPORT_SYMBOL(snd_pcm_hw_param_first);
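/*
 * Example (illustrative): after the configuration space has been refined,
 * pick the lowest rate that is still available:
 *
 *   int rate = snd_pcm_hw_param_first(substream, params,
 *                                     SNDRV_PCM_HW_PARAM_RATE, NULL);
 *   if (rate < 0)
 *           return rate;
 */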
1682
1683static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1684 snd_pcm_hw_param_t var)
1685{
1686 int changed;
1687 if (hw_is_mask(var))
1688 changed = snd_mask_refine_last(hw_param_mask(params, var));
1689 else if (hw_is_interval(var))
1690 changed = snd_interval_refine_last(hw_param_interval(params, var));
1691 else
1692 return -EINVAL;
1693 if (changed > 0) {
1694 params->cmask |= 1 << var;
1695 params->rmask |= 1 << var;
1696 }
1697 return changed;
1698}
1699
1700
1701/**
1702 * snd_pcm_hw_param_last - refine config space and return maximum value
1703 * @pcm: PCM instance
1704 * @params: the hw_params instance
1705 * @var: parameter to retrieve
1706 * @dir: pointer to the direction (-1,0,1) or %NULL
1707 *
1708 * Inside configuration space defined by @params remove from @var all
1709 * values < maximum. Reduce configuration space accordingly.
1710 *
1711 * Return: The maximum, or a negative error code on failure.
1712 */
1713int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1714 struct snd_pcm_hw_params *params,
1715 snd_pcm_hw_param_t var, int *dir)
1716{
1717 int changed = _snd_pcm_hw_param_last(params, var);
1718 if (changed < 0)
1719 return changed;
1720 if (params->rmask) {
1721 int err = snd_pcm_hw_refine(pcm, params);
1722 if (err < 0)
1723 return err;
1724 }
1725 return snd_pcm_hw_param_value(params, var, dir);
1726}
1727EXPORT_SYMBOL(snd_pcm_hw_param_last);
1728
1729/**
1730 * snd_pcm_hw_params_bits - Get the number of bits per sample.
1731 * @p: hardware parameters
1732 *
1733 * Return: The number of bits per sample based on the format,
1734 * subformat and msbits the specified hw params has.
1735 */
1736int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1737{
1738 snd_pcm_subformat_t subformat = params_subformat(p);
1739 snd_pcm_format_t format = params_format(p);
1740
1741 switch (format) {
1742 case SNDRV_PCM_FORMAT_S32_LE:
1743 case SNDRV_PCM_FORMAT_U32_LE:
1744 case SNDRV_PCM_FORMAT_S32_BE:
1745 case SNDRV_PCM_FORMAT_U32_BE:
1746 switch (subformat) {
1747 case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1748 return 20;
1749 case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1750 return 24;
1751 case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1752 case SNDRV_PCM_SUBFORMAT_STD:
1753 default:
1754 break;
1755 }
1756 fallthrough;
1757 default:
1758 return snd_pcm_format_width(format);
1759 }
1760}
1761EXPORT_SYMBOL(snd_pcm_hw_params_bits);
1762
1763static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1764 void *arg)
1765{
1766 struct snd_pcm_runtime *runtime = substream->runtime;
1767
1768 guard(pcm_stream_lock_irqsave)(substream);
1769 if (snd_pcm_running(substream) &&
1770 snd_pcm_update_hw_ptr(substream) >= 0)
1771 runtime->status->hw_ptr %= runtime->buffer_size;
1772 else {
1773 runtime->status->hw_ptr = 0;
1774 runtime->hw_ptr_wrap = 0;
1775 }
1776 return 0;
1777}
1778
1779static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1780 void *arg)
1781{
1782 struct snd_pcm_channel_info *info = arg;
1783 struct snd_pcm_runtime *runtime = substream->runtime;
1784 int width;
1785 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1786 info->offset = -1;
1787 return 0;
1788 }
1789 width = snd_pcm_format_physical_width(runtime->format);
1790 if (width < 0)
1791 return width;
1792 info->offset = 0;
1793 switch (runtime->access) {
1794 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1795 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1796 info->first = info->channel * width;
1797 info->step = runtime->channels * width;
1798 break;
1799 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1800 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1801 {
1802 size_t size = runtime->dma_bytes / runtime->channels;
1803 info->first = info->channel * size * 8;
1804 info->step = width;
1805 break;
1806 }
1807 default:
1808 snd_BUG();
1809 break;
1810 }
1811 return 0;
1812}
1813
1814static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1815 void *arg)
1816{
1817 struct snd_pcm_hw_params *params = arg;
1818 snd_pcm_format_t format;
1819 int channels;
1820 ssize_t frame_size;
1821
1822 params->fifo_size = substream->runtime->hw.fifo_size;
1823 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1824 format = params_format(params);
1825 channels = params_channels(params);
1826 frame_size = snd_pcm_format_size(format, channels);
1827 if (frame_size > 0)
1828 params->fifo_size /= frame_size;
1829 }
1830 return 0;
1831}
1832
1833static int snd_pcm_lib_ioctl_sync_id(struct snd_pcm_substream *substream,
1834 void *arg)
1835{
1836 static const unsigned char id[12] = { 0xff, 0xff, 0xff, 0xff,
1837 0xff, 0xff, 0xff, 0xff,
1838 0xff, 0xff, 0xff, 0xff };
1839
1840 if (substream->runtime->std_sync_id)
1841 snd_pcm_set_sync_per_card(substream, arg, id, sizeof(id));
1842 return 0;
1843}
1844
1845/**
1846 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1847 * @substream: the pcm substream instance
1848 * @cmd: ioctl command
1849 * @arg: ioctl argument
1850 *
1851 * Processes the generic ioctl commands for PCM.
1852 * Can be passed as the ioctl callback for PCM ops.
1853 *
1854 * Return: Zero if successful, or a negative error code on failure.
1855 */
1856int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1857 unsigned int cmd, void *arg)
1858{
1859 switch (cmd) {
1860 case SNDRV_PCM_IOCTL1_RESET:
1861 return snd_pcm_lib_ioctl_reset(substream, arg);
1862 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1863 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1864 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1865 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1866 case SNDRV_PCM_IOCTL1_SYNC_ID:
1867 return snd_pcm_lib_ioctl_sync_id(substream, arg);
1868 }
1869 return -ENXIO;
1870}
1871EXPORT_SYMBOL(snd_pcm_lib_ioctl);
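/*
 * Example (illustrative; the "foo" callbacks are hypothetical): a driver
 * with no special ioctl handling can plug this helper into its ops table:
 *
 *   static const struct snd_pcm_ops foo_pcm_ops = {
 *           .open      = foo_open,
 *           .close     = foo_close,
 *           .ioctl     = snd_pcm_lib_ioctl,
 *           .hw_params = foo_hw_params,
 *           .trigger   = foo_trigger,
 *           .pointer   = foo_pointer,
 *   };
 */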
1872
1873/**
1874 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1875 * under acquired lock of PCM substream.
1876 * @substream: the instance of pcm substream.
1877 *
1878 * This function is called when a batch of audio data frames, equal in size to the period of the
1879 * buffer, has been processed in the audio data transmission.
1880 *
1881 * The call updates the runtime status with the latest position of the audio data transmission,
1882 * checks for overruns and underruns of the buffer, wakes up user processes waiting for available
1883 * audio data frames, samples the audio timestamp, and stops or drains the PCM substream according
1884 * to the configured thresholds.
1885 *
1886 * The function is intended for the case where the PCM driver operates on audio data frames under
1887 * an acquired lock of the PCM substream, e.g. in a callback of any operation of &snd_pcm_ops in
1888 * process context. In interrupt context, where the lock is not yet held, it's preferable to use
1889 * ``snd_pcm_period_elapsed()`` instead, which acquires the lock by itself.
1890 *
1891 * Drivers should be aware that some callbacks in &snd_pcm_ops may be invoked by this
1892 * function:
1893 *
1894 * - .pointer - to retrieve current position of audio data transmission by frame count or XRUN state.
1895 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1896 * - .get_time_info - to retrieve audio time stamp if needed.
1897 *
1898 * Even if more than one period has elapsed since the last call, you need to call this only once.
1899 */
1900void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1901{
1902 struct snd_pcm_runtime *runtime;
1903
1904 if (PCM_RUNTIME_CHECK(substream))
1905 return;
1906 runtime = substream->runtime;
1907
1908 if (!snd_pcm_running(substream) ||
1909 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1910 goto _end;
1911
1912#ifdef CONFIG_SND_PCM_TIMER
1913 if (substream->timer_running)
1914 snd_timer_interrupt(substream->timer, 1);
1915#endif
1916 _end:
1917 snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1918}
1919EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1920
1921/**
1922 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1923 * PCM substream.
1924 * @substream: the instance of PCM substream.
1925 *
1926 * This function is similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that it
1927 * acquires the lock of the PCM substream by itself.
1928 *
1929 * It's typically called from an interrupt handler to notify that a batch of audio data frames,
1930 * equal in size to the period of the buffer, has been processed in the audio data
1931 * transmission.
1932 */
1933void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1934{
1935 if (snd_BUG_ON(!substream))
1936 return;
1937
1938 guard(pcm_stream_lock_irqsave)(substream);
1939 snd_pcm_period_elapsed_under_stream_lock(substream);
1940}
1941EXPORT_SYMBOL(snd_pcm_period_elapsed);
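/*
 * Example (illustrative; the "foo" names are hypothetical): an interrupt
 * handler that acknowledges the hardware and then reports the elapsed
 * period:
 *
 *   static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *   {
 *           struct foo_chip *chip = dev_id;
 *
 *           foo_ack_irq(chip);
 *           snd_pcm_period_elapsed(chip->substream);
 *           return IRQ_HANDLED;
 *   }
 */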
1942
1943/*
1944 * Wait until avail_min data becomes available
1945 * Returns a negative error code if any error occurs during operation.
1946 * The available space is stored in *availp. When err = 0 and avail = 0
1947 * on the capture stream, it indicates the stream is in DRAINING state.
1948 */
1949static int wait_for_avail(struct snd_pcm_substream *substream,
1950 snd_pcm_uframes_t *availp)
1951{
1952 struct snd_pcm_runtime *runtime = substream->runtime;
1953 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1954 wait_queue_entry_t wait;
1955 int err = 0;
1956 snd_pcm_uframes_t avail = 0;
1957 long wait_time, tout;
1958
1959 init_waitqueue_entry(&wait, current);
1960 set_current_state(TASK_INTERRUPTIBLE);
1961 add_wait_queue(&runtime->tsleep, &wait);
1962
1963 if (runtime->no_period_wakeup)
1964 wait_time = MAX_SCHEDULE_TIMEOUT;
1965 else {
1966 /* use wait time from substream if available */
1967 if (substream->wait_time) {
1968 wait_time = substream->wait_time;
1969 } else {
1970 wait_time = 100;
1971
1972 if (runtime->rate) {
1973 long t = runtime->buffer_size * 1100 / runtime->rate;
1974 wait_time = max(t, wait_time);
1975 }
1976 }
1977 wait_time = msecs_to_jiffies(wait_time);
1978 }
1979
1980 for (;;) {
1981 if (signal_pending(current)) {
1982 err = -ERESTARTSYS;
1983 break;
1984 }
1985
1986 /*
1987 * We need to check if space became available already
1988 * (and thus the wakeup happened already) first to close
1989 * the race of space already having become available.
1990 * This check must happen after having been added to the waitqueue
1991 * and having current state be INTERRUPTIBLE.
1992 */
1993 avail = snd_pcm_avail(substream);
1994 if (avail >= runtime->twake)
1995 break;
1996 snd_pcm_stream_unlock_irq(substream);
1997
1998 tout = schedule_timeout(wait_time);
1999
2000 snd_pcm_stream_lock_irq(substream);
2001 set_current_state(TASK_INTERRUPTIBLE);
2002 switch (runtime->state) {
2003 case SNDRV_PCM_STATE_SUSPENDED:
2004 err = -ESTRPIPE;
2005 goto _endloop;
2006 case SNDRV_PCM_STATE_XRUN:
2007 err = -EPIPE;
2008 goto _endloop;
2009 case SNDRV_PCM_STATE_DRAINING:
2010 if (is_playback)
2011 err = -EPIPE;
2012 else
2013 avail = 0; /* indicate draining */
2014 goto _endloop;
2015 case SNDRV_PCM_STATE_OPEN:
2016 case SNDRV_PCM_STATE_SETUP:
2017 case SNDRV_PCM_STATE_DISCONNECTED:
2018 err = -EBADFD;
2019 goto _endloop;
2020 case SNDRV_PCM_STATE_PAUSED:
2021 continue;
2022 }
2023 if (!tout) {
2024 pcm_dbg(substream->pcm,
2025 "%s timeout (DMA or IRQ trouble?)\n",
2026 is_playback ? "playback write" : "capture read");
2027 err = -EIO;
2028 break;
2029 }
2030 }
2031 _endloop:
2032 set_current_state(TASK_RUNNING);
2033 remove_wait_queue(&runtime->tsleep, &wait);
2034 *availp = avail;
2035 return err;
2036}
2037
2038typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2039 int channel, unsigned long hwoff,
2040 struct iov_iter *iter, unsigned long bytes);
2041
2042typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2043 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2044 bool);
2045
2046/* calculate the target DMA-buffer position to be written/read */
2047static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2048 int channel, unsigned long hwoff)
2049{
2050 return runtime->dma_area + hwoff +
2051 channel * (runtime->dma_bytes / runtime->channels);
2052}
2053
2054 /* default copy ops for write; used for both interleaved and non-interleaved modes */
2055static int default_write_copy(struct snd_pcm_substream *substream,
2056 int channel, unsigned long hwoff,
2057 struct iov_iter *iter, unsigned long bytes)
2058{
2059 if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2060 bytes, iter) != bytes)
2061 return -EFAULT;
2062 return 0;
2063}
2064
2065 /* fill silence instead of copying data; called as a transfer helper
2066 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
2067 * a NULL buffer is passed
2068 */
2069static int fill_silence(struct snd_pcm_substream *substream, int channel,
2070 unsigned long hwoff, struct iov_iter *iter,
2071 unsigned long bytes)
2072{
2073 struct snd_pcm_runtime *runtime = substream->runtime;
2074
2075 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2076 return 0;
2077 if (substream->ops->fill_silence)
2078 return substream->ops->fill_silence(substream, channel,
2079 hwoff, bytes);
2080
2081 snd_pcm_format_set_silence(runtime->format,
2082 get_dma_ptr(runtime, channel, hwoff),
2083 bytes_to_samples(runtime, bytes));
2084 return 0;
2085}
2086
2087 /* default copy ops for read; used for both interleaved and non-interleaved modes */
2088static int default_read_copy(struct snd_pcm_substream *substream,
2089 int channel, unsigned long hwoff,
2090 struct iov_iter *iter, unsigned long bytes)
2091{
2092 if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2093 bytes, iter) != bytes)
2094 return -EFAULT;
2095 return 0;
2096}
2097
2098/* call transfer with the filled iov_iter */
2099static int do_transfer(struct snd_pcm_substream *substream, int c,
2100 unsigned long hwoff, void *data, unsigned long bytes,
2101 pcm_transfer_f transfer, bool in_kernel)
2102{
2103 struct iov_iter iter;
2104 int err, type;
2105
2106 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2107 type = ITER_SOURCE;
2108 else
2109 type = ITER_DEST;
2110
2111 if (in_kernel) {
2112 struct kvec kvec = { data, bytes };
2113
2114 iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2115 return transfer(substream, c, hwoff, &iter, bytes);
2116 }
2117
2118 err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2119 if (err)
2120 return err;
2121 return transfer(substream, c, hwoff, &iter, bytes);
2122}
2123
2124/* call transfer function with the converted pointers and sizes;
2125 * for interleaved mode, it's one shot for all samples
2126 */
2127static int interleaved_copy(struct snd_pcm_substream *substream,
2128 snd_pcm_uframes_t hwoff, void *data,
2129 snd_pcm_uframes_t off,
2130 snd_pcm_uframes_t frames,
2131 pcm_transfer_f transfer,
2132 bool in_kernel)
2133{
2134 struct snd_pcm_runtime *runtime = substream->runtime;
2135
2136 /* convert to bytes */
2137 hwoff = frames_to_bytes(runtime, hwoff);
2138 off = frames_to_bytes(runtime, off);
2139 frames = frames_to_bytes(runtime, frames);
2140
2141 return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2142 in_kernel);
2143}
2144
2145/* call transfer function with the converted pointers and sizes for each
2146 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2147 */
2148static int noninterleaved_copy(struct snd_pcm_substream *substream,
2149 snd_pcm_uframes_t hwoff, void *data,
2150 snd_pcm_uframes_t off,
2151 snd_pcm_uframes_t frames,
2152 pcm_transfer_f transfer,
2153 bool in_kernel)
2154{
2155 struct snd_pcm_runtime *runtime = substream->runtime;
2156 int channels = runtime->channels;
2157 void **bufs = data;
2158 int c, err;
2159
2160 /* convert to bytes; note that it's not frames_to_bytes() here.
2161 * in non-interleaved mode, we copy for each channel, thus
2162 * each copy is n_samples bytes x channels = whole frames.
2163 */
2164 off = samples_to_bytes(runtime, off);
2165 frames = samples_to_bytes(runtime, frames);
2166 hwoff = samples_to_bytes(runtime, hwoff);
2167 for (c = 0; c < channels; ++c, ++bufs) {
2168 if (!data || !*bufs)
2169 err = fill_silence(substream, c, hwoff, NULL, frames);
2170 else
2171 err = do_transfer(substream, c, hwoff, *bufs + off,
2172 frames, transfer, in_kernel);
2173 if (err < 0)
2174 return err;
2175 }
2176 return 0;
2177}
2178
2179/* fill silence on the given buffer position;
2180 * called from snd_pcm_playback_silence()
2181 */
2182static int fill_silence_frames(struct snd_pcm_substream *substream,
2183 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2184{
2185 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2186 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2187 return interleaved_copy(substream, off, NULL, 0, frames,
2188 fill_silence, true);
2189 else
2190 return noninterleaved_copy(substream, off, NULL, 0, frames,
2191 fill_silence, true);
2192}
2193
2194/* sanity-check for read/write methods */
2195static int pcm_sanity_check(struct snd_pcm_substream *substream)
2196{
2197 struct snd_pcm_runtime *runtime;
2198 if (PCM_RUNTIME_CHECK(substream))
2199 return -ENXIO;
2200 runtime = substream->runtime;
2201 if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2202 return -EINVAL;
2203 if (runtime->state == SNDRV_PCM_STATE_OPEN)
2204 return -EBADFD;
2205 return 0;
2206}
2207
2208static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2209{
2210 switch (runtime->state) {
2211 case SNDRV_PCM_STATE_PREPARED:
2212 case SNDRV_PCM_STATE_RUNNING:
2213 case SNDRV_PCM_STATE_PAUSED:
2214 return 0;
2215 case SNDRV_PCM_STATE_XRUN:
2216 return -EPIPE;
2217 case SNDRV_PCM_STATE_SUSPENDED:
2218 return -ESTRPIPE;
2219 default:
2220 return -EBADFD;
2221 }
2222}
2223
2224/* update to the given appl_ptr and call ack callback if needed;
2225 * when an error is returned, roll it back to the original value
2226 */
2227int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2228 snd_pcm_uframes_t appl_ptr)
2229{
2230 struct snd_pcm_runtime *runtime = substream->runtime;
2231 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2232 snd_pcm_sframes_t diff;
2233 int ret;
2234
2235 if (old_appl_ptr == appl_ptr)
2236 return 0;
2237
2238 if (appl_ptr >= runtime->boundary)
2239 return -EINVAL;
2240 /*
2241 * check if a rewind is requested by the application
2242 */
2243 if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2244 diff = appl_ptr - old_appl_ptr;
2245 if (diff >= 0) {
2246 if (diff > runtime->buffer_size)
2247 return -EINVAL;
2248 } else {
2249 if (runtime->boundary + diff > runtime->buffer_size)
2250 return -EINVAL;
2251 }
2252 }
2253
2254 runtime->control->appl_ptr = appl_ptr;
2255 if (substream->ops->ack) {
2256 ret = substream->ops->ack(substream);
2257 if (ret < 0) {
2258 runtime->control->appl_ptr = old_appl_ptr;
2259 if (ret == -EPIPE)
2260 __snd_pcm_xrun(substream);
2261 return ret;
2262 }
2263 }
2264
2265 trace_applptr(substream, old_appl_ptr, appl_ptr);
2266
2267 return 0;
2268}
2269
2270/* the common loop for read/write data */
2271snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2272 void *data, bool interleaved,
2273 snd_pcm_uframes_t size, bool in_kernel)
2274{
2275 struct snd_pcm_runtime *runtime = substream->runtime;
2276 snd_pcm_uframes_t xfer = 0;
2277 snd_pcm_uframes_t offset = 0;
2278 snd_pcm_uframes_t avail;
2279 pcm_copy_f writer;
2280 pcm_transfer_f transfer;
2281 bool nonblock;
2282 bool is_playback;
2283 int err;
2284
2285 err = pcm_sanity_check(substream);
2286 if (err < 0)
2287 return err;
2288
2289 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2290 if (interleaved) {
2291 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2292 runtime->channels > 1)
2293 return -EINVAL;
2294 writer = interleaved_copy;
2295 } else {
2296 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2297 return -EINVAL;
2298 writer = noninterleaved_copy;
2299 }
2300
2301 if (!data) {
2302 if (is_playback)
2303 transfer = fill_silence;
2304 else
2305 return -EINVAL;
2306 } else {
2307 if (substream->ops->copy)
2308 transfer = substream->ops->copy;
2309 else
2310 transfer = is_playback ?
2311 default_write_copy : default_read_copy;
2312 }
2313
2314 if (size == 0)
2315 return 0;
2316
2317 nonblock = !!(substream->f_flags & O_NONBLOCK);
2318
2319 snd_pcm_stream_lock_irq(substream);
2320 err = pcm_accessible_state(runtime);
2321 if (err < 0)
2322 goto _end_unlock;
2323
2324 runtime->twake = runtime->control->avail_min ? : 1;
2325 if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2326 snd_pcm_update_hw_ptr(substream);
2327
2328 /*
2329 * If size < start_threshold, wait indefinitely. Another
2330 * thread may start capture
2331 */
2332 if (!is_playback &&
2333 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2334 size >= runtime->start_threshold) {
2335 err = snd_pcm_start(substream);
2336 if (err < 0)
2337 goto _end_unlock;
2338 }
2339
2340 avail = snd_pcm_avail(substream);
2341
2342 while (size > 0) {
2343 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2344 snd_pcm_uframes_t cont;
2345 if (!avail) {
2346 if (!is_playback &&
2347 runtime->state == SNDRV_PCM_STATE_DRAINING) {
2348 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2349 goto _end_unlock;
2350 }
2351 if (nonblock) {
2352 err = -EAGAIN;
2353 goto _end_unlock;
2354 }
2355 runtime->twake = min_t(snd_pcm_uframes_t, size,
2356 runtime->control->avail_min ? : 1);
2357 err = wait_for_avail(substream, &avail);
2358 if (err < 0)
2359 goto _end_unlock;
2360 if (!avail)
2361 continue; /* draining */
2362 }
2363 frames = size > avail ? avail : size;
2364 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2365 appl_ofs = appl_ptr % runtime->buffer_size;
2366 cont = runtime->buffer_size - appl_ofs;
2367 if (frames > cont)
2368 frames = cont;
2369 if (snd_BUG_ON(!frames)) {
2370 err = -EINVAL;
2371 goto _end_unlock;
2372 }
2373 if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2374 err = -EBUSY;
2375 goto _end_unlock;
2376 }
2377 snd_pcm_stream_unlock_irq(substream);
2378 if (!is_playback)
2379 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2380 err = writer(substream, appl_ofs, data, offset, frames,
2381 transfer, in_kernel);
2382 if (is_playback)
2383 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2384 snd_pcm_stream_lock_irq(substream);
2385 atomic_dec(&runtime->buffer_accessing);
2386 if (err < 0)
2387 goto _end_unlock;
2388 err = pcm_accessible_state(runtime);
2389 if (err < 0)
2390 goto _end_unlock;
2391 appl_ptr += frames;
2392 if (appl_ptr >= runtime->boundary)
2393 appl_ptr -= runtime->boundary;
2394 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2395 if (err < 0)
2396 goto _end_unlock;
2397
2398 offset += frames;
2399 size -= frames;
2400 xfer += frames;
2401 avail -= frames;
2402 if (is_playback &&
2403 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2404 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2405 err = snd_pcm_start(substream);
2406 if (err < 0)
2407 goto _end_unlock;
2408 }
2409 }
2410 _end_unlock:
2411 runtime->twake = 0;
2412 if (xfer > 0 && err >= 0)
2413 snd_pcm_update_state(substream, runtime);
2414 snd_pcm_stream_unlock_irq(substream);
2415 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2416}
2417EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2418
2419/*
2420 * standard channel mapping helpers
2421 */
2422
2423 /* default channel maps for multi-channel playback, up to 8 channels */
2424const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2425 { .channels = 1,
2426 .map = { SNDRV_CHMAP_MONO } },
2427 { .channels = 2,
2428 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2429 { .channels = 4,
2430 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2431 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2432 { .channels = 6,
2433 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2434 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2435 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2436 { .channels = 8,
2437 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2438 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2439 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2440 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2441 { }
2442};
2443EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2444
2445/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2446const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2447 { .channels = 1,
2448 .map = { SNDRV_CHMAP_MONO } },
2449 { .channels = 2,
2450 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2451 { .channels = 4,
2452 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2453 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2454 { .channels = 6,
2455 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2456 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2457 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2458 { .channels = 8,
2459 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2460 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2461 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2462 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2463 { }
2464};
2465EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2466
2467static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2468{
2469 if (ch > info->max_channels)
2470 return false;
2471 return !info->channel_mask || (info->channel_mask & (1U << ch));
2472}
2473
2474static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2475 struct snd_ctl_elem_info *uinfo)
2476{
2477 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2478
2479 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2480 uinfo->count = info->max_channels;
2481 uinfo->value.integer.min = 0;
2482 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2483 return 0;
2484}
2485
2486/* get callback for channel map ctl element
2487 * stores the channel positions of the first map matching the current channels
2488 */
2489static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2490 struct snd_ctl_elem_value *ucontrol)
2491{
2492 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2493 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2494 struct snd_pcm_substream *substream;
2495 const struct snd_pcm_chmap_elem *map;
2496
2497 if (!info->chmap)
2498 return -EINVAL;
2499 substream = snd_pcm_chmap_substream(info, idx);
2500 if (!substream)
2501 return -ENODEV;
2502 memset(ucontrol->value.integer.value, 0,
2503 sizeof(long) * info->max_channels);
2504 if (!substream->runtime)
2505 return 0; /* no channels set */
2506 for (map = info->chmap; map->channels; map++) {
2507 int i;
2508 if (map->channels == substream->runtime->channels &&
2509 valid_chmap_channels(info, map->channels)) {
2510 for (i = 0; i < map->channels; i++)
2511 ucontrol->value.integer.value[i] = map->map[i];
2512 return 0;
2513 }
2514 }
2515 return -EINVAL;
2516}
2517
2518/* tlv callback for channel map ctl element
2519 * expands the pre-defined channel maps in the form of TLV
2520 */
2521static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2522 unsigned int size, unsigned int __user *tlv)
2523{
2524 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2525 const struct snd_pcm_chmap_elem *map;
2526 unsigned int __user *dst;
2527 int c, count = 0;
2528
2529 if (!info->chmap)
2530 return -EINVAL;
2531 if (size < 8)
2532 return -ENOMEM;
2533 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2534 return -EFAULT;
2535 size -= 8;
2536 dst = tlv + 2;
2537 for (map = info->chmap; map->channels; map++) {
2538 int chs_bytes = map->channels * 4;
2539 if (!valid_chmap_channels(info, map->channels))
2540 continue;
2541 if (size < 8)
2542 return -ENOMEM;
2543 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2544 put_user(chs_bytes, dst + 1))
2545 return -EFAULT;
2546 dst += 2;
2547 size -= 8;
2548 count += 8;
2549 if (size < chs_bytes)
2550 return -ENOMEM;
2551 size -= chs_bytes;
2552 count += chs_bytes;
2553 for (c = 0; c < map->channels; c++) {
2554 if (put_user(map->map[c], dst))
2555 return -EFAULT;
2556 dst++;
2557 }
2558 }
2559 if (put_user(count, tlv + 1))
2560 return -EFAULT;
2561 return 0;
2562}
2563
2564static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2565{
2566 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2567 info->pcm->streams[info->stream].chmap_kctl = NULL;
2568 kfree(info);
2569}
2570
2571/**
2572 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2573 * @pcm: the assigned PCM instance
2574 * @stream: stream direction
2575 * @chmap: channel map elements (for query)
2576 * @max_channels: the max number of channels for the stream
2577 * @private_value: the value passed to each kcontrol's private_value field
2578 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2579 *
2580 * Create channel-mapping control elements assigned to the given PCM stream(s).
 *
2581 * Return: Zero if successful, or a negative error value.
2582 */
2583int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2584 const struct snd_pcm_chmap_elem *chmap,
2585 int max_channels,
2586 unsigned long private_value,
2587 struct snd_pcm_chmap **info_ret)
2588{
2589 struct snd_pcm_chmap *info;
2590 struct snd_kcontrol_new knew = {
2591 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2592 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2593 SNDRV_CTL_ELEM_ACCESS_VOLATILE |
2594 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2595 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2596 .info = pcm_chmap_ctl_info,
2597 .get = pcm_chmap_ctl_get,
2598 .tlv.c = pcm_chmap_ctl_tlv,
2599 };
2600 int err;
2601
2602 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2603 return -EBUSY;
2604 info = kzalloc(sizeof(*info), GFP_KERNEL);
2605 if (!info)
2606 return -ENOMEM;
2607 info->pcm = pcm;
2608 info->stream = stream;
2609 info->chmap = chmap;
2610 info->max_channels = max_channels;
2611 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2612 knew.name = "Playback Channel Map";
2613 else
2614 knew.name = "Capture Channel Map";
2615 knew.device = pcm->device;
2616 knew.count = pcm->streams[stream].substream_count;
2617 knew.private_value = private_value;
2618 info->kctl = snd_ctl_new1(&knew, info);
2619 if (!info->kctl) {
2620 kfree(info);
2621 return -ENOMEM;
2622 }
2623 info->kctl->private_free = pcm_chmap_ctl_private_free;
2624 err = snd_ctl_add(pcm->card, info->kctl);
2625 if (err < 0)
2626 return err;
2627 pcm->streams[stream].chmap_kctl = info->kctl;
2628 if (info_ret)
2629 *info_ret = info;
2630 return 0;
2631}
2632EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
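/*
 * Example (illustrative): register the standard channel maps for a playback
 * stream with up to 8 channels, ignoring the returned info pointer:
 *
 *   err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *                                snd_pcm_std_chmaps, 8, 0, NULL);
 */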