1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Digital Audio (PCM) abstract layer
4 * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
5 * Abramo Bagnara <abramo@alsa-project.org>
6 */
7
8#include <linux/slab.h>
9#include <linux/sched/signal.h>
10#include <linux/time.h>
11#include <linux/math64.h>
12#include <linux/export.h>
13#include <sound/core.h>
14#include <sound/control.h>
15#include <sound/tlv.h>
16#include <sound/info.h>
17#include <sound/pcm.h>
18#include <sound/pcm_params.h>
19#include <sound/timer.h>
20
21#include "pcm_local.h"
22
23#ifdef CONFIG_SND_PCM_XRUN_DEBUG
24#define CREATE_TRACE_POINTS
25#include "pcm_trace.h"
26#else
27#define trace_hwptr(substream, pos, in_interrupt)
28#define trace_xrun(substream)
29#define trace_hw_ptr_error(substream, reason)
30#define trace_applptr(substream, prev, curr)
31#endif
32
33static int fill_silence_frames(struct snd_pcm_substream *substream,
34 snd_pcm_uframes_t off, snd_pcm_uframes_t frames);
35
36/*
37 * fill ring buffer with silence
38 * runtime->silence_start: starting pointer to silence area
39 * runtime->silence_filled: size filled with silence
40 * runtime->silence_threshold: threshold from application
41 * runtime->silence_size: maximal size from application
42 *
43 * when runtime->silence_size >= runtime->boundary, the already processed area is filled with silence immediately
44 */
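/*
 * Worked example for the thresholded mode (silence_size < boundary), added
 * here for illustration with made-up numbers: with silence_threshold = 256
 * and silence_size = 128, if snd_pcm_playback_hw_avail() reports 100 queued
 * frames still to be played and 50 frames beyond them are already silenced,
 * then noise_dist = 100 + 50 = 150 < 256, so frames = 256 - 150 = 106
 * frames are cleared (this stays below the silence_size cap of 128).
 */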
45void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
46{
47 struct snd_pcm_runtime *runtime = substream->runtime;
48 snd_pcm_uframes_t frames, ofs, transfer;
49 int err;
50
51 if (runtime->silence_size < runtime->boundary) {
52 snd_pcm_sframes_t noise_dist, n;
53 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
54 if (runtime->silence_start != appl_ptr) {
55 n = appl_ptr - runtime->silence_start;
56 if (n < 0)
57 n += runtime->boundary;
58 if ((snd_pcm_uframes_t)n < runtime->silence_filled)
59 runtime->silence_filled -= n;
60 else
61 runtime->silence_filled = 0;
62 runtime->silence_start = appl_ptr;
63 }
64 if (runtime->silence_filled >= runtime->buffer_size)
65 return;
66 noise_dist = snd_pcm_playback_hw_avail(runtime) + runtime->silence_filled;
67 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
68 return;
69 frames = runtime->silence_threshold - noise_dist;
70 if (frames > runtime->silence_size)
71 frames = runtime->silence_size;
72 } else {
73 if (new_hw_ptr == ULONG_MAX) { /* initialization */
74 snd_pcm_sframes_t avail = snd_pcm_playback_hw_avail(runtime);
75 if (avail > runtime->buffer_size)
76 avail = runtime->buffer_size;
77 runtime->silence_filled = avail > 0 ? avail : 0;
78 runtime->silence_start = (runtime->status->hw_ptr +
79 runtime->silence_filled) %
80 runtime->boundary;
81 } else {
82 ofs = runtime->status->hw_ptr;
83 frames = new_hw_ptr - ofs;
84 if ((snd_pcm_sframes_t)frames < 0)
85 frames += runtime->boundary;
86 runtime->silence_filled -= frames;
87 if ((snd_pcm_sframes_t)runtime->silence_filled < 0) {
88 runtime->silence_filled = 0;
89 runtime->silence_start = new_hw_ptr;
90 } else {
91 runtime->silence_start = ofs;
92 }
93 }
94 frames = runtime->buffer_size - runtime->silence_filled;
95 }
96 if (snd_BUG_ON(frames > runtime->buffer_size))
97 return;
98 if (frames == 0)
99 return;
100 ofs = runtime->silence_start % runtime->buffer_size;
101 while (frames > 0) {
102 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
103 err = fill_silence_frames(substream, ofs, transfer);
104 snd_BUG_ON(err < 0);
105 runtime->silence_filled += transfer;
106 frames -= transfer;
107 ofs = 0;
108 }
109}
110
111#ifdef CONFIG_SND_DEBUG
112void snd_pcm_debug_name(struct snd_pcm_substream *substream,
113 char *name, size_t len)
114{
115 snprintf(name, len, "pcmC%dD%d%c:%d",
116 substream->pcm->card->number,
117 substream->pcm->device,
118 substream->stream ? 'c' : 'p',
119 substream->number);
120}
121EXPORT_SYMBOL(snd_pcm_debug_name);
122#endif
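/*
 * Example (illustrative, not part of the original source): for card 0,
 * device 0, the name generated for playback substream 0 is "pcmC0D0p:0",
 * and for capture substream 1 it is "pcmC0D0c:1".  A typical debug use:
 *
 *   char name[16];
 *
 *   snd_pcm_debug_name(substream, name, sizeof(name));
 *   pcm_dbg(substream->pcm, "trigger on %s\n", name);
 */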
123
124#define XRUN_DEBUG_BASIC (1<<0)
125#define XRUN_DEBUG_STACK (1<<1) /* also dump the stack */
126#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
127
128#ifdef CONFIG_SND_PCM_XRUN_DEBUG
129
130#define xrun_debug(substream, mask) \
131 ((substream)->pstr->xrun_debug & (mask))
132#else
133#define xrun_debug(substream, mask) 0
134#endif
135
136#define dump_stack_on_xrun(substream) do { \
137 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
138 dump_stack(); \
139 } while (0)
140
141/* call with stream lock held */
142void __snd_pcm_xrun(struct snd_pcm_substream *substream)
143{
144 struct snd_pcm_runtime *runtime = substream->runtime;
145
146 trace_xrun(substream);
147 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
148 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
149 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
150 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
151 char name[16];
152 snd_pcm_debug_name(substream, name, sizeof(name));
153 pcm_warn(substream->pcm, "XRUN: %s\n", name);
154 dump_stack_on_xrun(substream);
155 }
156}
157
158#ifdef CONFIG_SND_PCM_XRUN_DEBUG
159#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
160 do { \
161 trace_hw_ptr_error(substream, reason); \
162 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
163 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
164 (in_interrupt) ? 'Q' : 'P', ##args); \
165 dump_stack_on_xrun(substream); \
166 } \
167 } while (0)
168
169#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
170
171#define hw_ptr_error(substream, fmt, args...) do { } while (0)
172
173#endif
174
175int snd_pcm_update_state(struct snd_pcm_substream *substream,
176 struct snd_pcm_runtime *runtime)
177{
178 snd_pcm_uframes_t avail;
179
180 avail = snd_pcm_avail(substream);
181 if (avail > runtime->avail_max)
182 runtime->avail_max = avail;
183 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
184 if (avail >= runtime->buffer_size) {
185 snd_pcm_drain_done(substream);
186 return -EPIPE;
187 }
188 } else {
189 if (avail >= runtime->stop_threshold) {
190 __snd_pcm_xrun(substream);
191 return -EPIPE;
192 }
193 }
194 if (runtime->twake) {
195 if (avail >= runtime->twake)
196 wake_up(&runtime->tsleep);
197 } else if (avail >= runtime->control->avail_min)
198 wake_up(&runtime->sleep);
199 return 0;
200}
201
202static void update_audio_tstamp(struct snd_pcm_substream *substream,
203 struct timespec *curr_tstamp,
204 struct timespec *audio_tstamp)
205{
206 struct snd_pcm_runtime *runtime = substream->runtime;
207 u64 audio_frames, audio_nsecs;
208 struct timespec driver_tstamp;
209
210 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
211 return;
212
213 if (!(substream->ops->get_time_info) ||
214 (runtime->audio_tstamp_report.actual_type ==
215 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
216
217 /*
218 * provide audio timestamp derived from pointer position
219 * add delay only if requested
220 */
221
222 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
223
224 if (runtime->audio_tstamp_config.report_delay) {
225 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
226 audio_frames -= runtime->delay;
227 else
228 audio_frames += runtime->delay;
229 }
230 audio_nsecs = div_u64(audio_frames * 1000000000LL,
231 runtime->rate);
232 *audio_tstamp = ns_to_timespec(audio_nsecs);
233 }
234 if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
235 runtime->status->audio_tstamp = *audio_tstamp;
236 runtime->status->tstamp = *curr_tstamp;
237 }
238
239 /*
240 * re-take a driver timestamp to let apps detect if the reference tstamp
241 * read by low-level hardware was provided with a delay
242 */
243 snd_pcm_gettime(substream->runtime, (struct timespec *)&driver_tstamp);
244 runtime->driver_tstamp = driver_tstamp;
245}
246
247static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
248 unsigned int in_interrupt)
249{
250 struct snd_pcm_runtime *runtime = substream->runtime;
251 snd_pcm_uframes_t pos;
252 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
253 snd_pcm_sframes_t hdelta, delta;
254 unsigned long jdelta;
255 unsigned long curr_jiffies;
256 struct timespec curr_tstamp;
257 struct timespec audio_tstamp;
258 int crossed_boundary = 0;
259
260 old_hw_ptr = runtime->status->hw_ptr;
261
262 /*
263 * group pointer, time and jiffies reads to allow for more
264 * accurate correlations/corrections.
265 * The values are stored at the end of this routine after
266 * corrections for hw_ptr position
267 */
268 pos = substream->ops->pointer(substream);
269 curr_jiffies = jiffies;
270 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
271 if ((substream->ops->get_time_info) &&
272 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
273 substream->ops->get_time_info(substream, &curr_tstamp,
274 &audio_tstamp,
275 &runtime->audio_tstamp_config,
276 &runtime->audio_tstamp_report);
277
278 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
279 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
280 snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
281 } else
282 snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
283 }
284
285 if (pos == SNDRV_PCM_POS_XRUN) {
286 __snd_pcm_xrun(substream);
287 return -EPIPE;
288 }
289 if (pos >= runtime->buffer_size) {
290 if (printk_ratelimit()) {
291 char name[16];
292 snd_pcm_debug_name(substream, name, sizeof(name));
293 pcm_err(substream->pcm,
294 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
295 name, pos, runtime->buffer_size,
296 runtime->period_size);
297 }
298 pos = 0;
299 }
300 pos -= pos % runtime->min_align;
301 trace_hwptr(substream, pos, in_interrupt);
302 hw_base = runtime->hw_ptr_base;
303 new_hw_ptr = hw_base + pos;
304 if (in_interrupt) {
305 /* we know that one period was processed */
306 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
307 delta = runtime->hw_ptr_interrupt + runtime->period_size;
308 if (delta > new_hw_ptr) {
309 /* check for double acknowledged interrupts */
310 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
311 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
312 hw_base += runtime->buffer_size;
313 if (hw_base >= runtime->boundary) {
314 hw_base = 0;
315 crossed_boundary++;
316 }
317 new_hw_ptr = hw_base + pos;
318 goto __delta;
319 }
320 }
321 }
322 /* new_hw_ptr might be lower than old_hw_ptr when the */
323 /* pointer crosses the end of the ring buffer */
324 if (new_hw_ptr < old_hw_ptr) {
325 hw_base += runtime->buffer_size;
326 if (hw_base >= runtime->boundary) {
327 hw_base = 0;
328 crossed_boundary++;
329 }
330 new_hw_ptr = hw_base + pos;
331 }
332 __delta:
333 delta = new_hw_ptr - old_hw_ptr;
334 if (delta < 0)
335 delta += runtime->boundary;
336
337 if (runtime->no_period_wakeup) {
338 snd_pcm_sframes_t xrun_threshold;
339 /*
340 * Without regular period interrupts, we have to check
341 * the elapsed time to detect xruns.
342 */
343 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
344 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
345 goto no_delta_check;
346 hdelta = jdelta - delta * HZ / runtime->rate;
347 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
348 while (hdelta > xrun_threshold) {
349 delta += runtime->buffer_size;
350 hw_base += runtime->buffer_size;
351 if (hw_base >= runtime->boundary) {
352 hw_base = 0;
353 crossed_boundary++;
354 }
355 new_hw_ptr = hw_base + pos;
356 hdelta -= runtime->hw_ptr_buffer_jiffies;
357 }
358 goto no_delta_check;
359 }
360
361 /* something must be really wrong */
362 if (delta >= runtime->buffer_size + runtime->period_size) {
363 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
364 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
365 substream->stream, (long)pos,
366 (long)new_hw_ptr, (long)old_hw_ptr);
367 return 0;
368 }
369
370 /* Do jiffies check only in xrun_debug mode */
371 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
372 goto no_jiffies_check;
373
374 /* Skip the jiffies check for hardware with the BATCH flag.
375 * Such hardware usually just increases the position at each IRQ,
376 * thus it can't report any strange position.
377 */
378 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
379 goto no_jiffies_check;
380 hdelta = delta;
381 if (hdelta < runtime->delay)
382 goto no_jiffies_check;
383 hdelta -= runtime->delay;
384 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
385 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
386 delta = jdelta /
387 (((runtime->period_size * HZ) / runtime->rate)
388 + HZ/100);
389 /* move new_hw_ptr according to jiffies, not the pos variable */
390 new_hw_ptr = old_hw_ptr;
391 hw_base = delta;
392 /* use loop to avoid checks for delta overflows */
393 /* the delta value is small or zero in most cases */
394 while (delta > 0) {
395 new_hw_ptr += runtime->period_size;
396 if (new_hw_ptr >= runtime->boundary) {
397 new_hw_ptr -= runtime->boundary;
398 crossed_boundary--;
399 }
400 delta--;
401 }
402 /* align hw_base to buffer_size */
403 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
404 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
405 (long)pos, (long)hdelta,
406 (long)runtime->period_size, jdelta,
407 ((hdelta * HZ) / runtime->rate), hw_base,
408 (unsigned long)old_hw_ptr,
409 (unsigned long)new_hw_ptr);
410 /* reset values to proper state */
411 delta = 0;
412 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
413 }
414 no_jiffies_check:
415 if (delta > runtime->period_size + runtime->period_size / 2) {
416 hw_ptr_error(substream, in_interrupt,
417 "Lost interrupts?",
418 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
419 substream->stream, (long)delta,
420 (long)new_hw_ptr,
421 (long)old_hw_ptr);
422 }
423
424 no_delta_check:
425 if (runtime->status->hw_ptr == new_hw_ptr) {
426 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
427 return 0;
428 }
429
430 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
431 runtime->silence_size > 0)
432 snd_pcm_playback_silence(substream, new_hw_ptr);
433
434 if (in_interrupt) {
435 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
436 if (delta < 0)
437 delta += runtime->boundary;
438 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
439 runtime->hw_ptr_interrupt += delta;
440 if (runtime->hw_ptr_interrupt >= runtime->boundary)
441 runtime->hw_ptr_interrupt -= runtime->boundary;
442 }
443 runtime->hw_ptr_base = hw_base;
444 runtime->status->hw_ptr = new_hw_ptr;
445 runtime->hw_ptr_jiffies = curr_jiffies;
446 if (crossed_boundary) {
447 snd_BUG_ON(crossed_boundary != 1);
448 runtime->hw_ptr_wrap += runtime->boundary;
449 }
450
451 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
452
453 return snd_pcm_update_state(substream, runtime);
454}
455
456/* CAUTION: call it with irq disabled */
457int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
458{
459 return snd_pcm_update_hw_ptr0(substream, 0);
460}
461
462/**
463 * snd_pcm_set_ops - set the PCM operators
464 * @pcm: the pcm instance
465 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
466 * @ops: the operator table
467 *
468 * Sets the given PCM operators to the pcm instance.
469 */
470void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
471 const struct snd_pcm_ops *ops)
472{
473 struct snd_pcm_str *stream = &pcm->streams[direction];
474 struct snd_pcm_substream *substream;
475
476 for (substream = stream->substream; substream != NULL; substream = substream->next)
477 substream->ops = ops;
478}
479EXPORT_SYMBOL(snd_pcm_set_ops);
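/*
 * Usage sketch (illustrative; my_playback_ops and my_capture_ops are
 * hypothetical driver-defined tables):
 *
 *   struct snd_pcm *pcm;
 *   int err;
 *
 *   err = snd_pcm_new(card, "My PCM", 0, 1, 1, &pcm);
 *   if (err < 0)
 *           return err;
 *   snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &my_playback_ops);
 *   snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &my_capture_ops);
 */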
480
481/**
482 * snd_pcm_set_sync - set the PCM sync id
483 * @substream: the pcm substream
484 *
485 * Sets the PCM sync identifier for the card.
486 */
487void snd_pcm_set_sync(struct snd_pcm_substream *substream)
488{
489 struct snd_pcm_runtime *runtime = substream->runtime;
490
491 runtime->sync.id32[0] = substream->pcm->card->number;
492 runtime->sync.id32[1] = -1;
493 runtime->sync.id32[2] = -1;
494 runtime->sync.id32[3] = -1;
495}
496EXPORT_SYMBOL(snd_pcm_set_sync);
497
498/*
499 * Standard ioctl routine
500 */
501
502static inline unsigned int div32(unsigned int a, unsigned int b,
503 unsigned int *r)
504{
505 if (b == 0) {
506 *r = 0;
507 return UINT_MAX;
508 }
509 *r = a % b;
510 return a / b;
511}
512
513static inline unsigned int div_down(unsigned int a, unsigned int b)
514{
515 if (b == 0)
516 return UINT_MAX;
517 return a / b;
518}
519
520static inline unsigned int div_up(unsigned int a, unsigned int b)
521{
522 unsigned int r;
523 unsigned int q;
524 if (b == 0)
525 return UINT_MAX;
526 q = div32(a, b, &r);
527 if (r)
528 ++q;
529 return q;
530}
531
532static inline unsigned int mul(unsigned int a, unsigned int b)
533{
534 if (a == 0)
535 return 0;
536 if (div_down(UINT_MAX, a) < b)
537 return UINT_MAX;
538 return a * b;
539}
540
541static inline unsigned int muldiv32(unsigned int a, unsigned int b,
542 unsigned int c, unsigned int *r)
543{
544 u_int64_t n = (u_int64_t) a * b;
545 if (c == 0) {
546 *r = 0;
547 return UINT_MAX;
548 }
549 n = div_u64_rem(n, c, r);
550 if (n >= UINT_MAX) {
551 *r = 0;
552 return UINT_MAX;
553 }
554 return n;
555}
556
557/**
558 * snd_interval_refine - refine the interval value of configurator
559 * @i: the interval value to refine
560 * @v: the interval value to refer to
561 *
562 * Refines the interval value with the reference value.
563 * The interval is changed to the range satisfying both intervals.
564 * The interval status (min, max, integer, etc.) is re-evaluated.
565 *
566 * Return: Positive if the value is changed, zero if it's not changed, or a
567 * negative error code.
568 */
569int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
570{
571 int changed = 0;
572 if (snd_BUG_ON(snd_interval_empty(i)))
573 return -EINVAL;
574 if (i->min < v->min) {
575 i->min = v->min;
576 i->openmin = v->openmin;
577 changed = 1;
578 } else if (i->min == v->min && !i->openmin && v->openmin) {
579 i->openmin = 1;
580 changed = 1;
581 }
582 if (i->max > v->max) {
583 i->max = v->max;
584 i->openmax = v->openmax;
585 changed = 1;
586 } else if (i->max == v->max && !i->openmax && v->openmax) {
587 i->openmax = 1;
588 changed = 1;
589 }
590 if (!i->integer && v->integer) {
591 i->integer = 1;
592 changed = 1;
593 }
594 if (i->integer) {
595 if (i->openmin) {
596 i->min++;
597 i->openmin = 0;
598 }
599 if (i->openmax) {
600 i->max--;
601 i->openmax = 0;
602 }
603 } else if (!i->openmin && !i->openmax && i->min == i->max)
604 i->integer = 1;
605 if (snd_interval_checkempty(i)) {
606 snd_interval_none(i);
607 return -EINVAL;
608 }
609 return changed;
610}
611EXPORT_SYMBOL(snd_interval_refine);
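/*
 * Worked example (illustrative): refining i = [8000, 48000] with
 * v = (11025, 96000] (v->openmin set) raises i->min to 11025 and marks it
 * open; i->max (48000) is already below v->max, so it is kept.  If i has
 * the integer flag set, the open lower bound is then converted to the
 * closed bound 11026.
 */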
612
613static int snd_interval_refine_first(struct snd_interval *i)
614{
615 const unsigned int last_max = i->max;
616
617 if (snd_BUG_ON(snd_interval_empty(i)))
618 return -EINVAL;
619 if (snd_interval_single(i))
620 return 0;
621 i->max = i->min;
622 if (i->openmin)
623 i->max++;
624 /* only exclude max value if also excluded before refine */
625 i->openmax = (i->openmax && i->max >= last_max);
626 return 1;
627}
628
629static int snd_interval_refine_last(struct snd_interval *i)
630{
631 const unsigned int last_min = i->min;
632
633 if (snd_BUG_ON(snd_interval_empty(i)))
634 return -EINVAL;
635 if (snd_interval_single(i))
636 return 0;
637 i->min = i->max;
638 if (i->openmax)
639 i->min--;
640 /* only exclude min value if also excluded before refine */
641 i->openmin = (i->openmin && i->min <= last_min);
642 return 1;
643}
644
645void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
646{
647 if (a->empty || b->empty) {
648 snd_interval_none(c);
649 return;
650 }
651 c->empty = 0;
652 c->min = mul(a->min, b->min);
653 c->openmin = (a->openmin || b->openmin);
654 c->max = mul(a->max, b->max);
655 c->openmax = (a->openmax || b->openmax);
656 c->integer = (a->integer && b->integer);
657}
658
659/**
660 * snd_interval_div - refine the interval value with division
661 * @a: dividend
662 * @b: divisor
663 * @c: quotient
664 *
665 * c = a / b
666 *
667 * The result is stored in @c.
668 */
669void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
670{
671 unsigned int r;
672 if (a->empty || b->empty) {
673 snd_interval_none(c);
674 return;
675 }
676 c->empty = 0;
677 c->min = div32(a->min, b->max, &r);
678 c->openmin = (r || a->openmin || b->openmax);
679 if (b->min > 0) {
680 c->max = div32(a->max, b->min, &r);
681 if (r) {
682 c->max++;
683 c->openmax = 1;
684 } else
685 c->openmax = (a->openmax || b->openmin);
686 } else {
687 c->max = UINT_MAX;
688 c->openmax = 0;
689 }
690 c->integer = 0;
691}
692
693/**
694 * snd_interval_muldivk - refine the interval value
695 * @a: dividend 1
696 * @b: dividend 2
697 * @k: divisor (as integer)
698 * @c: result
699 *
700 * c = a * b / k
701 *
702 * The result is stored in @c.
703 */
704void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
705 unsigned int k, struct snd_interval *c)
706{
707 unsigned int r;
708 if (a->empty || b->empty) {
709 snd_interval_none(c);
710 return;
711 }
712 c->empty = 0;
713 c->min = muldiv32(a->min, b->min, k, &r);
714 c->openmin = (r || a->openmin || b->openmin);
715 c->max = muldiv32(a->max, b->max, k, &r);
716 if (r) {
717 c->max++;
718 c->openmax = 1;
719 } else
720 c->openmax = (a->openmax || b->openmax);
721 c->integer = 0;
722}
723
724/**
725 * snd_interval_mulkdiv - refine the interval value
726 * @a: dividend 1
727 * @k: dividend 2 (as integer)
728 * @b: divisor
729 * @c: result
730 *
731 * c = a * k / b
732 *
733 * The result is stored in @c.
734 */
735void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
736 const struct snd_interval *b, struct snd_interval *c)
737{
738 unsigned int r;
739 if (a->empty || b->empty) {
740 snd_interval_none(c);
741 return;
742 }
743 c->empty = 0;
744 c->min = muldiv32(a->min, k, b->max, &r);
745 c->openmin = (r || a->openmin || b->openmax);
746 if (b->min > 0) {
747 c->max = muldiv32(a->max, k, b->min, &r);
748 if (r) {
749 c->max++;
750 c->openmax = 1;
751 } else
752 c->openmax = (a->openmax || b->openmin);
753 } else {
754 c->max = UINT_MAX;
755 c->openmax = 0;
756 }
757 c->integer = 0;
758}
759
760/* ---- */
761
762
763/**
764 * snd_interval_ratnum - refine the interval value
765 * @i: interval to refine
766 * @rats_count: number of struct snd_ratnum entries in @rats
767 * @rats: struct snd_ratnum array
768 * @nump: pointer to store the resultant numerator
769 * @denp: pointer to store the resultant denominator
770 *
771 * Return: Positive if the value is changed, zero if it's not changed, or a
772 * negative error code.
773 */
774int snd_interval_ratnum(struct snd_interval *i,
775 unsigned int rats_count, const struct snd_ratnum *rats,
776 unsigned int *nump, unsigned int *denp)
777{
778 unsigned int best_num, best_den;
779 int best_diff;
780 unsigned int k;
781 struct snd_interval t;
782 int err;
783 unsigned int result_num, result_den;
784 int result_diff;
785
786 best_num = best_den = best_diff = 0;
787 for (k = 0; k < rats_count; ++k) {
788 unsigned int num = rats[k].num;
789 unsigned int den;
790 unsigned int q = i->min;
791 int diff;
792 if (q == 0)
793 q = 1;
794 den = div_up(num, q);
795 if (den < rats[k].den_min)
796 continue;
797 if (den > rats[k].den_max)
798 den = rats[k].den_max;
799 else {
800 unsigned int r;
801 r = (den - rats[k].den_min) % rats[k].den_step;
802 if (r != 0)
803 den -= r;
804 }
805 diff = num - q * den;
806 if (diff < 0)
807 diff = -diff;
808 if (best_num == 0 ||
809 diff * best_den < best_diff * den) {
810 best_diff = diff;
811 best_den = den;
812 best_num = num;
813 }
814 }
815 if (best_den == 0) {
816 i->empty = 1;
817 return -EINVAL;
818 }
819 t.min = div_down(best_num, best_den);
820 t.openmin = !!(best_num % best_den);
821
822 result_num = best_num;
823 result_diff = best_diff;
824 result_den = best_den;
825 best_num = best_den = best_diff = 0;
826 for (k = 0; k < rats_count; ++k) {
827 unsigned int num = rats[k].num;
828 unsigned int den;
829 unsigned int q = i->max;
830 int diff;
831 if (q == 0) {
832 i->empty = 1;
833 return -EINVAL;
834 }
835 den = div_down(num, q);
836 if (den > rats[k].den_max)
837 continue;
838 if (den < rats[k].den_min)
839 den = rats[k].den_min;
840 else {
841 unsigned int r;
842 r = (den - rats[k].den_min) % rats[k].den_step;
843 if (r != 0)
844 den += rats[k].den_step - r;
845 }
846 diff = q * den - num;
847 if (diff < 0)
848 diff = -diff;
849 if (best_num == 0 ||
850 diff * best_den < best_diff * den) {
851 best_diff = diff;
852 best_den = den;
853 best_num = num;
854 }
855 }
856 if (best_den == 0) {
857 i->empty = 1;
858 return -EINVAL;
859 }
860 t.max = div_up(best_num, best_den);
861 t.openmax = !!(best_num % best_den);
862 t.integer = 0;
863 err = snd_interval_refine(i, &t);
864 if (err < 0)
865 return err;
866
867 if (snd_interval_single(i)) {
868 if (best_diff * result_den < result_diff * best_den) {
869 result_num = best_num;
870 result_den = best_den;
871 }
872 if (nump)
873 *nump = result_num;
874 if (denp)
875 *denp = result_den;
876 }
877 return err;
878}
879EXPORT_SYMBOL(snd_interval_ratnum);
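/*
 * Worked example (illustrative numbers, not from the original source): with
 * a single struct snd_ratnum { .num = 24576000, .den_min = 256,
 * .den_max = 1024, .den_step = 256 } the reachable rates are
 * 24576000/1024 = 24000, /768 = 32000, /512 = 48000 and /256 = 96000.
 * Refining the rate interval [22050, 48000] therefore narrows it to
 * [24000, 48000]; *nump and *denp are only filled in once the interval has
 * collapsed to a single value.
 */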
880
881/**
882 * snd_interval_ratden - refine the interval value
883 * @i: interval to refine
884 * @rats_count: number of struct snd_ratden entries in @rats
885 * @rats: struct snd_ratden array
886 * @nump: pointer to store the resultant numerator
887 * @denp: pointer to store the resultant denominator
888 *
889 * Return: Positive if the value is changed, zero if it's not changed, or a
890 * negative error code.
891 */
892static int snd_interval_ratden(struct snd_interval *i,
893 unsigned int rats_count,
894 const struct snd_ratden *rats,
895 unsigned int *nump, unsigned int *denp)
896{
897 unsigned int best_num, best_diff, best_den;
898 unsigned int k;
899 struct snd_interval t;
900 int err;
901
902 best_num = best_den = best_diff = 0;
903 for (k = 0; k < rats_count; ++k) {
904 unsigned int num;
905 unsigned int den = rats[k].den;
906 unsigned int q = i->min;
907 int diff;
908 num = mul(q, den);
909 if (num > rats[k].num_max)
910 continue;
911 if (num < rats[k].num_min)
912 num = rats[k].num_min;
913 else {
914 unsigned int r;
915 r = (num - rats[k].num_min) % rats[k].num_step;
916 if (r != 0)
917 num += rats[k].num_step - r;
918 }
919 diff = num - q * den;
920 if (best_num == 0 ||
921 diff * best_den < best_diff * den) {
922 best_diff = diff;
923 best_den = den;
924 best_num = num;
925 }
926 }
927 if (best_den == 0) {
928 i->empty = 1;
929 return -EINVAL;
930 }
931 t.min = div_down(best_num, best_den);
932 t.openmin = !!(best_num % best_den);
933
934 best_num = best_den = best_diff = 0;
935 for (k = 0; k < rats_count; ++k) {
936 unsigned int num;
937 unsigned int den = rats[k].den;
938 unsigned int q = i->max;
939 int diff;
940 num = mul(q, den);
941 if (num < rats[k].num_min)
942 continue;
943 if (num > rats[k].num_max)
944 num = rats[k].num_max;
945 else {
946 unsigned int r;
947 r = (num - rats[k].num_min) % rats[k].num_step;
948 if (r != 0)
949 num -= r;
950 }
951 diff = q * den - num;
952 if (best_num == 0 ||
953 diff * best_den < best_diff * den) {
954 best_diff = diff;
955 best_den = den;
956 best_num = num;
957 }
958 }
959 if (best_den == 0) {
960 i->empty = 1;
961 return -EINVAL;
962 }
963 t.max = div_up(best_num, best_den);
964 t.openmax = !!(best_num % best_den);
965 t.integer = 0;
966 err = snd_interval_refine(i, &t);
967 if (err < 0)
968 return err;
969
970 if (snd_interval_single(i)) {
971 if (nump)
972 *nump = best_num;
973 if (denp)
974 *denp = best_den;
975 }
976 return err;
977}
978
979/**
980 * snd_interval_list - refine the interval value from the list
981 * @i: the interval value to refine
982 * @count: the number of elements in the list
983 * @list: the value list
984 * @mask: the bit-mask to evaluate
985 *
986 * Refines the interval value from the list.
987 * When mask is non-zero, only the list elements whose corresponding mask
988 * bit is set are evaluated.
989 *
990 * Return: Positive if the value is changed, zero if it's not changed, or a
991 * negative error code.
992 */
993int snd_interval_list(struct snd_interval *i, unsigned int count,
994 const unsigned int *list, unsigned int mask)
995{
996 unsigned int k;
997 struct snd_interval list_range;
998
999 if (!count) {
1000 i->empty = 1;
1001 return -EINVAL;
1002 }
1003 snd_interval_any(&list_range);
1004 list_range.min = UINT_MAX;
1005 list_range.max = 0;
1006 for (k = 0; k < count; k++) {
1007 if (mask && !(mask & (1 << k)))
1008 continue;
1009 if (!snd_interval_test(i, list[k]))
1010 continue;
1011 list_range.min = min(list_range.min, list[k]);
1012 list_range.max = max(list_range.max, list[k]);
1013 }
1014 return snd_interval_refine(i, &list_range);
1015}
1016EXPORT_SYMBOL(snd_interval_list);
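/*
 * Worked example (illustrative): refining a rate interval of [8000, 192000]
 * against the list { 32000, 44100, 48000 } (mask = 0) first builds
 * list_range = [32000, 48000] from the entries that fall inside the
 * interval and then hands it to snd_interval_refine(), so the result is
 * the range [32000, 48000] rather than a single value.
 */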
1017
1018/**
1019 * snd_interval_ranges - refine the interval value from the list of ranges
1020 * @i: the interval value to refine
1021 * @count: the number of elements in the list of ranges
1022 * @ranges: the ranges list
1023 * @mask: the bit-mask to evaluate
1024 *
1025 * Refines the interval value from the list of ranges.
1026 * When mask is non-zero, only the range elements whose corresponding mask
1027 * bit is set are evaluated.
1028 *
1029 * Return: Positive if the value is changed, zero if it's not changed, or a
1030 * negative error code.
1031 */
1032int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1033 const struct snd_interval *ranges, unsigned int mask)
1034{
1035 unsigned int k;
1036 struct snd_interval range_union;
1037 struct snd_interval range;
1038
1039 if (!count) {
1040 snd_interval_none(i);
1041 return -EINVAL;
1042 }
1043 snd_interval_any(&range_union);
1044 range_union.min = UINT_MAX;
1045 range_union.max = 0;
1046 for (k = 0; k < count; k++) {
1047 if (mask && !(mask & (1 << k)))
1048 continue;
1049 snd_interval_copy(&range, &ranges[k]);
1050 if (snd_interval_refine(&range, i) < 0)
1051 continue;
1052 if (snd_interval_empty(&range))
1053 continue;
1054
1055 if (range.min < range_union.min) {
1056 range_union.min = range.min;
1057 range_union.openmin = 1;
1058 }
1059 if (range.min == range_union.min && !range.openmin)
1060 range_union.openmin = 0;
1061 if (range.max > range_union.max) {
1062 range_union.max = range.max;
1063 range_union.openmax = 1;
1064 }
1065 if (range.max == range_union.max && !range.openmax)
1066 range_union.openmax = 0;
1067 }
1068 return snd_interval_refine(i, &range_union);
1069}
1070EXPORT_SYMBOL(snd_interval_ranges);
1071
1072static int snd_interval_step(struct snd_interval *i, unsigned int step)
1073{
1074 unsigned int n;
1075 int changed = 0;
1076 n = i->min % step;
1077 if (n != 0 || i->openmin) {
1078 i->min += step - n;
1079 i->openmin = 0;
1080 changed = 1;
1081 }
1082 n = i->max % step;
1083 if (n != 0 || i->openmax) {
1084 i->max -= n;
1085 i->openmax = 0;
1086 changed = 1;
1087 }
1088 if (snd_interval_checkempty(i)) {
1089 i->empty = 1;
1090 return -EINVAL;
1091 }
1092 return changed;
1093}
1094
1095/* Info constraints helpers */
1096
1097/**
1098 * snd_pcm_hw_rule_add - add the hw-constraint rule
1099 * @runtime: the pcm runtime instance
1100 * @cond: condition bits
1101 * @var: the variable to evaluate
1102 * @func: the evaluation function
1103 * @private: the private data pointer passed to the function
1104 * @dep: the dependent variables
1105 *
1106 * Return: Zero if successful, or a negative error code on failure.
1107 */
1108int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1109 int var,
1110 snd_pcm_hw_rule_func_t func, void *private,
1111 int dep, ...)
1112{
1113 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1114 struct snd_pcm_hw_rule *c;
1115 unsigned int k;
1116 va_list args;
1117 va_start(args, dep);
1118 if (constrs->rules_num >= constrs->rules_all) {
1119 struct snd_pcm_hw_rule *new;
1120 unsigned int new_rules = constrs->rules_all + 16;
1121 new = krealloc(constrs->rules, new_rules * sizeof(*c),
1122 GFP_KERNEL);
1123 if (!new) {
1124 va_end(args);
1125 return -ENOMEM;
1126 }
1127 constrs->rules = new;
1128 constrs->rules_all = new_rules;
1129 }
1130 c = &constrs->rules[constrs->rules_num];
1131 c->cond = cond;
1132 c->func = func;
1133 c->var = var;
1134 c->private = private;
1135 k = 0;
1136 while (1) {
1137 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1138 va_end(args);
1139 return -EINVAL;
1140 }
1141 c->deps[k++] = dep;
1142 if (dep < 0)
1143 break;
1144 dep = va_arg(args, int);
1145 }
1146 constrs->rules_num++;
1147 va_end(args);
1148 return 0;
1149}
1150EXPORT_SYMBOL(snd_pcm_hw_rule_add);
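/*
 * Usage sketch (illustrative; my_channels_rule is a hypothetical driver
 * callback): a driver can couple two hw_params variables by registering a
 * rule from its open callback, e.g. restricting the channel count
 * depending on the chosen rate:
 *
 *   static int my_channels_rule(struct snd_pcm_hw_params *params,
 *                               struct snd_pcm_hw_rule *rule)
 *   {
 *           struct snd_interval *c =
 *                   hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *           const struct snd_interval *r =
 *                   hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *           struct snd_interval t = *c;
 *
 *           if (r->min > 96000)
 *                   t.max = 2;      // hypothetical device limit
 *           return snd_interval_refine(c, &t);
 *   }
 *
 *   err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *                             my_channels_rule, NULL,
 *                             SNDRV_PCM_HW_PARAM_RATE, -1);
 */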
1151
1152/**
1153 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1154 * @runtime: PCM runtime instance
1155 * @var: hw_params variable to apply the mask
1156 * @mask: the bitmap mask
1157 *
1158 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1159 *
1160 * Return: Zero if successful, or a negative error code on failure.
1161 */
1162int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1163 u_int32_t mask)
1164{
1165 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1166 struct snd_mask *maskp = constrs_mask(constrs, var);
1167 *maskp->bits &= mask;
1168 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1169 if (*maskp->bits == 0)
1170 return -EINVAL;
1171 return 0;
1172}
1173
1174/**
1175 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1176 * @runtime: PCM runtime instance
1177 * @var: hw_params variable to apply the mask
1178 * @mask: the 64bit bitmap mask
1179 *
1180 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1181 *
1182 * Return: Zero if successful, or a negative error code on failure.
1183 */
1184int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1185 u_int64_t mask)
1186{
1187 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1188 struct snd_mask *maskp = constrs_mask(constrs, var);
1189 maskp->bits[0] &= (u_int32_t)mask;
1190 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1191 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1192 if (! maskp->bits[0] && ! maskp->bits[1])
1193 return -EINVAL;
1194 return 0;
1195}
1196EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
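/*
 * Usage sketch (illustrative): limit the stream to two sample formats by
 * intersecting the FORMAT mask, e.g. from the open callback:
 *
 *   err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT,
 *                                      SNDRV_PCM_FMTBIT_S16_LE |
 *                                      SNDRV_PCM_FMTBIT_S32_LE);
 */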
1197
1198/**
1199 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1200 * @runtime: PCM runtime instance
1201 * @var: hw_params variable to apply the integer constraint
1202 *
1203 * Apply an integer constraint to an interval parameter.
1204 *
1205 * Return: Positive if the value is changed, zero if it's not changed, or a
1206 * negative error code.
1207 */
1208int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1209{
1210 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1211 return snd_interval_setinteger(constrs_interval(constrs, var));
1212}
1213EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
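/*
 * Usage sketch (illustrative): the most common application is forcing a
 * whole number of periods per buffer, typically from the open callback:
 *
 *   err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
 *   if (err < 0)
 *           return err;
 */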
1214
1215/**
1216 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1217 * @runtime: PCM runtime instance
1218 * @var: hw_params variable to apply the range
1219 * @min: the minimal value
1220 * @max: the maximal value
1221 *
1222 * Apply the min/max range constraint to an interval parameter.
1223 *
1224 * Return: Positive if the value is changed, zero if it's not changed, or a
1225 * negative error code.
1226 */
1227int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1228 unsigned int min, unsigned int max)
1229{
1230 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1231 struct snd_interval t;
1232 t.min = min;
1233 t.max = max;
1234 t.openmin = t.openmax = 0;
1235 t.integer = 0;
1236 return snd_interval_refine(constrs_interval(constrs, var), &t);
1237}
1238EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
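/*
 * Usage sketch (illustrative limits): clamp the total buffer size to a
 * range supported by a hypothetical device:
 *
 *   err = snd_pcm_hw_constraint_minmax(runtime,
 *                                      SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
 *                                      4096, 64 * 1024);
 */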
1239
1240static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1241 struct snd_pcm_hw_rule *rule)
1242{
1243 struct snd_pcm_hw_constraint_list *list = rule->private;
1244 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1245}
1246
1247
1248/**
1249 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1250 * @runtime: PCM runtime instance
1251 * @cond: condition bits
1252 * @var: hw_params variable to apply the list constraint
1253 * @l: list
1254 *
1255 * Apply the list of constraints to an interval parameter.
1256 *
1257 * Return: Zero if successful, or a negative error code on failure.
1258 */
1259int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1260 unsigned int cond,
1261 snd_pcm_hw_param_t var,
1262 const struct snd_pcm_hw_constraint_list *l)
1263{
1264 return snd_pcm_hw_rule_add(runtime, cond, var,
1265 snd_pcm_hw_rule_list, (void *)l,
1266 var, -1);
1267}
1268EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
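/*
 * Usage sketch (illustrative; my_rates and my_rate_list are hypothetical):
 *
 *   static const unsigned int my_rates[] = { 44100, 48000, 96000 };
 *   static const struct snd_pcm_hw_constraint_list my_rate_list = {
 *           .count = ARRAY_SIZE(my_rates),
 *           .list = my_rates,
 *           .mask = 0,
 *   };
 *
 *   err = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 *                                    &my_rate_list);
 */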
1269
1270static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1271 struct snd_pcm_hw_rule *rule)
1272{
1273 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1274 return snd_interval_ranges(hw_param_interval(params, rule->var),
1275 r->count, r->ranges, r->mask);
1276}
1277
1278
1279/**
1280 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1281 * @runtime: PCM runtime instance
1282 * @cond: condition bits
1283 * @var: hw_params variable to apply the list of range constraints
1284 * @r: ranges
1285 *
1286 * Apply the list of range constraints to an interval parameter.
1287 *
1288 * Return: Zero if successful, or a negative error code on failure.
1289 */
1290int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1291 unsigned int cond,
1292 snd_pcm_hw_param_t var,
1293 const struct snd_pcm_hw_constraint_ranges *r)
1294{
1295 return snd_pcm_hw_rule_add(runtime, cond, var,
1296 snd_pcm_hw_rule_ranges, (void *)r,
1297 var, -1);
1298}
1299EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1300
1301static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1302 struct snd_pcm_hw_rule *rule)
1303{
1304 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1305 unsigned int num = 0, den = 0;
1306 int err;
1307 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1308 r->nrats, r->rats, &num, &den);
1309 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1310 params->rate_num = num;
1311 params->rate_den = den;
1312 }
1313 return err;
1314}
1315
1316/**
1317 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1318 * @runtime: PCM runtime instance
1319 * @cond: condition bits
1320 * @var: hw_params variable to apply the ratnums constraint
1321 * @r: struct snd_pcm_hw_constraint_ratnums constraints
1322 *
1323 * Return: Zero if successful, or a negative error code on failure.
1324 */
1325int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1326 unsigned int cond,
1327 snd_pcm_hw_param_t var,
1328 const struct snd_pcm_hw_constraint_ratnums *r)
1329{
1330 return snd_pcm_hw_rule_add(runtime, cond, var,
1331 snd_pcm_hw_rule_ratnums, (void *)r,
1332 var, -1);
1333}
1334EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1335
1336static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1337 struct snd_pcm_hw_rule *rule)
1338{
1339 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1340 unsigned int num = 0, den = 0;
1341 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1342 r->nrats, r->rats, &num, &den);
1343 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1344 params->rate_num = num;
1345 params->rate_den = den;
1346 }
1347 return err;
1348}
1349
1350/**
1351 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1352 * @runtime: PCM runtime instance
1353 * @cond: condition bits
1354 * @var: hw_params variable to apply the ratdens constraint
1355 * @r: struct snd_pcm_hw_constraint_ratdens constraints
1356 *
1357 * Return: Zero if successful, or a negative error code on failure.
1358 */
1359int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1360 unsigned int cond,
1361 snd_pcm_hw_param_t var,
1362 const struct snd_pcm_hw_constraint_ratdens *r)
1363{
1364 return snd_pcm_hw_rule_add(runtime, cond, var,
1365 snd_pcm_hw_rule_ratdens, (void *)r,
1366 var, -1);
1367}
1368EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1369
1370static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1371 struct snd_pcm_hw_rule *rule)
1372{
1373 unsigned int l = (unsigned long) rule->private;
1374 int width = l & 0xffff;
1375 unsigned int msbits = l >> 16;
1376 const struct snd_interval *i =
1377 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1378
1379 if (!snd_interval_single(i))
1380 return 0;
1381
1382 if ((snd_interval_value(i) == width) ||
1383 (width == 0 && snd_interval_value(i) > msbits))
1384 params->msbits = min_not_zero(params->msbits, msbits);
1385
1386 return 0;
1387}
1388
1389/**
1390 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1391 * @runtime: PCM runtime instance
1392 * @cond: condition bits
1393 * @width: sample bits width
1394 * @msbits: msbits width
1395 *
1396 * This constraint will set the number of most significant bits (msbits) if a
1397 * sample format with the specified width has been selected. If width is set to 0
1398 * the msbits will be set for any sample format with a width larger than the
1399 * specified msbits.
1400 *
1401 * Return: Zero if successful, or a negative error code on failure.
1402 */
1403int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1404 unsigned int cond,
1405 unsigned int width,
1406 unsigned int msbits)
1407{
1408 unsigned long l = (msbits << 16) | width;
1409 return snd_pcm_hw_rule_add(runtime, cond, -1,
1410 snd_pcm_hw_rule_msbits,
1411 (void*) l,
1412 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1413}
1414EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
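/*
 * Usage sketch (illustrative): a converter that stores 24 valid bits in a
 * 32-bit sample container would advertise it as:
 *
 *   err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 */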
1415
1416static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1417 struct snd_pcm_hw_rule *rule)
1418{
1419 unsigned long step = (unsigned long) rule->private;
1420 return snd_interval_step(hw_param_interval(params, rule->var), step);
1421}
1422
1423/**
1424 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1425 * @runtime: PCM runtime instance
1426 * @cond: condition bits
1427 * @var: hw_params variable to apply the step constraint
1428 * @step: step size
1429 *
1430 * Return: Zero if successful, or a negative error code on failure.
1431 */
1432int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1433 unsigned int cond,
1434 snd_pcm_hw_param_t var,
1435 unsigned long step)
1436{
1437 return snd_pcm_hw_rule_add(runtime, cond, var,
1438 snd_pcm_hw_rule_step, (void *) step,
1439 var, -1);
1440}
1441EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
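/*
 * Usage sketch (illustrative step value): force the period size in bytes to
 * a multiple of a hypothetical DMA burst size:
 *
 *   err = snd_pcm_hw_constraint_step(runtime, 0,
 *                                    SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 */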
1442
1443static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1444{
1445 static unsigned int pow2_sizes[] = {
1446 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1447 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1448 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1449 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1450 };
1451 return snd_interval_list(hw_param_interval(params, rule->var),
1452 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1453}
1454
1455/**
1456 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1457 * @runtime: PCM runtime instance
1458 * @cond: condition bits
1459 * @var: hw_params variable to apply the power-of-2 constraint
1460 *
1461 * Return: Zero if successful, or a negative error code on failure.
1462 */
1463int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1464 unsigned int cond,
1465 snd_pcm_hw_param_t var)
1466{
1467 return snd_pcm_hw_rule_add(runtime, cond, var,
1468 snd_pcm_hw_rule_pow2, NULL,
1469 var, -1);
1470}
1471EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
1472
1473static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1474 struct snd_pcm_hw_rule *rule)
1475{
1476 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1477 struct snd_interval *rate;
1478
1479 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1480 return snd_interval_list(rate, 1, &base_rate, 0);
1481}
1482
1483/**
1484 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1485 * @runtime: PCM runtime instance
1486 * @base_rate: the rate at which the hardware does not resample
1487 *
1488 * Return: Zero if successful, or a negative error code on failure.
1489 */
1490int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1491 unsigned int base_rate)
1492{
1493 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1494 SNDRV_PCM_HW_PARAM_RATE,
1495 snd_pcm_hw_rule_noresample_func,
1496 (void *)(uintptr_t)base_rate,
1497 SNDRV_PCM_HW_PARAM_RATE, -1);
1498}
1499EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
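/*
 * Usage sketch (illustrative): a device whose native rate is 48000 Hz can
 * register this rule so that, when the application passes the
 * SNDRV_PCM_HW_PARAMS_NORESAMPLE flag, the rate is pinned to 48000:
 *
 *   err = snd_pcm_hw_rule_noresample(runtime, 48000);
 */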
1500
1501static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1502 snd_pcm_hw_param_t var)
1503{
1504 if (hw_is_mask(var)) {
1505 snd_mask_any(hw_param_mask(params, var));
1506 params->cmask |= 1 << var;
1507 params->rmask |= 1 << var;
1508 return;
1509 }
1510 if (hw_is_interval(var)) {
1511 snd_interval_any(hw_param_interval(params, var));
1512 params->cmask |= 1 << var;
1513 params->rmask |= 1 << var;
1514 return;
1515 }
1516 snd_BUG();
1517}
1518
1519void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1520{
1521 unsigned int k;
1522 memset(params, 0, sizeof(*params));
1523 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1524 _snd_pcm_hw_param_any(params, k);
1525 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1526 _snd_pcm_hw_param_any(params, k);
1527 params->info = ~0U;
1528}
1529EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1530
1531/**
1532 * snd_pcm_hw_param_value - return @params field @var value
1533 * @params: the hw_params instance
1534 * @var: parameter to retrieve
1535 * @dir: pointer to the direction (-1,0,1) or %NULL
1536 *
1537 * Return: The value for field @var if it's fixed in configuration space
1538 * defined by @params. -%EINVAL otherwise.
1539 */
1540int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1541 snd_pcm_hw_param_t var, int *dir)
1542{
1543 if (hw_is_mask(var)) {
1544 const struct snd_mask *mask = hw_param_mask_c(params, var);
1545 if (!snd_mask_single(mask))
1546 return -EINVAL;
1547 if (dir)
1548 *dir = 0;
1549 return snd_mask_value(mask);
1550 }
1551 if (hw_is_interval(var)) {
1552 const struct snd_interval *i = hw_param_interval_c(params, var);
1553 if (!snd_interval_single(i))
1554 return -EINVAL;
1555 if (dir)
1556 *dir = i->openmin;
1557 return snd_interval_value(i);
1558 }
1559 return -EINVAL;
1560}
1561EXPORT_SYMBOL(snd_pcm_hw_param_value);
1562
1563void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1564 snd_pcm_hw_param_t var)
1565{
1566 if (hw_is_mask(var)) {
1567 snd_mask_none(hw_param_mask(params, var));
1568 params->cmask |= 1 << var;
1569 params->rmask |= 1 << var;
1570 } else if (hw_is_interval(var)) {
1571 snd_interval_none(hw_param_interval(params, var));
1572 params->cmask |= 1 << var;
1573 params->rmask |= 1 << var;
1574 } else {
1575 snd_BUG();
1576 }
1577}
1578EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1579
1580static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1581 snd_pcm_hw_param_t var)
1582{
1583 int changed;
1584 if (hw_is_mask(var))
1585 changed = snd_mask_refine_first(hw_param_mask(params, var));
1586 else if (hw_is_interval(var))
1587 changed = snd_interval_refine_first(hw_param_interval(params, var));
1588 else
1589 return -EINVAL;
1590 if (changed > 0) {
1591 params->cmask |= 1 << var;
1592 params->rmask |= 1 << var;
1593 }
1594 return changed;
1595}
1596
1597
1598/**
1599 * snd_pcm_hw_param_first - refine config space and return minimum value
1600 * @pcm: PCM instance
1601 * @params: the hw_params instance
1602 * @var: parameter to retrieve
1603 * @dir: pointer to the direction (-1,0,1) or %NULL
1604 *
1605 * Inside configuration space defined by @params remove from @var all
1606 * values > minimum. Reduce configuration space accordingly.
1607 *
1608 * Return: The minimum, or a negative error code on failure.
1609 */
1610int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1611 struct snd_pcm_hw_params *params,
1612 snd_pcm_hw_param_t var, int *dir)
1613{
1614 int changed = _snd_pcm_hw_param_first(params, var);
1615 if (changed < 0)
1616 return changed;
1617 if (params->rmask) {
1618 int err = snd_pcm_hw_refine(pcm, params);
1619 if (err < 0)
1620 return err;
1621 }
1622 return snd_pcm_hw_param_value(params, var, dir);
1623}
1624EXPORT_SYMBOL(snd_pcm_hw_param_first);
1625
1626static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1627 snd_pcm_hw_param_t var)
1628{
1629 int changed;
1630 if (hw_is_mask(var))
1631 changed = snd_mask_refine_last(hw_param_mask(params, var));
1632 else if (hw_is_interval(var))
1633 changed = snd_interval_refine_last(hw_param_interval(params, var));
1634 else
1635 return -EINVAL;
1636 if (changed > 0) {
1637 params->cmask |= 1 << var;
1638 params->rmask |= 1 << var;
1639 }
1640 return changed;
1641}
1642
1643
1644/**
1645 * snd_pcm_hw_param_last - refine config space and return maximum value
1646 * @pcm: PCM instance
1647 * @params: the hw_params instance
1648 * @var: parameter to retrieve
1649 * @dir: pointer to the direction (-1,0,1) or %NULL
1650 *
1651 * Inside configuration space defined by @params remove from @var all
1652 * values < maximum. Reduce configuration space accordingly.
1653 *
1654 * Return: The maximum, or a negative error code on failure.
1655 */
1656int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1657 struct snd_pcm_hw_params *params,
1658 snd_pcm_hw_param_t var, int *dir)
1659{
1660 int changed = _snd_pcm_hw_param_last(params, var);
1661 if (changed < 0)
1662 return changed;
1663 if (params->rmask) {
1664 int err = snd_pcm_hw_refine(pcm, params);
1665 if (err < 0)
1666 return err;
1667 }
1668 return snd_pcm_hw_param_value(params, var, dir);
1669}
1670EXPORT_SYMBOL(snd_pcm_hw_param_last);
1671
1672static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1673 void *arg)
1674{
1675 struct snd_pcm_runtime *runtime = substream->runtime;
1676 unsigned long flags;
1677 snd_pcm_stream_lock_irqsave(substream, flags);
1678 if (snd_pcm_running(substream) &&
1679 snd_pcm_update_hw_ptr(substream) >= 0)
1680 runtime->status->hw_ptr %= runtime->buffer_size;
1681 else {
1682 runtime->status->hw_ptr = 0;
1683 runtime->hw_ptr_wrap = 0;
1684 }
1685 snd_pcm_stream_unlock_irqrestore(substream, flags);
1686 return 0;
1687}
1688
1689static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1690 void *arg)
1691{
1692 struct snd_pcm_channel_info *info = arg;
1693 struct snd_pcm_runtime *runtime = substream->runtime;
1694 int width;
1695 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1696 info->offset = -1;
1697 return 0;
1698 }
1699 width = snd_pcm_format_physical_width(runtime->format);
1700 if (width < 0)
1701 return width;
1702 info->offset = 0;
1703 switch (runtime->access) {
1704 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1705 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1706 info->first = info->channel * width;
1707 info->step = runtime->channels * width;
1708 break;
1709 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1710 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1711 {
1712 size_t size = runtime->dma_bytes / runtime->channels;
1713 info->first = info->channel * size * 8;
1714 info->step = width;
1715 break;
1716 }
1717 default:
1718 snd_BUG();
1719 break;
1720 }
1721 return 0;
1722}
1723
1724static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1725 void *arg)
1726{
1727 struct snd_pcm_hw_params *params = arg;
1728 snd_pcm_format_t format;
1729 int channels;
1730 ssize_t frame_size;
1731
1732 params->fifo_size = substream->runtime->hw.fifo_size;
1733 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1734 format = params_format(params);
1735 channels = params_channels(params);
1736 frame_size = snd_pcm_format_size(format, channels);
1737 if (frame_size > 0)
1738 params->fifo_size /= (unsigned)frame_size;
1739 }
1740 return 0;
1741}
1742
1743/**
1744 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1745 * @substream: the pcm substream instance
1746 * @cmd: ioctl command
1747 * @arg: ioctl argument
1748 *
1749 * Processes the generic ioctl commands for PCM.
1750 * Can be passed as the ioctl callback for PCM ops.
1751 *
1752 * Return: Zero if successful, or a negative error code on failure.
1753 */
1754int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1755 unsigned int cmd, void *arg)
1756{
1757 switch (cmd) {
1758 case SNDRV_PCM_IOCTL1_RESET:
1759 return snd_pcm_lib_ioctl_reset(substream, arg);
1760 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1761 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1762 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1763 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1764 }
1765 return -ENXIO;
1766}
1767EXPORT_SYMBOL(snd_pcm_lib_ioctl);
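/*
 * Usage sketch (illustrative; the my_* callbacks are hypothetical): drivers
 * that need no special ioctl handling simply plug this helper into their
 * ops table:
 *
 *   static const struct snd_pcm_ops my_pcm_ops = {
 *           .open      = my_open,
 *           .close     = my_close,
 *           .ioctl     = snd_pcm_lib_ioctl,
 *           .hw_params = my_hw_params,
 *           .hw_free   = my_hw_free,
 *           .prepare   = my_prepare,
 *           .trigger   = my_trigger,
 *           .pointer   = my_pointer,
 *   };
 */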
1768
1769/**
1770 * snd_pcm_period_elapsed - update the pcm status for the next period
1771 * @substream: the pcm substream instance
1772 *
1773 * This function is called from the interrupt handler when the
1774 * PCM has processed the period size. It will update the current
1775 * pointer, wake up sleepers, etc.
1776 *
1777 * Even if more than one period has elapsed since the last call, you
1778 * need to call this only once.
1779 */
1780void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1781{
1782 struct snd_pcm_runtime *runtime;
1783 unsigned long flags;
1784
1785 if (snd_BUG_ON(!substream))
1786 return;
1787
1788 snd_pcm_stream_lock_irqsave(substream, flags);
1789 if (PCM_RUNTIME_CHECK(substream))
1790 goto _unlock;
1791 runtime = substream->runtime;
1792
1793 if (!snd_pcm_running(substream) ||
1794 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1795 goto _end;
1796
1797#ifdef CONFIG_SND_PCM_TIMER
1798 if (substream->timer_running)
1799 snd_timer_interrupt(substream->timer, 1);
1800#endif
1801 _end:
1802 kill_fasync(&runtime->fasync, SIGIO, POLL_IN);
1803 _unlock:
1804 snd_pcm_stream_unlock_irqrestore(substream, flags);
1805}
1806EXPORT_SYMBOL(snd_pcm_period_elapsed);
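/*
 * Usage sketch (illustrative; struct my_chip and the my_* register helpers
 * are hypothetical): the call is normally made from the interrupt handler
 * once the period interrupt has been acknowledged:
 *
 *   static irqreturn_t my_interrupt(int irq, void *dev_id)
 *   {
 *           struct my_chip *chip = dev_id;
 *
 *           if (!my_period_irq_pending(chip))
 *                   return IRQ_NONE;
 *           my_ack_period_irq(chip);
 *           snd_pcm_period_elapsed(chip->substream);
 *           return IRQ_HANDLED;
 *   }
 */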
1807
1808/*
1809 * Wait until avail_min data becomes available
1810 * Returns a negative error code if any error occurs during operation.
1811 * The available space is stored in *availp. When err = 0 and avail = 0
1812 * for a capture stream, it indicates the stream is in the DRAINING state.
1813 */
1814static int wait_for_avail(struct snd_pcm_substream *substream,
1815 snd_pcm_uframes_t *availp)
1816{
1817 struct snd_pcm_runtime *runtime = substream->runtime;
1818 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1819 wait_queue_entry_t wait;
1820 int err = 0;
1821 snd_pcm_uframes_t avail = 0;
1822 long wait_time, tout;
1823
1824 init_waitqueue_entry(&wait, current);
1825 set_current_state(TASK_INTERRUPTIBLE);
1826 add_wait_queue(&runtime->tsleep, &wait);
1827
1828 if (runtime->no_period_wakeup)
1829 wait_time = MAX_SCHEDULE_TIMEOUT;
1830 else {
1831 /* use wait time from substream if available */
1832 if (substream->wait_time) {
1833 wait_time = substream->wait_time;
1834 } else {
1835 wait_time = 10;
1836
1837 if (runtime->rate) {
1838 long t = runtime->period_size * 2 /
1839 runtime->rate;
1840 wait_time = max(t, wait_time);
1841 }
1842 wait_time = msecs_to_jiffies(wait_time * 1000);
1843 }
1844 }
1845
1846 for (;;) {
1847 if (signal_pending(current)) {
1848 err = -ERESTARTSYS;
1849 break;
1850 }
1851
1852 /*
1853 * We need to check if space became available already
1854 * (and thus the wakeup happened already) first to close
1855 * the race of space already having become available.
1856 * This check must happen after we have been added to the waitqueue
1857 * and the current state has been set to TASK_INTERRUPTIBLE.
1858 */
1859 avail = snd_pcm_avail(substream);
1860 if (avail >= runtime->twake)
1861 break;
1862 snd_pcm_stream_unlock_irq(substream);
1863
1864 tout = schedule_timeout(wait_time);
1865
1866 snd_pcm_stream_lock_irq(substream);
1867 set_current_state(TASK_INTERRUPTIBLE);
1868 switch (runtime->status->state) {
1869 case SNDRV_PCM_STATE_SUSPENDED:
1870 err = -ESTRPIPE;
1871 goto _endloop;
1872 case SNDRV_PCM_STATE_XRUN:
1873 err = -EPIPE;
1874 goto _endloop;
1875 case SNDRV_PCM_STATE_DRAINING:
1876 if (is_playback)
1877 err = -EPIPE;
1878 else
1879 avail = 0; /* indicate draining */
1880 goto _endloop;
1881 case SNDRV_PCM_STATE_OPEN:
1882 case SNDRV_PCM_STATE_SETUP:
1883 case SNDRV_PCM_STATE_DISCONNECTED:
1884 err = -EBADFD;
1885 goto _endloop;
1886 case SNDRV_PCM_STATE_PAUSED:
1887 continue;
1888 }
1889 if (!tout) {
1890 pcm_dbg(substream->pcm,
1891 "%s write error (DMA or IRQ trouble?)\n",
1892 is_playback ? "playback" : "capture");
1893 err = -EIO;
1894 break;
1895 }
1896 }
1897 _endloop:
1898 set_current_state(TASK_RUNNING);
1899 remove_wait_queue(&runtime->tsleep, &wait);
1900 *availp = avail;
1901 return err;
1902}
1903
1904typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
1905 int channel, unsigned long hwoff,
1906 void *buf, unsigned long bytes);
1907
1908typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
1909 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f);
1910
1911/* calculate the target DMA-buffer position to be written/read */
1912static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
1913 int channel, unsigned long hwoff)
1914{
1915 return runtime->dma_area + hwoff +
1916 channel * (runtime->dma_bytes / runtime->channels);
1917}
1918
1919/* default copy_user ops for write; used for both interleaved and non-interleaved modes */
1920static int default_write_copy(struct snd_pcm_substream *substream,
1921 int channel, unsigned long hwoff,
1922 void *buf, unsigned long bytes)
1923{
1924 if (copy_from_user(get_dma_ptr(substream->runtime, channel, hwoff),
1925 (void __user *)buf, bytes))
1926 return -EFAULT;
1927 return 0;
1928}
1929
1930/* default copy_kernel ops for write */
1931static int default_write_copy_kernel(struct snd_pcm_substream *substream,
1932 int channel, unsigned long hwoff,
1933 void *buf, unsigned long bytes)
1934{
1935 memcpy(get_dma_ptr(substream->runtime, channel, hwoff), buf, bytes);
1936 return 0;
1937}
1938
1939/* fill silence instead of copy data; called as a transfer helper
1940 * from __snd_pcm_lib_xfer() or directly from noninterleaved_copy() when
1941 * a NULL buffer is passed
1942 */
1943static int fill_silence(struct snd_pcm_substream *substream, int channel,
1944 unsigned long hwoff, void *buf, unsigned long bytes)
1945{
1946 struct snd_pcm_runtime *runtime = substream->runtime;
1947
1948 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
1949 return 0;
1950 if (substream->ops->fill_silence)
1951 return substream->ops->fill_silence(substream, channel,
1952 hwoff, bytes);
1953
1954 snd_pcm_format_set_silence(runtime->format,
1955 get_dma_ptr(runtime, channel, hwoff),
1956 bytes_to_samples(runtime, bytes));
1957 return 0;
1958}
1959
1960/* default copy_user ops for read; used for both interleaved and non-interleaved modes */
1961static int default_read_copy(struct snd_pcm_substream *substream,
1962 int channel, unsigned long hwoff,
1963 void *buf, unsigned long bytes)
1964{
1965 if (copy_to_user((void __user *)buf,
1966 get_dma_ptr(substream->runtime, channel, hwoff),
1967 bytes))
1968 return -EFAULT;
1969 return 0;
1970}
1971
1972/* default copy_kernel ops for read */
1973static int default_read_copy_kernel(struct snd_pcm_substream *substream,
1974 int channel, unsigned long hwoff,
1975 void *buf, unsigned long bytes)
1976{
1977 memcpy(buf, get_dma_ptr(substream->runtime, channel, hwoff), bytes);
1978 return 0;
1979}
1980
1981/* call transfer function with the converted pointers and sizes;
1982 * for interleaved mode, it's one shot for all samples
1983 */
1984static int interleaved_copy(struct snd_pcm_substream *substream,
1985 snd_pcm_uframes_t hwoff, void *data,
1986 snd_pcm_uframes_t off,
1987 snd_pcm_uframes_t frames,
1988 pcm_transfer_f transfer)
1989{
1990 struct snd_pcm_runtime *runtime = substream->runtime;
1991
1992 /* convert to bytes */
1993 hwoff = frames_to_bytes(runtime, hwoff);
1994 off = frames_to_bytes(runtime, off);
1995 frames = frames_to_bytes(runtime, frames);
1996 return transfer(substream, 0, hwoff, data + off, frames);
1997}
1998
1999/* call transfer function with the converted pointers and sizes for each
2000 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2001 */
2002static int noninterleaved_copy(struct snd_pcm_substream *substream,
2003 snd_pcm_uframes_t hwoff, void *data,
2004 snd_pcm_uframes_t off,
2005 snd_pcm_uframes_t frames,
2006 pcm_transfer_f transfer)
2007{
2008 struct snd_pcm_runtime *runtime = substream->runtime;
2009 int channels = runtime->channels;
2010 void **bufs = data;
2011 int c, err;
2012
2013 /* convert to bytes; note that it's not frames_to_bytes() here.
2014	 * In non-interleaved mode, we copy each channel separately, so the
2015	 * per-channel size is in samples; summed over all channels it covers whole frames.
2016 */
2017 off = samples_to_bytes(runtime, off);
2018 frames = samples_to_bytes(runtime, frames);
2019 hwoff = samples_to_bytes(runtime, hwoff);
2020 for (c = 0; c < channels; ++c, ++bufs) {
2021 if (!data || !*bufs)
2022 err = fill_silence(substream, c, hwoff, NULL, frames);
2023 else
2024 err = transfer(substream, c, hwoff, *bufs + off,
2025 frames);
2026 if (err < 0)
2027 return err;
2028 }
2029 return 0;
2030}
2031
2032/* fill silence on the given buffer position;
2033 * called from snd_pcm_playback_silence()
2034 */
2035static int fill_silence_frames(struct snd_pcm_substream *substream,
2036 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2037{
2038 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2039 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2040 return interleaved_copy(substream, off, NULL, 0, frames,
2041 fill_silence);
2042 else
2043 return noninterleaved_copy(substream, off, NULL, 0, frames,
2044 fill_silence);
2045}
2046
2047/* sanity-check for read/write methods */
2048static int pcm_sanity_check(struct snd_pcm_substream *substream)
2049{
2050 struct snd_pcm_runtime *runtime;
2051 if (PCM_RUNTIME_CHECK(substream))
2052 return -ENXIO;
2053 runtime = substream->runtime;
2054 if (snd_BUG_ON(!substream->ops->copy_user && !runtime->dma_area))
2055 return -EINVAL;
2056 if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
2057 return -EBADFD;
2058 return 0;
2059}
2060
2061static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2062{
2063 switch (runtime->status->state) {
2064 case SNDRV_PCM_STATE_PREPARED:
2065 case SNDRV_PCM_STATE_RUNNING:
2066 case SNDRV_PCM_STATE_PAUSED:
2067 return 0;
2068 case SNDRV_PCM_STATE_XRUN:
2069 return -EPIPE;
2070 case SNDRV_PCM_STATE_SUSPENDED:
2071 return -ESTRPIPE;
2072 default:
2073 return -EBADFD;
2074 }
2075}
2076
2077/* update to the given appl_ptr and call ack callback if needed;
2078 * when an error is returned, roll back to the original value
2079 */
2080int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2081 snd_pcm_uframes_t appl_ptr)
2082{
2083 struct snd_pcm_runtime *runtime = substream->runtime;
2084 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2085 int ret;
2086
2087 if (old_appl_ptr == appl_ptr)
2088 return 0;
2089
2090 runtime->control->appl_ptr = appl_ptr;
2091 if (substream->ops->ack) {
2092 ret = substream->ops->ack(substream);
2093 if (ret < 0) {
2094 runtime->control->appl_ptr = old_appl_ptr;
2095 return ret;
2096 }
2097 }
2098
2099 trace_applptr(substream, old_appl_ptr, appl_ptr);
2100
2101 return 0;
2102}
2103
2104/* the common loop for read/write data */
2105snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2106 void *data, bool interleaved,
2107 snd_pcm_uframes_t size, bool in_kernel)
2108{
2109 struct snd_pcm_runtime *runtime = substream->runtime;
2110 snd_pcm_uframes_t xfer = 0;
2111 snd_pcm_uframes_t offset = 0;
2112 snd_pcm_uframes_t avail;
2113 pcm_copy_f writer;
2114 pcm_transfer_f transfer;
2115 bool nonblock;
2116 bool is_playback;
2117 int err;
2118
2119 err = pcm_sanity_check(substream);
2120 if (err < 0)
2121 return err;
2122
2123 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2124 if (interleaved) {
2125 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2126 runtime->channels > 1)
2127 return -EINVAL;
2128 writer = interleaved_copy;
2129 } else {
2130 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2131 return -EINVAL;
2132 writer = noninterleaved_copy;
2133 }
2134
2135 if (!data) {
2136 if (is_playback)
2137 transfer = fill_silence;
2138 else
2139 return -EINVAL;
2140 } else if (in_kernel) {
2141 if (substream->ops->copy_kernel)
2142 transfer = substream->ops->copy_kernel;
2143 else
2144 transfer = is_playback ?
2145 default_write_copy_kernel : default_read_copy_kernel;
2146 } else {
2147 if (substream->ops->copy_user)
2148 transfer = (pcm_transfer_f)substream->ops->copy_user;
2149 else
2150 transfer = is_playback ?
2151 default_write_copy : default_read_copy;
2152 }
2153
2154 if (size == 0)
2155 return 0;
2156
2157 nonblock = !!(substream->f_flags & O_NONBLOCK);
2158
2159 snd_pcm_stream_lock_irq(substream);
2160 err = pcm_accessible_state(runtime);
2161 if (err < 0)
2162 goto _end_unlock;
2163
2164 runtime->twake = runtime->control->avail_min ? : 1;
2165 if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
2166 snd_pcm_update_hw_ptr(substream);
2167
2168 /*
2169	 * For capture, start the stream only when the requested size reaches
2170	 * start_threshold; otherwise keep waiting, since another thread may start the capture.
2171 */
2172 if (!is_playback &&
2173 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2174 size >= runtime->start_threshold) {
2175 err = snd_pcm_start(substream);
2176 if (err < 0)
2177 goto _end_unlock;
2178 }
2179
2180 avail = snd_pcm_avail(substream);
2181
2182 while (size > 0) {
2183 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2184 snd_pcm_uframes_t cont;
2185 if (!avail) {
2186 if (!is_playback &&
2187 runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
2188 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2189 goto _end_unlock;
2190 }
2191 if (nonblock) {
2192 err = -EAGAIN;
2193 goto _end_unlock;
2194 }
2195 runtime->twake = min_t(snd_pcm_uframes_t, size,
2196 runtime->control->avail_min ? : 1);
2197 err = wait_for_avail(substream, &avail);
2198 if (err < 0)
2199 goto _end_unlock;
2200 if (!avail)
2201 continue; /* draining */
2202 }
2203 frames = size > avail ? avail : size;
2204 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2205 appl_ofs = appl_ptr % runtime->buffer_size;
2206 cont = runtime->buffer_size - appl_ofs;
2207 if (frames > cont)
2208 frames = cont;
2209 if (snd_BUG_ON(!frames)) {
2210 err = -EINVAL;
2211 goto _end_unlock;
2212 }
2213 snd_pcm_stream_unlock_irq(substream);
2214 err = writer(substream, appl_ofs, data, offset, frames,
2215 transfer);
2216 snd_pcm_stream_lock_irq(substream);
2217 if (err < 0)
2218 goto _end_unlock;
2219 err = pcm_accessible_state(runtime);
2220 if (err < 0)
2221 goto _end_unlock;
2222 appl_ptr += frames;
2223 if (appl_ptr >= runtime->boundary)
2224 appl_ptr -= runtime->boundary;
2225 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2226 if (err < 0)
2227 goto _end_unlock;
2228
2229 offset += frames;
2230 size -= frames;
2231 xfer += frames;
2232 avail -= frames;
2233 if (is_playback &&
2234 runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
2235 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2236 err = snd_pcm_start(substream);
2237 if (err < 0)
2238 goto _end_unlock;
2239 }
2240 }
2241 _end_unlock:
2242 runtime->twake = 0;
2243 if (xfer > 0 && err >= 0)
2244 snd_pcm_update_state(substream, runtime);
2245 snd_pcm_stream_unlock_irq(substream);
2246 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2247}
2248EXPORT_SYMBOL(__snd_pcm_lib_xfer);
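/*
 * Illustrative note (not part of this file): the boolean parameters select
 * interleaved vs. non-interleaved access and kernel vs. user-space buffers.
 * A playback write of interleaved user-space data, for example, boils down
 * to roughly
 *
 *	frames = __snd_pcm_lib_xfer(substream, (void __force *)user_buf,
 *				    true, size, false);
 *
 * (interleaved = true, in_kernel = false), while passing data == NULL on a
 * playback stream fills the requested amount with silence instead.
 */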
2249
2250/*
2251 * standard channel mapping helpers
2252 */
2253
2254/* default channel maps for multi-channel playback, up to 8 channels */
2255const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2256 { .channels = 1,
2257 .map = { SNDRV_CHMAP_MONO } },
2258 { .channels = 2,
2259 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2260 { .channels = 4,
2261 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2262 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2263 { .channels = 6,
2264 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2265 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2266 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2267 { .channels = 8,
2268 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2269 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2270 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2271 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2272 { }
2273};
2274EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2275
2276/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2277const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2278 { .channels = 1,
2279 .map = { SNDRV_CHMAP_MONO } },
2280 { .channels = 2,
2281 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2282 { .channels = 4,
2283 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2284 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2285 { .channels = 6,
2286 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2287 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2288 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2289 { .channels = 8,
2290 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2291 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2292 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2293 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2294 { }
2295};
2296EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2297
2298static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2299{
2300 if (ch > info->max_channels)
2301 return false;
2302 return !info->channel_mask || (info->channel_mask & (1U << ch));
2303}
2304
2305static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2306 struct snd_ctl_elem_info *uinfo)
2307{
2308 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2309
2310 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2312	uinfo->count = info->max_channels;
2313 uinfo->value.integer.min = 0;
2314 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2315 return 0;
2316}
2317
2318/* get callback for channel map ctl element
2319 * stores the first channel map entry matching the current channel count
2320 */
2321static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2322 struct snd_ctl_elem_value *ucontrol)
2323{
2324 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2325 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2326 struct snd_pcm_substream *substream;
2327 const struct snd_pcm_chmap_elem *map;
2328
2329 if (!info->chmap)
2330 return -EINVAL;
2331 substream = snd_pcm_chmap_substream(info, idx);
2332 if (!substream)
2333 return -ENODEV;
2334 memset(ucontrol->value.integer.value, 0,
2335 sizeof(ucontrol->value.integer.value));
2336 if (!substream->runtime)
2337 return 0; /* no channels set */
2338 for (map = info->chmap; map->channels; map++) {
2339 int i;
2340 if (map->channels == substream->runtime->channels &&
2341 valid_chmap_channels(info, map->channels)) {
2342 for (i = 0; i < map->channels; i++)
2343 ucontrol->value.integer.value[i] = map->map[i];
2344 return 0;
2345 }
2346 }
2347 return -EINVAL;
2348}
2349
2350/* tlv callback for channel map ctl element
2351 * expands the pre-defined channel maps in the form of a TLV container
2352 */
2353static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2354 unsigned int size, unsigned int __user *tlv)
2355{
2356 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2357 const struct snd_pcm_chmap_elem *map;
2358 unsigned int __user *dst;
2359 int c, count = 0;
2360
2361 if (!info->chmap)
2362 return -EINVAL;
2363 if (size < 8)
2364 return -ENOMEM;
2365 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2366 return -EFAULT;
2367 size -= 8;
2368 dst = tlv + 2;
2369 for (map = info->chmap; map->channels; map++) {
2370 int chs_bytes = map->channels * 4;
2371 if (!valid_chmap_channels(info, map->channels))
2372 continue;
2373 if (size < 8)
2374 return -ENOMEM;
2375 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2376 put_user(chs_bytes, dst + 1))
2377 return -EFAULT;
2378 dst += 2;
2379 size -= 8;
2380 count += 8;
2381 if (size < chs_bytes)
2382 return -ENOMEM;
2383 size -= chs_bytes;
2384 count += chs_bytes;
2385 for (c = 0; c < map->channels; c++) {
2386 if (put_user(map->map[c], dst))
2387 return -EFAULT;
2388 dst++;
2389 }
2390 }
2391 if (put_user(count, tlv + 1))
2392 return -EFAULT;
2393 return 0;
2394}
2395
2396static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2397{
2398 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2399 info->pcm->streams[info->stream].chmap_kctl = NULL;
2400 kfree(info);
2401}
2402
2403/**
2404 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2405 * @pcm: the assigned PCM instance
2406 * @stream: stream direction
2407 * @chmap: channel map elements (for query)
2408 * @max_channels: the max number of channels for the stream
2409 * @private_value: the value passed to each kcontrol's private_value field
2410 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2411 *
2412 * Create channel-mapping control elements assigned to the given PCM stream(s).
2413 * Return: Zero if successful, or a negative error value.
2414 */
2415int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2416 const struct snd_pcm_chmap_elem *chmap,
2417 int max_channels,
2418 unsigned long private_value,
2419 struct snd_pcm_chmap **info_ret)
2420{
2421 struct snd_pcm_chmap *info;
2422 struct snd_kcontrol_new knew = {
2423 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2424 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2425 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2426 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2427 .info = pcm_chmap_ctl_info,
2428 .get = pcm_chmap_ctl_get,
2429 .tlv.c = pcm_chmap_ctl_tlv,
2430 };
2431 int err;
2432
2433 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2434 return -EBUSY;
2435 info = kzalloc(sizeof(*info), GFP_KERNEL);
2436 if (!info)
2437 return -ENOMEM;
2438 info->pcm = pcm;
2439 info->stream = stream;
2440 info->chmap = chmap;
2441 info->max_channels = max_channels;
2442 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2443 knew.name = "Playback Channel Map";
2444 else
2445 knew.name = "Capture Channel Map";
2446 knew.device = pcm->device;
2447 knew.count = pcm->streams[stream].substream_count;
2448 knew.private_value = private_value;
2449 info->kctl = snd_ctl_new1(&knew, info);
2450 if (!info->kctl) {
2451 kfree(info);
2452 return -ENOMEM;
2453 }
2454 info->kctl->private_free = pcm_chmap_ctl_private_free;
2455 err = snd_ctl_add(pcm->card, info->kctl);
2456 if (err < 0)
2457 return err;
2458 pcm->streams[stream].chmap_kctl = info->kctl;
2459 if (info_ret)
2460 *info_ret = info;
2461 return 0;
2462}
2463EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
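/*
 * Illustrative sketch (not part of this file): a driver typically creates
 * the channel-map controls right after building its PCM, e.g. with the
 * standard maps declared above.  The variable names are hypothetical.
 *
 *	struct snd_pcm_chmap *chmap;
 *	int err;
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8,
 *				     0, &chmap);
 *	if (err < 0)
 *		return err;
 */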
36
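/*
 * Shrink the already-silenced amount by the distance the pointer advanced
 * (from @ptr to @new_ptr, with boundary wrap) and move silence_start
 * accordingly.
 */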
37static inline void update_silence_vars(struct snd_pcm_runtime *runtime,
38 snd_pcm_uframes_t ptr,
39 snd_pcm_uframes_t new_ptr)
40{
41 snd_pcm_sframes_t delta;
42
43 delta = new_ptr - ptr;
44 if (delta == 0)
45 return;
46 if (delta < 0)
47 delta += runtime->boundary;
48 if ((snd_pcm_uframes_t)delta < runtime->silence_filled)
49 runtime->silence_filled -= delta;
50 else
51 runtime->silence_filled = 0;
52 runtime->silence_start = new_ptr;
53}
54
55/*
56 * fill ring buffer with silence
57 * runtime->silence_start: starting pointer to silence area
58 * runtime->silence_filled: size filled with silence
59 * runtime->silence_threshold: threshold from application
60 * runtime->silence_size: maximal size from application
61 *
62 * when runtime->silence_size >= runtime->boundary - fill processed area with silence immediately
63 */
64void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_uframes_t new_hw_ptr)
65{
66 struct snd_pcm_runtime *runtime = substream->runtime;
67 snd_pcm_uframes_t frames, ofs, transfer;
68 int err;
69
70 if (runtime->silence_size < runtime->boundary) {
71 snd_pcm_sframes_t noise_dist;
72 snd_pcm_uframes_t appl_ptr = READ_ONCE(runtime->control->appl_ptr);
73 update_silence_vars(runtime, runtime->silence_start, appl_ptr);
74 /* initialization outside pointer updates */
75 if (new_hw_ptr == ULONG_MAX)
76 new_hw_ptr = runtime->status->hw_ptr;
77 /* get hw_avail with the boundary crossing */
78 noise_dist = appl_ptr - new_hw_ptr;
79 if (noise_dist < 0)
80 noise_dist += runtime->boundary;
81 /* total noise distance */
82 noise_dist += runtime->silence_filled;
83 if (noise_dist >= (snd_pcm_sframes_t) runtime->silence_threshold)
84 return;
85 frames = runtime->silence_threshold - noise_dist;
86 if (frames > runtime->silence_size)
87 frames = runtime->silence_size;
88 } else {
89 /*
90 * This filling mode aims at free-running mode (used for example by dmix),
91 * which doesn't update the application pointer.
92 */
93 snd_pcm_uframes_t hw_ptr = runtime->status->hw_ptr;
94 if (new_hw_ptr == ULONG_MAX) {
95 /*
96 * Initialization, fill the whole unused buffer with silence.
97 *
98 * Usually, this is entered while stopped, before data is queued,
99 * so both pointers are expected to be zero.
100 */
101 snd_pcm_sframes_t avail = runtime->control->appl_ptr - hw_ptr;
102 if (avail < 0)
103 avail += runtime->boundary;
104 /*
105 * In free-running mode, appl_ptr will be zero even while running,
106 * so we end up with a huge number. There is no useful way to
107 * handle this, so we just clear the whole buffer.
108 */
109 runtime->silence_filled = avail > runtime->buffer_size ? 0 : avail;
110 runtime->silence_start = hw_ptr;
111 } else {
112 /* Silence the just played area immediately */
113 update_silence_vars(runtime, hw_ptr, new_hw_ptr);
114 }
115 /*
116 * In this mode, silence_filled actually includes the valid
117 * sample data from the user.
118 */
119 frames = runtime->buffer_size - runtime->silence_filled;
120 }
121 if (snd_BUG_ON(frames > runtime->buffer_size))
122 return;
123 if (frames == 0)
124 return;
125 ofs = (runtime->silence_start + runtime->silence_filled) % runtime->buffer_size;
126 do {
127 transfer = ofs + frames > runtime->buffer_size ? runtime->buffer_size - ofs : frames;
128 err = fill_silence_frames(substream, ofs, transfer);
129 snd_BUG_ON(err < 0);
130 runtime->silence_filled += transfer;
131 frames -= transfer;
132 ofs = 0;
133 } while (frames > 0);
134 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
135}
136
137#ifdef CONFIG_SND_DEBUG
138void snd_pcm_debug_name(struct snd_pcm_substream *substream,
139 char *name, size_t len)
140{
141 snprintf(name, len, "pcmC%dD%d%c:%d",
142 substream->pcm->card->number,
143 substream->pcm->device,
144 substream->stream ? 'c' : 'p',
145 substream->number);
146}
147EXPORT_SYMBOL(snd_pcm_debug_name);
148#endif
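/*
 * Example output (illustrative): card 0, device 1, playback substream 0
 * is printed as "pcmC0D1p:0", the corresponding capture substream as
 * "pcmC0D1c:0".
 */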
149
150#define XRUN_DEBUG_BASIC (1<<0)
151#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
152#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
153
154#ifdef CONFIG_SND_PCM_XRUN_DEBUG
155
156#define xrun_debug(substream, mask) \
157 ((substream)->pstr->xrun_debug & (mask))
158#else
159#define xrun_debug(substream, mask) 0
160#endif
161
162#define dump_stack_on_xrun(substream) do { \
163 if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
164 dump_stack(); \
165 } while (0)
166
167/* call with stream lock held */
168void __snd_pcm_xrun(struct snd_pcm_substream *substream)
169{
170 struct snd_pcm_runtime *runtime = substream->runtime;
171
172 trace_xrun(substream);
173 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
174 struct timespec64 tstamp;
175
176 snd_pcm_gettime(runtime, &tstamp);
177 runtime->status->tstamp.tv_sec = tstamp.tv_sec;
178 runtime->status->tstamp.tv_nsec = tstamp.tv_nsec;
179 }
180 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
181 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
182 char name[16];
183 snd_pcm_debug_name(substream, name, sizeof(name));
184 pcm_warn(substream->pcm, "XRUN: %s\n", name);
185 dump_stack_on_xrun(substream);
186 }
187}
188
189#ifdef CONFIG_SND_PCM_XRUN_DEBUG
190#define hw_ptr_error(substream, in_interrupt, reason, fmt, args...) \
191 do { \
192 trace_hw_ptr_error(substream, reason); \
193 if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
194 pr_err_ratelimited("ALSA: PCM: [%c] " reason ": " fmt, \
195 (in_interrupt) ? 'Q' : 'P', ##args); \
196 dump_stack_on_xrun(substream); \
197 } \
198 } while (0)
199
200#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
201
202#define hw_ptr_error(substream, fmt, args...) do { } while (0)
203
204#endif
205
206int snd_pcm_update_state(struct snd_pcm_substream *substream,
207 struct snd_pcm_runtime *runtime)
208{
209 snd_pcm_uframes_t avail;
210
211 avail = snd_pcm_avail(substream);
212 if (avail > runtime->avail_max)
213 runtime->avail_max = avail;
214 if (runtime->state == SNDRV_PCM_STATE_DRAINING) {
215 if (avail >= runtime->buffer_size) {
216 snd_pcm_drain_done(substream);
217 return -EPIPE;
218 }
219 } else {
220 if (avail >= runtime->stop_threshold) {
221 __snd_pcm_xrun(substream);
222 return -EPIPE;
223 }
224 }
225 if (runtime->twake) {
226 if (avail >= runtime->twake)
227 wake_up(&runtime->tsleep);
228 } else if (avail >= runtime->control->avail_min)
229 wake_up(&runtime->sleep);
230 return 0;
231}
232
233static void update_audio_tstamp(struct snd_pcm_substream *substream,
234 struct timespec64 *curr_tstamp,
235 struct timespec64 *audio_tstamp)
236{
237 struct snd_pcm_runtime *runtime = substream->runtime;
238 u64 audio_frames, audio_nsecs;
239 struct timespec64 driver_tstamp;
240
241 if (runtime->tstamp_mode != SNDRV_PCM_TSTAMP_ENABLE)
242 return;
243
244 if (!(substream->ops->get_time_info) ||
245 (runtime->audio_tstamp_report.actual_type ==
246 SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
247
248 /*
249 * provide audio timestamp derived from pointer position
250 * add delay only if requested
251 */
252
253 audio_frames = runtime->hw_ptr_wrap + runtime->status->hw_ptr;
254
255 if (runtime->audio_tstamp_config.report_delay) {
256 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
257 audio_frames -= runtime->delay;
258 else
259 audio_frames += runtime->delay;
260 }
261 audio_nsecs = div_u64(audio_frames * 1000000000LL,
262 runtime->rate);
263 *audio_tstamp = ns_to_timespec64(audio_nsecs);
264 }
265
266 if (runtime->status->audio_tstamp.tv_sec != audio_tstamp->tv_sec ||
267 runtime->status->audio_tstamp.tv_nsec != audio_tstamp->tv_nsec) {
268 runtime->status->audio_tstamp.tv_sec = audio_tstamp->tv_sec;
269 runtime->status->audio_tstamp.tv_nsec = audio_tstamp->tv_nsec;
270 runtime->status->tstamp.tv_sec = curr_tstamp->tv_sec;
271 runtime->status->tstamp.tv_nsec = curr_tstamp->tv_nsec;
272 }
273
274
275 /*
276 * re-take a driver timestamp to let apps detect if the reference tstamp
277 * read by low-level hardware was provided with a delay
278 */
279 snd_pcm_gettime(substream->runtime, &driver_tstamp);
280 runtime->driver_tstamp = driver_tstamp;
281}
282
283static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
284 unsigned int in_interrupt)
285{
286 struct snd_pcm_runtime *runtime = substream->runtime;
287 snd_pcm_uframes_t pos;
288 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
289 snd_pcm_sframes_t hdelta, delta;
290 unsigned long jdelta;
291 unsigned long curr_jiffies;
292 struct timespec64 curr_tstamp;
293 struct timespec64 audio_tstamp;
294 int crossed_boundary = 0;
295
296 old_hw_ptr = runtime->status->hw_ptr;
297
298 /*
299 * group pointer, time and jiffies reads to allow for more
300 * accurate correlations/corrections.
301 * The values are stored at the end of this routine after
302 * corrections for hw_ptr position
303 */
304 pos = substream->ops->pointer(substream);
305 curr_jiffies = jiffies;
306 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
307 if ((substream->ops->get_time_info) &&
308 (runtime->audio_tstamp_config.type_requested != SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)) {
309 substream->ops->get_time_info(substream, &curr_tstamp,
310 &audio_tstamp,
311 &runtime->audio_tstamp_config,
312 &runtime->audio_tstamp_report);
313
314 /* re-test in case tstamp type is not supported in hardware and was demoted to DEFAULT */
315 if (runtime->audio_tstamp_report.actual_type == SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT)
316 snd_pcm_gettime(runtime, &curr_tstamp);
317 } else
318 snd_pcm_gettime(runtime, &curr_tstamp);
319 }
320
321 if (pos == SNDRV_PCM_POS_XRUN) {
322 __snd_pcm_xrun(substream);
323 return -EPIPE;
324 }
325 if (pos >= runtime->buffer_size) {
326 if (printk_ratelimit()) {
327 char name[16];
328 snd_pcm_debug_name(substream, name, sizeof(name));
329 pcm_err(substream->pcm,
330 "invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
331 name, pos, runtime->buffer_size,
332 runtime->period_size);
333 }
334 pos = 0;
335 }
336 pos -= pos % runtime->min_align;
337 trace_hwptr(substream, pos, in_interrupt);
338 hw_base = runtime->hw_ptr_base;
339 new_hw_ptr = hw_base + pos;
340 if (in_interrupt) {
341 /* we know that one period was processed */
342 /* delta = "expected next hw_ptr" for in_interrupt != 0 */
343 delta = runtime->hw_ptr_interrupt + runtime->period_size;
344 if (delta > new_hw_ptr) {
345 /* check for double acknowledged interrupts */
346 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
347 if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
348 hw_base += runtime->buffer_size;
349 if (hw_base >= runtime->boundary) {
350 hw_base = 0;
351 crossed_boundary++;
352 }
353 new_hw_ptr = hw_base + pos;
354 goto __delta;
355 }
356 }
357 }
358	/* new_hw_ptr might be lower than old_hw_ptr when the pointer */
359	/* crosses the end of the ring buffer */
360 if (new_hw_ptr < old_hw_ptr) {
361 hw_base += runtime->buffer_size;
362 if (hw_base >= runtime->boundary) {
363 hw_base = 0;
364 crossed_boundary++;
365 }
366 new_hw_ptr = hw_base + pos;
367 }
368 __delta:
369 delta = new_hw_ptr - old_hw_ptr;
370 if (delta < 0)
371 delta += runtime->boundary;
372
373 if (runtime->no_period_wakeup) {
374 snd_pcm_sframes_t xrun_threshold;
375 /*
376 * Without regular period interrupts, we have to check
377 * the elapsed time to detect xruns.
378 */
379 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
380 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
381 goto no_delta_check;
382 hdelta = jdelta - delta * HZ / runtime->rate;
383 xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
384 while (hdelta > xrun_threshold) {
385 delta += runtime->buffer_size;
386 hw_base += runtime->buffer_size;
387 if (hw_base >= runtime->boundary) {
388 hw_base = 0;
389 crossed_boundary++;
390 }
391 new_hw_ptr = hw_base + pos;
392 hdelta -= runtime->hw_ptr_buffer_jiffies;
393 }
394 goto no_delta_check;
395 }
396
397 /* something must be really wrong */
398 if (delta >= runtime->buffer_size + runtime->period_size) {
399 hw_ptr_error(substream, in_interrupt, "Unexpected hw_ptr",
400 "(stream=%i, pos=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
401 substream->stream, (long)pos,
402 (long)new_hw_ptr, (long)old_hw_ptr);
403 return 0;
404 }
405
406 /* Do jiffies check only in xrun_debug mode */
407 if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
408 goto no_jiffies_check;
409
410	/* Skip the jiffies check for hardware with the BATCH flag.
411 * Such hardware usually just increases the position at each IRQ,
412 * thus it can't give any strange position.
413 */
414 if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
415 goto no_jiffies_check;
416 hdelta = delta;
417 if (hdelta < runtime->delay)
418 goto no_jiffies_check;
419 hdelta -= runtime->delay;
420 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
421 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
422 delta = jdelta /
423 (((runtime->period_size * HZ) / runtime->rate)
424 + HZ/100);
425		/* move new_hw_ptr according to jiffies, not the pos variable */
426 new_hw_ptr = old_hw_ptr;
427 hw_base = delta;
428 /* use loop to avoid checks for delta overflows */
429 /* the delta value is small or zero in most cases */
430 while (delta > 0) {
431 new_hw_ptr += runtime->period_size;
432 if (new_hw_ptr >= runtime->boundary) {
433 new_hw_ptr -= runtime->boundary;
434 crossed_boundary--;
435 }
436 delta--;
437 }
438 /* align hw_base to buffer_size */
439 hw_ptr_error(substream, in_interrupt, "hw_ptr skipping",
440 "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
441 (long)pos, (long)hdelta,
442 (long)runtime->period_size, jdelta,
443 ((hdelta * HZ) / runtime->rate), hw_base,
444 (unsigned long)old_hw_ptr,
445 (unsigned long)new_hw_ptr);
446 /* reset values to proper state */
447 delta = 0;
448 hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
449 }
450 no_jiffies_check:
451 if (delta > runtime->period_size + runtime->period_size / 2) {
452 hw_ptr_error(substream, in_interrupt,
453 "Lost interrupts?",
454 "(stream=%i, delta=%ld, new_hw_ptr=%ld, old_hw_ptr=%ld)\n",
455 substream->stream, (long)delta,
456 (long)new_hw_ptr,
457 (long)old_hw_ptr);
458 }
459
460 no_delta_check:
461 if (runtime->status->hw_ptr == new_hw_ptr) {
462 runtime->hw_ptr_jiffies = curr_jiffies;
463 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
464 return 0;
465 }
466
467 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
468 runtime->silence_size > 0)
469 snd_pcm_playback_silence(substream, new_hw_ptr);
470
471 if (in_interrupt) {
472 delta = new_hw_ptr - runtime->hw_ptr_interrupt;
473 if (delta < 0)
474 delta += runtime->boundary;
475 delta -= (snd_pcm_uframes_t)delta % runtime->period_size;
476 runtime->hw_ptr_interrupt += delta;
477 if (runtime->hw_ptr_interrupt >= runtime->boundary)
478 runtime->hw_ptr_interrupt -= runtime->boundary;
479 }
480 runtime->hw_ptr_base = hw_base;
481 runtime->status->hw_ptr = new_hw_ptr;
482 runtime->hw_ptr_jiffies = curr_jiffies;
483 if (crossed_boundary) {
484 snd_BUG_ON(crossed_boundary != 1);
485 runtime->hw_ptr_wrap += runtime->boundary;
486 }
487
488 update_audio_tstamp(substream, &curr_tstamp, &audio_tstamp);
489
490 return snd_pcm_update_state(substream, runtime);
491}
492
493/* CAUTION: call it with irq disabled */
494int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
495{
496 return snd_pcm_update_hw_ptr0(substream, 0);
497}
498
499/**
500 * snd_pcm_set_ops - set the PCM operators
501 * @pcm: the pcm instance
502 * @direction: stream direction, SNDRV_PCM_STREAM_XXX
503 * @ops: the operator table
504 *
505 * Sets the given PCM operators to the pcm instance.
506 */
507void snd_pcm_set_ops(struct snd_pcm *pcm, int direction,
508 const struct snd_pcm_ops *ops)
509{
510 struct snd_pcm_str *stream = &pcm->streams[direction];
511 struct snd_pcm_substream *substream;
512
513 for (substream = stream->substream; substream != NULL; substream = substream->next)
514 substream->ops = ops;
515}
516EXPORT_SYMBOL(snd_pcm_set_ops);
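/*
 * Illustrative sketch (not part of this file): a driver usually defines a
 * static ops table and installs it for each direction right after creating
 * the PCM.  The callback names below are hypothetical.
 *
 *	static const struct snd_pcm_ops example_playback_ops = {
 *		.open      = example_pcm_open,
 *		.close     = example_pcm_close,
 *		.hw_params = example_pcm_hw_params,
 *		.prepare   = example_pcm_prepare,
 *		.trigger   = example_pcm_trigger,
 *		.pointer   = example_pcm_pointer,
 *	};
 *
 *	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &example_playback_ops);
 */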
517
518/**
519 * snd_pcm_set_sync - set the PCM sync id
520 * @substream: the pcm substream
521 *
522 * Sets the PCM sync identifier for the card.
523 */
524void snd_pcm_set_sync(struct snd_pcm_substream *substream)
525{
526 struct snd_pcm_runtime *runtime = substream->runtime;
527
528 runtime->sync.id32[0] = substream->pcm->card->number;
529 runtime->sync.id32[1] = -1;
530 runtime->sync.id32[2] = -1;
531 runtime->sync.id32[3] = -1;
532}
533EXPORT_SYMBOL(snd_pcm_set_sync);
534
535/*
536 * Standard ioctl routine
537 */
538
539static inline unsigned int div32(unsigned int a, unsigned int b,
540 unsigned int *r)
541{
542 if (b == 0) {
543 *r = 0;
544 return UINT_MAX;
545 }
546 *r = a % b;
547 return a / b;
548}
549
550static inline unsigned int div_down(unsigned int a, unsigned int b)
551{
552 if (b == 0)
553 return UINT_MAX;
554 return a / b;
555}
556
557static inline unsigned int div_up(unsigned int a, unsigned int b)
558{
559 unsigned int r;
560 unsigned int q;
561 if (b == 0)
562 return UINT_MAX;
563 q = div32(a, b, &r);
564 if (r)
565 ++q;
566 return q;
567}
568
569static inline unsigned int mul(unsigned int a, unsigned int b)
570{
571 if (a == 0)
572 return 0;
573 if (div_down(UINT_MAX, a) < b)
574 return UINT_MAX;
575 return a * b;
576}
577
578static inline unsigned int muldiv32(unsigned int a, unsigned int b,
579 unsigned int c, unsigned int *r)
580{
581 u_int64_t n = (u_int64_t) a * b;
582 if (c == 0) {
583 *r = 0;
584 return UINT_MAX;
585 }
586 n = div_u64_rem(n, c, r);
587 if (n >= UINT_MAX) {
588 *r = 0;
589 return UINT_MAX;
590 }
591 return n;
592}
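/*
 * Example (illustrative): muldiv32(44100, 4, 3, &r) computes
 * 44100 * 4 = 176400, then 176400 / 3 = 58800 with r = 0, while
 * muldiv32(5, 7, 4, &r) yields 8 with remainder r = 3.
 */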
593
594/**
595 * snd_interval_refine - refine the interval value of configurator
596 * @i: the interval value to refine
597 * @v: the interval value to refer to
598 *
599 * Refines the interval value with the reference value.
600 * The interval is changed to the range satisfying both intervals.
601 * The interval status (min, max, integer, etc.) is evaluated.
602 *
603 * Return: Positive if the value is changed, zero if it's not changed, or a
604 * negative error code.
605 */
606int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v)
607{
608 int changed = 0;
609 if (snd_BUG_ON(snd_interval_empty(i)))
610 return -EINVAL;
611 if (i->min < v->min) {
612 i->min = v->min;
613 i->openmin = v->openmin;
614 changed = 1;
615 } else if (i->min == v->min && !i->openmin && v->openmin) {
616 i->openmin = 1;
617 changed = 1;
618 }
619 if (i->max > v->max) {
620 i->max = v->max;
621 i->openmax = v->openmax;
622 changed = 1;
623 } else if (i->max == v->max && !i->openmax && v->openmax) {
624 i->openmax = 1;
625 changed = 1;
626 }
627 if (!i->integer && v->integer) {
628 i->integer = 1;
629 changed = 1;
630 }
631 if (i->integer) {
632 if (i->openmin) {
633 i->min++;
634 i->openmin = 0;
635 }
636 if (i->openmax) {
637 i->max--;
638 i->openmax = 0;
639 }
640 } else if (!i->openmin && !i->openmax && i->min == i->max)
641 i->integer = 1;
642 if (snd_interval_checkempty(i)) {
643 snd_interval_none(i);
644 return -EINVAL;
645 }
646 return changed;
647}
648EXPORT_SYMBOL(snd_interval_refine);
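/*
 * Worked example (illustrative): refining a rate interval of [8000, 48000]
 * with a reference interval of [44100, 192000] narrows the result to
 * [44100, 48000] and returns 1; refining it with [96000, 192000] instead
 * leaves the interval empty and returns -EINVAL.
 */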
649
650static int snd_interval_refine_first(struct snd_interval *i)
651{
652 const unsigned int last_max = i->max;
653
654 if (snd_BUG_ON(snd_interval_empty(i)))
655 return -EINVAL;
656 if (snd_interval_single(i))
657 return 0;
658 i->max = i->min;
659 if (i->openmin)
660 i->max++;
661 /* only exclude max value if also excluded before refine */
662 i->openmax = (i->openmax && i->max >= last_max);
663 return 1;
664}
665
666static int snd_interval_refine_last(struct snd_interval *i)
667{
668 const unsigned int last_min = i->min;
669
670 if (snd_BUG_ON(snd_interval_empty(i)))
671 return -EINVAL;
672 if (snd_interval_single(i))
673 return 0;
674 i->min = i->max;
675 if (i->openmax)
676 i->min--;
677 /* only exclude min value if also excluded before refine */
678 i->openmin = (i->openmin && i->min <= last_min);
679 return 1;
680}
681
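/**
 * snd_interval_mul - refine the interval value with multiplication
 * @a: multiplicand
 * @b: multiplier
 * @c: product
 *
 * c = a * b
 *
 * The result is stored in @c.
 */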
682void snd_interval_mul(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
683{
684 if (a->empty || b->empty) {
685 snd_interval_none(c);
686 return;
687 }
688 c->empty = 0;
689 c->min = mul(a->min, b->min);
690 c->openmin = (a->openmin || b->openmin);
691 c->max = mul(a->max, b->max);
692 c->openmax = (a->openmax || b->openmax);
693 c->integer = (a->integer && b->integer);
694}
695
696/**
697 * snd_interval_div - refine the interval value with division
698 * @a: dividend
699 * @b: divisor
700 * @c: quotient
701 *
702 * c = a / b
703 *
704 * The result is stored in @c.
705 */
706void snd_interval_div(const struct snd_interval *a, const struct snd_interval *b, struct snd_interval *c)
707{
708 unsigned int r;
709 if (a->empty || b->empty) {
710 snd_interval_none(c);
711 return;
712 }
713 c->empty = 0;
714 c->min = div32(a->min, b->max, &r);
715 c->openmin = (r || a->openmin || b->openmax);
716 if (b->min > 0) {
717 c->max = div32(a->max, b->min, &r);
718 if (r) {
719 c->max++;
720 c->openmax = 1;
721 } else
722 c->openmax = (a->openmax || b->openmin);
723 } else {
724 c->max = UINT_MAX;
725 c->openmax = 0;
726 }
727 c->integer = 0;
728}
729
730/**
731 * snd_interval_muldivk - refine the interval value
732 * @a: dividend 1
733 * @b: dividend 2
734 * @k: divisor (as integer)
735 * @c: result
736 *
737 * c = a * b / k
738 *
739 * The result is stored in @c.
740 */
741void snd_interval_muldivk(const struct snd_interval *a, const struct snd_interval *b,
742 unsigned int k, struct snd_interval *c)
743{
744 unsigned int r;
745 if (a->empty || b->empty) {
746 snd_interval_none(c);
747 return;
748 }
749 c->empty = 0;
750 c->min = muldiv32(a->min, b->min, k, &r);
751 c->openmin = (r || a->openmin || b->openmin);
752 c->max = muldiv32(a->max, b->max, k, &r);
753 if (r) {
754 c->max++;
755 c->openmax = 1;
756 } else
757 c->openmax = (a->openmax || b->openmax);
758 c->integer = 0;
759}
760
761/**
762 * snd_interval_mulkdiv - refine the interval value
763 * @a: dividend 1
764 * @k: dividend 2 (as integer)
765 * @b: divisor
766 * @c: result
767 *
768 * c = a * k / b
769 *
770 * The result is stored in @c.
771 */
772void snd_interval_mulkdiv(const struct snd_interval *a, unsigned int k,
773 const struct snd_interval *b, struct snd_interval *c)
774{
775 unsigned int r;
776 if (a->empty || b->empty) {
777 snd_interval_none(c);
778 return;
779 }
780 c->empty = 0;
781 c->min = muldiv32(a->min, k, b->max, &r);
782 c->openmin = (r || a->openmin || b->openmax);
783 if (b->min > 0) {
784 c->max = muldiv32(a->max, k, b->min, &r);
785 if (r) {
786 c->max++;
787 c->openmax = 1;
788 } else
789 c->openmax = (a->openmax || b->openmin);
790 } else {
791 c->max = UINT_MAX;
792 c->openmax = 0;
793 }
794 c->integer = 0;
795}
796
797/* ---- */
798
799
800/**
801 * snd_interval_ratnum - refine the interval value
802 * @i: interval to refine
803 * @rats_count: number of ratnum_t
804 * @rats: ratnum_t array
805 * @nump: pointer to store the resultant numerator
806 * @denp: pointer to store the resultant denominator
807 *
808 * Return: Positive if the value is changed, zero if it's not changed, or a
809 * negative error code.
810 */
811int snd_interval_ratnum(struct snd_interval *i,
812 unsigned int rats_count, const struct snd_ratnum *rats,
813 unsigned int *nump, unsigned int *denp)
814{
815 unsigned int best_num, best_den;
816 int best_diff;
817 unsigned int k;
818 struct snd_interval t;
819 int err;
820 unsigned int result_num, result_den;
821 int result_diff;
822
823 best_num = best_den = best_diff = 0;
824 for (k = 0; k < rats_count; ++k) {
825 unsigned int num = rats[k].num;
826 unsigned int den;
827 unsigned int q = i->min;
828 int diff;
829 if (q == 0)
830 q = 1;
831 den = div_up(num, q);
832 if (den < rats[k].den_min)
833 continue;
834 if (den > rats[k].den_max)
835 den = rats[k].den_max;
836 else {
837 unsigned int r;
838 r = (den - rats[k].den_min) % rats[k].den_step;
839 if (r != 0)
840 den -= r;
841 }
842 diff = num - q * den;
843 if (diff < 0)
844 diff = -diff;
845 if (best_num == 0 ||
846 diff * best_den < best_diff * den) {
847 best_diff = diff;
848 best_den = den;
849 best_num = num;
850 }
851 }
852 if (best_den == 0) {
853 i->empty = 1;
854 return -EINVAL;
855 }
856 t.min = div_down(best_num, best_den);
857 t.openmin = !!(best_num % best_den);
858
859 result_num = best_num;
860 result_diff = best_diff;
861 result_den = best_den;
862 best_num = best_den = best_diff = 0;
863 for (k = 0; k < rats_count; ++k) {
864 unsigned int num = rats[k].num;
865 unsigned int den;
866 unsigned int q = i->max;
867 int diff;
868 if (q == 0) {
869 i->empty = 1;
870 return -EINVAL;
871 }
872 den = div_down(num, q);
873 if (den > rats[k].den_max)
874 continue;
875 if (den < rats[k].den_min)
876 den = rats[k].den_min;
877 else {
878 unsigned int r;
879 r = (den - rats[k].den_min) % rats[k].den_step;
880 if (r != 0)
881 den += rats[k].den_step - r;
882 }
883 diff = q * den - num;
884 if (diff < 0)
885 diff = -diff;
886 if (best_num == 0 ||
887 diff * best_den < best_diff * den) {
888 best_diff = diff;
889 best_den = den;
890 best_num = num;
891 }
892 }
893 if (best_den == 0) {
894 i->empty = 1;
895 return -EINVAL;
896 }
897 t.max = div_up(best_num, best_den);
898 t.openmax = !!(best_num % best_den);
899 t.integer = 0;
900 err = snd_interval_refine(i, &t);
901 if (err < 0)
902 return err;
903
904 if (snd_interval_single(i)) {
905 if (best_diff * result_den < result_diff * best_den) {
906 result_num = best_num;
907 result_den = best_den;
908 }
909 if (nump)
910 *nump = result_num;
911 if (denp)
912 *denp = result_den;
913 }
914 return err;
915}
916EXPORT_SYMBOL(snd_interval_ratnum);
917
918/**
919 * snd_interval_ratden - refine the interval value
920 * @i: interval to refine
921 * @rats_count: number of struct ratden
922 * @rats: struct ratden array
923 * @nump: pointer to store the resultant numerator
924 * @denp: pointer to store the resultant denominator
925 *
926 * Return: Positive if the value is changed, zero if it's not changed, or a
927 * negative error code.
928 */
929static int snd_interval_ratden(struct snd_interval *i,
930 unsigned int rats_count,
931 const struct snd_ratden *rats,
932 unsigned int *nump, unsigned int *denp)
933{
934 unsigned int best_num, best_diff, best_den;
935 unsigned int k;
936 struct snd_interval t;
937 int err;
938
939 best_num = best_den = best_diff = 0;
940 for (k = 0; k < rats_count; ++k) {
941 unsigned int num;
942 unsigned int den = rats[k].den;
943 unsigned int q = i->min;
944 int diff;
945 num = mul(q, den);
946 if (num > rats[k].num_max)
947 continue;
948 if (num < rats[k].num_min)
949 num = rats[k].num_max;
950 else {
951 unsigned int r;
952 r = (num - rats[k].num_min) % rats[k].num_step;
953 if (r != 0)
954 num += rats[k].num_step - r;
955 }
956 diff = num - q * den;
957 if (best_num == 0 ||
958 diff * best_den < best_diff * den) {
959 best_diff = diff;
960 best_den = den;
961 best_num = num;
962 }
963 }
964 if (best_den == 0) {
965 i->empty = 1;
966 return -EINVAL;
967 }
968 t.min = div_down(best_num, best_den);
969 t.openmin = !!(best_num % best_den);
970
971 best_num = best_den = best_diff = 0;
972 for (k = 0; k < rats_count; ++k) {
973 unsigned int num;
974 unsigned int den = rats[k].den;
975 unsigned int q = i->max;
976 int diff;
977 num = mul(q, den);
978 if (num < rats[k].num_min)
979 continue;
980 if (num > rats[k].num_max)
981 num = rats[k].num_max;
982 else {
983 unsigned int r;
984 r = (num - rats[k].num_min) % rats[k].num_step;
985 if (r != 0)
986 num -= r;
987 }
988 diff = q * den - num;
989 if (best_num == 0 ||
990 diff * best_den < best_diff * den) {
991 best_diff = diff;
992 best_den = den;
993 best_num = num;
994 }
995 }
996 if (best_den == 0) {
997 i->empty = 1;
998 return -EINVAL;
999 }
1000 t.max = div_up(best_num, best_den);
1001 t.openmax = !!(best_num % best_den);
1002 t.integer = 0;
1003 err = snd_interval_refine(i, &t);
1004 if (err < 0)
1005 return err;
1006
1007 if (snd_interval_single(i)) {
1008 if (nump)
1009 *nump = best_num;
1010 if (denp)
1011 *denp = best_den;
1012 }
1013 return err;
1014}
1015
1016/**
1017 * snd_interval_list - refine the interval value from the list
1018 * @i: the interval value to refine
1019 * @count: the number of elements in the list
1020 * @list: the value list
1021 * @mask: the bit-mask to evaluate
1022 *
1023 * Refines the interval value from the list.
1024 * When mask is non-zero, only the elements whose corresponding bit is set
1025 * in mask are evaluated.
1026 *
1027 * Return: Positive if the value is changed, zero if it's not changed, or a
1028 * negative error code.
1029 */
1030int snd_interval_list(struct snd_interval *i, unsigned int count,
1031 const unsigned int *list, unsigned int mask)
1032{
1033 unsigned int k;
1034 struct snd_interval list_range;
1035
1036 if (!count) {
1037 i->empty = 1;
1038 return -EINVAL;
1039 }
1040 snd_interval_any(&list_range);
1041 list_range.min = UINT_MAX;
1042 list_range.max = 0;
1043 for (k = 0; k < count; k++) {
1044 if (mask && !(mask & (1 << k)))
1045 continue;
1046 if (!snd_interval_test(i, list[k]))
1047 continue;
1048 list_range.min = min(list_range.min, list[k]);
1049 list_range.max = max(list_range.max, list[k]);
1050 }
1051 return snd_interval_refine(i, &list_range);
1052}
1053EXPORT_SYMBOL(snd_interval_list);
1054
1055/**
1056 * snd_interval_ranges - refine the interval value from the list of ranges
1057 * @i: the interval value to refine
1058 * @count: the number of elements in the list of ranges
1059 * @ranges: the ranges list
1060 * @mask: the bit-mask to evaluate
1061 *
1062 * Refines the interval value from the list of ranges.
1063 * When mask is non-zero, only the elements whose corresponding bit is set
1064 * in mask are evaluated.
1065 *
1066 * Return: Positive if the value is changed, zero if it's not changed, or a
1067 * negative error code.
1068 */
1069int snd_interval_ranges(struct snd_interval *i, unsigned int count,
1070 const struct snd_interval *ranges, unsigned int mask)
1071{
1072 unsigned int k;
1073 struct snd_interval range_union;
1074 struct snd_interval range;
1075
1076 if (!count) {
1077 snd_interval_none(i);
1078 return -EINVAL;
1079 }
1080 snd_interval_any(&range_union);
1081 range_union.min = UINT_MAX;
1082 range_union.max = 0;
1083 for (k = 0; k < count; k++) {
1084 if (mask && !(mask & (1 << k)))
1085 continue;
1086 snd_interval_copy(&range, &ranges[k]);
1087 if (snd_interval_refine(&range, i) < 0)
1088 continue;
1089 if (snd_interval_empty(&range))
1090 continue;
1091
1092 if (range.min < range_union.min) {
1093 range_union.min = range.min;
1094 range_union.openmin = 1;
1095 }
1096 if (range.min == range_union.min && !range.openmin)
1097 range_union.openmin = 0;
1098 if (range.max > range_union.max) {
1099 range_union.max = range.max;
1100 range_union.openmax = 1;
1101 }
1102 if (range.max == range_union.max && !range.openmax)
1103 range_union.openmax = 0;
1104 }
1105 return snd_interval_refine(i, &range_union);
1106}
1107EXPORT_SYMBOL(snd_interval_ranges);
1108
1109static int snd_interval_step(struct snd_interval *i, unsigned int step)
1110{
1111 unsigned int n;
1112 int changed = 0;
1113 n = i->min % step;
1114 if (n != 0 || i->openmin) {
1115 i->min += step - n;
1116 i->openmin = 0;
1117 changed = 1;
1118 }
1119 n = i->max % step;
1120 if (n != 0 || i->openmax) {
1121 i->max -= n;
1122 i->openmax = 0;
1123 changed = 1;
1124 }
1125 if (snd_interval_checkempty(i)) {
1126 i->empty = 1;
1127 return -EINVAL;
1128 }
1129 return changed;
1130}
1131
1132/* Info constraints helpers */
1133
1134/**
1135 * snd_pcm_hw_rule_add - add the hw-constraint rule
1136 * @runtime: the pcm runtime instance
1137 * @cond: condition bits
1138 * @var: the variable to evaluate
1139 * @func: the evaluation function
1140 * @private: the private data pointer passed to function
1141 * @dep: the dependent variables
1142 *
1143 * Return: Zero if successful, or a negative error code on failure.
1144 */
1145int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond,
1146 int var,
1147 snd_pcm_hw_rule_func_t func, void *private,
1148 int dep, ...)
1149{
1150 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1151 struct snd_pcm_hw_rule *c;
1152 unsigned int k;
1153 va_list args;
1154 va_start(args, dep);
1155 if (constrs->rules_num >= constrs->rules_all) {
1156 struct snd_pcm_hw_rule *new;
1157 unsigned int new_rules = constrs->rules_all + 16;
1158 new = krealloc_array(constrs->rules, new_rules,
1159 sizeof(*c), GFP_KERNEL);
1160 if (!new) {
1161 va_end(args);
1162 return -ENOMEM;
1163 }
1164 constrs->rules = new;
1165 constrs->rules_all = new_rules;
1166 }
1167 c = &constrs->rules[constrs->rules_num];
1168 c->cond = cond;
1169 c->func = func;
1170 c->var = var;
1171 c->private = private;
1172 k = 0;
1173 while (1) {
1174 if (snd_BUG_ON(k >= ARRAY_SIZE(c->deps))) {
1175 va_end(args);
1176 return -EINVAL;
1177 }
1178 c->deps[k++] = dep;
1179 if (dep < 0)
1180 break;
1181 dep = va_arg(args, int);
1182 }
1183 constrs->rules_num++;
1184 va_end(args);
1185 return 0;
1186}
1187EXPORT_SYMBOL(snd_pcm_hw_rule_add);
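/*
 * Illustrative sketch (not part of this file): a driver can register a
 * custom dependency between parameters; here a hypothetical rule restricts
 * the channel count to stereo whenever the rate exceeds 96 kHz.
 *
 *	static int example_rule_channels(struct snd_pcm_hw_params *params,
 *					 struct snd_pcm_hw_rule *rule)
 *	{
 *		struct snd_interval *c =
 *			hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 *		const struct snd_interval *r =
 *			hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
 *		struct snd_interval t = { .min = 1, .max = 2, .integer = 1 };
 *
 *		if (r->min > 96000)
 *			return snd_interval_refine(c, &t);
 *		return 0;
 *	}
 *
 *	snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
 *			    example_rule_channels, NULL,
 *			    SNDRV_PCM_HW_PARAM_RATE, -1);
 */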
1188
1189/**
1190 * snd_pcm_hw_constraint_mask - apply the given bitmap mask constraint
1191 * @runtime: PCM runtime instance
1192 * @var: hw_params variable to apply the mask
1193 * @mask: the bitmap mask
1194 *
1195 * Apply the constraint of the given bitmap mask to a 32-bit mask parameter.
1196 *
1197 * Return: Zero if successful, or a negative error code on failure.
1198 */
1199int snd_pcm_hw_constraint_mask(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1200 u_int32_t mask)
1201{
1202 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1203 struct snd_mask *maskp = constrs_mask(constrs, var);
1204 *maskp->bits &= mask;
1205 memset(maskp->bits + 1, 0, (SNDRV_MASK_MAX-32) / 8); /* clear rest */
1206 if (*maskp->bits == 0)
1207 return -EINVAL;
1208 return 0;
1209}
1210
1211/**
1212 * snd_pcm_hw_constraint_mask64 - apply the given bitmap mask constraint
1213 * @runtime: PCM runtime instance
1214 * @var: hw_params variable to apply the mask
1215 * @mask: the 64bit bitmap mask
1216 *
1217 * Apply the constraint of the given bitmap mask to a 64-bit mask parameter.
1218 *
1219 * Return: Zero if successful, or a negative error code on failure.
1220 */
1221int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1222 u_int64_t mask)
1223{
1224 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1225 struct snd_mask *maskp = constrs_mask(constrs, var);
1226 maskp->bits[0] &= (u_int32_t)mask;
1227 maskp->bits[1] &= (u_int32_t)(mask >> 32);
1228 memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
1229 if (! maskp->bits[0] && ! maskp->bits[1])
1230 return -EINVAL;
1231 return 0;
1232}
1233EXPORT_SYMBOL(snd_pcm_hw_constraint_mask64);
1234
1235/**
1236 * snd_pcm_hw_constraint_integer - apply an integer constraint to an interval
1237 * @runtime: PCM runtime instance
1238 * @var: hw_params variable to apply the integer constraint
1239 *
1240 * Apply the integer constraint to an interval parameter.
1241 *
1242 * Return: Positive if the value is changed, zero if it's not changed, or a
1243 * negative error code.
1244 */
1245int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var)
1246{
1247 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1248 return snd_interval_setinteger(constrs_interval(constrs, var));
1249}
1250EXPORT_SYMBOL(snd_pcm_hw_constraint_integer);
1251
1252/**
1253 * snd_pcm_hw_constraint_minmax - apply a min/max range constraint to an interval
1254 * @runtime: PCM runtime instance
1255 * @var: hw_params variable to apply the range
1256 * @min: the minimal value
1257 * @max: the maximal value
1258 *
1259 * Apply the min/max range constraint to an interval parameter.
1260 *
1261 * Return: Positive if the value is changed, zero if it's not changed, or a
1262 * negative error code.
1263 */
1264int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var,
1265 unsigned int min, unsigned int max)
1266{
1267 struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
1268 struct snd_interval t;
1269 t.min = min;
1270 t.max = max;
1271 t.openmin = t.openmax = 0;
1272 t.integer = 0;
1273 return snd_interval_refine(constrs_interval(constrs, var), &t);
1274}
1275EXPORT_SYMBOL(snd_pcm_hw_constraint_minmax);
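/*
 * Illustrative sketch (not part of this file): the two interval constraint
 * helpers above are typically used from a driver's open callback, e.g. to
 * force an integer number of periods and to bound the period size of a
 * hypothetical device:
 *
 *	err = snd_pcm_hw_constraint_integer(runtime,
 *					    SNDRV_PCM_HW_PARAM_PERIODS);
 *	if (err < 0)
 *		return err;
 *	err = snd_pcm_hw_constraint_minmax(runtime,
 *					   SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
 *					   256, 8192);
 *	if (err < 0)
 *		return err;
 */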
1276
1277static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
1278 struct snd_pcm_hw_rule *rule)
1279{
1280 struct snd_pcm_hw_constraint_list *list = rule->private;
1281 return snd_interval_list(hw_param_interval(params, rule->var), list->count, list->list, list->mask);
1282}
1283
1284
1285/**
1286 * snd_pcm_hw_constraint_list - apply a list of constraints to a parameter
1287 * @runtime: PCM runtime instance
1288 * @cond: condition bits
1289 * @var: hw_params variable to apply the list constraint
1290 * @l: list
1291 *
1292 * Apply the list of constraints to an interval parameter.
1293 *
1294 * Return: Zero if successful, or a negative error code on failure.
1295 */
1296int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
1297 unsigned int cond,
1298 snd_pcm_hw_param_t var,
1299 const struct snd_pcm_hw_constraint_list *l)
1300{
1301 return snd_pcm_hw_rule_add(runtime, cond, var,
1302 snd_pcm_hw_rule_list, (void *)l,
1303 var, -1);
1304}
1305EXPORT_SYMBOL(snd_pcm_hw_constraint_list);
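/*
 * Illustrative sketch (not part of this file): constraining the rate to a
 * fixed set of values supported by a hypothetical codec.
 *
 *	static const unsigned int example_rates[] = { 8000, 16000, 48000 };
 *	static const struct snd_pcm_hw_constraint_list example_rate_list = {
 *		.count = ARRAY_SIZE(example_rates),
 *		.list  = example_rates,
 *		.mask  = 0,
 *	};
 *
 *	err = snd_pcm_hw_constraint_list(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_RATE,
 *					 &example_rate_list);
 */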
1306
1307static int snd_pcm_hw_rule_ranges(struct snd_pcm_hw_params *params,
1308 struct snd_pcm_hw_rule *rule)
1309{
1310 struct snd_pcm_hw_constraint_ranges *r = rule->private;
1311 return snd_interval_ranges(hw_param_interval(params, rule->var),
1312 r->count, r->ranges, r->mask);
1313}
1314
1315
1316/**
1317 * snd_pcm_hw_constraint_ranges - apply list of range constraints to a parameter
1318 * @runtime: PCM runtime instance
1319 * @cond: condition bits
1320 * @var: hw_params variable to apply the list of range constraints
1321 * @r: ranges
1322 *
1323 * Apply the list of range constraints to an interval parameter.
1324 *
1325 * Return: Zero if successful, or a negative error code on failure.
1326 */
1327int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime,
1328 unsigned int cond,
1329 snd_pcm_hw_param_t var,
1330 const struct snd_pcm_hw_constraint_ranges *r)
1331{
1332 return snd_pcm_hw_rule_add(runtime, cond, var,
1333 snd_pcm_hw_rule_ranges, (void *)r,
1334 var, -1);
1335}
1336EXPORT_SYMBOL(snd_pcm_hw_constraint_ranges);
1337
1338static int snd_pcm_hw_rule_ratnums(struct snd_pcm_hw_params *params,
1339 struct snd_pcm_hw_rule *rule)
1340{
1341 const struct snd_pcm_hw_constraint_ratnums *r = rule->private;
1342 unsigned int num = 0, den = 0;
1343 int err;
1344 err = snd_interval_ratnum(hw_param_interval(params, rule->var),
1345 r->nrats, r->rats, &num, &den);
1346 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1347 params->rate_num = num;
1348 params->rate_den = den;
1349 }
1350 return err;
1351}
1352
1353/**
1354 * snd_pcm_hw_constraint_ratnums - apply ratnums constraint to a parameter
1355 * @runtime: PCM runtime instance
1356 * @cond: condition bits
1357 * @var: hw_params variable to apply the ratnums constraint
1358 * @r: struct snd_pcm_hw_constraint_ratnums constraints
1359 *
1360 * Return: Zero if successful, or a negative error code on failure.
1361 */
1362int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
1363 unsigned int cond,
1364 snd_pcm_hw_param_t var,
1365 const struct snd_pcm_hw_constraint_ratnums *r)
1366{
1367 return snd_pcm_hw_rule_add(runtime, cond, var,
1368 snd_pcm_hw_rule_ratnums, (void *)r,
1369 var, -1);
1370}
1371EXPORT_SYMBOL(snd_pcm_hw_constraint_ratnums);
1372
1373static int snd_pcm_hw_rule_ratdens(struct snd_pcm_hw_params *params,
1374 struct snd_pcm_hw_rule *rule)
1375{
1376 const struct snd_pcm_hw_constraint_ratdens *r = rule->private;
1377 unsigned int num = 0, den = 0;
1378 int err = snd_interval_ratden(hw_param_interval(params, rule->var),
1379 r->nrats, r->rats, &num, &den);
1380 if (err >= 0 && den && rule->var == SNDRV_PCM_HW_PARAM_RATE) {
1381 params->rate_num = num;
1382 params->rate_den = den;
1383 }
1384 return err;
1385}
1386
1387/**
1388 * snd_pcm_hw_constraint_ratdens - apply ratdens constraint to a parameter
1389 * @runtime: PCM runtime instance
1390 * @cond: condition bits
1391 * @var: hw_params variable to apply the ratdens constraint
1392 * @r: struct snd_pcm_hw_constraint_ratdens constraints
1393 *
1394 * Return: Zero if successful, or a negative error code on failure.
1395 */
1396int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime,
1397 unsigned int cond,
1398 snd_pcm_hw_param_t var,
1399 const struct snd_pcm_hw_constraint_ratdens *r)
1400{
1401 return snd_pcm_hw_rule_add(runtime, cond, var,
1402 snd_pcm_hw_rule_ratdens, (void *)r,
1403 var, -1);
1404}
1405EXPORT_SYMBOL(snd_pcm_hw_constraint_ratdens);
1406
1407static int snd_pcm_hw_rule_msbits(struct snd_pcm_hw_params *params,
1408 struct snd_pcm_hw_rule *rule)
1409{
1410 unsigned int l = (unsigned long) rule->private;
1411 int width = l & 0xffff;
1412 unsigned int msbits = l >> 16;
1413 const struct snd_interval *i =
1414 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
1415
1416 if (!snd_interval_single(i))
1417 return 0;
1418
1419 if ((snd_interval_value(i) == width) ||
1420 (width == 0 && snd_interval_value(i) > msbits))
1421 params->msbits = min_not_zero(params->msbits, msbits);
1422
1423 return 0;
1424}
1425
1426/**
1427 * snd_pcm_hw_constraint_msbits - add a hw constraint msbits rule
1428 * @runtime: PCM runtime instance
1429 * @cond: condition bits
1430 * @width: sample bits width
1431 * @msbits: msbits width
1432 *
1433 * This constraint will set the number of most significant bits (msbits) if a
1434 * sample format with the specified width has been selected. If width is set to 0,
1435 * the msbits will be set for any sample format with a width larger than the
1436 * specified msbits.
1437 *
1438 * Return: Zero if successful, or a negative error code on failure.
1439 */
1440int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime,
1441 unsigned int cond,
1442 unsigned int width,
1443 unsigned int msbits)
1444{
1445 unsigned long l = (msbits << 16) | width;
1446 return snd_pcm_hw_rule_add(runtime, cond, -1,
1447 snd_pcm_hw_rule_msbits,
1448 (void*) l,
1449 SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
1450}
1451EXPORT_SYMBOL(snd_pcm_hw_constraint_msbits);
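
/*
 * Example: a minimal sketch declaring that a hypothetical device carries
 * only 24 valid most significant bits within 32-bit samples:
 *
 *	err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
 *	if (err < 0)
 *		return err;
 */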
1452
1453static int snd_pcm_hw_rule_step(struct snd_pcm_hw_params *params,
1454 struct snd_pcm_hw_rule *rule)
1455{
1456 unsigned long step = (unsigned long) rule->private;
1457 return snd_interval_step(hw_param_interval(params, rule->var), step);
1458}
1459
1460/**
1461 * snd_pcm_hw_constraint_step - add a hw constraint step rule
1462 * @runtime: PCM runtime instance
1463 * @cond: condition bits
1464 * @var: hw_params variable to apply the step constraint
1465 * @step: step size
1466 *
1467 * Return: Zero if successful, or a negative error code on failure.
1468 */
1469int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime,
1470 unsigned int cond,
1471 snd_pcm_hw_param_t var,
1472 unsigned long step)
1473{
1474 return snd_pcm_hw_rule_add(runtime, cond, var,
1475 snd_pcm_hw_rule_step, (void *) step,
1476 var, -1);
1477}
1478EXPORT_SYMBOL(snd_pcm_hw_constraint_step);
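
/*
 * Example: a minimal sketch aligning the period size in bytes to a
 * hypothetical DMA burst size (the step value is an assumption):
 *
 *	err = snd_pcm_hw_constraint_step(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
 */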
1479
1480static int snd_pcm_hw_rule_pow2(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule)
1481{
1482 static const unsigned int pow2_sizes[] = {
1483 1<<0, 1<<1, 1<<2, 1<<3, 1<<4, 1<<5, 1<<6, 1<<7,
1484 1<<8, 1<<9, 1<<10, 1<<11, 1<<12, 1<<13, 1<<14, 1<<15,
1485 1<<16, 1<<17, 1<<18, 1<<19, 1<<20, 1<<21, 1<<22, 1<<23,
1486 1<<24, 1<<25, 1<<26, 1<<27, 1<<28, 1<<29, 1<<30
1487 };
1488 return snd_interval_list(hw_param_interval(params, rule->var),
1489 ARRAY_SIZE(pow2_sizes), pow2_sizes, 0);
1490}
1491
1492/**
1493 * snd_pcm_hw_constraint_pow2 - add a hw constraint power-of-2 rule
1494 * @runtime: PCM runtime instance
1495 * @cond: condition bits
1496 * @var: hw_params variable to apply the power-of-2 constraint
1497 *
1498 * Return: Zero if successful, or a negative error code on failure.
1499 */
1500int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime,
1501 unsigned int cond,
1502 snd_pcm_hw_param_t var)
1503{
1504 return snd_pcm_hw_rule_add(runtime, cond, var,
1505 snd_pcm_hw_rule_pow2, NULL,
1506 var, -1);
1507}
1508EXPORT_SYMBOL(snd_pcm_hw_constraint_pow2);
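
/*
 * Example: a minimal sketch restricting the buffer size to powers of two,
 * e.g. for hardware with a power-of-two ring buffer pointer (an assumed
 * hardware property):
 *
 *	err = snd_pcm_hw_constraint_pow2(runtime, 0,
 *					 SNDRV_PCM_HW_PARAM_BUFFER_SIZE);
 */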
1509
1510static int snd_pcm_hw_rule_noresample_func(struct snd_pcm_hw_params *params,
1511 struct snd_pcm_hw_rule *rule)
1512{
1513 unsigned int base_rate = (unsigned int)(uintptr_t)rule->private;
1514 struct snd_interval *rate;
1515
1516 rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
1517 return snd_interval_list(rate, 1, &base_rate, 0);
1518}
1519
1520/**
1521 * snd_pcm_hw_rule_noresample - add a rule to allow disabling hw resampling
1522 * @runtime: PCM runtime instance
1523 * @base_rate: the rate at which the hardware does not resample
1524 *
1525 * Return: Zero if successful, or a negative error code on failure.
1526 */
1527int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime,
1528 unsigned int base_rate)
1529{
1530 return snd_pcm_hw_rule_add(runtime, SNDRV_PCM_HW_PARAMS_NORESAMPLE,
1531 SNDRV_PCM_HW_PARAM_RATE,
1532 snd_pcm_hw_rule_noresample_func,
1533 (void *)(uintptr_t)base_rate,
1534 SNDRV_PCM_HW_PARAM_RATE, -1);
1535}
1536EXPORT_SYMBOL(snd_pcm_hw_rule_noresample);
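
/*
 * Example: a minimal sketch pinning the rate to an assumed native rate of
 * 48000 Hz when the application requests no-resample mode:
 *
 *	err = snd_pcm_hw_rule_noresample(runtime, 48000);
 *	if (err < 0)
 *		return err;
 */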
1537
1538static void _snd_pcm_hw_param_any(struct snd_pcm_hw_params *params,
1539 snd_pcm_hw_param_t var)
1540{
1541 if (hw_is_mask(var)) {
1542 snd_mask_any(hw_param_mask(params, var));
1543 params->cmask |= 1 << var;
1544 params->rmask |= 1 << var;
1545 return;
1546 }
1547 if (hw_is_interval(var)) {
1548 snd_interval_any(hw_param_interval(params, var));
1549 params->cmask |= 1 << var;
1550 params->rmask |= 1 << var;
1551 return;
1552 }
1553 snd_BUG();
1554}
1555
1556void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params)
1557{
1558 unsigned int k;
1559 memset(params, 0, sizeof(*params));
1560 for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++)
1561 _snd_pcm_hw_param_any(params, k);
1562 for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
1563 _snd_pcm_hw_param_any(params, k);
1564 params->info = ~0U;
1565}
1566EXPORT_SYMBOL(_snd_pcm_hw_params_any);
1567
1568/**
1569 * snd_pcm_hw_param_value - return @params field @var value
1570 * @params: the hw_params instance
1571 * @var: parameter to retrieve
1572 * @dir: pointer to the direction (-1,0,1) or %NULL
1573 *
1574 * Return: The value for field @var if it's fixed in configuration space
1575 * defined by @params. -%EINVAL otherwise.
1576 */
1577int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params,
1578 snd_pcm_hw_param_t var, int *dir)
1579{
1580 if (hw_is_mask(var)) {
1581 const struct snd_mask *mask = hw_param_mask_c(params, var);
1582 if (!snd_mask_single(mask))
1583 return -EINVAL;
1584 if (dir)
1585 *dir = 0;
1586 return snd_mask_value(mask);
1587 }
1588 if (hw_is_interval(var)) {
1589 const struct snd_interval *i = hw_param_interval_c(params, var);
1590 if (!snd_interval_single(i))
1591 return -EINVAL;
1592 if (dir)
1593 *dir = i->openmin;
1594 return snd_interval_value(i);
1595 }
1596 return -EINVAL;
1597}
1598EXPORT_SYMBOL(snd_pcm_hw_param_value);
1599
1600void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params,
1601 snd_pcm_hw_param_t var)
1602{
1603 if (hw_is_mask(var)) {
1604 snd_mask_none(hw_param_mask(params, var));
1605 params->cmask |= 1 << var;
1606 params->rmask |= 1 << var;
1607 } else if (hw_is_interval(var)) {
1608 snd_interval_none(hw_param_interval(params, var));
1609 params->cmask |= 1 << var;
1610 params->rmask |= 1 << var;
1611 } else {
1612 snd_BUG();
1613 }
1614}
1615EXPORT_SYMBOL(_snd_pcm_hw_param_setempty);
1616
1617static int _snd_pcm_hw_param_first(struct snd_pcm_hw_params *params,
1618 snd_pcm_hw_param_t var)
1619{
1620 int changed;
1621 if (hw_is_mask(var))
1622 changed = snd_mask_refine_first(hw_param_mask(params, var));
1623 else if (hw_is_interval(var))
1624 changed = snd_interval_refine_first(hw_param_interval(params, var));
1625 else
1626 return -EINVAL;
1627 if (changed > 0) {
1628 params->cmask |= 1 << var;
1629 params->rmask |= 1 << var;
1630 }
1631 return changed;
1632}
1633
1634
1635/**
1636 * snd_pcm_hw_param_first - refine config space and return minimum value
1637 * @pcm: PCM instance
1638 * @params: the hw_params instance
1639 * @var: parameter to retrieve
1640 * @dir: pointer to the direction (-1,0,1) or %NULL
1641 *
1642 * Inside configuration space defined by @params remove from @var all
1643 * values > minimum. Reduce configuration space accordingly.
1644 *
1645 * Return: The minimum, or a negative error code on failure.
1646 */
1647int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
1648 struct snd_pcm_hw_params *params,
1649 snd_pcm_hw_param_t var, int *dir)
1650{
1651 int changed = _snd_pcm_hw_param_first(params, var);
1652 if (changed < 0)
1653 return changed;
1654 if (params->rmask) {
1655 int err = snd_pcm_hw_refine(pcm, params);
1656 if (err < 0)
1657 return err;
1658 }
1659 return snd_pcm_hw_param_value(params, var, dir);
1660}
1661EXPORT_SYMBOL(snd_pcm_hw_param_first);
1662
1663static int _snd_pcm_hw_param_last(struct snd_pcm_hw_params *params,
1664 snd_pcm_hw_param_t var)
1665{
1666 int changed;
1667 if (hw_is_mask(var))
1668 changed = snd_mask_refine_last(hw_param_mask(params, var));
1669 else if (hw_is_interval(var))
1670 changed = snd_interval_refine_last(hw_param_interval(params, var));
1671 else
1672 return -EINVAL;
1673 if (changed > 0) {
1674 params->cmask |= 1 << var;
1675 params->rmask |= 1 << var;
1676 }
1677 return changed;
1678}
1679
1680
1681/**
1682 * snd_pcm_hw_param_last - refine config space and return maximum value
1683 * @pcm: PCM instance
1684 * @params: the hw_params instance
1685 * @var: parameter to retrieve
1686 * @dir: pointer to the direction (-1,0,1) or %NULL
1687 *
1688 * Inside configuration space defined by @params remove from @var all
1689 * values < maximum. Reduce configuration space accordingly.
1690 *
1691 * Return: The maximum, or a negative error code on failure.
1692 */
1693int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
1694 struct snd_pcm_hw_params *params,
1695 snd_pcm_hw_param_t var, int *dir)
1696{
1697 int changed = _snd_pcm_hw_param_last(params, var);
1698 if (changed < 0)
1699 return changed;
1700 if (params->rmask) {
1701 int err = snd_pcm_hw_refine(pcm, params);
1702 if (err < 0)
1703 return err;
1704 }
1705 return snd_pcm_hw_param_value(params, var, dir);
1706}
1707EXPORT_SYMBOL(snd_pcm_hw_param_last);
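
/*
 * Example: a minimal sketch of an in-kernel PCM user picking the largest
 * still-available buffer size from a configuration space ("params" is
 * assumed to be an already refined hw_params instance):
 *
 *	int buffer_size;
 *
 *	buffer_size = snd_pcm_hw_param_last(substream, params,
 *					    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
 *					    NULL);
 *	if (buffer_size < 0)
 *		return buffer_size;
 */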
1708
1709/**
1710 * snd_pcm_hw_params_bits - Get the number of bits per sample.
1711 * @p: hardware parameters
1712 *
1713 * Return: The number of bits per sample based on the format,
1714 * subformat and msbits of the specified hw params.
1715 */
1716int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p)
1717{
1718 snd_pcm_subformat_t subformat = params_subformat(p);
1719 snd_pcm_format_t format = params_format(p);
1720
1721 switch (format) {
1722 case SNDRV_PCM_FORMAT_S32_LE:
1723 case SNDRV_PCM_FORMAT_U32_LE:
1724 case SNDRV_PCM_FORMAT_S32_BE:
1725 case SNDRV_PCM_FORMAT_U32_BE:
1726 switch (subformat) {
1727 case SNDRV_PCM_SUBFORMAT_MSBITS_20:
1728 return 20;
1729 case SNDRV_PCM_SUBFORMAT_MSBITS_24:
1730 return 24;
1731 case SNDRV_PCM_SUBFORMAT_MSBITS_MAX:
1732 case SNDRV_PCM_SUBFORMAT_STD:
1733 default:
1734 break;
1735 }
1736 fallthrough;
1737 default:
1738 return snd_pcm_format_width(format);
1739 }
1740}
1741EXPORT_SYMBOL(snd_pcm_hw_params_bits);
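
/*
 * Example: a minimal sketch of a driver's .hw_params callback using the
 * result to program the number of valid sample bits into its hardware
 * (the foo_* names and helper are assumptions):
 *
 *	static int foo_hw_params(struct snd_pcm_substream *substream,
 *				 struct snd_pcm_hw_params *params)
 *	{
 *		int bits = snd_pcm_hw_params_bits(params);
 *
 *		foo_write_sample_width(substream->private_data, bits);
 *		return 0;
 *	}
 */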
1742
1743static int snd_pcm_lib_ioctl_reset(struct snd_pcm_substream *substream,
1744 void *arg)
1745{
1746 struct snd_pcm_runtime *runtime = substream->runtime;
1747 unsigned long flags;
1748 snd_pcm_stream_lock_irqsave(substream, flags);
1749 if (snd_pcm_running(substream) &&
1750 snd_pcm_update_hw_ptr(substream) >= 0)
1751 runtime->status->hw_ptr %= runtime->buffer_size;
1752 else {
1753 runtime->status->hw_ptr = 0;
1754 runtime->hw_ptr_wrap = 0;
1755 }
1756 snd_pcm_stream_unlock_irqrestore(substream, flags);
1757 return 0;
1758}
1759
1760static int snd_pcm_lib_ioctl_channel_info(struct snd_pcm_substream *substream,
1761 void *arg)
1762{
1763 struct snd_pcm_channel_info *info = arg;
1764 struct snd_pcm_runtime *runtime = substream->runtime;
1765 int width;
1766 if (!(runtime->info & SNDRV_PCM_INFO_MMAP)) {
1767 info->offset = -1;
1768 return 0;
1769 }
1770 width = snd_pcm_format_physical_width(runtime->format);
1771 if (width < 0)
1772 return width;
1773 info->offset = 0;
1774 switch (runtime->access) {
1775 case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
1776 case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
1777 info->first = info->channel * width;
1778 info->step = runtime->channels * width;
1779 break;
1780 case SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED:
1781 case SNDRV_PCM_ACCESS_RW_NONINTERLEAVED:
1782 {
1783 size_t size = runtime->dma_bytes / runtime->channels;
1784 info->first = info->channel * size * 8;
1785 info->step = width;
1786 break;
1787 }
1788 default:
1789 snd_BUG();
1790 break;
1791 }
1792 return 0;
1793}
1794
1795static int snd_pcm_lib_ioctl_fifo_size(struct snd_pcm_substream *substream,
1796 void *arg)
1797{
1798 struct snd_pcm_hw_params *params = arg;
1799 snd_pcm_format_t format;
1800 int channels;
1801 ssize_t frame_size;
1802
1803 params->fifo_size = substream->runtime->hw.fifo_size;
1804 if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_FIFO_IN_FRAMES)) {
1805 format = params_format(params);
1806 channels = params_channels(params);
1807 frame_size = snd_pcm_format_size(format, channels);
1808 if (frame_size > 0)
1809 params->fifo_size /= frame_size;
1810 }
1811 return 0;
1812}
1813
1814/**
1815 * snd_pcm_lib_ioctl - a generic PCM ioctl callback
1816 * @substream: the pcm substream instance
1817 * @cmd: ioctl command
1818 * @arg: ioctl argument
1819 *
1820 * Processes the generic ioctl commands for PCM.
1821 * Can be passed as the ioctl callback for PCM ops.
1822 *
1823 * Return: Zero if successful, or a negative error code on failure.
1824 */
1825int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
1826 unsigned int cmd, void *arg)
1827{
1828 switch (cmd) {
1829 case SNDRV_PCM_IOCTL1_RESET:
1830 return snd_pcm_lib_ioctl_reset(substream, arg);
1831 case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
1832 return snd_pcm_lib_ioctl_channel_info(substream, arg);
1833 case SNDRV_PCM_IOCTL1_FIFO_SIZE:
1834 return snd_pcm_lib_ioctl_fifo_size(substream, arg);
1835 }
1836 return -ENXIO;
1837}
1838EXPORT_SYMBOL(snd_pcm_lib_ioctl);
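
/*
 * Example: a minimal sketch of a driver that needs no special ioctl handling
 * and therefore points its PCM ops at this helper (the foo_* names are
 * assumptions):
 *
 *	static const struct snd_pcm_ops foo_pcm_ops = {
 *		.open = foo_pcm_open,
 *		.close = foo_pcm_close,
 *		.ioctl = snd_pcm_lib_ioctl,
 *		.hw_params = foo_hw_params,
 *		.pointer = foo_pointer,
 *	};
 */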
1839
1840/**
1841 * snd_pcm_period_elapsed_under_stream_lock() - update the status of runtime for the next period
1842 * under acquired lock of PCM substream.
1843 * @substream: the instance of pcm substream.
1844 *
1845 * This function is called when a batch of audio data frames of the same size as the period of the
1846 * buffer has been processed in the audio data transmission.
1847 *
1848 * The call of this function updates the runtime status with the latest position of the audio data
1849 * transmission, checks for overrun and underrun of the buffer, wakes up user processes waiting for
1850 * available audio data frames, samples the audio timestamp, and stops or drains the PCM
1851 * substream according to the configured thresholds.
1852 *
1853 * The function is intended for the case that the PCM driver handles audio data frames under the
1854 * already acquired lock of the PCM substream; e.g. in a callback of any operation of &snd_pcm_ops
1855 * in process context. In interrupt context, it's preferable to use ``snd_pcm_period_elapsed()``
1856 * instead, since the lock of the PCM substream would have to be acquired in advance for this function.
1857 *
1858 * The developer should pay attention to the fact that some callbacks in &snd_pcm_ops are invoked
1859 * by the call of this function:
1860 *
1861 * - .pointer - to retrieve the current position of audio data transmission by frame count or XRUN state.
1862 * - .trigger - with SNDRV_PCM_TRIGGER_STOP at XRUN or DRAINING state.
1863 * - .get_time_info - to retrieve the audio timestamp if needed.
1864 *
1865 * Even if more than one period has elapsed since the last call, this function needs to be called only once.
1866 */
1867void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream)
1868{
1869 struct snd_pcm_runtime *runtime;
1870
1871 if (PCM_RUNTIME_CHECK(substream))
1872 return;
1873 runtime = substream->runtime;
1874
1875 if (!snd_pcm_running(substream) ||
1876 snd_pcm_update_hw_ptr0(substream, 1) < 0)
1877 goto _end;
1878
1879#ifdef CONFIG_SND_PCM_TIMER
1880 if (substream->timer_running)
1881 snd_timer_interrupt(substream->timer, 1);
1882#endif
1883 _end:
1884 snd_kill_fasync(runtime->fasync, SIGIO, POLL_IN);
1885}
1886EXPORT_SYMBOL(snd_pcm_period_elapsed_under_stream_lock);
1887
1888/**
1889 * snd_pcm_period_elapsed() - update the status of runtime for the next period by acquiring lock of
1890 * PCM substream.
1891 * @substream: the instance of PCM substream.
1892 *
1893 * This function is mostly similar to ``snd_pcm_period_elapsed_under_stream_lock()`` except that it
1894 * acquires the lock of the PCM substream by itself.
1895 *
1896 * It's typically called from an IRQ handler when a hardware IRQ occurs to notify the event that
1897 * a batch of audio data frames of the same size as the period of the buffer has been processed in
1898 * the audio data transmission.
1899 */
1900void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
1901{
1902 unsigned long flags;
1903
1904 if (snd_BUG_ON(!substream))
1905 return;
1906
1907 snd_pcm_stream_lock_irqsave(substream, flags);
1908 snd_pcm_period_elapsed_under_stream_lock(substream);
1909 snd_pcm_stream_unlock_irqrestore(substream, flags);
1910}
1911EXPORT_SYMBOL(snd_pcm_period_elapsed);
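
/*
 * Example: a minimal sketch of an interrupt handler acknowledging the period
 * interrupt and notifying the PCM core (the foo_* names and the chip
 * structure are assumptions):
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_chip *chip = dev_id;
 *
 *		foo_ack_irq(chip);
 *		snd_pcm_period_elapsed(chip->substream);
 *		return IRQ_HANDLED;
 *	}
 */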
1912
1913/*
1914 * Wait until avail_min data becomes available
1915 * Returns a negative error code if any error occurs during operation.
1916 * The available space is stored in availp. When err = 0 and avail = 0
1917 * on the capture stream, it indicates that the stream is in DRAINING state.
1918 */
1919static int wait_for_avail(struct snd_pcm_substream *substream,
1920 snd_pcm_uframes_t *availp)
1921{
1922 struct snd_pcm_runtime *runtime = substream->runtime;
1923 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
1924 wait_queue_entry_t wait;
1925 int err = 0;
1926 snd_pcm_uframes_t avail = 0;
1927 long wait_time, tout;
1928
1929 init_waitqueue_entry(&wait, current);
1930 set_current_state(TASK_INTERRUPTIBLE);
1931 add_wait_queue(&runtime->tsleep, &wait);
1932
1933 if (runtime->no_period_wakeup)
1934 wait_time = MAX_SCHEDULE_TIMEOUT;
1935 else {
1936 /* use wait time from substream if available */
1937 if (substream->wait_time) {
1938 wait_time = substream->wait_time;
1939 } else {
1940 wait_time = 100;
1941
1942 if (runtime->rate) {
1943 long t = runtime->buffer_size * 1100 / runtime->rate;
1944 wait_time = max(t, wait_time);
1945 }
1946 }
1947 wait_time = msecs_to_jiffies(wait_time);
1948 }
1949
1950 for (;;) {
1951 if (signal_pending(current)) {
1952 err = -ERESTARTSYS;
1953 break;
1954 }
1955
1956 /*
1957 * We need to check if space became available already
1958 * (and thus the wakeup happened already) first to close
1959 * the race of space already having become available.
1960	 * This check must happen after being added to the waitqueue
1961 * and having current state be INTERRUPTIBLE.
1962 */
1963 avail = snd_pcm_avail(substream);
1964 if (avail >= runtime->twake)
1965 break;
1966 snd_pcm_stream_unlock_irq(substream);
1967
1968 tout = schedule_timeout(wait_time);
1969
1970 snd_pcm_stream_lock_irq(substream);
1971 set_current_state(TASK_INTERRUPTIBLE);
1972 switch (runtime->state) {
1973 case SNDRV_PCM_STATE_SUSPENDED:
1974 err = -ESTRPIPE;
1975 goto _endloop;
1976 case SNDRV_PCM_STATE_XRUN:
1977 err = -EPIPE;
1978 goto _endloop;
1979 case SNDRV_PCM_STATE_DRAINING:
1980 if (is_playback)
1981 err = -EPIPE;
1982 else
1983 avail = 0; /* indicate draining */
1984 goto _endloop;
1985 case SNDRV_PCM_STATE_OPEN:
1986 case SNDRV_PCM_STATE_SETUP:
1987 case SNDRV_PCM_STATE_DISCONNECTED:
1988 err = -EBADFD;
1989 goto _endloop;
1990 case SNDRV_PCM_STATE_PAUSED:
1991 continue;
1992 }
1993 if (!tout) {
1994 pcm_dbg(substream->pcm,
1995 "%s timeout (DMA or IRQ trouble?)\n",
1996 is_playback ? "playback write" : "capture read");
1997 err = -EIO;
1998 break;
1999 }
2000 }
2001 _endloop:
2002 set_current_state(TASK_RUNNING);
2003 remove_wait_queue(&runtime->tsleep, &wait);
2004 *availp = avail;
2005 return err;
2006}
2007
2008typedef int (*pcm_transfer_f)(struct snd_pcm_substream *substream,
2009 int channel, unsigned long hwoff,
2010 struct iov_iter *iter, unsigned long bytes);
2011
2012typedef int (*pcm_copy_f)(struct snd_pcm_substream *, snd_pcm_uframes_t, void *,
2013 snd_pcm_uframes_t, snd_pcm_uframes_t, pcm_transfer_f,
2014 bool);
2015
2016/* calculate the target DMA-buffer position to be written/read */
2017static void *get_dma_ptr(struct snd_pcm_runtime *runtime,
2018 int channel, unsigned long hwoff)
2019{
2020 return runtime->dma_area + hwoff +
2021 channel * (runtime->dma_bytes / runtime->channels);
2022}
2023
2024/* default copy ops for write; used for both interleaved and non-interleaved modes */
2025static int default_write_copy(struct snd_pcm_substream *substream,
2026 int channel, unsigned long hwoff,
2027 struct iov_iter *iter, unsigned long bytes)
2028{
2029 if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2030 bytes, iter) != bytes)
2031 return -EFAULT;
2032 return 0;
2033}
2034
2035/* fill silence instead of copy data; called as a transfer helper
2036 * from __snd_pcm_lib_write() or directly from noninterleaved_copy() when
2037 * a NULL buffer is passed
2038 */
2039static int fill_silence(struct snd_pcm_substream *substream, int channel,
2040 unsigned long hwoff, struct iov_iter *iter,
2041 unsigned long bytes)
2042{
2043 struct snd_pcm_runtime *runtime = substream->runtime;
2044
2045 if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
2046 return 0;
2047 if (substream->ops->fill_silence)
2048 return substream->ops->fill_silence(substream, channel,
2049 hwoff, bytes);
2050
2051 snd_pcm_format_set_silence(runtime->format,
2052 get_dma_ptr(runtime, channel, hwoff),
2053 bytes_to_samples(runtime, bytes));
2054 return 0;
2055}
2056
2057/* default copy ops for read; used for both interleaved and non-interleaved modes */
2058static int default_read_copy(struct snd_pcm_substream *substream,
2059 int channel, unsigned long hwoff,
2060 struct iov_iter *iter, unsigned long bytes)
2061{
2062 if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
2063 bytes, iter) != bytes)
2064 return -EFAULT;
2065 return 0;
2066}
2067
2068/* call transfer with the filled iov_iter */
2069static int do_transfer(struct snd_pcm_substream *substream, int c,
2070 unsigned long hwoff, void *data, unsigned long bytes,
2071 pcm_transfer_f transfer, bool in_kernel)
2072{
2073 struct iov_iter iter;
2074 int err, type;
2075
2076 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
2077 type = ITER_SOURCE;
2078 else
2079 type = ITER_DEST;
2080
2081 if (in_kernel) {
2082 struct kvec kvec = { data, bytes };
2083
2084 iov_iter_kvec(&iter, type, &kvec, 1, bytes);
2085 return transfer(substream, c, hwoff, &iter, bytes);
2086 }
2087
2088 err = import_ubuf(type, (__force void __user *)data, bytes, &iter);
2089 if (err)
2090 return err;
2091 return transfer(substream, c, hwoff, &iter, bytes);
2092}
2093
2094/* call transfer function with the converted pointers and sizes;
2095 * for interleaved mode, it's one shot for all samples
2096 */
2097static int interleaved_copy(struct snd_pcm_substream *substream,
2098 snd_pcm_uframes_t hwoff, void *data,
2099 snd_pcm_uframes_t off,
2100 snd_pcm_uframes_t frames,
2101 pcm_transfer_f transfer,
2102 bool in_kernel)
2103{
2104 struct snd_pcm_runtime *runtime = substream->runtime;
2105
2106 /* convert to bytes */
2107 hwoff = frames_to_bytes(runtime, hwoff);
2108 off = frames_to_bytes(runtime, off);
2109 frames = frames_to_bytes(runtime, frames);
2110
2111 return do_transfer(substream, 0, hwoff, data + off, frames, transfer,
2112 in_kernel);
2113}
2114
2115/* call transfer function with the converted pointers and sizes for each
2116 * non-interleaved channel; when buffer is NULL, silencing instead of copying
2117 */
2118static int noninterleaved_copy(struct snd_pcm_substream *substream,
2119 snd_pcm_uframes_t hwoff, void *data,
2120 snd_pcm_uframes_t off,
2121 snd_pcm_uframes_t frames,
2122 pcm_transfer_f transfer,
2123 bool in_kernel)
2124{
2125 struct snd_pcm_runtime *runtime = substream->runtime;
2126 int channels = runtime->channels;
2127 void **bufs = data;
2128 int c, err;
2129
2130	/* convert to bytes; note that it's not frames_to_bytes() here.
2131	 * in non-interleaved mode, we copy each channel separately, thus
2132	 * each per-channel copy covers n_samples bytes; multiplied by the
2133	 * number of channels this amounts to the whole frames.
2134	 */
2134 off = samples_to_bytes(runtime, off);
2135 frames = samples_to_bytes(runtime, frames);
2136 hwoff = samples_to_bytes(runtime, hwoff);
2137 for (c = 0; c < channels; ++c, ++bufs) {
2138 if (!data || !*bufs)
2139 err = fill_silence(substream, c, hwoff, NULL, frames);
2140 else
2141 err = do_transfer(substream, c, hwoff, *bufs + off,
2142 frames, transfer, in_kernel);
2143 if (err < 0)
2144 return err;
2145 }
2146 return 0;
2147}
2148
2149/* fill silence on the given buffer position;
2150 * called from snd_pcm_playback_silence()
2151 */
2152static int fill_silence_frames(struct snd_pcm_substream *substream,
2153 snd_pcm_uframes_t off, snd_pcm_uframes_t frames)
2154{
2155 if (substream->runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
2156 substream->runtime->access == SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
2157 return interleaved_copy(substream, off, NULL, 0, frames,
2158 fill_silence, true);
2159 else
2160 return noninterleaved_copy(substream, off, NULL, 0, frames,
2161 fill_silence, true);
2162}
2163
2164/* sanity-check for read/write methods */
2165static int pcm_sanity_check(struct snd_pcm_substream *substream)
2166{
2167 struct snd_pcm_runtime *runtime;
2168 if (PCM_RUNTIME_CHECK(substream))
2169 return -ENXIO;
2170 runtime = substream->runtime;
2171 if (snd_BUG_ON(!substream->ops->copy && !runtime->dma_area))
2172 return -EINVAL;
2173 if (runtime->state == SNDRV_PCM_STATE_OPEN)
2174 return -EBADFD;
2175 return 0;
2176}
2177
2178static int pcm_accessible_state(struct snd_pcm_runtime *runtime)
2179{
2180 switch (runtime->state) {
2181 case SNDRV_PCM_STATE_PREPARED:
2182 case SNDRV_PCM_STATE_RUNNING:
2183 case SNDRV_PCM_STATE_PAUSED:
2184 return 0;
2185 case SNDRV_PCM_STATE_XRUN:
2186 return -EPIPE;
2187 case SNDRV_PCM_STATE_SUSPENDED:
2188 return -ESTRPIPE;
2189 default:
2190 return -EBADFD;
2191 }
2192}
2193
2194/* update to the given appl_ptr and call ack callback if needed;
2195 * when an error is returned, take back to the original value
2196 */
2197int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream,
2198 snd_pcm_uframes_t appl_ptr)
2199{
2200 struct snd_pcm_runtime *runtime = substream->runtime;
2201 snd_pcm_uframes_t old_appl_ptr = runtime->control->appl_ptr;
2202 snd_pcm_sframes_t diff;
2203 int ret;
2204
2205 if (old_appl_ptr == appl_ptr)
2206 return 0;
2207
2208 if (appl_ptr >= runtime->boundary)
2209 return -EINVAL;
2210 /*
2211 * check if a rewind is requested by the application
2212 */
2213 if (substream->runtime->info & SNDRV_PCM_INFO_NO_REWINDS) {
2214 diff = appl_ptr - old_appl_ptr;
2215 if (diff >= 0) {
2216 if (diff > runtime->buffer_size)
2217 return -EINVAL;
2218 } else {
2219 if (runtime->boundary + diff > runtime->buffer_size)
2220 return -EINVAL;
2221 }
2222 }
2223
2224 runtime->control->appl_ptr = appl_ptr;
2225 if (substream->ops->ack) {
2226 ret = substream->ops->ack(substream);
2227 if (ret < 0) {
2228 runtime->control->appl_ptr = old_appl_ptr;
2229 if (ret == -EPIPE)
2230 __snd_pcm_xrun(substream);
2231 return ret;
2232 }
2233 }
2234
2235 trace_applptr(substream, old_appl_ptr, appl_ptr);
2236
2237 return 0;
2238}
2239
2240/* the common loop for read/write data */
2241snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
2242 void *data, bool interleaved,
2243 snd_pcm_uframes_t size, bool in_kernel)
2244{
2245 struct snd_pcm_runtime *runtime = substream->runtime;
2246 snd_pcm_uframes_t xfer = 0;
2247 snd_pcm_uframes_t offset = 0;
2248 snd_pcm_uframes_t avail;
2249 pcm_copy_f writer;
2250 pcm_transfer_f transfer;
2251 bool nonblock;
2252 bool is_playback;
2253 int err;
2254
2255 err = pcm_sanity_check(substream);
2256 if (err < 0)
2257 return err;
2258
2259 is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
2260 if (interleaved) {
2261 if (runtime->access != SNDRV_PCM_ACCESS_RW_INTERLEAVED &&
2262 runtime->channels > 1)
2263 return -EINVAL;
2264 writer = interleaved_copy;
2265 } else {
2266 if (runtime->access != SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
2267 return -EINVAL;
2268 writer = noninterleaved_copy;
2269 }
2270
2271 if (!data) {
2272 if (is_playback)
2273 transfer = fill_silence;
2274 else
2275 return -EINVAL;
2276 } else {
2277 if (substream->ops->copy)
2278 transfer = substream->ops->copy;
2279 else
2280 transfer = is_playback ?
2281 default_write_copy : default_read_copy;
2282 }
2283
2284 if (size == 0)
2285 return 0;
2286
2287 nonblock = !!(substream->f_flags & O_NONBLOCK);
2288
2289 snd_pcm_stream_lock_irq(substream);
2290 err = pcm_accessible_state(runtime);
2291 if (err < 0)
2292 goto _end_unlock;
2293
2294 runtime->twake = runtime->control->avail_min ? : 1;
2295 if (runtime->state == SNDRV_PCM_STATE_RUNNING)
2296 snd_pcm_update_hw_ptr(substream);
2297
2298 /*
2299 * If size < start_threshold, wait indefinitely. Another
2300 * thread may start capture
2301 */
2302 if (!is_playback &&
2303 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2304 size >= runtime->start_threshold) {
2305 err = snd_pcm_start(substream);
2306 if (err < 0)
2307 goto _end_unlock;
2308 }
2309
2310 avail = snd_pcm_avail(substream);
2311
2312 while (size > 0) {
2313 snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
2314 snd_pcm_uframes_t cont;
2315 if (!avail) {
2316 if (!is_playback &&
2317 runtime->state == SNDRV_PCM_STATE_DRAINING) {
2318 snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
2319 goto _end_unlock;
2320 }
2321 if (nonblock) {
2322 err = -EAGAIN;
2323 goto _end_unlock;
2324 }
2325 runtime->twake = min_t(snd_pcm_uframes_t, size,
2326 runtime->control->avail_min ? : 1);
2327 err = wait_for_avail(substream, &avail);
2328 if (err < 0)
2329 goto _end_unlock;
2330 if (!avail)
2331 continue; /* draining */
2332 }
2333 frames = size > avail ? avail : size;
2334 appl_ptr = READ_ONCE(runtime->control->appl_ptr);
2335 appl_ofs = appl_ptr % runtime->buffer_size;
2336 cont = runtime->buffer_size - appl_ofs;
2337 if (frames > cont)
2338 frames = cont;
2339 if (snd_BUG_ON(!frames)) {
2340 err = -EINVAL;
2341 goto _end_unlock;
2342 }
2343 if (!atomic_inc_unless_negative(&runtime->buffer_accessing)) {
2344 err = -EBUSY;
2345 goto _end_unlock;
2346 }
2347 snd_pcm_stream_unlock_irq(substream);
2348 if (!is_playback)
2349 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
2350 err = writer(substream, appl_ofs, data, offset, frames,
2351 transfer, in_kernel);
2352 if (is_playback)
2353 snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
2354 snd_pcm_stream_lock_irq(substream);
2355 atomic_dec(&runtime->buffer_accessing);
2356 if (err < 0)
2357 goto _end_unlock;
2358 err = pcm_accessible_state(runtime);
2359 if (err < 0)
2360 goto _end_unlock;
2361 appl_ptr += frames;
2362 if (appl_ptr >= runtime->boundary)
2363 appl_ptr -= runtime->boundary;
2364 err = pcm_lib_apply_appl_ptr(substream, appl_ptr);
2365 if (err < 0)
2366 goto _end_unlock;
2367
2368 offset += frames;
2369 size -= frames;
2370 xfer += frames;
2371 avail -= frames;
2372 if (is_playback &&
2373 runtime->state == SNDRV_PCM_STATE_PREPARED &&
2374 snd_pcm_playback_hw_avail(runtime) >= (snd_pcm_sframes_t)runtime->start_threshold) {
2375 err = snd_pcm_start(substream);
2376 if (err < 0)
2377 goto _end_unlock;
2378 }
2379 }
2380 _end_unlock:
2381 runtime->twake = 0;
2382 if (xfer > 0 && err >= 0)
2383 snd_pcm_update_state(substream, runtime);
2384 snd_pcm_stream_unlock_irq(substream);
2385 return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
2386}
2387EXPORT_SYMBOL(__snd_pcm_lib_xfer);
2388
2389/*
2390 * standard channel mapping helpers
2391 */
2392
2393/* default channel maps for multi-channel playbacks, up to 8 channels */
2394const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[] = {
2395 { .channels = 1,
2396 .map = { SNDRV_CHMAP_MONO } },
2397 { .channels = 2,
2398 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2399 { .channels = 4,
2400 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2401 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2402 { .channels = 6,
2403 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2404 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2405 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } },
2406 { .channels = 8,
2407 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2408 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2409 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2410 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2411 { }
2412};
2413EXPORT_SYMBOL_GPL(snd_pcm_std_chmaps);
2414
2415/* alternative channel maps with CLFE <-> surround swapped for 6/8 channels */
2416const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[] = {
2417 { .channels = 1,
2418 .map = { SNDRV_CHMAP_MONO } },
2419 { .channels = 2,
2420 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
2421 { .channels = 4,
2422 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2423 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2424 { .channels = 6,
2425 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2426 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2427 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } },
2428 { .channels = 8,
2429 .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
2430 SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE,
2431 SNDRV_CHMAP_RL, SNDRV_CHMAP_RR,
2432 SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } },
2433 { }
2434};
2435EXPORT_SYMBOL_GPL(snd_pcm_alt_chmaps);
2436
2437static bool valid_chmap_channels(const struct snd_pcm_chmap *info, int ch)
2438{
2439 if (ch > info->max_channels)
2440 return false;
2441 return !info->channel_mask || (info->channel_mask & (1U << ch));
2442}
2443
2444static int pcm_chmap_ctl_info(struct snd_kcontrol *kcontrol,
2445 struct snd_ctl_elem_info *uinfo)
2446{
2447 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2448
2449 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2450 uinfo->count = info->max_channels;
2451 uinfo->value.integer.min = 0;
2452 uinfo->value.integer.max = SNDRV_CHMAP_LAST;
2453 return 0;
2454}
2455
2456/* get callback for channel map ctl element
2457 * stores the channel positions of the first map entry matching the current channel count
2458 */
2459static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol,
2460 struct snd_ctl_elem_value *ucontrol)
2461{
2462 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2463 unsigned int idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
2464 struct snd_pcm_substream *substream;
2465 const struct snd_pcm_chmap_elem *map;
2466
2467 if (!info->chmap)
2468 return -EINVAL;
2469 substream = snd_pcm_chmap_substream(info, idx);
2470 if (!substream)
2471 return -ENODEV;
2472 memset(ucontrol->value.integer.value, 0,
2473 sizeof(long) * info->max_channels);
2474 if (!substream->runtime)
2475 return 0; /* no channels set */
2476 for (map = info->chmap; map->channels; map++) {
2477 int i;
2478 if (map->channels == substream->runtime->channels &&
2479 valid_chmap_channels(info, map->channels)) {
2480 for (i = 0; i < map->channels; i++)
2481 ucontrol->value.integer.value[i] = map->map[i];
2482 return 0;
2483 }
2484 }
2485 return -EINVAL;
2486}
2487
2488/* tlv callback for channel map ctl element
2489 * expands the pre-defined channel maps in a form of TLV
2490 */
2491static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
2492 unsigned int size, unsigned int __user *tlv)
2493{
2494 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2495 const struct snd_pcm_chmap_elem *map;
2496 unsigned int __user *dst;
2497 int c, count = 0;
2498
2499 if (!info->chmap)
2500 return -EINVAL;
2501 if (size < 8)
2502 return -ENOMEM;
2503 if (put_user(SNDRV_CTL_TLVT_CONTAINER, tlv))
2504 return -EFAULT;
2505 size -= 8;
2506 dst = tlv + 2;
2507 for (map = info->chmap; map->channels; map++) {
2508 int chs_bytes = map->channels * 4;
2509 if (!valid_chmap_channels(info, map->channels))
2510 continue;
2511 if (size < 8)
2512 return -ENOMEM;
2513 if (put_user(SNDRV_CTL_TLVT_CHMAP_FIXED, dst) ||
2514 put_user(chs_bytes, dst + 1))
2515 return -EFAULT;
2516 dst += 2;
2517 size -= 8;
2518 count += 8;
2519 if (size < chs_bytes)
2520 return -ENOMEM;
2521 size -= chs_bytes;
2522 count += chs_bytes;
2523 for (c = 0; c < map->channels; c++) {
2524 if (put_user(map->map[c], dst))
2525 return -EFAULT;
2526 dst++;
2527 }
2528 }
2529 if (put_user(count, tlv + 1))
2530 return -EFAULT;
2531 return 0;
2532}
2533
2534static void pcm_chmap_ctl_private_free(struct snd_kcontrol *kcontrol)
2535{
2536 struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
2537 info->pcm->streams[info->stream].chmap_kctl = NULL;
2538 kfree(info);
2539}
2540
2541/**
2542 * snd_pcm_add_chmap_ctls - create channel-mapping control elements
2543 * @pcm: the assigned PCM instance
2544 * @stream: stream direction
2545 * @chmap: channel map elements (for query)
2546 * @max_channels: the max number of channels for the stream
2547 * @private_value: the value passed to each kcontrol's private_value field
2548 * @info_ret: store struct snd_pcm_chmap instance if non-NULL
2549 *
2550 * Create channel-mapping control elements assigned to the given PCM stream(s).
2551 * Return: Zero if successful, or a negative error value.
2552 */
2553int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream,
2554 const struct snd_pcm_chmap_elem *chmap,
2555 int max_channels,
2556 unsigned long private_value,
2557 struct snd_pcm_chmap **info_ret)
2558{
2559 struct snd_pcm_chmap *info;
2560 struct snd_kcontrol_new knew = {
2561 .iface = SNDRV_CTL_ELEM_IFACE_PCM,
2562 .access = SNDRV_CTL_ELEM_ACCESS_READ |
2563 SNDRV_CTL_ELEM_ACCESS_TLV_READ |
2564 SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
2565 .info = pcm_chmap_ctl_info,
2566 .get = pcm_chmap_ctl_get,
2567 .tlv.c = pcm_chmap_ctl_tlv,
2568 };
2569 int err;
2570
2571 if (WARN_ON(pcm->streams[stream].chmap_kctl))
2572 return -EBUSY;
2573 info = kzalloc(sizeof(*info), GFP_KERNEL);
2574 if (!info)
2575 return -ENOMEM;
2576 info->pcm = pcm;
2577 info->stream = stream;
2578 info->chmap = chmap;
2579 info->max_channels = max_channels;
2580 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
2581 knew.name = "Playback Channel Map";
2582 else
2583 knew.name = "Capture Channel Map";
2584 knew.device = pcm->device;
2585 knew.count = pcm->streams[stream].substream_count;
2586 knew.private_value = private_value;
2587 info->kctl = snd_ctl_new1(&knew, info);
2588 if (!info->kctl) {
2589 kfree(info);
2590 return -ENOMEM;
2591 }
2592 info->kctl->private_free = pcm_chmap_ctl_private_free;
2593 err = snd_ctl_add(pcm->card, info->kctl);
2594 if (err < 0)
2595 return err;
2596 pcm->streams[stream].chmap_kctl = info->kctl;
2597 if (info_ret)
2598 *info_ret = info;
2599 return 0;
2600}
2601EXPORT_SYMBOL_GPL(snd_pcm_add_chmap_ctls);
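
/*
 * Example: a minimal sketch registering the standard channel maps for a
 * playback stream with up to 8 channels, typically right after the PCM
 * device has been created (the variable names are assumptions):
 *
 *	err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
 *				     snd_pcm_std_chmaps, 8, 0, NULL);
 *	if (err < 0)
 *		return err;
 */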