/* (stray "Loading..." page artifact removed — not part of the source) */
1/*
2 * Copyright (C) 2013, Analog Devices Inc.
3 * Author: Lars-Peter Clausen <lars@metafoo.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * You should have received a copy of the GNU General Public License along
11 * with this program; if not, write to the Free Software Foundation, Inc.,
12 * 675 Mass Ave, Cambridge, MA 02139, USA.
13 *
14 */
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/dmaengine.h>
18#include <linux/slab.h>
19#include <sound/pcm.h>
20#include <sound/pcm_params.h>
21#include <sound/soc.h>
22#include <linux/dma-mapping.h>
23#include <linux/of.h>
24
25#include <sound/dmaengine_pcm.h>
26
27/*
28 * The platforms dmaengine driver does not support reporting the amount of
29 * bytes that are still left to transfer.
30 */
31#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
32
/* Per-device state for a generic dmaengine based PCM. */
struct dmaengine_pcm {
	/* One DMA channel per stream direction (playback/capture). */
	struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
	/* Optional platform-supplied configuration; may be NULL. */
	const struct snd_dmaengine_pcm_config *config;
	/* Embedded platform; soc_platform_to_pcm() recovers this struct. */
	struct snd_soc_platform platform;
	/* SND_DMAENGINE_PCM_FLAG_* quirks passed at registration time. */
	unsigned int flags;
};
39
40static struct dmaengine_pcm *soc_platform_to_pcm(struct snd_soc_platform *p)
41{
42 return container_of(p, struct dmaengine_pcm, platform);
43}
44
45static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
46 struct snd_pcm_substream *substream)
47{
48 if (!pcm->chan[substream->stream])
49 return NULL;
50
51 return pcm->chan[substream->stream]->device->dev;
52}
53
54/**
55 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
56 * @substream: PCM substream
57 * @params: hw_params
58 * @slave_config: DMA slave config to prepare
59 *
60 * This function can be used as a generic prepare_slave_config callback for
61 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
62 * DAI DMA data. Internally the function will first call
63 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
64 * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
65 * remaining fields based on the DAI DMA data.
66 */
67int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
68 struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
69{
70 struct snd_soc_pcm_runtime *rtd = substream->private_data;
71 struct snd_dmaengine_dai_dma_data *dma_data;
72 int ret;
73
74 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
75
76 ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
77 if (ret)
78 return ret;
79
80 snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
81 slave_config);
82
83 return 0;
84}
85EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
86
87static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
88 struct snd_pcm_hw_params *params)
89{
90 struct snd_soc_pcm_runtime *rtd = substream->private_data;
91 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
92 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
93 int (*prepare_slave_config)(struct snd_pcm_substream *substream,
94 struct snd_pcm_hw_params *params,
95 struct dma_slave_config *slave_config);
96 struct dma_slave_config slave_config;
97 int ret;
98
99 memset(&slave_config, 0, sizeof(slave_config));
100
101 if (!pcm->config)
102 prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
103 else
104 prepare_slave_config = pcm->config->prepare_slave_config;
105
106 if (prepare_slave_config) {
107 ret = prepare_slave_config(substream, params, &slave_config);
108 if (ret)
109 return ret;
110
111 ret = dmaengine_slave_config(chan, &slave_config);
112 if (ret)
113 return ret;
114 }
115
116 return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
117}
118
/*
 * Build the snd_pcm_hardware description for this substream, either from the
 * platform-supplied pcm_hardware or from defaults constrained by the DMA
 * channel's reported capabilities.
 */
static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct dma_slave_caps dma_caps;
	struct snd_pcm_hardware hw;
	/*
	 * Default bus-width assumption (1/2/4 bytes) when the dmaengine
	 * driver does not implement dma_get_slave_caps().
	 */
	u32 addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	int i, ret;

	/* A platform-provided hardware description takes precedence. */
	if (pcm->config && pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	/* A period must fit into a single DMA segment. */
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	/* Without residue reporting the pointer only advances per period. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	ret = dma_get_slave_caps(chan, &dma_caps);
	if (ret == 0) {
		if (dma_caps.cmd_pause)
			hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
		if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
			hw.info |= SNDRV_PCM_INFO_BATCH;

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			addr_widths = dma_caps.dst_addr_widths;
		else
			addr_widths = dma_caps.src_addr_widths;
	}

	/*
	 * If SND_DMAENGINE_PCM_DAI_FLAG_PACK is set keep
	 * hw.formats set to 0, meaning no restrictions are in place.
	 * In this case it's the responsibility of the DAI driver to
	 * provide the supported format information.
	 */
	if (!(dma_data->flags & SND_DMAENGINE_PCM_DAI_FLAG_PACK))
		/*
		 * Prepare formats mask for valid/allowed sample types. If the
		 * dma does not have support for the given physical word size,
		 * it needs to be masked out so user space can not use the
		 * format which produces corrupted audio.
		 * In case the dma driver does not implement the slave_caps the
		 * default assumption is that it supports 1, 2 and 4 bytes
		 * widths.
		 */
		for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
			int bits = snd_pcm_format_physical_width(i);

			/*
			 * Enable only samples with DMA supported physical
			 * widths
			 */
			switch (bits) {
			case 8:
			case 16:
			case 24:
			case 32:
			case 64:
				/* bits/8 equals the DMA_SLAVE_BUSWIDTH_* value. */
				if (addr_widths & (1 << (bits / 8)))
					hw.formats |= (1LL << i);
				break;
			default:
				/* Unsupported types */
				break;
			}
		}

	return snd_soc_set_runtime_hwparams(substream, &hw);
}
205
206static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
207{
208 struct snd_soc_pcm_runtime *rtd = substream->private_data;
209 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
210 struct dma_chan *chan = pcm->chan[substream->stream];
211 int ret;
212
213 ret = dmaengine_pcm_set_runtime_hwparams(substream);
214 if (ret)
215 return ret;
216
217 return snd_dmaengine_pcm_open(substream, chan);
218}
219
220static struct dma_chan *dmaengine_pcm_compat_request_channel(
221 struct snd_soc_pcm_runtime *rtd,
222 struct snd_pcm_substream *substream)
223{
224 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
225 struct snd_dmaengine_dai_dma_data *dma_data;
226 dma_filter_fn fn = NULL;
227
228 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
229
230 if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
231 return pcm->chan[0];
232
233 if (pcm->config && pcm->config->compat_request_channel)
234 return pcm->config->compat_request_channel(rtd, substream);
235
236 if (pcm->config)
237 fn = pcm->config->compat_filter_fn;
238
239 return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
240}
241
242static bool dmaengine_pcm_can_report_residue(struct device *dev,
243 struct dma_chan *chan)
244{
245 struct dma_slave_caps dma_caps;
246 int ret;
247
248 ret = dma_get_slave_caps(chan, &dma_caps);
249 if (ret != 0) {
250 dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
251 ret);
252 return false;
253 }
254
255 if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
256 return false;
257
258 return true;
259}
260
/*
 * pcm_new callback: acquire the DMA channels (via compat mode if needed) and
 * preallocate the DMA buffers for every substream of this runtime.
 */
static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = rtd->platform->dev;
	struct snd_pcm_substream *substream;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;
	int ret;

	/* Use the platform supplied sizes, or fall back to 512 KiB. */
	if (config && config->prealloc_buffer_size) {
		prealloc_buffer_size = config->prealloc_buffer_size;
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	} else {
		prealloc_buffer_size = 512 * 1024;
		max_buffer_size = SIZE_MAX;
	}

	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
		substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		/* DT lookup may have failed; try the compat path if allowed. */
		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(rtd,
				substream);
		}

		if (!pcm->chan[i]) {
			dev_err(rtd->platform->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		ret = snd_pcm_lib_preallocate_pages(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);
		if (ret)
			return ret;

		/* Fall back to period counting if residue is unavailable. */
		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
	}

	return 0;
}
310
311static snd_pcm_uframes_t dmaengine_pcm_pointer(
312 struct snd_pcm_substream *substream)
313{
314 struct snd_soc_pcm_runtime *rtd = substream->private_data;
315 struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
316
317 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
318 return snd_dmaengine_pcm_pointer_no_residue(substream);
319 else
320 return snd_dmaengine_pcm_pointer(substream);
321}
322
/* PCM operations: mix of local callbacks and generic dmaengine helpers. */
static const struct snd_pcm_ops dmaengine_pcm_ops = {
	.open		= dmaengine_pcm_open,
	.close		= snd_dmaengine_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= dmaengine_pcm_hw_params,
	.hw_free	= snd_pcm_lib_free_pages,
	.trigger	= snd_dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
};
332
/* Platform driver; probed late so the DAI drivers can register first. */
static const struct snd_soc_platform_driver dmaengine_pcm_platform = {
	.component_driver = {
		.probe_order = SND_SOC_COMP_ORDER_LATE,
	},
	.ops		= &dmaengine_pcm_ops,
	.pcm_new	= dmaengine_pcm_new,
};
340
/* Default DT channel names per stream direction ("dma-names" property). */
static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};
345
346static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
347 struct device *dev, const struct snd_dmaengine_pcm_config *config)
348{
349 unsigned int i;
350 const char *name;
351 struct dma_chan *chan;
352
353 if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || !dev->of_node)
354 return 0;
355
356 if (config && config->dma_dev) {
357 /*
358 * If this warning is seen, it probably means that your Linux
359 * device structure does not match your HW device structure.
360 * It would be best to refactor the Linux device structure to
361 * correctly match the HW structure.
362 */
363 dev_warn(dev, "DMA channels sourced from device %s",
364 dev_name(config->dma_dev));
365 dev = config->dma_dev;
366 }
367
368 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
369 i++) {
370 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
371 name = "rx-tx";
372 else
373 name = dmaengine_pcm_dma_channel_names[i];
374 if (config && config->chan_names[i])
375 name = config->chan_names[i];
376 chan = dma_request_slave_channel_reason(dev, name);
377 if (IS_ERR(chan)) {
378 if (PTR_ERR(chan) == -EPROBE_DEFER)
379 return -EPROBE_DEFER;
380 pcm->chan[i] = NULL;
381 } else {
382 pcm->chan[i] = chan;
383 }
384 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
385 break;
386 }
387
388 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
389 pcm->chan[1] = pcm->chan[0];
390
391 return 0;
392}
393
394static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
395{
396 unsigned int i;
397
398 for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
399 i++) {
400 if (!pcm->chan[i])
401 continue;
402 dma_release_channel(pcm->chan[i]);
403 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
404 break;
405 }
406}
407
/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

	pcm->config = config;
	pcm->flags = flags;

	/* Grab the DT described channels up front; may defer probing. */
	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_platform(dev, &pcm->platform,
		&dmaengine_pcm_platform);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	/* Safe on partial failure: release_chan skips NULL channels. */
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
444
/**
 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
 * @dev: Parent device the PCM was register with
 *
 * Removes a dmaengine based PCM device previously registered with
 * snd_dmaengine_pcm_register.
 */
void snd_dmaengine_pcm_unregister(struct device *dev)
{
	struct snd_soc_platform *platform = snd_soc_lookup_platform(dev);
	struct dmaengine_pcm *pcm;

	if (!platform)
		return;

	pcm = soc_platform_to_pcm(platform);

	/* Tear down in reverse order of registration. */
	snd_soc_remove_platform(platform);
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
468
469MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0+
2//
3// Copyright (C) 2013, Analog Devices Inc.
4// Author: Lars-Peter Clausen <lars@metafoo.de>
5
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/dmaengine.h>
9#include <linux/slab.h>
10#include <sound/pcm.h>
11#include <sound/pcm_params.h>
12#include <sound/soc.h>
13#include <linux/dma-mapping.h>
14#include <linux/of.h>
15
16#include <sound/dmaengine_pcm.h>
17
/* Default preallocated per-stream DMA buffer size, overridable at load time. */
static unsigned int prealloc_buffer_size_kbytes = 512;
module_param(prealloc_buffer_size_kbytes, uint, 0444);
MODULE_PARM_DESC(prealloc_buffer_size_kbytes, "Preallocate DMA buffer size (KB).");
21
22/*
23 * The platforms dmaengine driver does not support reporting the amount of
24 * bytes that are still left to transfer.
25 */
26#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
27
28static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
29 struct snd_pcm_substream *substream)
30{
31 if (!pcm->chan[substream->stream])
32 return NULL;
33
34 return pcm->chan[substream->stream]->device->dev;
35}
36
/**
 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
 * @substream: PCM substream
 * @params: hw_params
 * @slave_config: DMA slave config to prepare
 *
 * This function can be used as a generic prepare_slave_config callback for
 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
 * DAI DMA data. Internally the function will first call
 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
 * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
 * remaining fields based on the DAI DMA data.
 */
int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_dmaengine_dai_dma_data *dma_data;
	int ret;

	/* Only a single CPU DAI is supported by this generic helper. */
	if (rtd->dai_link->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	/* Derive generic fields (direction, widths, ...) from hw_params. */
	ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
	if (ret)
		return ret;

	/* Let the DAI DMA data fill in addresses, bursts and the rest. */
	snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
		slave_config);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
75
76static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
77 struct snd_pcm_substream *substream,
78 struct snd_pcm_hw_params *params)
79{
80 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
81 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
82 struct dma_slave_config slave_config;
83 int ret;
84
85 if (!pcm->config->prepare_slave_config)
86 return 0;
87
88 memset(&slave_config, 0, sizeof(slave_config));
89
90 ret = pcm->config->prepare_slave_config(substream, params, &slave_config);
91 if (ret)
92 return ret;
93
94 return dmaengine_slave_config(chan, &slave_config);
95}
96
/*
 * Build the snd_pcm_hardware description for this substream, either from the
 * platform-supplied pcm_hardware or from defaults refined by the DMA
 * channel's reported capabilities.
 */
static int
dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_hardware hw;

	/* Only a single CPU DAI is supported by this generic helper. */
	if (rtd->dai_link->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	/* A platform-provided hardware description takes precedence. */
	if (pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	/* Size a period to the DAI's burst at the widest bus width. */
	hw.period_bytes_min = dma_data->maxburst * DMA_SLAVE_BUSWIDTH_8_BYTES;
	if (!hw.period_bytes_min)
		hw.period_bytes_min = 256;
	/* A period must fit into a single DMA segment. */
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	/* Without residue reporting the pointer only advances per period. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	/**
	 * FIXME: Remove the return value check to align with the code
	 * before adding snd_dmaengine_pcm_refine_runtime_hwparams
	 * function.
	 */
	snd_dmaengine_pcm_refine_runtime_hwparams(substream,
						  dma_data,
						  &hw,
						  chan);

	return snd_soc_set_runtime_hwparams(substream, &hw);
}
147
148static int dmaengine_pcm_open(struct snd_soc_component *component,
149 struct snd_pcm_substream *substream)
150{
151 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
152 struct dma_chan *chan = pcm->chan[substream->stream];
153 int ret;
154
155 ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
156 if (ret)
157 return ret;
158
159 return snd_dmaengine_pcm_open(substream, chan);
160}
161
/* Component close callback: forward to the generic dmaengine PCM helper. */
static int dmaengine_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}
167
/* Component trigger callback: forward to the generic dmaengine PCM helper. */
static int dmaengine_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}
173
/*
 * Request a DMA channel for platforms that do not describe their channels
 * in the device tree (compat mode). Returns NULL on failure.
 */
static struct dma_chan *dmaengine_pcm_compat_request_channel(
	struct snd_soc_component *component,
	struct snd_soc_pcm_runtime *rtd,
	struct snd_pcm_substream *substream)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct snd_dmaengine_dai_dma_data *dma_data;

	/* Only a single CPU DAI is supported by this generic helper. */
	if (rtd->dai_link->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return NULL;
	}

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	/* In half duplex mode both directions share the first channel. */
	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
		return pcm->chan[0];

	/* A platform supplied request callback takes precedence. */
	if (pcm->config->compat_request_channel)
		return pcm->config->compat_request_channel(rtd, substream);

	return snd_dmaengine_pcm_request_channel(pcm->config->compat_filter_fn,
						 dma_data->filter_data);
}
199
200static bool dmaengine_pcm_can_report_residue(struct device *dev,
201 struct dma_chan *chan)
202{
203 struct dma_slave_caps dma_caps;
204 int ret;
205
206 ret = dma_get_slave_caps(chan, &dma_caps);
207 if (ret != 0) {
208 dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
209 ret);
210 return false;
211 }
212
213 if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
214 return false;
215
216 return true;
217}
218
/*
 * pcm_construct callback: acquire the DMA channels (by name, or via compat
 * mode) and set up managed DMA buffers for every substream of this runtime.
 */
static int dmaengine_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = component->dev;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;

	/* Platform supplied size wins; else use the module parameter. */
	if (config->prealloc_buffer_size)
		prealloc_buffer_size = config->prealloc_buffer_size;
	else
		prealloc_buffer_size = prealloc_buffer_size_kbytes * 1024;

	if (config->pcm_hardware && config->pcm_hardware->buffer_bytes_max)
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	else
		max_buffer_size = SIZE_MAX;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		/*
		 * NOTE(review): dma_request_slave_channel() returns NULL on
		 * failure and is deprecated upstream in favour of
		 * dma_request_chan() — confirm before modernising.
		 */
		if (!pcm->chan[i] && config->chan_names[i])
			pcm->chan[i] = dma_request_slave_channel(dev,
				config->chan_names[i]);

		/* Fall back to the non-DT compat path if allowed. */
		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(
				component, rtd, substream);
		}

		if (!pcm->chan[i]) {
			dev_err(component->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		snd_pcm_set_managed_buffer(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);

		/* Fall back to period counting if residue is unavailable. */
		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;

		/* Give the PCM a readable name if none was set. */
		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
			strscpy_pad(rtd->pcm->streams[i].pcm->name,
				    rtd->pcm->streams[i].pcm->id,
				    sizeof(rtd->pcm->streams[i].pcm->name));
		}
	}

	return 0;
}
277
278static snd_pcm_uframes_t dmaengine_pcm_pointer(
279 struct snd_soc_component *component,
280 struct snd_pcm_substream *substream)
281{
282 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
283
284 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
285 return snd_dmaengine_pcm_pointer_no_residue(substream);
286 else
287 return snd_dmaengine_pcm_pointer(substream);
288}
289
/*
 * copy_user callback: move audio data between user space and the DMA area,
 * optionally running the platform's process() hook in between.
 */
static int dmaengine_copy_user(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream,
			       int channel, unsigned long hwoff,
			       void __user *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	int (*process)(struct snd_pcm_substream *substream,
		       int channel, unsigned long hwoff,
		       void *buf, unsigned long bytes) = pcm->config->process;
	bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	/* Non-interleaved layout: each channel owns a slice of dma_area. */
	void *dma_ptr = runtime->dma_area + hwoff +
			channel * (runtime->dma_bytes / runtime->channels);

	/* Playback: user data must land in the DMA area before processing. */
	if (is_playback)
		if (copy_from_user(dma_ptr, buf, bytes))
			return -EFAULT;

	/*
	 * NOTE(review): process() receives the __user pointer force-cast to
	 * void * — implementations are presumably expected to handle that;
	 * confirm against pcm->config->process providers before changing.
	 */
	if (process) {
		int ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
		if (ret < 0)
			return ret;
	}

	/* Capture: hand the captured (and processed) data back to user space. */
	if (!is_playback)
		if (copy_to_user(buf, dma_ptr, bytes))
			return -EFAULT;

	return 0;
}
320
/* Component driver used when the platform config has no process() hook. */
static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};
331
/* Same as dmaengine_pcm_component, plus copy_user for process() configs. */
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};
343
/* Default DT channel names per stream direction ("dma-names" property). */
static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};
348
/*
 * Request the playback/capture DMA channels via the device tree of @dev or,
 * when provided, of config->dma_dev. Missing channels are tolerated (devices
 * may support only TX or only RX); only probe deferral is propagated.
 */
static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	/* Nothing to do without a DT node on either candidate device. */
	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
	    !(config->dma_dev && config->dma_dev->of_node)))
		return 0;

	if (config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for_each_pcm_streams(i) {
		/* Half duplex shares one "rx-tx" channel for both streams. */
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		/* A platform supplied name overrides the default. */
		if (config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan)) {
			/*
			 * Only report probe deferral errors, channels
			 * might not be present for devices that
			 * support only TX or only RX.
			 */
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}
401
402static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
403{
404 unsigned int i;
405
406 for_each_pcm_streams(i) {
407 if (!pcm->chan[i])
408 continue;
409 dma_release_channel(pcm->chan[i]);
410 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
411 break;
412 }
413}
414
/* Fallback config used when a driver registers with config == NULL. */
static const struct snd_dmaengine_pcm_config snd_dmaengine_pcm_default_config = {
	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
};
418
/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	const struct snd_soc_component_driver *driver;
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	pcm->component.debugfs_prefix = "dma";
#endif
	/* Guarantee pcm->config is never NULL for the callbacks. */
	if (!config)
		config = &snd_dmaengine_pcm_default_config;
	pcm->config = config;
	pcm->flags = flags;

	/* Grab the DT described channels up front; may defer probing. */
	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	/* The process() hook needs the variant with a copy_user callback. */
	if (config->process)
		driver = &dmaengine_pcm_component_process;
	else
		driver = &dmaengine_pcm_component;

	ret = snd_soc_component_initialize(&pcm->component, driver, dev);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_component(&pcm->component, NULL, 0);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	/* Safe on partial failure: release_chan skips NULL channels. */
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
468EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
469
/**
 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
 * @dev: Parent device the PCM was register with
 *
 * Removes a dmaengine based PCM device previously registered with
 * snd_dmaengine_pcm_register.
 */
void snd_dmaengine_pcm_unregister(struct device *dev)
{
	struct snd_soc_component *component;
	struct dmaengine_pcm *pcm;

	/* Find our component by the well-known driver name. */
	component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
	if (!component)
		return;

	pcm = soc_component_to_pcm(component);

	/* Tear down in reverse order of registration. */
	snd_soc_unregister_component_by_driver(dev, component->driver);
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
493
494MODULE_LICENSE("GPL");