// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation. All rights reserved.
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <sound/intel-nhlt.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "avs.h"
#include "control.h"
#include "path.h"
#include "topology.h"

/* Must be called with adev->comp_list_mutex held. */
static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
{
        struct avs_soc_component *acomp;

        list_for_each_entry(acomp, &adev->comp_list, node)
                if (!strcmp(acomp->tplg->name, name))
                        return acomp->tplg;
        return NULL;
}

static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
{
        struct avs_path_module *mod;

        list_for_each_entry(mod, &ppl->mod_list, node)
                if (mod->template->id == template_id)
                        return mod;
        return NULL;
}

static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
{
        struct avs_path_pipeline *ppl;

        list_for_each_entry(ppl, &path->ppl_list, node)
                if (ppl->template->id == template_id)
                        return ppl;
        return NULL;
}

static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
        struct avs_tplg_path_template *pos, *template = NULL;
        struct avs_tplg *tplg;
        struct avs_path *path;

        tplg = avs_path_find_tplg(adev, name);
        if (!tplg)
                return NULL;

        list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
                if (pos->id == template_id) {
                        template = pos;
                        break;
                }
        }
        if (!template)
                return NULL;

        spin_lock(&adev->path_list_lock);
        /* Only one variant of given path template may be instantiated at a time. */
        list_for_each_entry(path, &adev->path_list, node) {
                if (path->template->owner == template) {
                        spin_unlock(&adev->path_list_lock);
                        return path;
                }
        }

        spin_unlock(&adev->path_list_lock);
        return NULL;
}

static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
                               struct avs_audio_format *fmt)
{
        return (params_rate(params) == fmt->sampling_freq &&
                params_channels(params) == fmt->num_channels &&
                params_physical_width(params) == fmt->bit_depth &&
                snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
}

static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
                      struct avs_tplg_path_template *template,
                      struct snd_pcm_hw_params *fe_params,
                      struct snd_pcm_hw_params *be_params)
{
        struct avs_tplg_path *variant;

        list_for_each_entry(variant, &template->path_list, node) {
                /* Check formats before dereferencing them; either may be unspecified. */
                if (!variant->fe_fmt || !variant->be_fmt)
                        continue;

                dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
                        variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
                        variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
                dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
                        variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
                        variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

                if (avs_test_hw_params(fe_params, variant->fe_fmt) &&
                    avs_test_hw_params(be_params, variant->be_fmt))
                        return variant;
        }

        return NULL;
}

__maybe_unused
static bool avs_dma_type_is_host(u32 dma_type)
{
        return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
               dma_type == AVS_DMA_HDA_HOST_INPUT;
}

__maybe_unused
static bool avs_dma_type_is_link(u32 dma_type)
{
        return !avs_dma_type_is_host(dma_type);
}

__maybe_unused
static bool avs_dma_type_is_output(u32 dma_type)
{
        return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
               dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
               dma_type == AVS_DMA_I2S_LINK_OUTPUT;
}

__maybe_unused
static bool avs_dma_type_is_input(u32 dma_type)
{
        return !avs_dma_type_is_output(dma_type);
}

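/*
 * Assemble and send a COPIER module configuration. Depending on the gateway
 * (DMA) type, the config is extended with an NHLT endpoint blob (I2S, DMIC)
 * and the connector's vindex is either taken from the topology or derived
 * from the stream's DMA id (HDA host/link gateways).
 */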
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct nhlt_acpi_table *nhlt = adev->nhlt;
        struct avs_tplg_module *t = mod->template;
        struct avs_copier_cfg *cfg;
        struct nhlt_specific_cfg *ep_blob;
        union avs_connector_node_id node_id = {0};
        size_t cfg_size, data_size = 0;
        void *data = NULL;
        u32 dma_type;
        int ret;

        dma_type = t->cfg_ext->copier.dma_type;
        node_id.dma_type = dma_type;

        switch (dma_type) {
        struct avs_audio_format *fmt;
        int direction;

        case AVS_DMA_I2S_LINK_OUTPUT:
        case AVS_DMA_I2S_LINK_INPUT:
                if (avs_dma_type_is_input(dma_type))
                        direction = SNDRV_PCM_STREAM_CAPTURE;
                else
                        direction = SNDRV_PCM_STREAM_PLAYBACK;

                if (t->cfg_ext->copier.blob_fmt)
                        fmt = t->cfg_ext->copier.blob_fmt;
                else if (direction == SNDRV_PCM_STREAM_CAPTURE)
                        fmt = t->in_fmt;
                else
                        fmt = t->cfg_ext->copier.out_fmt;

                ep_blob = intel_nhlt_get_endpoint_blob(adev->dev,
                                nhlt, t->cfg_ext->copier.vindex.i2s.instance,
                                NHLT_LINK_SSP, fmt->valid_bit_depth, fmt->bit_depth,
                                fmt->num_channels, fmt->sampling_freq, direction,
                                NHLT_DEVICE_I2S);
                if (!ep_blob) {
                        dev_err(adev->dev, "no I2S ep_blob found\n");
                        return -ENOENT;
                }

                data = ep_blob->caps;
                data_size = ep_blob->size;
                /* I2S gateway's vindex is statically assigned in topology */
                node_id.vindex = t->cfg_ext->copier.vindex.val;

                break;

        case AVS_DMA_DMIC_LINK_INPUT:
                direction = SNDRV_PCM_STREAM_CAPTURE;

                if (t->cfg_ext->copier.blob_fmt)
                        fmt = t->cfg_ext->copier.blob_fmt;
                else
                        fmt = t->in_fmt;

                ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0,
                                NHLT_LINK_DMIC, fmt->valid_bit_depth,
                                fmt->bit_depth, fmt->num_channels,
                                fmt->sampling_freq, direction, NHLT_DEVICE_DMIC);
                if (!ep_blob) {
                        dev_err(adev->dev, "no DMIC ep_blob found\n");
                        return -ENOENT;
                }

                data = ep_blob->caps;
                data_size = ep_blob->size;
                /* DMIC gateway's vindex is statically assigned in topology */
                node_id.vindex = t->cfg_ext->copier.vindex.val;

                break;

        case AVS_DMA_HDA_HOST_OUTPUT:
        case AVS_DMA_HDA_HOST_INPUT:
                /* HOST gateway's vindex is dynamically assigned with DMA id */
                node_id.vindex = mod->owner->owner->dma_id;
                break;

        case AVS_DMA_HDA_LINK_OUTPUT:
        case AVS_DMA_HDA_LINK_INPUT:
                node_id.vindex = t->cfg_ext->copier.vindex.val |
                                 mod->owner->owner->dma_id;
                break;

        case INVALID_OBJECT_ID:
        default:
                node_id = INVALID_NODE_ID;
                break;
        }

        cfg_size = sizeof(*cfg) + data_size;
        /* Every config-BLOB contains gateway attributes. */
        if (data_size)
                cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);
        if (cfg_size > AVS_MAILBOX_SIZE)
                return -EINVAL;

        cfg = adev->modcfg_buf;
        memset(cfg, 0, cfg_size);
        cfg->base.cpc = t->cfg_base->cpc;
        cfg->base.ibs = t->cfg_base->ibs;
        cfg->base.obs = t->cfg_base->obs;
        cfg->base.is_pages = t->cfg_base->is_pages;
        cfg->base.audio_fmt = *t->in_fmt;
        cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
        cfg->feature_mask = t->cfg_ext->copier.feature_mask;
        cfg->gtw_cfg.node_id = node_id;
        cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
        /* config_length in DWORDs */
        cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
        if (data)
                memcpy(&cfg->gtw_cfg.config, data, data_size);

        mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

        ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                  t->core_id, t->domain, cfg, cfg_size,
                                  &mod->instance_id);
        return ret;
}

static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_tplg_path_template *path_tmpl;
        struct snd_soc_dapm_widget *w;
        int i;

        path_tmpl = t->owner->owner->owner;
        w = path_tmpl->w;

        for (i = 0; i < w->num_kcontrols; i++) {
                struct avs_control_data *ctl_data;
                struct soc_mixer_control *mc;

                mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
                ctl_data = (struct avs_control_data *)mc->dobj.private;
                if (ctl_data->id == t->ctl_id)
                        return ctl_data;
        }

        return NULL;
}

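/*
 * Build a PEAKVOL/GAIN module configuration. The initial target volume is
 * taken from the path's volume kcontrol when one is attached to the module;
 * otherwise the module starts at maximum gain (S32_MAX).
 */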
static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_control_data *ctl_data;
        struct avs_peakvol_cfg *cfg;
        int volume = S32_MAX;
        size_t cfg_size;
        int ret;

        ctl_data = avs_get_module_control(mod);
        if (ctl_data)
                volume = ctl_data->volume;

        /* Per-channel controls are unsupported, so use a single block for all channels. */
        cfg_size = struct_size(cfg, vols, 1);
        if (cfg_size > AVS_MAILBOX_SIZE)
                return -EINVAL;

        cfg = adev->modcfg_buf;
        memset(cfg, 0, cfg_size);
        cfg->base.cpc = t->cfg_base->cpc;
        cfg->base.ibs = t->cfg_base->ibs;
        cfg->base.obs = t->cfg_base->obs;
        cfg->base.is_pages = t->cfg_base->is_pages;
        cfg->base.audio_fmt = *t->in_fmt;
        cfg->vols[0].target_volume = volume;
        cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
        cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE;
        cfg->vols[0].curve_duration = 0;

        ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
                                  t->domain, cfg, cfg_size, &mod->instance_id);

        return ret;
}

static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_updown_mixer_cfg cfg;
        int i;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
        cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
        for (i = 0; i < AVS_CHANNELS_MAX; i++)
                cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
        cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_src_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.out_freq = t->cfg_ext->src.out_freq;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_asrc_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.out_freq = t->cfg_ext->asrc.out_freq;
        cfg.mode = t->cfg_ext->asrc.mode;
        cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_aec_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
        cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
        cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_mux_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
        cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_wov_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_micsel_cfg cfg;

        cfg.base.cpc = t->cfg_base->cpc;
        cfg.base.ibs = t->cfg_base->ibs;
        cfg.base.obs = t->cfg_base->obs;
        cfg.base.is_pages = t->cfg_base->is_pages;
        cfg.base.audio_fmt = *t->in_fmt;
        cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_modcfg_base cfg;

        cfg.cpc = t->cfg_base->cpc;
        cfg.ibs = t->cfg_base->ibs;
        cfg.obs = t->cfg_base->obs;
        cfg.is_pages = t->cfg_base->is_pages;
        cfg.audio_fmt = *t->in_fmt;

        return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                   t->core_id, t->domain, &cfg, sizeof(cfg),
                                   &mod->instance_id);
}

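/*
 * Build a configuration for module types with no dedicated handler. The
 * generic extension carries the base config followed by the per-pin formats
 * declared by the topology for the module's input and output pins.
 */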
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        struct avs_tplg_module *t = mod->template;
        struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
        struct avs_modcfg_ext *cfg;
        size_t cfg_size, num_pins;
        int ret, i;

        num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
        cfg_size = struct_size(cfg, pin_fmts, num_pins);

        if (cfg_size > AVS_MAILBOX_SIZE)
                return -EINVAL;

        cfg = adev->modcfg_buf;
        memset(cfg, 0, cfg_size);
        cfg->base.cpc = t->cfg_base->cpc;
        cfg->base.ibs = t->cfg_base->ibs;
        cfg->base.obs = t->cfg_base->obs;
        cfg->base.is_pages = t->cfg_base->is_pages;
        cfg->base.audio_fmt = *t->in_fmt;
        cfg->num_input_pins = tcfg->generic.num_input_pins;
        cfg->num_output_pins = tcfg->generic.num_output_pins;

        /* configure pin formats */
        for (i = 0; i < num_pins; i++) {
                struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
                struct avs_pin_format *pin = &cfg->pin_fmts[i];

                pin->pin_index = tpin->pin_index;
                pin->iobs = tpin->iobs;
                pin->audio_fmt = *tpin->fmt;
        }

        ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
                                  t->core_id, t->domain, cfg, cfg_size,
                                  &mod->instance_id);
        return ret;
}

static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        dev_err(adev->dev, "Probe module can't be instantiated by topology\n");
        return -EINVAL;
}

struct avs_module_create {
        guid_t *guid;
        int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

static struct avs_module_create avs_module_create[] = {
        { &AVS_MIXIN_MOD_UUID, avs_modbase_create },
        { &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
        { &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
        { &AVS_COPIER_MOD_UUID, avs_copier_create },
        { &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
        { &AVS_GAIN_MOD_UUID, avs_peakvol_create },
        { &AVS_MICSEL_MOD_UUID, avs_micsel_create },
        { &AVS_MUX_MOD_UUID, avs_mux_create },
        { &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
        { &AVS_SRCINTC_MOD_UUID, avs_src_create },
        { &AVS_AEC_MOD_UUID, avs_aec_create },
        { &AVS_ASRC_MOD_UUID, avs_asrc_create },
        { &AVS_INTELWOV_MOD_UUID, avs_wov_create },
        { &AVS_PROBE_MOD_UUID, avs_probe_create },
};

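/*
 * Dispatch module creation by the module-type UUID found in the topology.
 * Types with no entry in avs_module_create[] fall back to the generic
 * extension config handled by avs_modext_create().
 */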
static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
        const guid_t *type = &mod->template->cfg_ext->type;

        for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
                if (guid_equal(type, avs_module_create[i].guid))
                        return avs_module_create[i].create(adev, mod);

        return avs_modext_create(adev, mod);
}

static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
        kfree(mod);
}

static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
                       struct avs_path_pipeline *owner,
                       struct avs_tplg_module *template)
{
        struct avs_path_module *mod;
        int module_id, ret;

        module_id = avs_get_module_id(adev, &template->cfg_ext->type);
        if (module_id < 0)
                return ERR_PTR(module_id);

        mod = kzalloc(sizeof(*mod), GFP_KERNEL);
        if (!mod)
                return ERR_PTR(-ENOMEM);

        mod->template = template;
        mod->module_id = module_id;
        mod->owner = owner;
        INIT_LIST_HEAD(&mod->node);

        ret = avs_path_module_type_create(adev, mod);
        if (ret) {
                dev_err(adev->dev, "module-type create failed: %d\n", ret);
                kfree(mod);
                return ERR_PTR(ret);
        }

        return mod;
}

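/*
 * Resolve a binding into its source and sink modules. The target module may
 * live in a different, already instantiated path, which is looked up by
 * topology name and path-template id.
 */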
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
        struct avs_path_module *this_mod, *target_mod;
        struct avs_path_pipeline *target_ppl;
        struct avs_path *target_path;
        struct avs_tplg_binding *t;

        t = binding->template;
        this_mod = avs_path_find_module(binding->owner, t->mod_id);
        if (!this_mod) {
                dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
                return -EINVAL;
        }

        /* update with target_tplg_name too */
        target_path = avs_path_find_path(adev, t->target_tplg_name,
                                         t->target_path_tmpl_id);
        if (!target_path) {
                dev_err(adev->dev, "target path %s:%d not found\n",
                        t->target_tplg_name, t->target_path_tmpl_id);
                return -EINVAL;
        }

        target_ppl = avs_path_find_pipeline(target_path, t->target_ppl_id);
        if (!target_ppl) {
                dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
                return -EINVAL;
        }

        target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
        if (!target_mod) {
                dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
                return -EINVAL;
        }

        if (t->is_sink) {
                binding->sink = this_mod;
                binding->sink_pin = t->mod_pin;
                binding->source = target_mod;
                binding->source_pin = t->target_mod_pin;
        } else {
                binding->sink = target_mod;
                binding->sink_pin = t->target_mod_pin;
                binding->source = this_mod;
                binding->source_pin = t->mod_pin;
        }

        return 0;
}

static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
        kfree(binding);
}

static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
                                                        struct avs_path_pipeline *owner,
                                                        struct avs_tplg_binding *t)
{
        struct avs_path_binding *binding;

        binding = kzalloc(sizeof(*binding), GFP_KERNEL);
        if (!binding)
                return ERR_PTR(-ENOMEM);

        binding->template = t;
        binding->owner = owner;
        INIT_LIST_HEAD(&binding->node);

        return binding;
}

static int avs_path_pipeline_arm(struct avs_dev *adev,
                                 struct avs_path_pipeline *ppl)
{
        struct avs_path_module *mod;

        list_for_each_entry(mod, &ppl->mod_list, node) {
                struct avs_path_module *source, *sink;
                int ret;

                /*
                 * The last module on the list (or the only one) has no
                 * successor within the pipeline, so there is nothing to
                 * bind it to.
                 */
                if (mod == list_last_entry(&ppl->mod_list,
                                           struct avs_path_module, node))
                        break;

                /* bind current module to next module on list */
                source = mod;
                sink = list_next_entry(mod, node);
                if (!source || !sink)
                        return -EINVAL;

                ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
                                   sink->module_id, sink->instance_id, 0, 0);
                if (ret)
                        return AVS_IPC_RET(ret);
        }

        return 0;
}

static void avs_path_pipeline_free(struct avs_dev *adev,
                                   struct avs_path_pipeline *ppl)
{
        struct avs_path_binding *binding, *bsave;
        struct avs_path_module *mod, *save;

        list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
                list_del(&binding->node);
                avs_path_binding_free(adev, binding);
        }

        avs_dsp_delete_pipeline(adev, ppl->instance_id);

        /* Unload resources occupied by owned modules */
        list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
                avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
                                      mod->owner->instance_id,
                                      mod->template->core_id);
                avs_path_module_free(adev, mod);
        }

        list_del(&ppl->node);
        kfree(ppl);
}

static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
                         struct avs_tplg_pipeline *template)
{
        struct avs_path_pipeline *ppl;
        struct avs_tplg_pplcfg *cfg = template->cfg;
        struct avs_tplg_module *tmod;
        int ret, i;

        ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
        if (!ppl)
                return ERR_PTR(-ENOMEM);

        ppl->template = template;
        ppl->owner = owner;
        INIT_LIST_HEAD(&ppl->binding_list);
        INIT_LIST_HEAD(&ppl->mod_list);
        INIT_LIST_HEAD(&ppl->node);

        ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
                                      cfg->lp, cfg->attributes,
                                      &ppl->instance_id);
        if (ret) {
                dev_err(adev->dev, "error creating pipeline %d\n", ret);
                kfree(ppl);
                return ERR_PTR(ret);
        }

        list_for_each_entry(tmod, &template->mod_list, node) {
                struct avs_path_module *mod;

                mod = avs_path_module_create(adev, ppl, tmod);
                if (IS_ERR(mod)) {
                        ret = PTR_ERR(mod);
                        dev_err(adev->dev, "error creating module %d\n", ret);
                        goto init_err;
                }

                list_add_tail(&mod->node, &ppl->mod_list);
        }

        for (i = 0; i < template->num_bindings; i++) {
                struct avs_path_binding *binding;

                binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
                if (IS_ERR(binding)) {
                        ret = PTR_ERR(binding);
                        dev_err(adev->dev, "error creating binding %d\n", ret);
                        goto init_err;
                }

                list_add_tail(&binding->node, &ppl->binding_list);
        }

        return ppl;

init_err:
        avs_path_pipeline_free(adev, ppl);
        return ERR_PTR(ret);
}

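/*
 * Instantiate all pipelines described by the path template and register the
 * new path on adev->path_list so bindings from other paths can find it.
 */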
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
                         struct avs_tplg_path *template, u32 dma_id)
{
        struct avs_tplg_pipeline *tppl;

        path->owner = adev;
        path->template = template;
        path->dma_id = dma_id;
        INIT_LIST_HEAD(&path->ppl_list);
        INIT_LIST_HEAD(&path->node);

        /* create all the pipelines */
        list_for_each_entry(tppl, &template->ppl_list, node) {
                struct avs_path_pipeline *ppl;

                ppl = avs_path_pipeline_create(adev, path, tppl);
                if (IS_ERR(ppl))
                        return PTR_ERR(ppl);

                list_add_tail(&ppl->node, &path->ppl_list);
        }

        spin_lock(&adev->path_list_lock);
        list_add_tail(&path->node, &adev->path_list);
        spin_unlock(&adev->path_list_lock);

        return 0;
}

static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
        struct avs_path_pipeline *ppl;
        struct avs_path_binding *binding;
        int ret;

        list_for_each_entry(ppl, &path->ppl_list, node) {
                /*
                 * Arm all pipeline bindings before binding the pipeline's
                 * internal modules; arming costs no IPCs, binding does.
                 */
                list_for_each_entry(binding, &ppl->binding_list, node) {
                        ret = avs_path_binding_arm(adev, binding);
                        if (ret < 0)
                                return ret;
                }

                ret = avs_path_pipeline_arm(adev, ppl);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static void avs_path_free_unlocked(struct avs_path *path)
{
        struct avs_path_pipeline *ppl, *save;

        spin_lock(&path->owner->path_list_lock);
        list_del(&path->node);
        spin_unlock(&path->owner->path_list_lock);

        list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
                avs_path_pipeline_free(path->owner, ppl);

        kfree(path);
}

static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
                                                 struct avs_tplg_path *template)
{
        struct avs_path *path;
        int ret;

        path = kzalloc(sizeof(*path), GFP_KERNEL);
        if (!path)
                return ERR_PTR(-ENOMEM);

        ret = avs_path_init(adev, path, template, dma_id);
        if (ret < 0)
                goto err;

        ret = avs_path_arm(adev, path);
        if (ret < 0)
                goto err;

        path->state = AVS_PPL_STATE_INVALID;
        return path;
err:
        avs_path_free_unlocked(path);
        return ERR_PTR(ret);
}

void avs_path_free(struct avs_path *path)
{
        struct avs_dev *adev = path->owner;

        mutex_lock(&adev->path_mutex);
        avs_path_free_unlocked(path);
        mutex_unlock(&adev->path_mutex);
}

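/*
 * Create a DSP path for the given FE/BE hw_params. The path-template variant
 * whose front-end and back-end formats match the params is selected, then
 * instantiated and armed under adev->path_mutex.
 */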
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
                                 struct avs_tplg_path_template *template,
                                 struct snd_pcm_hw_params *fe_params,
                                 struct snd_pcm_hw_params *be_params)
{
        struct avs_tplg_path *variant;
        struct avs_path *path;

        variant = avs_path_find_variant(adev, template, fe_params, be_params);
        if (!variant) {
                dev_err(adev->dev, "no matching variant found\n");
                return ERR_PTR(-ENOENT);
        }

        /* Serialize path and its components creation. */
        mutex_lock(&adev->path_mutex);
        /* Satisfy needs of avs_path_find_tplg(). */
        mutex_lock(&adev->comp_list_mutex);

        path = avs_path_create_unlocked(adev, dma_id, variant);

        mutex_unlock(&adev->comp_list_mutex);
        mutex_unlock(&adev->path_mutex);

        return path;
}

static int avs_path_bind_prepare(struct avs_dev *adev,
                                 struct avs_path_binding *binding)
{
        const struct avs_audio_format *src_fmt, *sink_fmt;
        struct avs_tplg_module *tsource = binding->source->template;
        struct avs_path_module *source = binding->source;
        int ret;

        /*
         * only copier modules about to be bound
         * to output pin other than 0 need preparation
         */
        if (!binding->source_pin)
                return 0;
        if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
                return 0;

        src_fmt = tsource->in_fmt;
        sink_fmt = binding->sink->template->in_fmt;

        ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
                                             source->instance_id, binding->source_pin,
                                             src_fmt, sink_fmt);
        if (ret) {
                dev_err(adev->dev, "config copier failed: %d\n", ret);
                return AVS_IPC_RET(ret);
        }

        return 0;
}

int avs_path_bind(struct avs_path *path)
{
        struct avs_path_pipeline *ppl;
        struct avs_dev *adev = path->owner;
        int ret;

        list_for_each_entry(ppl, &path->ppl_list, node) {
                struct avs_path_binding *binding;

                list_for_each_entry(binding, &ppl->binding_list, node) {
                        struct avs_path_module *source, *sink;

                        source = binding->source;
                        sink = binding->sink;

                        ret = avs_path_bind_prepare(adev, binding);
                        if (ret < 0)
                                return ret;

                        ret = avs_ipc_bind(adev, source->module_id,
                                           source->instance_id, sink->module_id,
                                           sink->instance_id, binding->sink_pin,
                                           binding->source_pin);
                        if (ret) {
                                dev_err(adev->dev, "bind path failed: %d\n", ret);
                                return AVS_IPC_RET(ret);
                        }
                }
        }

        return 0;
}

int avs_path_unbind(struct avs_path *path)
{
        struct avs_path_pipeline *ppl;
        struct avs_dev *adev = path->owner;
        int ret;

        list_for_each_entry(ppl, &path->ppl_list, node) {
                struct avs_path_binding *binding;

                list_for_each_entry(binding, &ppl->binding_list, node) {
                        struct avs_path_module *source, *sink;

                        source = binding->source;
                        sink = binding->sink;

                        ret = avs_ipc_unbind(adev, source->module_id,
                                             source->instance_id, sink->module_id,
                                             sink->instance_id, binding->sink_pin,
                                             binding->source_pin);
                        if (ret) {
                                dev_err(adev->dev, "unbind path failed: %d\n", ret);
                                return AVS_IPC_RET(ret);
                        }
                }
        }

        return 0;
}

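/*
 * Pipeline-state helpers. Each walks the path's pipelines requesting RESET,
 * PAUSED or RUNNING state and caches the result in path->state; an IPC
 * failure marks the path AVS_PPL_STATE_INVALID. avs_path_run() only touches
 * pipelines whose topology trigger matches the requested one.
 */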
int avs_path_reset(struct avs_path *path)
{
        struct avs_path_pipeline *ppl;
        struct avs_dev *adev = path->owner;
        int ret;

        if (path->state == AVS_PPL_STATE_RESET)
                return 0;

        list_for_each_entry(ppl, &path->ppl_list, node) {
                ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
                                                 AVS_PPL_STATE_RESET);
                if (ret) {
                        dev_err(adev->dev, "reset path failed: %d\n", ret);
                        path->state = AVS_PPL_STATE_INVALID;
                        return AVS_IPC_RET(ret);
                }
        }

        path->state = AVS_PPL_STATE_RESET;
        return 0;
}

int avs_path_pause(struct avs_path *path)
{
        struct avs_path_pipeline *ppl;
        struct avs_dev *adev = path->owner;
        int ret;

        if (path->state == AVS_PPL_STATE_PAUSED)
                return 0;

        list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
                ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
                                                 AVS_PPL_STATE_PAUSED);
                if (ret) {
                        dev_err(adev->dev, "pause path failed: %d\n", ret);
                        path->state = AVS_PPL_STATE_INVALID;
                        return AVS_IPC_RET(ret);
                }
        }

        path->state = AVS_PPL_STATE_PAUSED;
        return 0;
}

int avs_path_run(struct avs_path *path, int trigger)
{
        struct avs_path_pipeline *ppl;
        struct avs_dev *adev = path->owner;
        int ret;

        if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
                return 0;

        list_for_each_entry(ppl, &path->ppl_list, node) {
                if (ppl->template->cfg->trigger != trigger)
                        continue;

                ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
                                                 AVS_PPL_STATE_RUNNING);
                if (ret) {
                        dev_err(adev->dev, "run path failed: %d\n", ret);
                        path->state = AVS_PPL_STATE_INVALID;
                        return AVS_IPC_RET(ret);
                }
        }

        path->state = AVS_PPL_STATE_RUNNING;
        return 0;
}