// SPDX-License-Identifier: GPL-2.0-only
//
// Copyright(c) 2021 Intel Corporation
//
// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
//          Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
//

#include <linux/acpi.h>
#include <acpi/nhlt.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include "avs.h"
#include "control.h"
#include "path.h"
#include "topology.h"

/* Must be called with adev->comp_list_mutex held. */
static struct avs_tplg *
avs_path_find_tplg(struct avs_dev *adev, const char *name)
{
	struct avs_soc_component *acomp;

	list_for_each_entry(acomp, &adev->comp_list, node)
		if (!strcmp(acomp->tplg->name, name))
			return acomp->tplg;
	return NULL;
}

static struct avs_path_module *
avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node)
		if (mod->template->id == template_id)
			return mod;
	return NULL;
}

static struct avs_path_pipeline *
avs_path_find_pipeline(struct avs_path *path, u32 template_id)
{
	struct avs_path_pipeline *ppl;

	list_for_each_entry(ppl, &path->ppl_list, node)
		if (ppl->template->id == template_id)
			return ppl;
	return NULL;
}

static struct avs_path *
avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
{
	struct avs_tplg_path_template *pos, *template = NULL;
	struct avs_tplg *tplg;
	struct avs_path *path;

	tplg = avs_path_find_tplg(adev, name);
	if (!tplg)
		return NULL;

	list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
		if (pos->id == template_id) {
			template = pos;
			break;
		}
	}
	if (!template)
		return NULL;

	spin_lock(&adev->path_list_lock);
	/* Only one variant of given path template may be instantiated at a time. */
	list_for_each_entry(path, &adev->path_list, node) {
		if (path->template->owner == template) {
			spin_unlock(&adev->path_list_lock);
			return path;
		}
	}

	spin_unlock(&adev->path_list_lock);
	return NULL;
}

static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
			       struct avs_audio_format *fmt)
{
	return (params_rate(params) == fmt->sampling_freq &&
		params_channels(params) == fmt->num_channels &&
		params_physical_width(params) == fmt->bit_depth &&
		snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
}

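/* Find the path variant whose FE and BE formats match the requested hw_params. */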
static struct avs_tplg_path *
avs_path_find_variant(struct avs_dev *adev,
		      struct avs_tplg_path_template *template,
		      struct snd_pcm_hw_params *fe_params,
		      struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;

	list_for_each_entry(variant, &template->path_list, node) {
		dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
			variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
			variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
		dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
			variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
			variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);

		if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
		    variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
			return variant;
	}

	return NULL;
}

__maybe_unused
static bool avs_dma_type_is_host(u32 dma_type)
{
	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
	       dma_type == AVS_DMA_HDA_HOST_INPUT;
}

__maybe_unused
static bool avs_dma_type_is_link(u32 dma_type)
{
	return !avs_dma_type_is_host(dma_type);
}

__maybe_unused
static bool avs_dma_type_is_output(u32 dma_type)
{
	return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
	       dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
	       dma_type == AVS_DMA_I2S_LINK_OUTPUT;
}

__maybe_unused
static bool avs_dma_type_is_input(u32 dma_type)
{
	return !avs_dma_type_is_output(dma_type);
}

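/*
 * Copier modules wrap DMA gateways. Assemble their configuration from
 * topology data and, for I2S and DMIC links, append the matching endpoint
 * blob found in NHLT.
 */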
static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_copier_cfg *cfg;
	struct acpi_nhlt_format_config *ep_blob;
	struct acpi_nhlt_endpoint *ep;
	union avs_connector_node_id node_id = {0};
	size_t cfg_size, data_size;
	void *data = NULL;
	u32 dma_type;
	int ret;

	data_size = sizeof(cfg->gtw_cfg.config);
	dma_type = t->cfg_ext->copier.dma_type;
	node_id.dma_type = dma_type;

	switch (dma_type) {
	struct avs_audio_format *fmt;
	int direction;

	case AVS_DMA_I2S_LINK_OUTPUT:
	case AVS_DMA_I2S_LINK_INPUT:
		if (avs_dma_type_is_input(dma_type))
			direction = SNDRV_PCM_STREAM_CAPTURE;
		else
			direction = SNDRV_PCM_STREAM_PLAYBACK;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else if (direction == SNDRV_PCM_STREAM_CAPTURE)
			fmt = t->in_fmt;
		else
			fmt = t->cfg_ext->copier.out_fmt;

		ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_SSP,
					     ACPI_NHLT_DEVICETYPE_CODEC, direction,
					     t->cfg_ext->copier.vindex.i2s.instance);
		ep_blob = acpi_nhlt_endpoint_find_fmtcfg(ep, fmt->num_channels, fmt->sampling_freq,
							 fmt->valid_bit_depth, fmt->bit_depth);
		if (!ep_blob) {
			dev_err(adev->dev, "no I2S ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->config.capabilities;
		data_size = ep_blob->config.capabilities_size;
		/* I2S gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_DMIC_LINK_INPUT:
		direction = SNDRV_PCM_STREAM_CAPTURE;

		if (t->cfg_ext->copier.blob_fmt)
			fmt = t->cfg_ext->copier.blob_fmt;
		else
			fmt = t->in_fmt;

		ep = acpi_nhlt_find_endpoint(ACPI_NHLT_LINKTYPE_PDM, -1, direction, 0);
		ep_blob = acpi_nhlt_endpoint_find_fmtcfg(ep, fmt->num_channels, fmt->sampling_freq,
							 fmt->valid_bit_depth, fmt->bit_depth);
		if (!ep_blob) {
			dev_err(adev->dev, "no DMIC ep_blob found\n");
			return -ENOENT;
		}

		data = ep_blob->config.capabilities;
		data_size = ep_blob->config.capabilities_size;
		/* DMIC gateway's vindex is statically assigned in topology */
		node_id.vindex = t->cfg_ext->copier.vindex.val;

		break;

	case AVS_DMA_HDA_HOST_OUTPUT:
	case AVS_DMA_HDA_HOST_INPUT:
		/* HOST gateway's vindex is dynamically assigned with DMA id */
		node_id.vindex = mod->owner->owner->dma_id;
		break;

	case AVS_DMA_HDA_LINK_OUTPUT:
	case AVS_DMA_HDA_LINK_INPUT:
		node_id.vindex = t->cfg_ext->copier.vindex.val |
				 mod->owner->owner->dma_id;
		break;

	case INVALID_OBJECT_ID:
	default:
		node_id = INVALID_NODE_ID;
		break;
	}

	cfg_size = offsetof(struct avs_copier_cfg, gtw_cfg.config) + data_size;
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
	cfg->feature_mask = t->cfg_ext->copier.feature_mask;
	cfg->gtw_cfg.node_id = node_id;
	cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
	/* config_length in DWORDs */
	cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
	if (data)
		memcpy(&cfg->gtw_cfg.config.blob, data, data_size);

	mod->gtw_attrs = cfg->gtw_cfg.config.attrs;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	return ret;
}

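/* Look up the kcontrol data tied to this module via the owning path widget. */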
static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_path_template *path_tmpl;
	struct snd_soc_dapm_widget *w;
	int i;

	path_tmpl = t->owner->owner->owner;
	w = path_tmpl->w;

	for (i = 0; i < w->num_kcontrols; i++) {
		struct avs_control_data *ctl_data;
		struct soc_mixer_control *mc;

		mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
		ctl_data = (struct avs_control_data *)mc->dobj.private;
		if (ctl_data->id == t->ctl_id)
			return ctl_data;
	}

	return NULL;
}

static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_control_data *ctl_data;
	struct avs_peakvol_cfg *cfg;
	int volume = S32_MAX;
	size_t cfg_size;
	int ret;

	ctl_data = avs_get_module_control(mod);
	if (ctl_data)
		volume = ctl_data->volume;

	/* As 2+ channels controls are unsupported, have a single block for all channels. */
	cfg_size = struct_size(cfg, vols, 1);
	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->vols[0].target_volume = volume;
	cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
	cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE;
	cfg->vols[0].curve_duration = 0;

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
				  t->domain, cfg, cfg_size, &mod->instance_id);

	return ret;
}

static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_updown_mixer_cfg cfg;
	int i;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
	cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
	for (i = 0; i < AVS_CHANNELS_MAX; i++)
		cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
	cfg.channel_map = t->cfg_ext->updown_mix.channel_map;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_src_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->src.out_freq;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_asrc_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_freq = t->cfg_ext->asrc.out_freq;
	cfg.mode = t->cfg_ext->asrc.mode;
	cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_aec_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_mux_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
	cfg.out_fmt = *t->cfg_ext->mux.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_wov_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_micsel_cfg cfg;

	cfg.base.cpc = t->cfg_base->cpc;
	cfg.base.ibs = t->cfg_base->ibs;
	cfg.base.obs = t->cfg_base->obs;
	cfg.base.is_pages = t->cfg_base->is_pages;
	cfg.base.audio_fmt = *t->in_fmt;
	cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_modcfg_base cfg;

	cfg.cpc = t->cfg_base->cpc;
	cfg.ibs = t->cfg_base->ibs;
	cfg.obs = t->cfg_base->obs;
	cfg.is_pages = t->cfg_base->is_pages;
	cfg.audio_fmt = *t->in_fmt;

	return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				   t->core_id, t->domain, &cfg, sizeof(cfg),
				   &mod->instance_id);
}

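/*
 * Fallback for module types with no dedicated constructor: send the base
 * config extended with the per-pin formats defined in topology.
 */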
static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_tplg_module *t = mod->template;
	struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
	struct avs_modcfg_ext *cfg;
	size_t cfg_size, num_pins;
	int ret, i;

	num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
	cfg_size = struct_size(cfg, pin_fmts, num_pins);

	if (cfg_size > AVS_MAILBOX_SIZE)
		return -EINVAL;

	cfg = adev->modcfg_buf;
	memset(cfg, 0, cfg_size);
	cfg->base.cpc = t->cfg_base->cpc;
	cfg->base.ibs = t->cfg_base->ibs;
	cfg->base.obs = t->cfg_base->obs;
	cfg->base.is_pages = t->cfg_base->is_pages;
	cfg->base.audio_fmt = *t->in_fmt;
	cfg->num_input_pins = tcfg->generic.num_input_pins;
	cfg->num_output_pins = tcfg->generic.num_output_pins;

	/* configure pin formats */
	for (i = 0; i < num_pins; i++) {
		struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
		struct avs_pin_format *pin = &cfg->pin_fmts[i];

		pin->pin_index = tpin->pin_index;
		pin->iobs = tpin->iobs;
		pin->audio_fmt = *tpin->fmt;
	}

	ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
				  t->core_id, t->domain, cfg, cfg_size,
				  &mod->instance_id);
	return ret;
}

static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	dev_err(adev->dev, "Probe module can't be instantiated by topology");
	return -EINVAL;
}

struct avs_module_create {
	guid_t *guid;
	int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
};

static struct avs_module_create avs_module_create[] = {
	{ &AVS_MIXIN_MOD_UUID, avs_modbase_create },
	{ &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
	{ &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
	{ &AVS_COPIER_MOD_UUID, avs_copier_create },
	{ &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
	{ &AVS_GAIN_MOD_UUID, avs_peakvol_create },
	{ &AVS_MICSEL_MOD_UUID, avs_micsel_create },
	{ &AVS_MUX_MOD_UUID, avs_mux_create },
	{ &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
	{ &AVS_SRCINTC_MOD_UUID, avs_src_create },
	{ &AVS_AEC_MOD_UUID, avs_aec_create },
	{ &AVS_ASRC_MOD_UUID, avs_asrc_create },
	{ &AVS_INTELWOV_MOD_UUID, avs_wov_create },
	{ &AVS_PROBE_MOD_UUID, avs_probe_create },
};

static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
{
	const guid_t *type = &mod->template->cfg_ext->type;

	for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
		if (guid_equal(type, avs_module_create[i].guid))
			return avs_module_create[i].create(adev, mod);

	return avs_modext_create(adev, mod);
}

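/* Deliver topology-provided initial parameters to a freshly created module. */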
static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
{
	struct avs_soc_component *acomp;

	acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);

	u32 num_ids = mod->template->num_config_ids;
	u32 *ids = mod->template->config_ids;

	for (int i = 0; i < num_ids; i++) {
		struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
		size_t len = config->length;
		void *data = config->data;
		u32 param = config->param;
		int ret;

		ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
					       param, data, len);
		if (ret) {
			dev_err(adev->dev, "send initial module config failed: %d\n", ret);
			return AVS_IPC_RET(ret);
		}
	}

	return 0;
}

static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
{
	kfree(mod);
}

static struct avs_path_module *
avs_path_module_create(struct avs_dev *adev,
		       struct avs_path_pipeline *owner,
		       struct avs_tplg_module *template)
{
	struct avs_path_module *mod;
	int module_id, ret;

	module_id = avs_get_module_id(adev, &template->cfg_ext->type);
	if (module_id < 0)
		return ERR_PTR(module_id);

	mod = kzalloc(sizeof(*mod), GFP_KERNEL);
	if (!mod)
		return ERR_PTR(-ENOMEM);

	mod->template = template;
	mod->module_id = module_id;
	mod->owner = owner;
	INIT_LIST_HEAD(&mod->node);

	ret = avs_path_module_type_create(adev, mod);
	if (ret) {
		dev_err(adev->dev, "module-type create failed: %d\n", ret);
		kfree(mod);
		return ERR_PTR(ret);
	}

	ret = avs_path_module_send_init_configs(adev, mod);
	if (ret) {
		kfree(mod);
		return ERR_PTR(ret);
	}

	return mod;
}

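/* Resolve the source/sink module pair described by a topology binding. */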
static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
{
	struct avs_path_module *this_mod, *target_mod;
	struct avs_path_pipeline *target_ppl;
	struct avs_path *target_path;
	struct avs_tplg_binding *t;

	t = binding->template;
	this_mod = avs_path_find_module(binding->owner,
					t->mod_id);
	if (!this_mod) {
		dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
		return -EINVAL;
	}

	/* update with target_tplg_name too */
	target_path = avs_path_find_path(adev, t->target_tplg_name,
					 t->target_path_tmpl_id);
	if (!target_path) {
		dev_err(adev->dev, "target path %s:%d not found\n",
			t->target_tplg_name, t->target_path_tmpl_id);
		return -EINVAL;
	}

	target_ppl = avs_path_find_pipeline(target_path,
					    t->target_ppl_id);
	if (!target_ppl) {
		dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
		return -EINVAL;
	}

	target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
	if (!target_mod) {
		dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
		return -EINVAL;
	}

	if (t->is_sink) {
		binding->sink = this_mod;
		binding->sink_pin = t->mod_pin;
		binding->source = target_mod;
		binding->source_pin = t->target_mod_pin;
	} else {
		binding->sink = target_mod;
		binding->sink_pin = t->target_mod_pin;
		binding->source = this_mod;
		binding->source_pin = t->mod_pin;
	}

	return 0;
}

static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
{
	kfree(binding);
}

static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
							struct avs_path_pipeline *owner,
							struct avs_tplg_binding *t)
{
	struct avs_path_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->template = t;
	binding->owner = owner;
	INIT_LIST_HEAD(&binding->node);

	return binding;
}

static int avs_path_pipeline_arm(struct avs_dev *adev,
				 struct avs_path_pipeline *ppl)
{
	struct avs_path_module *mod;

	list_for_each_entry(mod, &ppl->mod_list, node) {
		struct avs_path_module *source, *sink;
		int ret;

		/*
		 * Only one module (so it's implicitly last) or it is the last
		 * one, either way we don't have next module to bind it to.
		 */
		if (mod == list_last_entry(&ppl->mod_list,
					   struct avs_path_module, node))
			break;

		/* bind current module to next module on list */
		source = mod;
		sink = list_next_entry(mod, node);

		ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
				   sink->module_id, sink->instance_id, 0, 0);
		if (ret)
			return AVS_IPC_RET(ret);
	}

	return 0;
}

static void avs_path_pipeline_free(struct avs_dev *adev,
				   struct avs_path_pipeline *ppl)
{
	struct avs_path_binding *binding, *bsave;
	struct avs_path_module *mod, *save;

	list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
		list_del(&binding->node);
		avs_path_binding_free(adev, binding);
	}

	avs_dsp_delete_pipeline(adev, ppl->instance_id);

	/* Unload resources occupied by owned modules */
	list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
		avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
				      mod->owner->instance_id,
				      mod->template->core_id);
		avs_path_module_free(adev, mod);
	}

	list_del(&ppl->node);
	kfree(ppl);
}

static struct avs_path_pipeline *
avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
			 struct avs_tplg_pipeline *template)
{
	struct avs_path_pipeline *ppl;
	struct avs_tplg_pplcfg *cfg = template->cfg;
	struct avs_tplg_module *tmod;
	int ret, i;

	ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
	if (!ppl)
		return ERR_PTR(-ENOMEM);

	ppl->template = template;
	ppl->owner = owner;
	INIT_LIST_HEAD(&ppl->binding_list);
	INIT_LIST_HEAD(&ppl->mod_list);
	INIT_LIST_HEAD(&ppl->node);

	ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
				      cfg->lp, cfg->attributes,
				      &ppl->instance_id);
	if (ret) {
		dev_err(adev->dev, "error creating pipeline %d\n", ret);
		kfree(ppl);
		return ERR_PTR(ret);
	}

	list_for_each_entry(tmod, &template->mod_list, node) {
		struct avs_path_module *mod;

		mod = avs_path_module_create(adev, ppl, tmod);
		if (IS_ERR(mod)) {
			ret = PTR_ERR(mod);
			dev_err(adev->dev, "error creating module %d\n", ret);
			goto init_err;
		}

		list_add_tail(&mod->node, &ppl->mod_list);
	}

	for (i = 0; i < template->num_bindings; i++) {
		struct avs_path_binding *binding;

		binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
		if (IS_ERR(binding)) {
			ret = PTR_ERR(binding);
			dev_err(adev->dev, "error creating binding %d\n", ret);
			goto init_err;
		}

		list_add_tail(&binding->node, &ppl->binding_list);
	}

	return ppl;

init_err:
	avs_path_pipeline_free(adev, ppl);
	return ERR_PTR(ret);
}

static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}

static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all ppl bindings before binding internal modules
		 * as it costs no IPCs which isn't true for the latter.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}

static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}

struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize path and its components creation. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}

static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * only copier modules about to be bound
	 * to output pin other than 0 need preparation
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id, sink->module_id,
					     sink->instance_id, binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}

int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;
	return 0;
}
1// SPDX-License-Identifier: GPL-2.0-only
2//
3// Copyright(c) 2021 Intel Corporation. All rights reserved.
4//
5// Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6// Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7//
8
9#include <sound/intel-nhlt.h>
10#include <sound/pcm_params.h>
11#include <sound/soc.h>
12#include "avs.h"
13#include "control.h"
14#include "path.h"
15#include "topology.h"
16
17/* Must be called with adev->comp_list_mutex held. */
18static struct avs_tplg *
19avs_path_find_tplg(struct avs_dev *adev, const char *name)
20{
21 struct avs_soc_component *acomp;
22
23 list_for_each_entry(acomp, &adev->comp_list, node)
24 if (!strcmp(acomp->tplg->name, name))
25 return acomp->tplg;
26 return NULL;
27}
28
29static struct avs_path_module *
30avs_path_find_module(struct avs_path_pipeline *ppl, u32 template_id)
31{
32 struct avs_path_module *mod;
33
34 list_for_each_entry(mod, &ppl->mod_list, node)
35 if (mod->template->id == template_id)
36 return mod;
37 return NULL;
38}
39
40static struct avs_path_pipeline *
41avs_path_find_pipeline(struct avs_path *path, u32 template_id)
42{
43 struct avs_path_pipeline *ppl;
44
45 list_for_each_entry(ppl, &path->ppl_list, node)
46 if (ppl->template->id == template_id)
47 return ppl;
48 return NULL;
49}
50
51static struct avs_path *
52avs_path_find_path(struct avs_dev *adev, const char *name, u32 template_id)
53{
54 struct avs_tplg_path_template *pos, *template = NULL;
55 struct avs_tplg *tplg;
56 struct avs_path *path;
57
58 tplg = avs_path_find_tplg(adev, name);
59 if (!tplg)
60 return NULL;
61
62 list_for_each_entry(pos, &tplg->path_tmpl_list, node) {
63 if (pos->id == template_id) {
64 template = pos;
65 break;
66 }
67 }
68 if (!template)
69 return NULL;
70
71 spin_lock(&adev->path_list_lock);
72 /* Only one variant of given path template may be instantiated at a time. */
73 list_for_each_entry(path, &adev->path_list, node) {
74 if (path->template->owner == template) {
75 spin_unlock(&adev->path_list_lock);
76 return path;
77 }
78 }
79
80 spin_unlock(&adev->path_list_lock);
81 return NULL;
82}
83
84static bool avs_test_hw_params(struct snd_pcm_hw_params *params,
85 struct avs_audio_format *fmt)
86{
87 return (params_rate(params) == fmt->sampling_freq &&
88 params_channels(params) == fmt->num_channels &&
89 params_physical_width(params) == fmt->bit_depth &&
90 snd_pcm_hw_params_bits(params) == fmt->valid_bit_depth);
91}
92
93static struct avs_tplg_path *
94avs_path_find_variant(struct avs_dev *adev,
95 struct avs_tplg_path_template *template,
96 struct snd_pcm_hw_params *fe_params,
97 struct snd_pcm_hw_params *be_params)
98{
99 struct avs_tplg_path *variant;
100
101 list_for_each_entry(variant, &template->path_list, node) {
102 dev_dbg(adev->dev, "check FE rate %d chn %d vbd %d bd %d\n",
103 variant->fe_fmt->sampling_freq, variant->fe_fmt->num_channels,
104 variant->fe_fmt->valid_bit_depth, variant->fe_fmt->bit_depth);
105 dev_dbg(adev->dev, "check BE rate %d chn %d vbd %d bd %d\n",
106 variant->be_fmt->sampling_freq, variant->be_fmt->num_channels,
107 variant->be_fmt->valid_bit_depth, variant->be_fmt->bit_depth);
108
109 if (variant->fe_fmt && avs_test_hw_params(fe_params, variant->fe_fmt) &&
110 variant->be_fmt && avs_test_hw_params(be_params, variant->be_fmt))
111 return variant;
112 }
113
114 return NULL;
115}
116
117__maybe_unused
118static bool avs_dma_type_is_host(u32 dma_type)
119{
120 return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
121 dma_type == AVS_DMA_HDA_HOST_INPUT;
122}
123
124__maybe_unused
125static bool avs_dma_type_is_link(u32 dma_type)
126{
127 return !avs_dma_type_is_host(dma_type);
128}
129
130__maybe_unused
131static bool avs_dma_type_is_output(u32 dma_type)
132{
133 return dma_type == AVS_DMA_HDA_HOST_OUTPUT ||
134 dma_type == AVS_DMA_HDA_LINK_OUTPUT ||
135 dma_type == AVS_DMA_I2S_LINK_OUTPUT;
136}
137
138__maybe_unused
139static bool avs_dma_type_is_input(u32 dma_type)
140{
141 return !avs_dma_type_is_output(dma_type);
142}
143
144static int avs_copier_create(struct avs_dev *adev, struct avs_path_module *mod)
145{
146 struct nhlt_acpi_table *nhlt = adev->nhlt;
147 struct avs_tplg_module *t = mod->template;
148 struct avs_copier_cfg *cfg;
149 struct nhlt_specific_cfg *ep_blob;
150 union avs_connector_node_id node_id = {0};
151 size_t cfg_size, data_size = 0;
152 void *data = NULL;
153 u32 dma_type;
154 int ret;
155
156 dma_type = t->cfg_ext->copier.dma_type;
157 node_id.dma_type = dma_type;
158
159 switch (dma_type) {
160 struct avs_audio_format *fmt;
161 int direction;
162
163 case AVS_DMA_I2S_LINK_OUTPUT:
164 case AVS_DMA_I2S_LINK_INPUT:
165 if (avs_dma_type_is_input(dma_type))
166 direction = SNDRV_PCM_STREAM_CAPTURE;
167 else
168 direction = SNDRV_PCM_STREAM_PLAYBACK;
169
170 if (t->cfg_ext->copier.blob_fmt)
171 fmt = t->cfg_ext->copier.blob_fmt;
172 else if (direction == SNDRV_PCM_STREAM_CAPTURE)
173 fmt = t->in_fmt;
174 else
175 fmt = t->cfg_ext->copier.out_fmt;
176
177 ep_blob = intel_nhlt_get_endpoint_blob(adev->dev,
178 nhlt, t->cfg_ext->copier.vindex.i2s.instance,
179 NHLT_LINK_SSP, fmt->valid_bit_depth, fmt->bit_depth,
180 fmt->num_channels, fmt->sampling_freq, direction,
181 NHLT_DEVICE_I2S);
182 if (!ep_blob) {
183 dev_err(adev->dev, "no I2S ep_blob found\n");
184 return -ENOENT;
185 }
186
187 data = ep_blob->caps;
188 data_size = ep_blob->size;
189 /* I2S gateway's vindex is statically assigned in topology */
190 node_id.vindex = t->cfg_ext->copier.vindex.val;
191
192 break;
193
194 case AVS_DMA_DMIC_LINK_INPUT:
195 direction = SNDRV_PCM_STREAM_CAPTURE;
196
197 if (t->cfg_ext->copier.blob_fmt)
198 fmt = t->cfg_ext->copier.blob_fmt;
199 else
200 fmt = t->in_fmt;
201
202 ep_blob = intel_nhlt_get_endpoint_blob(adev->dev, nhlt, 0,
203 NHLT_LINK_DMIC, fmt->valid_bit_depth,
204 fmt->bit_depth, fmt->num_channels,
205 fmt->sampling_freq, direction, NHLT_DEVICE_DMIC);
206 if (!ep_blob) {
207 dev_err(adev->dev, "no DMIC ep_blob found\n");
208 return -ENOENT;
209 }
210
211 data = ep_blob->caps;
212 data_size = ep_blob->size;
213 /* DMIC gateway's vindex is statically assigned in topology */
214 node_id.vindex = t->cfg_ext->copier.vindex.val;
215
216 break;
217
218 case AVS_DMA_HDA_HOST_OUTPUT:
219 case AVS_DMA_HDA_HOST_INPUT:
220 /* HOST gateway's vindex is dynamically assigned with DMA id */
221 node_id.vindex = mod->owner->owner->dma_id;
222 break;
223
224 case AVS_DMA_HDA_LINK_OUTPUT:
225 case AVS_DMA_HDA_LINK_INPUT:
226 node_id.vindex = t->cfg_ext->copier.vindex.val |
227 mod->owner->owner->dma_id;
228 break;
229
230 case INVALID_OBJECT_ID:
231 default:
232 node_id = INVALID_NODE_ID;
233 break;
234 }
235
236 cfg_size = sizeof(*cfg) + data_size;
237 /* Every config-BLOB contains gateway attributes. */
238 if (data_size)
239 cfg_size -= sizeof(cfg->gtw_cfg.config.attrs);
240 if (cfg_size > AVS_MAILBOX_SIZE)
241 return -EINVAL;
242
243 cfg = adev->modcfg_buf;
244 memset(cfg, 0, cfg_size);
245 cfg->base.cpc = t->cfg_base->cpc;
246 cfg->base.ibs = t->cfg_base->ibs;
247 cfg->base.obs = t->cfg_base->obs;
248 cfg->base.is_pages = t->cfg_base->is_pages;
249 cfg->base.audio_fmt = *t->in_fmt;
250 cfg->out_fmt = *t->cfg_ext->copier.out_fmt;
251 cfg->feature_mask = t->cfg_ext->copier.feature_mask;
252 cfg->gtw_cfg.node_id = node_id;
253 cfg->gtw_cfg.dma_buffer_size = t->cfg_ext->copier.dma_buffer_size;
254 /* config_length in DWORDs */
255 cfg->gtw_cfg.config_length = DIV_ROUND_UP(data_size, 4);
256 if (data)
257 memcpy(&cfg->gtw_cfg.config, data, data_size);
258
259 mod->gtw_attrs = cfg->gtw_cfg.config.attrs;
260
261 ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
262 t->core_id, t->domain, cfg, cfg_size,
263 &mod->instance_id);
264 return ret;
265}
266
267static struct avs_control_data *avs_get_module_control(struct avs_path_module *mod)
268{
269 struct avs_tplg_module *t = mod->template;
270 struct avs_tplg_path_template *path_tmpl;
271 struct snd_soc_dapm_widget *w;
272 int i;
273
274 path_tmpl = t->owner->owner->owner;
275 w = path_tmpl->w;
276
277 for (i = 0; i < w->num_kcontrols; i++) {
278 struct avs_control_data *ctl_data;
279 struct soc_mixer_control *mc;
280
281 mc = (struct soc_mixer_control *)w->kcontrols[i]->private_value;
282 ctl_data = (struct avs_control_data *)mc->dobj.private;
283 if (ctl_data->id == t->ctl_id)
284 return ctl_data;
285 }
286
287 return NULL;
288}
289
290static int avs_peakvol_create(struct avs_dev *adev, struct avs_path_module *mod)
291{
292 struct avs_tplg_module *t = mod->template;
293 struct avs_control_data *ctl_data;
294 struct avs_peakvol_cfg *cfg;
295 int volume = S32_MAX;
296 size_t cfg_size;
297 int ret;
298
299 ctl_data = avs_get_module_control(mod);
300 if (ctl_data)
301 volume = ctl_data->volume;
302
303 /* As 2+ channels controls are unsupported, have a single block for all channels. */
304 cfg_size = struct_size(cfg, vols, 1);
305 if (cfg_size > AVS_MAILBOX_SIZE)
306 return -EINVAL;
307
308 cfg = adev->modcfg_buf;
309 memset(cfg, 0, cfg_size);
310 cfg->base.cpc = t->cfg_base->cpc;
311 cfg->base.ibs = t->cfg_base->ibs;
312 cfg->base.obs = t->cfg_base->obs;
313 cfg->base.is_pages = t->cfg_base->is_pages;
314 cfg->base.audio_fmt = *t->in_fmt;
315 cfg->vols[0].target_volume = volume;
316 cfg->vols[0].channel_id = AVS_ALL_CHANNELS_MASK;
317 cfg->vols[0].curve_type = AVS_AUDIO_CURVE_NONE;
318 cfg->vols[0].curve_duration = 0;
319
320 ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id, t->core_id,
321 t->domain, cfg, cfg_size, &mod->instance_id);
322
323 return ret;
324}
325
326static int avs_updown_mix_create(struct avs_dev *adev, struct avs_path_module *mod)
327{
328 struct avs_tplg_module *t = mod->template;
329 struct avs_updown_mixer_cfg cfg;
330 int i;
331
332 cfg.base.cpc = t->cfg_base->cpc;
333 cfg.base.ibs = t->cfg_base->ibs;
334 cfg.base.obs = t->cfg_base->obs;
335 cfg.base.is_pages = t->cfg_base->is_pages;
336 cfg.base.audio_fmt = *t->in_fmt;
337 cfg.out_channel_config = t->cfg_ext->updown_mix.out_channel_config;
338 cfg.coefficients_select = t->cfg_ext->updown_mix.coefficients_select;
339 for (i = 0; i < AVS_CHANNELS_MAX; i++)
340 cfg.coefficients[i] = t->cfg_ext->updown_mix.coefficients[i];
341 cfg.channel_map = t->cfg_ext->updown_mix.channel_map;
342
343 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
344 t->core_id, t->domain, &cfg, sizeof(cfg),
345 &mod->instance_id);
346}
347
348static int avs_src_create(struct avs_dev *adev, struct avs_path_module *mod)
349{
350 struct avs_tplg_module *t = mod->template;
351 struct avs_src_cfg cfg;
352
353 cfg.base.cpc = t->cfg_base->cpc;
354 cfg.base.ibs = t->cfg_base->ibs;
355 cfg.base.obs = t->cfg_base->obs;
356 cfg.base.is_pages = t->cfg_base->is_pages;
357 cfg.base.audio_fmt = *t->in_fmt;
358 cfg.out_freq = t->cfg_ext->src.out_freq;
359
360 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
361 t->core_id, t->domain, &cfg, sizeof(cfg),
362 &mod->instance_id);
363}
364
365static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
366{
367 struct avs_tplg_module *t = mod->template;
368 struct avs_asrc_cfg cfg;
369
370 memset(&cfg, 0, sizeof(cfg));
371 cfg.base.cpc = t->cfg_base->cpc;
372 cfg.base.ibs = t->cfg_base->ibs;
373 cfg.base.obs = t->cfg_base->obs;
374 cfg.base.is_pages = t->cfg_base->is_pages;
375 cfg.base.audio_fmt = *t->in_fmt;
376 cfg.out_freq = t->cfg_ext->asrc.out_freq;
377 cfg.mode = t->cfg_ext->asrc.mode;
378 cfg.disable_jitter_buffer = t->cfg_ext->asrc.disable_jitter_buffer;
379
380 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
381 t->core_id, t->domain, &cfg, sizeof(cfg),
382 &mod->instance_id);
383}
384
385static int avs_aec_create(struct avs_dev *adev, struct avs_path_module *mod)
386{
387 struct avs_tplg_module *t = mod->template;
388 struct avs_aec_cfg cfg;
389
390 cfg.base.cpc = t->cfg_base->cpc;
391 cfg.base.ibs = t->cfg_base->ibs;
392 cfg.base.obs = t->cfg_base->obs;
393 cfg.base.is_pages = t->cfg_base->is_pages;
394 cfg.base.audio_fmt = *t->in_fmt;
395 cfg.ref_fmt = *t->cfg_ext->aec.ref_fmt;
396 cfg.out_fmt = *t->cfg_ext->aec.out_fmt;
397 cfg.cpc_lp_mode = t->cfg_ext->aec.cpc_lp_mode;
398
399 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
400 t->core_id, t->domain, &cfg, sizeof(cfg),
401 &mod->instance_id);
402}
403
404static int avs_mux_create(struct avs_dev *adev, struct avs_path_module *mod)
405{
406 struct avs_tplg_module *t = mod->template;
407 struct avs_mux_cfg cfg;
408
409 cfg.base.cpc = t->cfg_base->cpc;
410 cfg.base.ibs = t->cfg_base->ibs;
411 cfg.base.obs = t->cfg_base->obs;
412 cfg.base.is_pages = t->cfg_base->is_pages;
413 cfg.base.audio_fmt = *t->in_fmt;
414 cfg.ref_fmt = *t->cfg_ext->mux.ref_fmt;
415 cfg.out_fmt = *t->cfg_ext->mux.out_fmt;
416
417 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
418 t->core_id, t->domain, &cfg, sizeof(cfg),
419 &mod->instance_id);
420}
421
422static int avs_wov_create(struct avs_dev *adev, struct avs_path_module *mod)
423{
424 struct avs_tplg_module *t = mod->template;
425 struct avs_wov_cfg cfg;
426
427 cfg.base.cpc = t->cfg_base->cpc;
428 cfg.base.ibs = t->cfg_base->ibs;
429 cfg.base.obs = t->cfg_base->obs;
430 cfg.base.is_pages = t->cfg_base->is_pages;
431 cfg.base.audio_fmt = *t->in_fmt;
432 cfg.cpc_lp_mode = t->cfg_ext->wov.cpc_lp_mode;
433
434 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
435 t->core_id, t->domain, &cfg, sizeof(cfg),
436 &mod->instance_id);
437}
438
439static int avs_micsel_create(struct avs_dev *adev, struct avs_path_module *mod)
440{
441 struct avs_tplg_module *t = mod->template;
442 struct avs_micsel_cfg cfg;
443
444 cfg.base.cpc = t->cfg_base->cpc;
445 cfg.base.ibs = t->cfg_base->ibs;
446 cfg.base.obs = t->cfg_base->obs;
447 cfg.base.is_pages = t->cfg_base->is_pages;
448 cfg.base.audio_fmt = *t->in_fmt;
449 cfg.out_fmt = *t->cfg_ext->micsel.out_fmt;
450
451 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
452 t->core_id, t->domain, &cfg, sizeof(cfg),
453 &mod->instance_id);
454}
455
456static int avs_modbase_create(struct avs_dev *adev, struct avs_path_module *mod)
457{
458 struct avs_tplg_module *t = mod->template;
459 struct avs_modcfg_base cfg;
460
461 cfg.cpc = t->cfg_base->cpc;
462 cfg.ibs = t->cfg_base->ibs;
463 cfg.obs = t->cfg_base->obs;
464 cfg.is_pages = t->cfg_base->is_pages;
465 cfg.audio_fmt = *t->in_fmt;
466
467 return avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
468 t->core_id, t->domain, &cfg, sizeof(cfg),
469 &mod->instance_id);
470}
471
472static int avs_modext_create(struct avs_dev *adev, struct avs_path_module *mod)
473{
474 struct avs_tplg_module *t = mod->template;
475 struct avs_tplg_modcfg_ext *tcfg = t->cfg_ext;
476 struct avs_modcfg_ext *cfg;
477 size_t cfg_size, num_pins;
478 int ret, i;
479
480 num_pins = tcfg->generic.num_input_pins + tcfg->generic.num_output_pins;
481 cfg_size = struct_size(cfg, pin_fmts, num_pins);
482
483 if (cfg_size > AVS_MAILBOX_SIZE)
484 return -EINVAL;
485
486 cfg = adev->modcfg_buf;
487 memset(cfg, 0, cfg_size);
488 cfg->base.cpc = t->cfg_base->cpc;
489 cfg->base.ibs = t->cfg_base->ibs;
490 cfg->base.obs = t->cfg_base->obs;
491 cfg->base.is_pages = t->cfg_base->is_pages;
492 cfg->base.audio_fmt = *t->in_fmt;
493 cfg->num_input_pins = tcfg->generic.num_input_pins;
494 cfg->num_output_pins = tcfg->generic.num_output_pins;
495
496 /* configure pin formats */
497 for (i = 0; i < num_pins; i++) {
498 struct avs_tplg_pin_format *tpin = &tcfg->generic.pin_fmts[i];
499 struct avs_pin_format *pin = &cfg->pin_fmts[i];
500
501 pin->pin_index = tpin->pin_index;
502 pin->iobs = tpin->iobs;
503 pin->audio_fmt = *tpin->fmt;
504 }
505
506 ret = avs_dsp_init_module(adev, mod->module_id, mod->owner->instance_id,
507 t->core_id, t->domain, cfg, cfg_size,
508 &mod->instance_id);
509 return ret;
510}
511
512static int avs_probe_create(struct avs_dev *adev, struct avs_path_module *mod)
513{
514 dev_err(adev->dev, "Probe module can't be instantiated by topology");
515 return -EINVAL;
516}
517
518struct avs_module_create {
519 guid_t *guid;
520 int (*create)(struct avs_dev *adev, struct avs_path_module *mod);
521};
522
523static struct avs_module_create avs_module_create[] = {
524 { &AVS_MIXIN_MOD_UUID, avs_modbase_create },
525 { &AVS_MIXOUT_MOD_UUID, avs_modbase_create },
526 { &AVS_KPBUFF_MOD_UUID, avs_modbase_create },
527 { &AVS_COPIER_MOD_UUID, avs_copier_create },
528 { &AVS_PEAKVOL_MOD_UUID, avs_peakvol_create },
529 { &AVS_GAIN_MOD_UUID, avs_peakvol_create },
530 { &AVS_MICSEL_MOD_UUID, avs_micsel_create },
531 { &AVS_MUX_MOD_UUID, avs_mux_create },
532 { &AVS_UPDWMIX_MOD_UUID, avs_updown_mix_create },
533 { &AVS_SRCINTC_MOD_UUID, avs_src_create },
534 { &AVS_AEC_MOD_UUID, avs_aec_create },
535 { &AVS_ASRC_MOD_UUID, avs_asrc_create },
536 { &AVS_INTELWOV_MOD_UUID, avs_wov_create },
537 { &AVS_PROBE_MOD_UUID, avs_probe_create },
538};
539
540static int avs_path_module_type_create(struct avs_dev *adev, struct avs_path_module *mod)
541{
542 const guid_t *type = &mod->template->cfg_ext->type;
543
544 for (int i = 0; i < ARRAY_SIZE(avs_module_create); i++)
545 if (guid_equal(type, avs_module_create[i].guid))
546 return avs_module_create[i].create(adev, mod);
547
548 return avs_modext_create(adev, mod);
549}
550
551static int avs_path_module_send_init_configs(struct avs_dev *adev, struct avs_path_module *mod)
552{
553 struct avs_soc_component *acomp;
554
555 acomp = to_avs_soc_component(mod->template->owner->owner->owner->owner->comp);
556
557 u32 num_ids = mod->template->num_config_ids;
558 u32 *ids = mod->template->config_ids;
559
560 for (int i = 0; i < num_ids; i++) {
561 struct avs_tplg_init_config *config = &acomp->tplg->init_configs[ids[i]];
562 size_t len = config->length;
563 void *data = config->data;
564 u32 param = config->param;
565 int ret;
566
567 ret = avs_ipc_set_large_config(adev, mod->module_id, mod->instance_id,
568 param, data, len);
569 if (ret) {
570 dev_err(adev->dev, "send initial module config failed: %d\n", ret);
571 return AVS_IPC_RET(ret);
572 }
573 }
574
575 return 0;
576}
577
578static void avs_path_module_free(struct avs_dev *adev, struct avs_path_module *mod)
579{
580 kfree(mod);
581}
582
583static struct avs_path_module *
584avs_path_module_create(struct avs_dev *adev,
585 struct avs_path_pipeline *owner,
586 struct avs_tplg_module *template)
587{
588 struct avs_path_module *mod;
589 int module_id, ret;
590
591 module_id = avs_get_module_id(adev, &template->cfg_ext->type);
592 if (module_id < 0)
593 return ERR_PTR(module_id);
594
595 mod = kzalloc(sizeof(*mod), GFP_KERNEL);
596 if (!mod)
597 return ERR_PTR(-ENOMEM);
598
599 mod->template = template;
600 mod->module_id = module_id;
601 mod->owner = owner;
602 INIT_LIST_HEAD(&mod->node);
603
604 ret = avs_path_module_type_create(adev, mod);
605 if (ret) {
606 dev_err(adev->dev, "module-type create failed: %d\n", ret);
607 kfree(mod);
608 return ERR_PTR(ret);
609 }
610
611 ret = avs_path_module_send_init_configs(adev, mod);
612 if (ret) {
613 kfree(mod);
614 return ERR_PTR(ret);
615 }
616
617 return mod;
618}
619
620static int avs_path_binding_arm(struct avs_dev *adev, struct avs_path_binding *binding)
621{
622 struct avs_path_module *this_mod, *target_mod;
623 struct avs_path_pipeline *target_ppl;
624 struct avs_path *target_path;
625 struct avs_tplg_binding *t;
626
627 t = binding->template;
628 this_mod = avs_path_find_module(binding->owner,
629 t->mod_id);
630 if (!this_mod) {
631 dev_err(adev->dev, "path mod %d not found\n", t->mod_id);
632 return -EINVAL;
633 }
634
635 /* update with target_tplg_name too */
636 target_path = avs_path_find_path(adev, t->target_tplg_name,
637 t->target_path_tmpl_id);
638 if (!target_path) {
639 dev_err(adev->dev, "target path %s:%d not found\n",
640 t->target_tplg_name, t->target_path_tmpl_id);
641 return -EINVAL;
642 }
643
644 target_ppl = avs_path_find_pipeline(target_path,
645 t->target_ppl_id);
646 if (!target_ppl) {
647 dev_err(adev->dev, "target ppl %d not found\n", t->target_ppl_id);
648 return -EINVAL;
649 }
650
651 target_mod = avs_path_find_module(target_ppl, t->target_mod_id);
652 if (!target_mod) {
653 dev_err(adev->dev, "target mod %d not found\n", t->target_mod_id);
654 return -EINVAL;
655 }
656
657 if (t->is_sink) {
658 binding->sink = this_mod;
659 binding->sink_pin = t->mod_pin;
660 binding->source = target_mod;
661 binding->source_pin = t->target_mod_pin;
662 } else {
663 binding->sink = target_mod;
664 binding->sink_pin = t->target_mod_pin;
665 binding->source = this_mod;
666 binding->source_pin = t->mod_pin;
667 }
668
669 return 0;
670}
671
672static void avs_path_binding_free(struct avs_dev *adev, struct avs_path_binding *binding)
673{
674 kfree(binding);
675}
676
677static struct avs_path_binding *avs_path_binding_create(struct avs_dev *adev,
678 struct avs_path_pipeline *owner,
679 struct avs_tplg_binding *t)
680{
681 struct avs_path_binding *binding;
682
683 binding = kzalloc(sizeof(*binding), GFP_KERNEL);
684 if (!binding)
685 return ERR_PTR(-ENOMEM);
686
687 binding->template = t;
688 binding->owner = owner;
689 INIT_LIST_HEAD(&binding->node);
690
691 return binding;
692}
693
694static int avs_path_pipeline_arm(struct avs_dev *adev,
695 struct avs_path_pipeline *ppl)
696{
697 struct avs_path_module *mod;
698
699 list_for_each_entry(mod, &ppl->mod_list, node) {
700 struct avs_path_module *source, *sink;
701 int ret;
702
703 /*
704 * Only one module (so it's implicitly last) or it is the last
705 * one, either way we don't have next module to bind it to.
706 */
707 if (mod == list_last_entry(&ppl->mod_list,
708 struct avs_path_module, node))
709 break;
710
711 /* bind current module to next module on list */
712 source = mod;
713 sink = list_next_entry(mod, node);
714 if (!source || !sink)
715 return -EINVAL;
716
717 ret = avs_ipc_bind(adev, source->module_id, source->instance_id,
718 sink->module_id, sink->instance_id, 0, 0);
719 if (ret)
720 return AVS_IPC_RET(ret);
721 }
722
723 return 0;
724}
725
726static void avs_path_pipeline_free(struct avs_dev *adev,
727 struct avs_path_pipeline *ppl)
728{
729 struct avs_path_binding *binding, *bsave;
730 struct avs_path_module *mod, *save;
731
732 list_for_each_entry_safe(binding, bsave, &ppl->binding_list, node) {
733 list_del(&binding->node);
734 avs_path_binding_free(adev, binding);
735 }
736
737 avs_dsp_delete_pipeline(adev, ppl->instance_id);
738
739 /* Unload resources occupied by owned modules */
740 list_for_each_entry_safe(mod, save, &ppl->mod_list, node) {
741 avs_dsp_delete_module(adev, mod->module_id, mod->instance_id,
742 mod->owner->instance_id,
743 mod->template->core_id);
744 avs_path_module_free(adev, mod);
745 }
746
747 list_del(&ppl->node);
748 kfree(ppl);
749}
750
751static struct avs_path_pipeline *
752avs_path_pipeline_create(struct avs_dev *adev, struct avs_path *owner,
753 struct avs_tplg_pipeline *template)
754{
755 struct avs_path_pipeline *ppl;
756 struct avs_tplg_pplcfg *cfg = template->cfg;
757 struct avs_tplg_module *tmod;
758 int ret, i;
759
760 ppl = kzalloc(sizeof(*ppl), GFP_KERNEL);
761 if (!ppl)
762 return ERR_PTR(-ENOMEM);
763
764 ppl->template = template;
765 ppl->owner = owner;
766 INIT_LIST_HEAD(&ppl->binding_list);
767 INIT_LIST_HEAD(&ppl->mod_list);
768 INIT_LIST_HEAD(&ppl->node);
769
770 ret = avs_dsp_create_pipeline(adev, cfg->req_size, cfg->priority,
771 cfg->lp, cfg->attributes,
772 &ppl->instance_id);
773 if (ret) {
774 dev_err(adev->dev, "error creating pipeline %d\n", ret);
775 kfree(ppl);
776 return ERR_PTR(ret);
777 }
778
779 list_for_each_entry(tmod, &template->mod_list, node) {
780 struct avs_path_module *mod;
781
782 mod = avs_path_module_create(adev, ppl, tmod);
783 if (IS_ERR(mod)) {
784 ret = PTR_ERR(mod);
785 dev_err(adev->dev, "error creating module %d\n", ret);
786 goto init_err;
787 }
788
789 list_add_tail(&mod->node, &ppl->mod_list);
790 }
791
792 for (i = 0; i < template->num_bindings; i++) {
793 struct avs_path_binding *binding;
794
795 binding = avs_path_binding_create(adev, ppl, template->bindings[i]);
796 if (IS_ERR(binding)) {
797 ret = PTR_ERR(binding);
798 dev_err(adev->dev, "error creating binding %d\n", ret);
799 goto init_err;
800 }
801
802 list_add_tail(&binding->node, &ppl->binding_list);
803 }
804
805 return ppl;
806
807init_err:
808 avs_path_pipeline_free(adev, ppl);
809 return ERR_PTR(ret);
810}
811
static int avs_path_init(struct avs_dev *adev, struct avs_path *path,
			 struct avs_tplg_path *template, u32 dma_id)
{
	struct avs_tplg_pipeline *tppl;

	path->owner = adev;
	path->template = template;
	path->dma_id = dma_id;
	INIT_LIST_HEAD(&path->ppl_list);
	INIT_LIST_HEAD(&path->node);

	/* create all the pipelines */
	list_for_each_entry(tppl, &template->ppl_list, node) {
		struct avs_path_pipeline *ppl;

		ppl = avs_path_pipeline_create(adev, path, tppl);
		if (IS_ERR(ppl))
			return PTR_ERR(ppl);

		list_add_tail(&ppl->node, &path->ppl_list);
	}

	spin_lock(&adev->path_list_lock);
	list_add_tail(&path->node, &adev->path_list);
	spin_unlock(&adev->path_list_lock);

	return 0;
}

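/*
 * Arm a path: arm all bindings of each pipeline first, then bind the
 * modules within the pipeline, which is the IPC-heavy step.
 */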
static int avs_path_arm(struct avs_dev *adev, struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_path_binding *binding;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		/*
		 * Arm all pipeline bindings before binding internal modules;
		 * the former costs no IPCs, unlike the latter.
		 */
		list_for_each_entry(binding, &ppl->binding_list, node) {
			ret = avs_path_binding_arm(adev, binding);
			if (ret < 0)
				return ret;
		}

		ret = avs_path_pipeline_arm(adev, ppl);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void avs_path_free_unlocked(struct avs_path *path)
{
	struct avs_path_pipeline *ppl, *save;

	spin_lock(&path->owner->path_list_lock);
	list_del(&path->node);
	spin_unlock(&path->owner->path_list_lock);

	list_for_each_entry_safe(ppl, save, &path->ppl_list, node)
		avs_path_pipeline_free(path->owner, ppl);

	kfree(path);
}

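/*
 * Allocate, initialize and arm a path instance for the given topology
 * variant. Locking is left to the caller, see avs_path_create().
 */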
static struct avs_path *avs_path_create_unlocked(struct avs_dev *adev, u32 dma_id,
						 struct avs_tplg_path *template)
{
	struct avs_path *path;
	int ret;

	path = kzalloc(sizeof(*path), GFP_KERNEL);
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = avs_path_init(adev, path, template, dma_id);
	if (ret < 0)
		goto err;

	ret = avs_path_arm(adev, path);
	if (ret < 0)
		goto err;

	path->state = AVS_PPL_STATE_INVALID;
	return path;
err:
	avs_path_free_unlocked(path);
	return ERR_PTR(ret);
}

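/* Destroy a path and all of its pipelines under adev->path_mutex. */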
void avs_path_free(struct avs_path *path)
{
	struct avs_dev *adev = path->owner;

	mutex_lock(&adev->path_mutex);
	avs_path_free_unlocked(path);
	mutex_unlock(&adev->path_mutex);
}

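/*
 * Create a path for @dma_id from the path template variant matching both
 * the FE and BE hw_params. Callers (presumably the PCM/DAI ops) are
 * expected to follow up with avs_path_bind() and drive the path through
 * reset/pause/run state changes; that ordering is dictated by the users of
 * this API rather than enforced here.
 */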
struct avs_path *avs_path_create(struct avs_dev *adev, u32 dma_id,
				 struct avs_tplg_path_template *template,
				 struct snd_pcm_hw_params *fe_params,
				 struct snd_pcm_hw_params *be_params)
{
	struct avs_tplg_path *variant;
	struct avs_path *path;

	variant = avs_path_find_variant(adev, template, fe_params, be_params);
	if (!variant) {
		dev_err(adev->dev, "no matching variant found\n");
		return ERR_PTR(-ENOENT);
	}

	/* Serialize creation of the path and its components. */
	mutex_lock(&adev->path_mutex);
	/* Satisfy needs of avs_path_find_tplg(). */
	mutex_lock(&adev->comp_list_mutex);

	path = avs_path_create_unlocked(adev, dma_id, variant);

	mutex_unlock(&adev->comp_list_mutex);
	mutex_unlock(&adev->path_mutex);

	return path;
}

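/*
 * Copier modules feeding an output pin other than 0 need to be informed of
 * the sink format before binding, done here with
 * avs_ipc_copier_set_sink_format().
 */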
static int avs_path_bind_prepare(struct avs_dev *adev,
				 struct avs_path_binding *binding)
{
	const struct avs_audio_format *src_fmt, *sink_fmt;
	struct avs_tplg_module *tsource = binding->source->template;
	struct avs_path_module *source = binding->source;
	int ret;

	/*
	 * Only copier modules about to be bound to an output pin other
	 * than 0 need preparation.
	 */
	if (!binding->source_pin)
		return 0;
	if (!guid_equal(&tsource->cfg_ext->type, &AVS_COPIER_MOD_UUID))
		return 0;

	src_fmt = tsource->in_fmt;
	sink_fmt = binding->sink->template->in_fmt;

	ret = avs_ipc_copier_set_sink_format(adev, source->module_id,
					     source->instance_id, binding->source_pin,
					     src_fmt, sink_fmt);
	if (ret) {
		dev_err(adev->dev, "config copier failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	return 0;
}

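/* Bind every source->sink module pair described by the path's bindings. */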
int avs_path_bind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_path_bind_prepare(adev, binding);
			if (ret < 0)
				return ret;

			ret = avs_ipc_bind(adev, source->module_id,
					   source->instance_id, sink->module_id,
					   sink->instance_id, binding->sink_pin,
					   binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "bind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

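/* Reverse avs_path_bind(): unbind every bound source->sink module pair. */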
int avs_path_unbind(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		struct avs_path_binding *binding;

		list_for_each_entry(binding, &ppl->binding_list, node) {
			struct avs_path_module *source, *sink;

			source = binding->source;
			sink = binding->sink;

			ret = avs_ipc_unbind(adev, source->module_id,
					     source->instance_id, sink->module_id,
					     sink->instance_id, binding->sink_pin,
					     binding->source_pin);
			if (ret) {
				dev_err(adev->dev, "unbind path failed: %d\n", ret);
				return AVS_IPC_RET(ret);
			}
		}
	}

	return 0;
}

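/*
 * Move all pipelines of the path to the RESET state. On IPC failure the
 * path is marked INVALID.
 */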
int avs_path_reset(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RESET)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RESET);
		if (ret) {
			dev_err(adev->dev, "reset path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RESET;
	return 0;
}

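/*
 * Pause all pipelines of the path. The list is walked in reverse, i.e. in
 * the opposite order to avs_path_run().
 */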
int avs_path_pause(struct avs_path *path)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_PAUSED)
		return 0;

	list_for_each_entry_reverse(ppl, &path->ppl_list, node) {
		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_PAUSED);
		if (ret) {
			dev_err(adev->dev, "pause path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_PAUSED;
	return 0;
}

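/*
 * Start all pipelines of the path whose topology trigger matches @trigger.
 * With AVS_TPLG_TRIGGER_AUTO the call is a no-op if the path is already
 * running.
 */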
int avs_path_run(struct avs_path *path, int trigger)
{
	struct avs_path_pipeline *ppl;
	struct avs_dev *adev = path->owner;
	int ret;

	if (path->state == AVS_PPL_STATE_RUNNING && trigger == AVS_TPLG_TRIGGER_AUTO)
		return 0;

	list_for_each_entry(ppl, &path->ppl_list, node) {
		if (ppl->template->cfg->trigger != trigger)
			continue;

		ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id,
						 AVS_PPL_STATE_RUNNING);
		if (ret) {
			dev_err(adev->dev, "run path failed: %d\n", ret);
			path->state = AVS_PPL_STATE_INVALID;
			return AVS_IPC_RET(ret);
		}
	}

	path->state = AVS_PPL_STATE_RUNNING;
	return 0;
}