/*
 * Intel SST Firmware Loader
 *
 * Copyright (C) 2013, Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

static void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 i;

	/* copy one 32 bit word at a time as 64 bit access is not supported */
	for (i = 0; i < bytes; i += 4)
		memcpy_toio(dest + i, src + i, 4);
}

/* create new generic firmware object */
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	/* call core specific FW parser to load FW data into DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	/* free against the same device the buffer was allocated with */
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
				sst_fw->dma_buf,
				sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);
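
/*
 * Example (illustrative only, not part of the original file): a minimal
 * sketch of how a platform driver might pair request_firmware() with
 * sst_fw_new(). The firmware file name and the example_* identifiers
 * are hypothetical; error handling is reduced to the essentials.
 */
#if 0
static int example_load_firmware(struct sst_dsp *dsp)
{
	const struct firmware *fw;
	struct sst_fw *sst_fw;
	int err;

	err = request_firmware(&fw, "intel/example-sst-fw.bin", dsp->dev);
	if (err < 0)
		return err;

	/* sst_fw_new() copies fw->data, so the request can be released */
	sst_fw = sst_fw_new(dsp, fw, NULL);
	release_firmware(fw);

	return sst_fw ? 0 : -ENOMEM;
}
#endif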

/* free single firmware object */
void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

/* free all firmware objects */
void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

/* create a new SST generic module from FW template */
struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;

	memcpy(&sst_module->s, &template->s, sizeof(struct sst_module_data));
	memcpy(&sst_module->p, &template->p, sizeof(struct sst_module_data));

	INIT_LIST_HEAD(&sst_module->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);
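
/*
 * Example (illustrative only): a hypothetical template describing one
 * module's persistent (p) and scratch (s) data, as a core specific
 * firmware parser might build it before calling sst_module_new(). The
 * sizes, ID and data types below are invented for illustration.
 */
#if 0
static struct sst_module *example_create_module(struct sst_fw *sst_fw)
{
	struct sst_module_template template;

	memset(&template, 0, sizeof(template));
	template.id = 1;

	/* persistent data region for the module */
	template.p.type = SST_MEM_DRAM;
	template.p.data_type = SST_DATA_P;
	template.p.size = 0x1000;

	/* scratch data region for the module */
	template.s.type = SST_MEM_DRAM;
	template.s.data_type = SST_DATA_S;
	template.s.size = 0x800;

	return sst_module_new(sst_fw, &template, NULL);
}
#endif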

/* free firmware module and remove from available list */
void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

static struct sst_mem_block *find_block(struct sst_dsp *dsp, int type,
	u32 offset)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == type && block->offset == offset)
			return block;
	}

	return NULL;
}

static int block_alloc_contiguous(struct sst_module *module,
	struct sst_module_data *data, u32 offset, int size)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block;

	/* gather a contiguous run of free blocks on a temporary list so
	 * the free list can be restored unchanged if the run is incomplete */
	while (size > 0) {
		block = find_block(dsp, data->type, offset);
		if (!block) {
			/* incomplete run - put the blocks back and fail */
			list_splice(&tmp, &dsp->free_block_list);
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		offset += block->size;
		size -= block->size;
	}

	/* commit: attach each block to the module and mark it as used */
	list_for_each_entry(block, &tmp, list)
		list_add(&block->module_list, &module->block_list);

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

/* allocate free DSP blocks for module data - callers hold locks */
static int block_alloc(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (data->size == 0)
		return 0;

	/* find first free whole block that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		if (data->size > block->size)
			continue;

		/* the whole module fits in this single block */
		data->offset = block->offset;
		block->data_type = data->data_type;
		block->bytes_used = data->size;
		list_add(&block->module_list, &module->block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, " *module %d added block %d:%d\n",
			module->id, block->type, block->index);
		return 0;
	}

	/* then find free multiple blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with wrong type */
		if (block->type != data->type)
			continue;

		/* do we span > 1 block */
		if (data->size > block->size) {
			ret = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

/* remove module from memory - callers hold locks */
static void block_module_remove(struct sst_module *module)
{
	struct sst_mem_block *block, *tmp;
	struct sst_dsp *dsp = module->dsp;
	int err;

	/* disable each block */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
	}
}

/* prepare the memory block to receive data from host - callers hold locks */
static int block_module_prepare(struct sst_module *module)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for module P/S data */
	list_for_each_entry(block, &module->block_list, module_list) {

		if (block->ops && block->ops->enable) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(module->dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, &module->block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

/* allocate memory blocks for static module addresses - callers hold locks */
static int block_alloc_fixed(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_mem_block *block, *tmp;
	u32 end = data->offset + data->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (data->type != SST_MEM_IRAM && data->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module */
	list_for_each_entry_safe(block, tmp, &module->block_list, module_list) {

		/* force compacting mem blocks of the same data_type */
		if (block->data_type != data->data_type)
			continue;

		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end)
			return 0;

		/* does block span more than 1 section */
		if (data->offset >= block->offset && data->offset < block_end) {

			err = block_alloc_contiguous(module, data,
				block->offset + block->size,
				data->size - block->size);
			if (err < 0)
				return -ENOMEM;

			/* module already owns blocks */
			return 0;
		}
	}

	/* find first free blocks that can hold section in free list */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* find block that holds section */
		if (data->offset >= block->offset && end < block_end) {

			/* add block */
			block->data_type = data->data_type;
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, &module->block_list);
			return 0;
		}

		/* does block span more than 1 section */
		if (data->offset >= block->offset && data->offset < block_end) {

			err = block_alloc_contiguous(module, data,
				block->offset, data->size);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}

	}

	return -ENOMEM;
}

/* Load fixed module data into DSP memory blocks */
int sst_module_insert_fixed_block(struct sst_module *module,
	struct sst_module_data *data)
{
	struct sst_dsp *dsp = module->dsp;
	int ret;

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(module, data);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			data->offset, data->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare DSP blocks for module copy */
	ret = block_module_prepare(module);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy partial module data to blocks */
	sst_memcpy32(dsp->addr.lpe + data->offset, data->data, data->size);

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_insert_fixed_block);
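
/*
 * Example (illustrative only): loading one parsed firmware section at
 * a fixed DSP address. The section values below are invented; a real
 * caller would take them from the firmware image it is parsing.
 */
#if 0
static int example_load_section(struct sst_module *module, void *data)
{
	struct sst_module_data block_data;

	block_data.type = SST_MEM_IRAM;
	block_data.data_type = SST_DATA_M;
	block_data.size = 0x2000;
	block_data.offset = 0x0;	/* fixed offset inside IRAM */
	block_data.data = data;

	return sst_module_insert_fixed_block(module, &block_data);
}
#endif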

/* Unload entire module from DSP memory */
int sst_block_module_remove(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_module_remove(module);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_block_module_remove);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, struct sst_block_ops *ops, u32 index,
	void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);
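
/*
 * Example (illustrative only): registering a bank of equally sized
 * IRAM blocks at probe time. The block geometry and example_block_ops
 * table are hypothetical; real drivers take these from the hardware
 * description. Cleanup of partially registered banks is left to
 * sst_mem_block_unregister_all().
 */
#if 0
static int example_register_iram(struct sst_dsp *dsp,
	struct sst_block_ops *example_block_ops)
{
	u32 offset;
	int i;

	/* eight 4k blocks starting at IRAM offset 0 */
	for (i = 0, offset = 0; i < 8; i++, offset += 0x1000) {
		if (sst_mem_block_register(dsp, offset, 0x1000,
				SST_MEM_IRAM, example_block_ops, i,
				NULL) == NULL)
			return -ENOMEM;
	}

	return 0;
}
#endif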

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

/* allocate scratch buffer blocks */
struct sst_module *sst_mem_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *sst_module, *scratch;
	struct sst_mem_block *block, *tmp;
	u32 block_size;
	int ret = 0;

	scratch = kzalloc(sizeof(struct sst_module), GFP_KERNEL);
	if (scratch == NULL)
		return NULL;

	mutex_lock(&dsp->mutex);

	/* required scratch size is the largest of any module's */
	list_for_each_entry(sst_module, &dsp->module_list, list) {
		if (scratch->s.size < sst_module->s.size)
			scratch->s.size = sst_module->s.size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is %d bytes\n",
		scratch->s.size);

	/* init scratch module */
	scratch->dsp = dsp;
	scratch->s.type = SST_MEM_DRAM;
	scratch->s.data_type = SST_DATA_S;
	INIT_LIST_HEAD(&scratch->block_list);

	/* check free blocks before looking at used blocks for space */
	if (!list_empty(&dsp->free_block_list))
		block = list_first_entry(&dsp->free_block_list,
			struct sst_mem_block, list);
	else
		block = list_first_entry(&dsp->used_block_list,
			struct sst_mem_block, list);
	block_size = block->size;

	/* allocate blocks for module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");
	ret = block_alloc(scratch, &scratch->s);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		goto err;
	}

	/* assign the same scratch offset to each module */
	list_for_each_entry(sst_module, &dsp->module_list, list)
		sst_module->s.offset = scratch->s.offset;

	mutex_unlock(&dsp->mutex);
	return scratch;

err:
	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);
	mutex_unlock(&dsp->mutex);
	kfree(scratch);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_mem_block_alloc_scratch);
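
/*
 * Example (illustrative only): allocating the shared scratch buffer
 * once all firmware modules have been parsed, and releasing it again
 * at shutdown. The kfree() of the scratch module in the teardown path
 * is an assumption here: sst_mem_block_free_scratch() only detaches
 * the blocks, so the caller owns the allocation.
 */
#if 0
static struct sst_module *example_setup_scratch(struct sst_dsp *dsp)
{
	return sst_mem_block_alloc_scratch(dsp);
}

static void example_teardown_scratch(struct sst_dsp *dsp,
	struct sst_module *scratch)
{
	sst_mem_block_free_scratch(dsp, scratch);
	kfree(scratch);
}
#endif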

/* free all scratch blocks */
void sst_mem_block_free_scratch(struct sst_dsp *dsp,
	struct sst_module *scratch)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry_safe(block, tmp, &scratch->block_list, module_list)
		list_del(&block->module_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_free_scratch);

/* get a module from its unique ID */
struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);
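
/*
 * Example (illustrative only): resolving a module from the ID carried
 * in a firmware or IPC message before acting on it. example_module_exists()
 * is a hypothetical helper, not part of this API.
 */
#if 0
static bool example_module_exists(struct sst_dsp *dsp, u32 module_id)
{
	return sst_module_get_from_id(dsp, module_id) != NULL;
}
#endif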