// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_tzmem.h"

struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

/* In generic mode, DMA-coherent memory needs no mode-specific setup. */

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable();
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	/* Pack the physical address together with the R/W permission bits. */
	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	/* Encode the number of VMs (here: one) above the size bits. */
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	/* Auto-freed on every error return thanks to the __free() annotation. */
	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, handle);
	if (ret)
		return ret;

	/* Success: take ownership back so the handle is not auto-freed. */
	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	/* The pool owns the area now: disarm the __free() cleanup. */
	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
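
/*
 * Example (an illustrative sketch, not part of the driver): a caller
 * could create a growable pool like this. The sizes below are made up
 * for the example.
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.initial_size = SZ_256K,
 *		.max_size = SZ_4M,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */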

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called before all chunks allocated from this pool have been
 * freed. Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
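
/*
 * Example (an illustrative sketch): in a hypothetical probe() path the
 * managed variant ties the pool's lifetime to the device, so no explicit
 * qcom_tzmem_pool_free() call is needed. foo_probe() and the size are
 * made up.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct qcom_tzmem_pool_config config = {
 *			.policy = QCOM_TZMEM_POLICY_STATIC,
 *			.initial_size = SZ_1M,
 *		};
 *		struct qcom_tzmem_pool *pool;
 *
 *		pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *
 *		return 0;
 *	}
 */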

static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}
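
/*
 * Worked example of the growth policies above (numbers are hypothetical):
 * with QCOM_TZMEM_POLICY_MULTIPLIER and increment == 2, a pool currently
 * holding 256K grows by 256K * 2 = 512K regardless of the requested size;
 * QCOM_TZMEM_POLICY_ON_DEMAND grows by exactly the requested amount;
 * QCOM_TZMEM_POLICY_STATIC never grows, so it stays capped at its initial
 * size. In all cases max_size (if set) bounds the total pool size.
 */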

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		/* The radix tree owns the chunk now: disarm the __free() cleanup. */
		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);
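
/*
 * Example (an illustrative sketch): a typical allocate/use/release cycle.
 * The 64-byte request below is rounded up to a full page internally.
 *
 *	void *buf = qcom_tzmem_alloc(pool, 64, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... fill the buffer and share it with TZ ...
 *
 *	qcom_tzmem_free(buf);
 */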

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Return:
 * Physical address of the buffer or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		/* gen_pool_virt_to_phys() returns -1 if vaddr is not in this pool. */
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
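
/*
 * Example (an illustrative sketch): firmware interfaces take physical
 * addresses, so callers convert before invoking an SCM call. The call
 * name below is hypothetical.
 *
 *	phys_addr_t phys = qcom_tzmem_to_phys(buf);
 *	if (!phys)
 *		return -EINVAL;
 *
 *	ret = qcom_scm_do_something(phys, size);
 */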

/**
 * qcom_tzmem_enable() - Enable the allocator and register its DMA device.
 * @dev: Device that will back the DMA allocations for all TZ memory pools.
 *
 * Return: 0 on success, -EBUSY if a device was already registered, or a
 * negative error code from the mode-specific initialization.
 */
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
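
/*
 * Example (an illustrative sketch): a single platform device, in practice
 * the SCM driver, is expected to register itself as the allocator's DMA
 * device once at probe time; any later call returns -EBUSY.
 *
 *	ret = qcom_tzmem_enable(&pdev->dev);
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret,
 *				     "failed to enable the TZ memory allocator\n");
 */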

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_LICENSE("GPL");