Linux v6.8 (drivers/tee/optee/core.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_private.h"

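/*
 * Allocate backing memory for a tee_shm object: the buffer is zeroed,
 * page aligned and rounded up to a whole number of pages. The page array
 * is kept in shm->pages so that the ABI-specific shm_register callback,
 * when one is supplied, can hand the pages over to the secure world and
 * so that they can be released again in optee_pool_op_free_helper().
 */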
int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
			       size_t size, size_t align,
			       int (*shm_register)(struct tee_context *ctx,
						   struct tee_shm *shm,
						   struct page **pages,
						   size_t num_pages,
						   unsigned long start))
{
	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	struct page **pages;
	unsigned int i;
	int rc = 0;

	/*
	 * Ignore alignment since this is already going to be page aligned
	 * and there's no need for any larger alignment.
	 */
	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
				       GFP_KERNEL | __GFP_ZERO);
	if (!shm->kaddr)
		return -ENOMEM;

	shm->paddr = virt_to_phys(shm->kaddr);
	shm->size = nr_pages * PAGE_SIZE;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < nr_pages; i++)
		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);

	shm->pages = pages;
	shm->num_pages = nr_pages;

	if (shm_register) {
		rc = shm_register(shm->ctx, shm, pages, nr_pages,
				  (unsigned long)shm->kaddr);
		if (rc)
			goto err;
	}

	return 0;
err:
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	return rc;
}

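/*
 * Counterpart of optee_pool_op_alloc_helper(): unregister the buffer via
 * the optional shm_unregister callback, then free the backing memory and
 * the page array.
 */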
void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
			       int (*shm_unregister)(struct tee_context *ctx,
						     struct tee_shm *shm))
{
	if (shm_unregister)
		shm_unregister(shm->ctx, shm);
	free_pages_exact(shm->kaddr, shm->size);
	shm->kaddr = NULL;
	kfree(shm->pages);
	shm->pages = NULL;
}

static void optee_bus_scan(struct work_struct *work)
{
	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}

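/*
 * Set up the per-context driver data. The supplicant device may only be
 * opened by one context at a time; the first successful open of it also
 * schedules a one-shot scan for TEE bus devices that need tee-supplicant
 * to be available (PTA_CMD_GET_DEVICES_SUPP).
 */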
int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
	struct optee_context_data *ctxdata;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);

	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
	if (!ctxdata)
		return -ENOMEM;

	if (teedev == optee->supp_teedev) {
		bool busy = true;

		mutex_lock(&optee->supp.mutex);
		if (!optee->supp.ctx) {
			busy = false;
			optee->supp.ctx = ctx;
		}
		mutex_unlock(&optee->supp.mutex);
		if (busy) {
			kfree(ctxdata);
			return -EBUSY;
		}

		if (!optee->scan_bus_done) {
			INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
			schedule_work(&optee->scan_bus_work);
			optee->scan_bus_done = true;
		}
	}
	mutex_init(&ctxdata->mutex);
	INIT_LIST_HEAD(&ctxdata->sess_list);

	ctx->cap_memref_null = cap_memref_null;
	ctx->data = ctxdata;
	return 0;
}

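/*
 * Close all sessions still open on this context through the ABI-specific
 * close_session callback, passing along whether each session was bound to
 * a system thread, then free the per-context data.
 */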
static void optee_release_helper(struct tee_context *ctx,
				 int (*close_session)(struct tee_context *ctx,
						      u32 session,
						      bool system_thread))
{
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;
	struct optee_session *sess_tmp;

	if (!ctxdata)
		return;

	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
				 list_node) {
		list_del(&sess->list_node);
		close_session(ctx, sess->session_id, sess->use_sys_thread);
		kfree(sess);
	}
	kfree(ctxdata);
	ctx->data = NULL;
}

void optee_release(struct tee_context *ctx)
{
	optee_release_helper(ctx, optee_close_session_helper);
}

void optee_release_supp(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);

	optee_release_helper(ctx, optee_close_session_helper);

	optee_supp_release(&optee->supp);
}

void optee_remove_common(struct optee *optee)
{
	/* Unregister OP-TEE specific client devices on TEE bus */
	optee_unregister_devices();

	optee_notif_uninit(optee);
	optee_shm_arg_cache_uninit(optee);
	teedev_close_context(optee->ctx);
	/*
	 * The two devices have to be unregistered before we can free the
	 * other resources.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);

	tee_shm_pool_free(optee->pool);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
}

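/*
 * Both the SMC and the FF-A based ABIs are registered at module init; the
 * return codes are recorded so that only the ABIs that registered
 * successfully are unregistered again on module exit.
 */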
static int smc_abi_rc;
static int ffa_abi_rc;

static int __init optee_core_init(void)
{
	/*
	 * The kernel may have crashed at the same time that all available
	 * secure world threads were suspended and we cannot reschedule the
	 * suspended threads without access to the crashed kernel's wait_queue.
	 * Therefore, we cannot reliably initialize the OP-TEE driver in the
	 * kdump kernel.
	 */
	if (is_kdump_kernel())
		return -ENODEV;

	smc_abi_rc = optee_smc_abi_register();
	ffa_abi_rc = optee_ffa_abi_register();

	/* If both failed there's no point with this module */
	if (smc_abi_rc && ffa_abi_rc)
		return smc_abi_rc;
	return 0;
}
module_init(optee_core_init);

static void __exit optee_core_exit(void)
{
	if (!smc_abi_rc)
		optee_smc_abi_unregister();
	if (!ffa_abi_rc)
		optee_ffa_abi_unregister();
}
module_exit(optee_core_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:optee");

Linux v6.2 (drivers/tee/optee/core.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"

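/*
 * In this version the buffer comes from alloc_pages() and is therefore
 * rounded up to a power-of-two number of pages; the page array is only
 * built temporarily when a shm_register callback is supplied and is freed
 * again right after registration.
 */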
int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
			       size_t size, size_t align,
			       int (*shm_register)(struct tee_context *ctx,
						   struct tee_shm *shm,
						   struct page **pages,
						   size_t num_pages,
						   unsigned long start))
{
	unsigned int order = get_order(size);
	struct page *page;
	int rc = 0;

	/*
	 * Ignore alignment since this is already going to be page aligned
	 * and there's no need for any larger alignment.
	 */
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	shm->kaddr = page_address(page);
	shm->paddr = page_to_phys(page);
	shm->size = PAGE_SIZE << order;

	if (shm_register) {
		unsigned int nr_pages = 1 << order, i;
		struct page **pages;

		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			rc = -ENOMEM;
			goto err;
		}

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		rc = shm_register(shm->ctx, shm, pages, nr_pages,
				  (unsigned long)shm->kaddr);
		kfree(pages);
		if (rc)
			goto err;
	}

	return 0;

err:
	free_pages((unsigned long)shm->kaddr, order);
	return rc;
}

void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
			       int (*shm_unregister)(struct tee_context *ctx,
						     struct tee_shm *shm))
{
	if (shm_unregister)
		shm_unregister(shm->ctx, shm);
	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void optee_bus_scan(struct work_struct *work)
{
	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}

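/*
 * As above, but here the one-shot bus scan is queued on a dedicated
 * workqueue, created on the first open of the supplicant device and
 * destroyed again in optee_release_supp().
 */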
int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
	struct optee_context_data *ctxdata;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);

	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
	if (!ctxdata)
		return -ENOMEM;

	if (teedev == optee->supp_teedev) {
		bool busy = true;

		mutex_lock(&optee->supp.mutex);
		if (!optee->supp.ctx) {
			busy = false;
			optee->supp.ctx = ctx;
		}
		mutex_unlock(&optee->supp.mutex);
		if (busy) {
			kfree(ctxdata);
			return -EBUSY;
		}

		if (!optee->scan_bus_done) {
			INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
			optee->scan_bus_wq = create_workqueue("optee_bus_scan");
			if (!optee->scan_bus_wq) {
				kfree(ctxdata);
				return -ECHILD;
			}
			queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
			optee->scan_bus_done = true;
		}
	}
	mutex_init(&ctxdata->mutex);
	INIT_LIST_HEAD(&ctxdata->sess_list);

	ctx->cap_memref_null = cap_memref_null;
	ctx->data = ctxdata;
	return 0;
}

static void optee_release_helper(struct tee_context *ctx,
				 int (*close_session)(struct tee_context *ctx,
						      u32 session))
{
	struct optee_context_data *ctxdata = ctx->data;
	struct optee_session *sess;
	struct optee_session *sess_tmp;

	if (!ctxdata)
		return;

	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
				 list_node) {
		list_del(&sess->list_node);
		close_session(ctx, sess->session_id);
		kfree(sess);
	}
	kfree(ctxdata);
	ctx->data = NULL;
}

void optee_release(struct tee_context *ctx)
{
	optee_release_helper(ctx, optee_close_session_helper);
}

void optee_release_supp(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);

	optee_release_helper(ctx, optee_close_session_helper);
	if (optee->scan_bus_wq) {
		destroy_workqueue(optee->scan_bus_wq);
		optee->scan_bus_wq = NULL;
	}
	optee_supp_release(&optee->supp);
}

void optee_remove_common(struct optee *optee)
{
	/* Unregister OP-TEE specific client devices on TEE bus */
	optee_unregister_devices();

	optee_notif_uninit(optee);
	optee_shm_arg_cache_uninit(optee);
	teedev_close_context(optee->ctx);
	/*
	 * The two devices have to be unregistered before we can free the
	 * other resources.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);

	tee_shm_pool_free(optee->pool);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
}

static int smc_abi_rc;
static int ffa_abi_rc;

static int __init optee_core_init(void)
{
	/*
	 * The kernel may have crashed at the same time that all available
	 * secure world threads were suspended and we cannot reschedule the
	 * suspended threads without access to the crashed kernel's wait_queue.
	 * Therefore, we cannot reliably initialize the OP-TEE driver in the
	 * kdump kernel.
	 */
	if (is_kdump_kernel())
		return -ENODEV;

	smc_abi_rc = optee_smc_abi_register();
	ffa_abi_rc = optee_ffa_abi_register();

	/* If both failed there's no point with this module */
	if (smc_abi_rc && ffa_abi_rc)
		return smc_abi_rc;
	return 0;
}
module_init(optee_core_init);

static void __exit optee_core_exit(void)
{
	if (!smc_abi_rc)
		optee_smc_abi_unregister();
	if (!ffa_abi_rc)
		optee_ffa_abi_unregister();
}
module_exit(optee_core_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:optee");