v6.8: drivers/tee/optee/core.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2015-2021, Linaro Limited
  4 * Copyright (c) 2016, EPAM Systems
  5 */
  6
  7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  8
  9#include <linux/crash_dump.h>
 10#include <linux/errno.h>
 11#include <linux/io.h>
 12#include <linux/mm.h>
 13#include <linux/module.h>
 14#include <linux/slab.h>
 15#include <linux/string.h>
 16#include <linux/tee_drv.h>
 17#include <linux/types.h>
 18#include "optee_private.h"
 19
 20int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
 21			       size_t size, size_t align,
 22			       int (*shm_register)(struct tee_context *ctx,
 23						   struct tee_shm *shm,
 24						   struct page **pages,
 25						   size_t num_pages,
 26						   unsigned long start))
 27{
 28	size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
 29	struct page **pages;
 30	unsigned int i;
 31	int rc = 0;
 32
 33	/*
 34	 * Ignore alignment since this is already going to be page aligned
 35	 * and there's no need for any larger alignment.
 36	 */
 37	shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
 38				       GFP_KERNEL | __GFP_ZERO);
 39	if (!shm->kaddr)
 40		return -ENOMEM;
 41
 42	shm->paddr = virt_to_phys(shm->kaddr);
 43	shm->size = nr_pages * PAGE_SIZE;
 44
 45	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
 46	if (!pages) {
 47		rc = -ENOMEM;
 48		goto err;
 49	}
 50
 51	for (i = 0; i < nr_pages; i++)
 52		pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
 53
 54	shm->pages = pages;
 55	shm->num_pages = nr_pages;
 56
 57	if (shm_register) {
 58		rc = shm_register(shm->ctx, shm, pages, nr_pages,
 59				  (unsigned long)shm->kaddr);
 60		if (rc)
 61			goto err;
 62	}
 63
 64	return 0;
 65err:
 66	free_pages_exact(shm->kaddr, shm->size);
 67	shm->kaddr = NULL;
 68	return rc;
 69}
 70
 71void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
 72			       int (*shm_unregister)(struct tee_context *ctx,
 73						     struct tee_shm *shm))
 74{
 75	if (shm_unregister)
 76		shm_unregister(shm->ctx, shm);
 77	free_pages_exact(shm->kaddr, shm->size);
 78	shm->kaddr = NULL;
 79	kfree(shm->pages);
 80	shm->pages = NULL;
 81}
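The two helpers above are written to be wrapped by an ABI-specific shared-memory pool backend that supplies its own register/unregister hooks. The following is a minimal sketch of that wiring, not code from this file; pool_op_alloc, pool_op_free and the optee_shm_register/optee_shm_unregister hooks are assumed names used only for illustration.

static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			 size_t size, size_t align)
{
	/*
	 * Hypothetical backend: delegate to the common helper and let it
	 * register the freshly allocated pages with secure world through
	 * the supplied hook.
	 */
	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	/* Unregister from secure world, then free the pages and page array. */
	optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
}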
 82
 83static void optee_bus_scan(struct work_struct *work)
 84{
 85	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
 86}
 87
 88int optee_open(struct tee_context *ctx, bool cap_memref_null)
 89{
 90	struct optee_context_data *ctxdata;
 91	struct tee_device *teedev = ctx->teedev;
 92	struct optee *optee = tee_get_drvdata(teedev);
 93
 94	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
 95	if (!ctxdata)
 96		return -ENOMEM;
 97
 98	if (teedev == optee->supp_teedev) {
 99		bool busy = true;
100
101		mutex_lock(&optee->supp.mutex);
102		if (!optee->supp.ctx) {
103			busy = false;
104			optee->supp.ctx = ctx;
105		}
106		mutex_unlock(&optee->supp.mutex);
107		if (busy) {
108			kfree(ctxdata);
109			return -EBUSY;
110		}
111
112		if (!optee->scan_bus_done) {
113			INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
114			schedule_work(&optee->scan_bus_work);
115			optee->scan_bus_done = true;
116		}
117	}
118	mutex_init(&ctxdata->mutex);
119	INIT_LIST_HEAD(&ctxdata->sess_list);
120
121	ctx->cap_memref_null = cap_memref_null;
122	ctx->data = ctxdata;
123	return 0;
124}
125
126static void optee_release_helper(struct tee_context *ctx,
127				 int (*close_session)(struct tee_context *ctx,
128						      u32 session,
129						      bool system_thread))
130{
131	struct optee_context_data *ctxdata = ctx->data;
132	struct optee_session *sess;
133	struct optee_session *sess_tmp;
134
135	if (!ctxdata)
136		return;
137
138	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
139				 list_node) {
140		list_del(&sess->list_node);
141		close_session(ctx, sess->session_id, sess->use_sys_thread);
142		kfree(sess);
143	}
144	kfree(ctxdata);
145	ctx->data = NULL;
146}
147
148void optee_release(struct tee_context *ctx)
149{
150	optee_release_helper(ctx, optee_close_session_helper);
151}
152
153void optee_release_supp(struct tee_context *ctx)
154{
155	struct optee *optee = tee_get_drvdata(ctx->teedev);
156
157	optee_release_helper(ctx, optee_close_session_helper);
158
159	optee_supp_release(&optee->supp);
160}
161
162void optee_remove_common(struct optee *optee)
163{
164	/* Unregister OP-TEE specific client devices on TEE bus */
165	optee_unregister_devices();
166
167	optee_notif_uninit(optee);
168	optee_shm_arg_cache_uninit(optee);
169	teedev_close_context(optee->ctx);
170	/*
171	 * The two devices have to be unregistered before we can free the
172	 * other resources.
173	 */
174	tee_device_unregister(optee->supp_teedev);
175	tee_device_unregister(optee->teedev);
176
177	tee_shm_pool_free(optee->pool);
178	optee_supp_uninit(&optee->supp);
179	mutex_destroy(&optee->call_queue.mutex);
180}
181
182static int smc_abi_rc;
183static int ffa_abi_rc;
184
185static int __init optee_core_init(void)
186{
187	/*
188	 * The kernel may have crashed at the same time that all available
189	 * secure world threads were suspended and we cannot reschedule the
190	 * suspended threads without access to the crashed kernel's wait_queue.
191	 * Therefore, we cannot reliably initialize the OP-TEE driver in the
192	 * kdump kernel.
193	 */
194	if (is_kdump_kernel())
195		return -ENODEV;
196
197	smc_abi_rc = optee_smc_abi_register();
198	ffa_abi_rc = optee_ffa_abi_register();
199
200	/* If both failed, there is no point in keeping this module */
201	if (smc_abi_rc && ffa_abi_rc)
202		return smc_abi_rc;
203	return 0;
204}
205module_init(optee_core_init);
206
207static void __exit optee_core_exit(void)
208{
209	if (!smc_abi_rc)
210		optee_smc_abi_unregister();
211	if (!ffa_abi_rc)
212		optee_ffa_abi_unregister();
213}
214module_exit(optee_core_exit);
215
216MODULE_AUTHOR("Linaro");
217MODULE_DESCRIPTION("OP-TEE driver");
218MODULE_VERSION("1.0");
219MODULE_LICENSE("GPL v2");
220MODULE_ALIAS("platform:optee");
v5.4: drivers/tee/optee/core.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2015, Linaro Limited
  4 */
  5
  6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  7
  8#include <linux/arm-smccc.h>
  9#include <linux/errno.h>
 10#include <linux/io.h>
 11#include <linux/module.h>
 12#include <linux/of.h>
 13#include <linux/of_platform.h>
 14#include <linux/platform_device.h>
 15#include <linux/slab.h>
 16#include <linux/string.h>
 17#include <linux/tee_drv.h>
 18#include <linux/types.h>
 19#include <linux/uaccess.h>
 20#include "optee_private.h"
 21#include "optee_smc.h"
 22#include "shm_pool.h"
 23
 24#define DRIVER_NAME "optee"
 25
 26#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
 27
 28/**
 29 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 30 *			    struct tee_param
 31 * @params:	subsystem internal parameter representation
 32 * @num_params:	number of elements in the parameter arrays
 33 * @msg_params:	OPTEE_MSG parameters
 34 * Returns 0 on success or <0 on failure
 35 */
 36int optee_from_msg_param(struct tee_param *params, size_t num_params,
 37			 const struct optee_msg_param *msg_params)
 38{
 39	int rc;
 40	size_t n;
 41	struct tee_shm *shm;
 42	phys_addr_t pa;
 43
 44	for (n = 0; n < num_params; n++) {
 45		struct tee_param *p = params + n;
 46		const struct optee_msg_param *mp = msg_params + n;
 47		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
 48
 49		switch (attr) {
 50		case OPTEE_MSG_ATTR_TYPE_NONE:
 51			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 52			memset(&p->u, 0, sizeof(p->u));
 53			break;
 54		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
 55		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
 56		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
 57			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
 58				  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
 59			p->u.value.a = mp->u.value.a;
 60			p->u.value.b = mp->u.value.b;
 61			p->u.value.c = mp->u.value.c;
 62			break;
 63		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
 64		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
 65		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
 66			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 67				  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
 68			p->u.memref.size = mp->u.tmem.size;
 69			shm = (struct tee_shm *)(unsigned long)
 70				mp->u.tmem.shm_ref;
 71			if (!shm) {
 72				p->u.memref.shm_offs = 0;
 73				p->u.memref.shm = NULL;
 74				break;
 75			}
 76			rc = tee_shm_get_pa(shm, 0, &pa);
 77			if (rc)
 78				return rc;
 79			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
 80			p->u.memref.shm = shm;
 81
 82			/* Check that the memref is covered by the shm object */
 83			if (p->u.memref.size) {
 84				size_t o = p->u.memref.shm_offs +
 85					   p->u.memref.size - 1;
 86
 87				rc = tee_shm_get_pa(shm, o, NULL);
 88				if (rc)
 89					return rc;
 90			}
 91			break;
 92		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
 93		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
 94		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
 95			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 96				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
 97			p->u.memref.size = mp->u.rmem.size;
 98			shm = (struct tee_shm *)(unsigned long)
 99				mp->u.rmem.shm_ref;
100
101			if (!shm) {
102				p->u.memref.shm_offs = 0;
103				p->u.memref.shm = NULL;
104				break;
105			}
106			p->u.memref.shm_offs = mp->u.rmem.offs;
107			p->u.memref.shm = shm;
108
109			break;
110
111		default:
112			return -EINVAL;
113		}
114	}
115	return 0;
116}
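As a rough illustration of the value-parameter branch above, the standalone sketch below shows how a single OPTEE_MSG value parameter is converted; it is illustrative only and not part of this file, and the wrapping function name is invented.

static void example_value_param_conversion(void)
{
	/* Illustrative: one VALUE_INOUT message parameter in, one tee_param out. */
	struct optee_msg_param mp = {
		.attr = OPTEE_MSG_ATTR_TYPE_VALUE_INOUT,
		.u.value = { .a = 1, .b = 2, .c = 3 },
	};
	struct tee_param p;

	if (!optee_from_msg_param(&p, 1, &mp)) {
		/*
		 * p.attr is now TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT and
		 * p.u.value.{a,b,c} mirror mp.u.value.{a,b,c}.
		 */
	}
}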
117
118static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
119				const struct tee_param *p)
120{
121	int rc;
122	phys_addr_t pa;
123
124	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
125		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
126
127	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
128	mp->u.tmem.size = p->u.memref.size;
129
130	if (!p->u.memref.shm) {
131		mp->u.tmem.buf_ptr = 0;
132		return 0;
133	}
134
135	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
136	if (rc)
137		return rc;
138
139	mp->u.tmem.buf_ptr = pa;
140	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
141		    OPTEE_MSG_ATTR_CACHE_SHIFT;
142
143	return 0;
144}
145
146static int to_msg_param_reg_mem(struct optee_msg_param *mp,
147				const struct tee_param *p)
148{
149	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
150		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
151
152	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
153	mp->u.rmem.size = p->u.memref.size;
154	mp->u.rmem.offs = p->u.memref.shm_offs;
155	return 0;
156}
157
158/**
159 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
160 * @msg_params:	OPTEE_MSG parameters
161 * @num_params:	number of elements in the parameter arrays
162 * @params:	subsystem internal parameter representation
163 * Returns 0 on success or <0 on failure
164 */
165int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
166		       const struct tee_param *params)
167{
168	int rc;
169	size_t n;
170
171	for (n = 0; n < num_params; n++) {
172		const struct tee_param *p = params + n;
173		struct optee_msg_param *mp = msg_params + n;
174
175		switch (p->attr) {
176		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
177			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
178			memset(&mp->u, 0, sizeof(mp->u));
179			break;
180		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
181		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
182		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
183			mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
184				   TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
185			mp->u.value.a = p->u.value.a;
186			mp->u.value.b = p->u.value.b;
187			mp->u.value.c = p->u.value.c;
188			break;
189		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
190		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
191		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
192			if (tee_shm_is_registered(p->u.memref.shm))
193				rc = to_msg_param_reg_mem(mp, p);
194			else
195				rc = to_msg_param_tmp_mem(mp, p);
196			if (rc)
197				return rc;
198			break;
199		default:
200			return -EINVAL;
201		}
202	}
203	return 0;
204}
205
206static void optee_get_version(struct tee_device *teedev,
207			      struct tee_ioctl_version_data *vers)
208{
209	struct tee_ioctl_version_data v = {
210		.impl_id = TEE_IMPL_ID_OPTEE,
211		.impl_caps = TEE_OPTEE_CAP_TZ,
212		.gen_caps = TEE_GEN_CAP_GP,
213	};
214	struct optee *optee = tee_get_drvdata(teedev);
215
216	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
217		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
218	*vers = v;
219}
220
221static int optee_open(struct tee_context *ctx)
222{
223	struct optee_context_data *ctxdata;
224	struct tee_device *teedev = ctx->teedev;
225	struct optee *optee = tee_get_drvdata(teedev);
226
227	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
228	if (!ctxdata)
229		return -ENOMEM;
230
231	if (teedev == optee->supp_teedev) {
232		bool busy = true;
233
234		mutex_lock(&optee->supp.mutex);
235		if (!optee->supp.ctx) {
236			busy = false;
237			optee->supp.ctx = ctx;
238		}
239		mutex_unlock(&optee->supp.mutex);
240		if (busy) {
241			kfree(ctxdata);
242			return -EBUSY;
243		}
244	}
245
246	mutex_init(&ctxdata->mutex);
247	INIT_LIST_HEAD(&ctxdata->sess_list);
248
249	ctx->data = ctxdata;
250	return 0;
251}
252
253static void optee_release(struct tee_context *ctx)
254{
255	struct optee_context_data *ctxdata = ctx->data;
256	struct tee_device *teedev = ctx->teedev;
257	struct optee *optee = tee_get_drvdata(teedev);
258	struct tee_shm *shm;
259	struct optee_msg_arg *arg = NULL;
260	phys_addr_t parg;
261	struct optee_session *sess;
262	struct optee_session *sess_tmp;
263
264	if (!ctxdata)
265		return;
266
267	shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
268	if (!IS_ERR(shm)) {
269		arg = tee_shm_get_va(shm, 0);
270		/*
271		 * If va2pa fails for some reason, we can't call into
272		 * secure world and can only free the memory. The secure OS will
273		 * leak sessions and eventually refuse new ones, but we will
274		 * at least let the normal world reclaim its memory.
275		 */
276		if (!IS_ERR(arg))
277			if (tee_shm_va2pa(shm, arg, &parg))
278				arg = NULL; /* prevent usage of parg below */
279	}
280
281	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
282				 list_node) {
283		list_del(&sess->list_node);
284		if (!IS_ERR_OR_NULL(arg)) {
285			memset(arg, 0, sizeof(*arg));
286			arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
287			arg->session = sess->session_id;
288			optee_do_call_with_arg(ctx, parg);
289		}
290		kfree(sess);
291	}
292	kfree(ctxdata);
293
294	if (!IS_ERR(shm))
295		tee_shm_free(shm);
296
297	ctx->data = NULL;
298
299	if (teedev == optee->supp_teedev)
300		optee_supp_release(&optee->supp);
301}
302
303static const struct tee_driver_ops optee_ops = {
304	.get_version = optee_get_version,
305	.open = optee_open,
306	.release = optee_release,
307	.open_session = optee_open_session,
308	.close_session = optee_close_session,
309	.invoke_func = optee_invoke_func,
310	.cancel_req = optee_cancel_req,
311	.shm_register = optee_shm_register,
312	.shm_unregister = optee_shm_unregister,
313};
314
315static const struct tee_desc optee_desc = {
316	.name = DRIVER_NAME "-clnt",
317	.ops = &optee_ops,
318	.owner = THIS_MODULE,
319};
320
321static const struct tee_driver_ops optee_supp_ops = {
322	.get_version = optee_get_version,
323	.open = optee_open,
324	.release = optee_release,
325	.supp_recv = optee_supp_recv,
326	.supp_send = optee_supp_send,
327	.shm_register = optee_shm_register_supp,
328	.shm_unregister = optee_shm_unregister_supp,
329};
330
331static const struct tee_desc optee_supp_desc = {
332	.name = DRIVER_NAME "-supp",
333	.ops = &optee_supp_ops,
334	.owner = THIS_MODULE,
335	.flags = TEE_DESC_PRIVILEGED,
336};
337
338static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
339{
340	struct arm_smccc_res res;
341
342	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
343
344	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
345	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
346		return true;
347	return false;
348}
349
350static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
351{
352	union {
353		struct arm_smccc_res smccc;
354		struct optee_smc_call_get_os_revision_result result;
355	} res = {
356		.result = {
357			.build_id = 0
358		}
359	};
360
361	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
362		  &res.smccc);
363
364	if (res.result.build_id)
365		pr_info("revision %lu.%lu (%08lx)", res.result.major,
366			res.result.minor, res.result.build_id);
367	else
368		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
369}
370
371static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
372{
373	union {
374		struct arm_smccc_res smccc;
375		struct optee_smc_calls_revision_result result;
376	} res;
377
378	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
379
380	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
381	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
382		return true;
383	return false;
384}
385
386static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
387					    u32 *sec_caps)
388{
389	union {
390		struct arm_smccc_res smccc;
391		struct optee_smc_exchange_capabilities_result result;
392	} res;
393	u32 a1 = 0;
394
395	/*
396	 * TODO This isn't enough to tell if it's a UP system (from the
397	 * kernel's point of view) or not; is_smp() returns the information
398	 * needed, but it can't be called directly from here.
399	 */
400	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
401		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
402
403	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
404		  &res.smccc);
405
406	if (res.result.status != OPTEE_SMC_RETURN_OK)
407		return false;
408
409	*sec_caps = res.result.capabilities;
410	return true;
411}
412
413static struct tee_shm_pool *optee_config_dyn_shm(void)
414{
415	struct tee_shm_pool_mgr *priv_mgr;
416	struct tee_shm_pool_mgr *dmabuf_mgr;
417	void *rc;
418
419	rc = optee_shm_pool_alloc_pages();
420	if (IS_ERR(rc))
421		return rc;
422	priv_mgr = rc;
423
424	rc = optee_shm_pool_alloc_pages();
425	if (IS_ERR(rc)) {
426		tee_shm_pool_mgr_destroy(priv_mgr);
427		return rc;
428	}
429	dmabuf_mgr = rc;
430
431	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
432	if (IS_ERR(rc)) {
433		tee_shm_pool_mgr_destroy(priv_mgr);
434		tee_shm_pool_mgr_destroy(dmabuf_mgr);
435	}
436
437	return rc;
438}
439
440static struct tee_shm_pool *
441optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
442{
443	union {
444		struct arm_smccc_res smccc;
445		struct optee_smc_get_shm_config_result result;
446	} res;
447	unsigned long vaddr;
448	phys_addr_t paddr;
449	size_t size;
450	phys_addr_t begin;
451	phys_addr_t end;
452	void *va;
453	struct tee_shm_pool_mgr *priv_mgr;
454	struct tee_shm_pool_mgr *dmabuf_mgr;
455	void *rc;
456	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
457
458	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
459	if (res.result.status != OPTEE_SMC_RETURN_OK) {
460		pr_err("static shm service not available\n");
461		return ERR_PTR(-ENOENT);
462	}
463
464	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
465		pr_err("only normal cached shared memory supported\n");
466		return ERR_PTR(-EINVAL);
467	}
468
469	begin = roundup(res.result.start, PAGE_SIZE);
470	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
471	paddr = begin;
472	size = end - begin;
473
474	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
475		pr_err("too small shared memory area\n");
476		return ERR_PTR(-EINVAL);
477	}
478
479	va = memremap(paddr, size, MEMREMAP_WB);
480	if (!va) {
481		pr_err("shared memory memremap failed\n");
482		return ERR_PTR(-EINVAL);
483	}
484	vaddr = (unsigned long)va;
485
486	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
487					    3 /* 8 bytes aligned */);
488	if (IS_ERR(rc))
489		goto err_memunmap;
490	priv_mgr = rc;
491
492	vaddr += sz;
493	paddr += sz;
494	size -= sz;
495
496	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
497	if (IS_ERR(rc))
498		goto err_free_priv_mgr;
499	dmabuf_mgr = rc;
500
501	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
502	if (IS_ERR(rc))
503		goto err_free_dmabuf_mgr;
504
505	*memremaped_shm = va;
506
507	return rc;
508
509err_free_dmabuf_mgr:
510	tee_shm_pool_mgr_destroy(dmabuf_mgr);
511err_free_priv_mgr:
512	tee_shm_pool_mgr_destroy(priv_mgr);
513err_memunmap:
514	memunmap(va);
515	return rc;
516}
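A worked example of the carve-up performed above, using illustrative numbers only (assuming OPTEE_SHM_NUM_PRIV_PAGES == 1 and 4 KiB pages; no real platform is implied):

/*
 * res.result.start = 0x42000000, res.result.size = 0x200000 (2 MiB)
 * begin = 0x42000000, end = 0x42200000, size = 0x200000
 * sz    = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE = 0x1000
 *
 * priv_mgr   manages [0x42000000, 0x42001000), 8-byte aligned allocations
 * dmabuf_mgr manages [0x42001000, 0x42200000), page aligned allocations
 */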
517
518/* Simple wrapper functions to be able to use a function pointer */
519static void optee_smccc_smc(unsigned long a0, unsigned long a1,
520			    unsigned long a2, unsigned long a3,
521			    unsigned long a4, unsigned long a5,
522			    unsigned long a6, unsigned long a7,
523			    struct arm_smccc_res *res)
524{
525	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
526}
527
528static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
529			    unsigned long a2, unsigned long a3,
530			    unsigned long a4, unsigned long a5,
531			    unsigned long a6, unsigned long a7,
532			    struct arm_smccc_res *res)
533{
534	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
535}
536
537static optee_invoke_fn *get_invoke_func(struct device_node *np)
538{
539	const char *method;
540
541	pr_info("probing for conduit method from DT.\n");
542
543	if (of_property_read_string(np, "method", &method)) {
544		pr_warn("missing \"method\" property\n");
545		return ERR_PTR(-ENXIO);
546	}
547
548	if (!strcmp("hvc", method))
549		return optee_smccc_hvc;
550	else if (!strcmp("smc", method))
551		return optee_smccc_smc;
552
553	pr_warn("invalid \"method\" property: %s\n", method);
554	return ERR_PTR(-EINVAL);
555}
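For reference, the firmware node that get_invoke_func() and the optee_match table further down expect typically looks like the illustrative snippet below, reproduced here as a comment; the "method" property selects the conduit and may be "smc" or "hvc".

/*
 * firmware {
 *         optee {
 *                 compatible = "linaro,optee-tz";
 *                 method = "smc";
 *         };
 * };
 */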
556
557static struct optee *optee_probe(struct device_node *np)
558{
559	optee_invoke_fn *invoke_fn;
560	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
561	struct optee *optee = NULL;
562	void *memremaped_shm = NULL;
563	struct tee_device *teedev;
564	u32 sec_caps;
565	int rc;
566
567	invoke_fn = get_invoke_func(np);
568	if (IS_ERR(invoke_fn))
569		return (void *)invoke_fn;
570
571	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
572		pr_warn("api uid mismatch\n");
573		return ERR_PTR(-EINVAL);
574	}
575
576	optee_msg_get_os_revision(invoke_fn);
577
578	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
579		pr_warn("api revision mismatch\n");
580		return ERR_PTR(-EINVAL);
581	}
582
583	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
584		pr_warn("capabilities mismatch\n");
585		return ERR_PTR(-EINVAL);
586	}
587
588	/*
589	 * Try to use dynamic shared memory if possible
590	 */
591	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
592		pool = optee_config_dyn_shm();
593
594	/*
595	 * If dynamic shared memory is not available or fails, try the static one
596	 */
597	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
598		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
599
600	if (IS_ERR(pool))
601		return (void *)pool;
602
603	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
604	if (!optee) {
605		rc = -ENOMEM;
606		goto err;
607	}
608
609	optee->invoke_fn = invoke_fn;
610	optee->sec_caps = sec_caps;
611
612	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
613	if (IS_ERR(teedev)) {
614		rc = PTR_ERR(teedev);
615		goto err;
616	}
617	optee->teedev = teedev;
618
619	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
620	if (IS_ERR(teedev)) {
621		rc = PTR_ERR(teedev);
622		goto err;
623	}
624	optee->supp_teedev = teedev;
625
626	rc = tee_device_register(optee->teedev);
627	if (rc)
628		goto err;
629
630	rc = tee_device_register(optee->supp_teedev);
631	if (rc)
632		goto err;
633
634	mutex_init(&optee->call_queue.mutex);
635	INIT_LIST_HEAD(&optee->call_queue.waiters);
636	optee_wait_queue_init(&optee->wait_queue);
637	optee_supp_init(&optee->supp);
638	optee->memremaped_shm = memremaped_shm;
639	optee->pool = pool;
640
641	optee_enable_shm_cache(optee);
642
643	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
644		pr_info("dynamic shared memory is enabled\n");
645
646	rc = optee_enumerate_devices();
647	if (rc)
648		goto err;
649
650	pr_info("initialized driver\n");
651	return optee;
652err:
653	if (optee) {
654		/*
655		 * tee_device_unregister() is safe to call even if the
656		 * devices haven't been registered with
657		 * tee_device_register() yet.
658		 */
659		tee_device_unregister(optee->supp_teedev);
660		tee_device_unregister(optee->teedev);
661		kfree(optee);
662	}
663	if (pool)
664		tee_shm_pool_free(pool);
665	if (memremaped_shm)
666		memunmap(memremaped_shm);
667	return ERR_PTR(rc);
668}
669
670static void optee_remove(struct optee *optee)
671{
672	/*
673	 * Ask OP-TEE to free all cached shared memory objects to decrease
674	 * reference counters and also to avoid stale pointers from the secure
675	 * world into the old shared memory range.
676	 */
677	optee_disable_shm_cache(optee);
678
679	/*
680	 * The two devices have to be unregistered before we can free the
681	 * other resources.
682	 */
683	tee_device_unregister(optee->supp_teedev);
684	tee_device_unregister(optee->teedev);
685
686	tee_shm_pool_free(optee->pool);
687	if (optee->memremaped_shm)
688		memunmap(optee->memremaped_shm);
689	optee_wait_queue_exit(&optee->wait_queue);
690	optee_supp_uninit(&optee->supp);
691	mutex_destroy(&optee->call_queue.mutex);
692
693	kfree(optee);
694}
695
696static const struct of_device_id optee_match[] = {
697	{ .compatible = "linaro,optee-tz" },
698	{},
699};
700
701static struct optee *optee_svc;
702
703static int __init optee_driver_init(void)
704{
705	struct device_node *fw_np;
706	struct device_node *np;
707	struct optee *optee;
708
709	/* Node is supposed to be below /firmware */
710	fw_np = of_find_node_by_name(NULL, "firmware");
711	if (!fw_np)
712		return -ENODEV;
713
714	np = of_find_matching_node(fw_np, optee_match);
715	if (!np || !of_device_is_available(np)) {
716		of_node_put(np);
717		return -ENODEV;
718	}
719
720	optee = optee_probe(np);
721	of_node_put(np);
722
723	if (IS_ERR(optee))
724		return PTR_ERR(optee);
725
726	optee_svc = optee;
727
728	return 0;
729}
730module_init(optee_driver_init);
731
732static void __exit optee_driver_exit(void)
733{
734	struct optee *optee = optee_svc;
735
736	optee_svc = NULL;
737	if (optee)
738		optee_remove(optee);
739}
740module_exit(optee_driver_exit);
741
742MODULE_AUTHOR("Linaro");
743MODULE_DESCRIPTION("OP-TEE driver");
744MODULE_SUPPORTED_DEVICE("");
745MODULE_VERSION("1.0");
746MODULE_LICENSE("GPL v2");