   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2015-2021, 2023 Linaro Limited
   4 * Copyright (c) 2016, EPAM Systems
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/arm-smccc.h>
  10#include <linux/cpuhotplug.h>
  11#include <linux/errno.h>
  12#include <linux/firmware.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/irqdomain.h>
  16#include <linux/kernel.h>
  17#include <linux/mm.h>
  18#include <linux/module.h>
  19#include <linux/of.h>
  20#include <linux/of_irq.h>
  21#include <linux/of_platform.h>
  22#include <linux/platform_device.h>
  23#include <linux/sched.h>
  24#include <linux/slab.h>
  25#include <linux/string.h>
  26#include <linux/tee_drv.h>
  27#include <linux/types.h>
  28#include <linux/workqueue.h>
  29#include "optee_private.h"
  30#include "optee_smc.h"
  31#include "optee_rpc_cmd.h"
  32#include <linux/kmemleak.h>
  33#define CREATE_TRACE_POINTS
  34#include "optee_trace.h"
  35
  36/*
  37 * This file implements the SMC ABI used when communicating with the secure
  38 * world OP-TEE OS via raw SMCs.
  39 * This file is divided into the following sections:
  40 * 1. Convert between struct tee_param and struct optee_msg_param
  41 * 2. Low level support functions to register shared memory in secure world
  42 * 3. Dynamic shared memory pool based on alloc_pages()
  43 * 4. Do a normal scheduled call into secure world
  44 * 5. Asynchronous notification
  45 * 6. Driver initialization.
  46 */
  47
  48/*
  49 * A typical OP-TEE private shm allocation is 224 bytes (argument struct
  50 * with 6 parameters, needed for open session). So with an alignment of 512
  51 * we'll waste a bit more than 50%. However, it's only expected that we'll
  52 * have a handful of these structs allocated at a time. Most memory will
  53 * be allocated aligned to the page size. So all in all this should scale
  54 * up and down quite well.
  55 */
  56#define OPTEE_MIN_STATIC_POOL_ALIGN    9 /* 512 bytes aligned */
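/*
 * For illustration: a 224-byte argument struct allocated with this minimum
 * alignment consumes a full 512-byte slot, leaving 288 bytes (roughly 56%)
 * unused, which matches the "a bit more than 50%" estimate above.
 */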
  57
  58/* SMC ABI considers at most a single TEE firmware */
  59static unsigned int pcpu_irq_num;
  60
  61static int optee_cpuhp_enable_pcpu_irq(unsigned int cpu)
  62{
  63	enable_percpu_irq(pcpu_irq_num, IRQ_TYPE_NONE);
  64
  65	return 0;
  66}
  67
  68static int optee_cpuhp_disable_pcpu_irq(unsigned int cpu)
  69{
  70	disable_percpu_irq(pcpu_irq_num);
  71
  72	return 0;
  73}
  74
  75/*
  76 * 1. Convert between struct tee_param and struct optee_msg_param
  77 *
  78 * optee_from_msg_param() and optee_to_msg_param() are the main
  79 * functions.
  80 */
  81
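/*
 * Note that the helpers below carry the parameter direction over with plain
 * offset arithmetic, which works because the INPUT/OUTPUT/INOUT variants are
 * consecutive values in both the TEE_IOCTL_PARAM_ATTR_TYPE_* and the
 * OPTEE_MSG_ATTR_TYPE_* numbering. For example:
 *
 *	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 *		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
 *
 * maps TMEM_INPUT/OUTPUT/INOUT onto MEMREF_INPUT/OUTPUT/INOUT.
 */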
  82static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
  83				  const struct optee_msg_param *mp)
  84{
  85	struct tee_shm *shm;
  86	phys_addr_t pa;
  87	int rc;
  88
  89	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
  90		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
  91	p->u.memref.size = mp->u.tmem.size;
  92	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
  93	if (!shm) {
  94		p->u.memref.shm_offs = 0;
  95		p->u.memref.shm = NULL;
  96		return 0;
  97	}
  98
  99	rc = tee_shm_get_pa(shm, 0, &pa);
 100	if (rc)
 101		return rc;
 102
 103	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
 104	p->u.memref.shm = shm;
 105
 106	return 0;
 107}
 108
 109static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
 110				   const struct optee_msg_param *mp)
 111{
 112	struct tee_shm *shm;
 113
 114	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 115		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
 116	p->u.memref.size = mp->u.rmem.size;
 117	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
 118
 119	if (shm) {
 120		p->u.memref.shm_offs = mp->u.rmem.offs;
 121		p->u.memref.shm = shm;
 122	} else {
 123		p->u.memref.shm_offs = 0;
 124		p->u.memref.shm = NULL;
 125	}
 126}
 127
 128/**
 129 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 130 *			    struct tee_param
 131 * @optee:	main service struct
 132 * @params:	subsystem internal parameter representation
 133 * @num_params:	number of elements in the parameter arrays
 134 * @msg_params:	OPTEE_MSG parameters
 135 * Returns 0 on success or <0 on failure
 136 */
 137static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
 138				size_t num_params,
 139				const struct optee_msg_param *msg_params)
 140{
 141	int rc;
 142	size_t n;
 143
 144	for (n = 0; n < num_params; n++) {
 145		struct tee_param *p = params + n;
 146		const struct optee_msg_param *mp = msg_params + n;
 147		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
 148
 149		switch (attr) {
 150		case OPTEE_MSG_ATTR_TYPE_NONE:
 151			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 152			memset(&p->u, 0, sizeof(p->u));
 153			break;
 154		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
 155		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
 156		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
 157			optee_from_msg_param_value(p, attr, mp);
 158			break;
 159		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
 160		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
 161		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
 162			rc = from_msg_param_tmp_mem(p, attr, mp);
 163			if (rc)
 164				return rc;
 165			break;
 166		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
 167		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
 168		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
 169			from_msg_param_reg_mem(p, attr, mp);
 170			break;
 171
 172		default:
 173			return -EINVAL;
 174		}
 175	}
 176	return 0;
 177}
 178
 179static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
 180				const struct tee_param *p)
 181{
 182	int rc;
 183	phys_addr_t pa;
 184
 185	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
 186		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
 187
 188	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
 189	mp->u.tmem.size = p->u.memref.size;
 190
 191	if (!p->u.memref.shm) {
 192		mp->u.tmem.buf_ptr = 0;
 193		return 0;
 194	}
 195
 196	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
 197	if (rc)
 198		return rc;
 199
 200	mp->u.tmem.buf_ptr = pa;
 201	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
 202		    OPTEE_MSG_ATTR_CACHE_SHIFT;
 203
 204	return 0;
 205}
 206
 207static int to_msg_param_reg_mem(struct optee_msg_param *mp,
 208				const struct tee_param *p)
 209{
 210	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
 211		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
 212
 213	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
 214	mp->u.rmem.size = p->u.memref.size;
 215	mp->u.rmem.offs = p->u.memref.shm_offs;
 216	return 0;
 217}
 218
 219/**
 220 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
 221 * @optee:	main service struct
 222 * @msg_params:	OPTEE_MSG parameters
 223 * @num_params:	number of elements in the parameter arrays
 224 * @params:	subsystem internal parameter representation
 225 * Returns 0 on success or <0 on failure
 226 */
 227static int optee_to_msg_param(struct optee *optee,
 228			      struct optee_msg_param *msg_params,
 229			      size_t num_params, const struct tee_param *params)
 230{
 231	int rc;
 232	size_t n;
 233
 234	for (n = 0; n < num_params; n++) {
 235		const struct tee_param *p = params + n;
 236		struct optee_msg_param *mp = msg_params + n;
 237
 238		switch (p->attr) {
 239		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
 240			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 241			memset(&mp->u, 0, sizeof(mp->u));
 242			break;
 243		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
 244		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
 245		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
 246			optee_to_msg_param_value(mp, p);
 247			break;
 248		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 249		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 250		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
 251			if (tee_shm_is_dynamic(p->u.memref.shm))
 252				rc = to_msg_param_reg_mem(mp, p);
 253			else
 254				rc = to_msg_param_tmp_mem(mp, p);
 255			if (rc)
 256				return rc;
 257			break;
 258		default:
 259			return -EINVAL;
 260		}
 261	}
 262	return 0;
 263}
 264
 265/*
 266 * 2. Low level support functions to register shared memory in secure world
 267 *
 268 * Functions to enable/disable shared memory caching in secure world, that
 269 * is, lazy freeing of previously allocated shared memory. Freeing is
 270 * performed when a request has been completed.
 271 *
 272 * Functions to register and unregister shared memory both for normal
 273 * clients and for tee-supplicant.
 274 */
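/*
 * Both the enable and disable helpers below follow the same retry pattern:
 * issue the SMC and, if secure world reports being busy, wait on the call
 * queue and try again. In the disable case each successful return hands back
 * one cached tee_shm reference (as a register pair) to be freed, until
 * OPTEE_SMC_RETURN_ENOTAVAIL signals that the cache is empty.
 */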
 275
 276/**
 277 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 278 *			      in OP-TEE
 279 * @optee:	main service struct
 280 */
 281static void optee_enable_shm_cache(struct optee *optee)
 282{
 283	struct optee_call_waiter w;
 284
 285	/* We need to retry until secure world isn't busy. */
 286	optee_cq_wait_init(&optee->call_queue, &w, false);
 287	while (true) {
 288		struct arm_smccc_res res;
 289
 290		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
 291				     0, 0, 0, 0, 0, 0, 0, &res);
 292		if (res.a0 == OPTEE_SMC_RETURN_OK)
 293			break;
 294		optee_cq_wait_for_completion(&optee->call_queue, &w);
 295	}
 296	optee_cq_wait_final(&optee->call_queue, &w);
 297}
 298
 299/**
 300 * __optee_disable_shm_cache() - Disables caching of some shared memory
 301 *				 allocation in OP-TEE
 302 * @optee:	main service struct
 303 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 304 *		kernel, are safe to dereference, and should be freed
 305 */
 306static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
 307{
 308	struct optee_call_waiter w;
 309
 310	/* We need to retry until secure world isn't busy. */
 311	optee_cq_wait_init(&optee->call_queue, &w, false);
 312	while (true) {
 313		union {
 314			struct arm_smccc_res smccc;
 315			struct optee_smc_disable_shm_cache_result result;
 316		} res;
 317
 318		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
 319				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
 320		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
 321			break; /* All shm's freed */
 322		if (res.result.status == OPTEE_SMC_RETURN_OK) {
 323			struct tee_shm *shm;
 324
 325			/*
 326			 * Shared memory references that were not mapped by
 327			 * this kernel must be ignored to prevent a crash.
 328			 */
 329			if (!is_mapped)
 330				continue;
 331
 332			shm = reg_pair_to_ptr(res.result.shm_upper32,
 333					      res.result.shm_lower32);
 334			tee_shm_free(shm);
 335		} else {
 336			optee_cq_wait_for_completion(&optee->call_queue, &w);
 337		}
 338	}
 339	optee_cq_wait_final(&optee->call_queue, &w);
 340}
 341
 342/**
 343 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 344 *			       allocations in OP-TEE
 345 * @optee:	main service struct
 346 */
 347static void optee_disable_shm_cache(struct optee *optee)
 348{
 349	return __optee_disable_shm_cache(optee, true);
 350}
 351
 352/**
 353 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 354 *					allocations in OP-TEE which are not
 355 *					currently mapped
 356 * @optee:	main service struct
 357 */
 358static void optee_disable_unmapped_shm_cache(struct optee *optee)
 359{
 360	return __optee_disable_shm_cache(optee, false);
 361}
 362
 363#define PAGELIST_ENTRIES_PER_PAGE				\
 364	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
 365
 366/*
 367 * The final entry in each pagelist page is a pointer to the next
 368 * pagelist page.
 369 */
 370static size_t get_pages_list_size(size_t num_entries)
 371{
 372	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
 373
 374	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
 375}
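/*
 * For example, with the usual 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE each
 * pagelist page holds 4096 / 8 - 1 = 511 page addresses plus one link entry,
 * so covering 1000 pages takes DIV_ROUND_UP(1000, 511) = 2 pagelist pages,
 * i.e. 8 KiB from get_pages_list_size().
 */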
 376
 377static u64 *optee_allocate_pages_list(size_t num_entries)
 378{
 379	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
 380}
 381
 382static void optee_free_pages_list(void *list, size_t num_entries)
 383{
 384	free_pages_exact(list, get_pages_list_size(num_entries));
 385}
 386
 387/**
 388 * optee_fill_pages_list() - write list of user pages to given shared
 389 * buffer.
 390 *
 391 * @dst: page-aligned buffer where list of pages will be stored
 392 * @pages: array of pages that represents shared buffer
 393 * @num_pages: number of entries in @pages
 394 * @page_offset: offset of user buffer from page start
 395 *
 396 * @dst should be big enough to hold the list of user page addresses and
 397 *	links to the next pages of the buffer
 398 */
 399static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
 400				  size_t page_offset)
 401{
 402	int n = 0;
 403	phys_addr_t optee_page;
 404	/*
 405	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
 406	 * for details.
 407	 */
 408	struct {
 409		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
 410		u64 next_page_data;
 411	} *pages_data;
 412
 413	/*
 414	 * Currently OP-TEE uses 4k page size and it does not look
 415	 * like this will change in the future.  On the other hand, there are
 416	 * no known ARM architectures with page size < 4k.
 417	 * Thus the next build assertion looks redundant. But the following
 418	 * code heavily relies on this assumption, so it is better to be
 419	 * safe than sorry.
 420	 */
 421	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
 422
 423	pages_data = (void *)dst;
 424	/*
 425	 * If the Linux page is bigger than 4k, and the user buffer offset is
 426	 * larger than 4k/8k/12k/etc, this will skip the first 4k pages,
 427	 * because they bear no data of value for OP-TEE.
 428	 */
 429	optee_page = page_to_phys(*pages) +
 430		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
 431
 432	while (true) {
 433		pages_data->pages_list[n++] = optee_page;
 434
 435		if (n == PAGELIST_ENTRIES_PER_PAGE) {
 436			pages_data->next_page_data =
 437				virt_to_phys(pages_data + 1);
 438			pages_data++;
 439			n = 0;
 440		}
 441
 442		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
 443		if (!(optee_page & ~PAGE_MASK)) {
 444			if (!--num_pages)
 445				break;
 446			pages++;
 447			optee_page = page_to_phys(*pages);
 448		}
 449	}
 450}
 451
 452static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
 453			      struct page **pages, size_t num_pages,
 454			      unsigned long start)
 455{
 456	struct optee *optee = tee_get_drvdata(ctx->teedev);
 457	struct optee_msg_arg *msg_arg;
 458	struct tee_shm *shm_arg;
 459	u64 *pages_list;
 460	size_t sz;
 461	int rc;
 462
 463	if (!num_pages)
 464		return -EINVAL;
 465
 466	rc = optee_check_mem_type(start, num_pages);
 467	if (rc)
 468		return rc;
 469
 470	pages_list = optee_allocate_pages_list(num_pages);
 471	if (!pages_list)
 472		return -ENOMEM;
 473
 474	/*
 475	 * We're about to register shared memory, so we can't use registered
 476	 * shared memory for this request itself or we'd have a catch-22.
 477	 *
 478	 * So in this case we'll have to do the good old temporary private
 479	 * allocation instead of using optee_get_msg_arg().
 480	 */
 481	sz = optee_msg_arg_size(optee->rpc_param_count);
 482	shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
 483	if (IS_ERR(shm_arg)) {
 484		rc = PTR_ERR(shm_arg);
 485		goto out;
 486	}
 487	msg_arg = tee_shm_get_va(shm_arg, 0);
 488	if (IS_ERR(msg_arg)) {
 489		rc = PTR_ERR(msg_arg);
 490		goto out;
 491	}
 492
 493	optee_fill_pages_list(pages_list, pages, num_pages,
 494			      tee_shm_get_page_offset(shm));
 495
 496	memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
 497	msg_arg->num_params = 1;
 498	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
 499	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
 500				OPTEE_MSG_ATTR_NONCONTIG;
 501	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
 502	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
 503	/*
 504	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr we
 505	 * store the buffer offset from the 4k page, per the OP-TEE ABI.
 506	 */
 507	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
 508	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
 509
 510	if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
 511	    msg_arg->ret != TEEC_SUCCESS)
 512		rc = -EINVAL;
 513
 514	tee_shm_free(shm_arg);
 515out:
 516	optee_free_pages_list(pages_list, num_pages);
 517	return rc;
 518}
 519
 520static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
 521{
 522	struct optee *optee = tee_get_drvdata(ctx->teedev);
 523	struct optee_msg_arg *msg_arg;
 524	struct tee_shm *shm_arg;
 525	int rc = 0;
 526	size_t sz;
 527
 528	/*
 529	 * We're about to unregister shared memory and we may not be able
 530	 * to register shared memory for this request in case we're called
 531	 * from optee_shm_arg_cache_uninit().
 532	 *
 533	 * So in order to keep things simple in this function just as in
 534	 * optee_shm_register() we'll use temporary private allocation
 535	 * instead of using optee_get_msg_arg().
 536	 */
 537	sz = optee_msg_arg_size(optee->rpc_param_count);
 538	shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
 539	if (IS_ERR(shm_arg))
 540		return PTR_ERR(shm_arg);
 541	msg_arg = tee_shm_get_va(shm_arg, 0);
 542	if (IS_ERR(msg_arg)) {
 543		rc = PTR_ERR(msg_arg);
 544		goto out;
 545	}
 546
 547	memset(msg_arg, 0, sz);
 548	msg_arg->num_params = 1;
 549	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
 550	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
 551	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
 552
 553	if (optee->ops->do_call_with_arg(ctx, shm_arg, 0, false) ||
 554	    msg_arg->ret != TEEC_SUCCESS)
 555		rc = -EINVAL;
 556out:
 557	tee_shm_free(shm_arg);
 558	return rc;
 559}
 560
 561static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
 562				   struct page **pages, size_t num_pages,
 563				   unsigned long start)
 564{
 565	/*
 566	 * We don't want to register supplicant memory in OP-TEE.
 567	 * Instead information about it will be passed in RPC code.
 568	 */
 569	return optee_check_mem_type(start, num_pages);
 570}
 571
 572static int optee_shm_unregister_supp(struct tee_context *ctx,
 573				     struct tee_shm *shm)
 574{
 575	return 0;
 576}
 577
 578/*
 579 * 3. Dynamic shared memory pool based on alloc_pages()
 580 *
 581 * Implements an OP-TEE specific shared memory pool which is used
 582 * when dynamic shared memory is supported by secure world.
 583 *
 584 * The main function is optee_shm_pool_alloc_pages().
 585 */
 586
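/*
 * In short: pool_op_alloc() backs each tee_shm with pages from the kernel
 * page allocator and, unless the buffer is private to the driver
 * (TEE_SHM_PRIV), registers it with secure world via optee_shm_register();
 * pool_op_free() unregisters it again before the pages are released.
 */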
 587static int pool_op_alloc(struct tee_shm_pool *pool,
 588			 struct tee_shm *shm, size_t size, size_t align)
 589{
 590	/*
 591	 * Shared memory private to the OP-TEE driver doesn't need
 592	 * to be registered with OP-TEE.
 593	 */
 594	if (shm->flags & TEE_SHM_PRIV)
 595		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
 596
 597	return optee_pool_op_alloc_helper(pool, shm, size, align,
 598					  optee_shm_register);
 599}
 600
 601static void pool_op_free(struct tee_shm_pool *pool,
 602			 struct tee_shm *shm)
 603{
 604	if (!(shm->flags & TEE_SHM_PRIV))
 605		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
 606	else
 607		optee_pool_op_free_helper(pool, shm, NULL);
 608}
 609
 610static void pool_op_destroy_pool(struct tee_shm_pool *pool)
 611{
 612	kfree(pool);
 613}
 614
 615static const struct tee_shm_pool_ops pool_ops = {
 616	.alloc = pool_op_alloc,
 617	.free = pool_op_free,
 618	.destroy_pool = pool_op_destroy_pool,
 619};
 620
 621/**
 622 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 623 *
 624 * This pool is used when OP-TEE supports dynamic SHM. In this case
 625 * command buffers and such are allocated from kernel's own memory.
 626 */
 627static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
 628{
 629	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 630
 631	if (!pool)
 632		return ERR_PTR(-ENOMEM);
 633
 634	pool->ops = &pool_ops;
 635
 636	return pool;
 637}
 638
 639/*
 640 * 4. Do a normal scheduled call into secure world
 641 *
 642 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 643 * call into secure world. During this call secure world may request help
 644 * from normal world using RPCs, Remote Procedure Calls. This includes
 645 * delivery of non-secure interrupts to, for instance, allow rescheduling of
 646 * the current task.
 647 */
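/*
 * Rough call flow, as implemented by optee_smc_do_call_with_arg() below:
 * the argument struct is passed either by physical address
 * (OPTEE_SMC_CALL_WITH_ARG / OPTEE_SMC_CALL_WITH_RPC_ARG) or as a registered
 * shm reference plus offset (OPTEE_SMC_CALL_WITH_REGD_ARG). Secure world then
 * either completes the request, asks the caller to wait for a free thread
 * (OPTEE_SMC_RETURN_ETHREAD_LIMIT), or returns an RPC request which
 * optee_handle_rpc() services before resuming the call with
 * OPTEE_SMC_CALL_RETURN_FROM_RPC.
 */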
 648
 649static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 650					 struct optee_msg_arg *arg)
 651{
 652	struct tee_shm *shm;
 653
 654	arg->ret_origin = TEEC_ORIGIN_COMMS;
 655
 656	if (arg->num_params != 1 ||
 657	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
 658		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 659		return;
 660	}
 661
 662	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
 663	switch (arg->params[0].u.value.a) {
 664	case OPTEE_RPC_SHM_TYPE_APPL:
 665		optee_rpc_cmd_free_suppl(ctx, shm);
 666		break;
 667	case OPTEE_RPC_SHM_TYPE_KERNEL:
 668		tee_shm_free(shm);
 669		break;
 670	default:
 671		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 672	}
 673	arg->ret = TEEC_SUCCESS;
 674}
 675
 676static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 677					  struct optee *optee,
 678					  struct optee_msg_arg *arg,
 679					  struct optee_call_ctx *call_ctx)
 680{
 681	struct tee_shm *shm;
 682	size_t sz;
 683	size_t n;
 684	struct page **pages;
 685	size_t page_count;
 686
 687	arg->ret_origin = TEEC_ORIGIN_COMMS;
 688
 689	if (!arg->num_params ||
 690	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
 691		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 692		return;
 693	}
 694
 695	for (n = 1; n < arg->num_params; n++) {
 696		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
 697			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 698			return;
 699		}
 700	}
 701
 702	sz = arg->params[0].u.value.b;
 703	switch (arg->params[0].u.value.a) {
 704	case OPTEE_RPC_SHM_TYPE_APPL:
 705		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
 706		break;
 707	case OPTEE_RPC_SHM_TYPE_KERNEL:
 708		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
 709		break;
 710	default:
 711		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 712		return;
 713	}
 714
 715	if (IS_ERR(shm)) {
 716		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
 717		return;
 718	}
 719
 720	/*
 721	 * If there are pages it's dynamically allocated shared memory (not
 722	 * from the reserved shared memory pool) and needs to be
 723	 * registered.
 724	 */
 725	pages = tee_shm_get_pages(shm, &page_count);
 726	if (pages) {
 727		u64 *pages_list;
 728
 729		pages_list = optee_allocate_pages_list(page_count);
 730		if (!pages_list) {
 731			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
 732			goto bad;
 733		}
 734
 735		call_ctx->pages_list = pages_list;
 736		call_ctx->num_entries = page_count;
 737
 738		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
 739				      OPTEE_MSG_ATTR_NONCONTIG;
 740		/*
 741		 * In the least significant bits of u.tmem.buf_ptr we store the
 742		 * buffer offset from the 4k page, as described in the OP-TEE ABI.
 743		 */
 744		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
 745			(tee_shm_get_page_offset(shm) &
 746			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
 747
 748		optee_fill_pages_list(pages_list, pages, page_count,
 749				      tee_shm_get_page_offset(shm));
 750	} else {
 751		phys_addr_t pa;
 752
 753		if (tee_shm_get_pa(shm, 0, &pa)) {
 754			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 755			goto bad;
 756		}
 757
 758		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
 759		arg->params[0].u.tmem.buf_ptr = pa;
 760	}
 761	arg->params[0].u.tmem.size = tee_shm_get_size(shm);
 762	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
 763
 764	arg->ret = TEEC_SUCCESS;
 765	return;
 766bad:
 767	tee_shm_free(shm);
 768}
 769
 770static void free_pages_list(struct optee_call_ctx *call_ctx)
 771{
 772	if (call_ctx->pages_list) {
 773		optee_free_pages_list(call_ctx->pages_list,
 774				      call_ctx->num_entries);
 775		call_ctx->pages_list = NULL;
 776		call_ctx->num_entries = 0;
 777	}
 778}
 779
 780static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
 781{
 782	free_pages_list(call_ctx);
 783}
 784
 785static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 786				struct optee_msg_arg *arg,
 787				struct optee_call_ctx *call_ctx)
 788{
 789
 790	switch (arg->cmd) {
 791	case OPTEE_RPC_CMD_SHM_ALLOC:
 792		free_pages_list(call_ctx);
 793		handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
 794		break;
 795	case OPTEE_RPC_CMD_SHM_FREE:
 796		handle_rpc_func_cmd_shm_free(ctx, arg);
 797		break;
 798	default:
 799		optee_rpc_cmd(ctx, optee, arg);
 800	}
 801}
 802
 803/**
 804 * optee_handle_rpc() - handle RPC from secure world
 805 * @ctx:	context doing the RPC
 806 * @rpc_arg:	pointer to RPC arguments if any, or NULL if none
 807 * @param:	value of registers for the RPC
 808 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 809 *
 810 * Result of RPC is written back into @param.
 811 */
 812static void optee_handle_rpc(struct tee_context *ctx,
 813			     struct optee_msg_arg *rpc_arg,
 814			     struct optee_rpc_param *param,
 815			     struct optee_call_ctx *call_ctx)
 816{
 817	struct tee_device *teedev = ctx->teedev;
 818	struct optee *optee = tee_get_drvdata(teedev);
 819	struct optee_msg_arg *arg;
 820	struct tee_shm *shm;
 821	phys_addr_t pa;
 822
 823	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
 824	case OPTEE_SMC_RPC_FUNC_ALLOC:
 825		shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
 826		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
 827			reg_pair_from_64(&param->a1, &param->a2, pa);
 828			reg_pair_from_64(&param->a4, &param->a5,
 829					 (unsigned long)shm);
 830		} else {
 831			param->a1 = 0;
 832			param->a2 = 0;
 833			param->a4 = 0;
 834			param->a5 = 0;
 835		}
 836		kmemleak_not_leak(shm);
 837		break;
 838	case OPTEE_SMC_RPC_FUNC_FREE:
 839		shm = reg_pair_to_ptr(param->a1, param->a2);
 840		tee_shm_free(shm);
 841		break;
 842	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
 843		/*
 844		 * A foreign interrupt was raised while secure world was
 845		 * executing. Since such interrupts are handled in Linux, a dummy
 846		 * RPC is performed to let Linux take the interrupt through the
 847		 * normal vector.
 848		 */
 849		break;
 850	case OPTEE_SMC_RPC_FUNC_CMD:
 851		if (rpc_arg) {
 852			arg = rpc_arg;
 853		} else {
 854			shm = reg_pair_to_ptr(param->a1, param->a2);
 855			arg = tee_shm_get_va(shm, 0);
 856			if (IS_ERR(arg)) {
 857				pr_err("%s: tee_shm_get_va %p failed\n",
 858				       __func__, shm);
 859				break;
 860			}
 861		}
 862
 863		handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
 864		break;
 865	default:
 866		pr_warn("Unknown RPC func 0x%x\n",
 867			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
 868		break;
 869	}
 870
 871	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
 872}
 873
 874/**
 875 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 876 * @ctx:	calling context
 877 * @shm:	shared memory holding the message to pass to secure world
 878 * @offs:	offset of the message in @shm
 879 * @system_thread: true if caller requests TEE system thread support
 880 *
 881 * Does an SMC to OP-TEE in secure world and handles any resulting
 882 * Remote Procedure Calls (RPCs) from OP-TEE.
 883 *
 884 * Returns the return code from secure world, 0 is OK
 885 */
 886static int optee_smc_do_call_with_arg(struct tee_context *ctx,
 887				      struct tee_shm *shm, u_int offs,
 888				      bool system_thread)
 889{
 890	struct optee *optee = tee_get_drvdata(ctx->teedev);
 891	struct optee_call_waiter w;
 892	struct optee_rpc_param param = { };
 893	struct optee_call_ctx call_ctx = { };
 894	struct optee_msg_arg *rpc_arg = NULL;
 895	int rc;
 896
 897	if (optee->rpc_param_count) {
 898		struct optee_msg_arg *arg;
 899		unsigned int rpc_arg_offs;
 900
 901		arg = tee_shm_get_va(shm, offs);
 902		if (IS_ERR(arg))
 903			return PTR_ERR(arg);
 904
 905		rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
 906		rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
 907		if (IS_ERR(rpc_arg))
 908			return PTR_ERR(rpc_arg);
 909	}
 910
 911	if  (rpc_arg && tee_shm_is_dynamic(shm)) {
 912		param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
 913		reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
 914		param.a3 = offs;
 915	} else {
 916		phys_addr_t parg;
 917
 918		rc = tee_shm_get_pa(shm, offs, &parg);
 919		if (rc)
 920			return rc;
 921
 922		if (rpc_arg)
 923			param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
 924		else
 925			param.a0 = OPTEE_SMC_CALL_WITH_ARG;
 926		reg_pair_from_64(&param.a1, &param.a2, parg);
 927	}
 928	/* Initialize waiter */
 929	optee_cq_wait_init(&optee->call_queue, &w, system_thread);
 930	while (true) {
 931		struct arm_smccc_res res;
 932
 933		trace_optee_invoke_fn_begin(&param);
 934		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
 935				     param.a4, param.a5, param.a6, param.a7,
 936				     &res);
 937		trace_optee_invoke_fn_end(&param, &res);
 938
 939		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
 940			/*
 941			 * Out of threads in secure world, wait for a thread
 942			 * to become available.
 943			 */
 944			optee_cq_wait_for_completion(&optee->call_queue, &w);
 945		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
 946			cond_resched();
 947			param.a0 = res.a0;
 948			param.a1 = res.a1;
 949			param.a2 = res.a2;
 950			param.a3 = res.a3;
 951			optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
 952		} else {
 953			rc = res.a0;
 954			break;
 955		}
 956	}
 957
 958	optee_rpc_finalize_call(&call_ctx);
 959	/*
 960	 * We're done with our thread in secure world. If there are any
 961	 * thread waiters, wake one up.
 962	 */
 963	optee_cq_wait_final(&optee->call_queue, &w);
 964
 965	return rc;
 966}
 967
 968/*
 969 * 5. Asynchronous notification
 970 */
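/*
 * Secure world signals asynchronous notifications through a non-secure
 * interrupt. The handler below polls OPTEE_SMC_GET_ASYNC_NOTIF_VALUE until
 * no more values are pending; the special DO_BOTTOM_HALF value defers
 * further processing to threaded (or workqueue) context via
 * optee_do_bottom_half().
 */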
 971
 972static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
 973				 bool *value_pending)
 974{
 975	struct arm_smccc_res res;
 976
 977	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
 978
 979	if (res.a0) {
 980		*value_valid = false;
 981		return 0;
 982	}
 983	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
 984	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
 985	return res.a1;
 986}
 987
 988static irqreturn_t irq_handler(struct optee *optee)
 989{
 990	bool do_bottom_half = false;
 991	bool value_valid;
 992	bool value_pending;
 993	u32 value;
 994
 995	do {
 996		value = get_async_notif_value(optee->smc.invoke_fn,
 997					      &value_valid, &value_pending);
 998		if (!value_valid)
 999			break;
1000
1001		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
1002			do_bottom_half = true;
1003		else
1004			optee_notif_send(optee, value);
1005	} while (value_pending);
1006
1007	if (do_bottom_half)
1008		return IRQ_WAKE_THREAD;
1009	return IRQ_HANDLED;
1010}
1011
1012static irqreturn_t notif_irq_handler(int irq, void *dev_id)
1013{
1014	struct optee *optee = dev_id;
1015
1016	return irq_handler(optee);
1017}
1018
1019static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
1020{
1021	struct optee *optee = dev_id;
1022
1023	optee_do_bottom_half(optee->ctx);
1024
1025	return IRQ_HANDLED;
1026}
1027
1028static int init_irq(struct optee *optee, u_int irq)
1029{
1030	int rc;
1031
1032	rc = request_threaded_irq(irq, notif_irq_handler,
1033				  notif_irq_thread_fn,
1034				  0, "optee_notification", optee);
1035	if (rc)
1036		return rc;
1037
1038	optee->smc.notif_irq = irq;
1039
1040	return 0;
1041}
1042
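/*
 * Variant of the notification handling used when the notification interrupt
 * is a per-CPU interrupt. The bottom half is deferred to the notif_pcpu_wq
 * workqueue here, presumably because per-CPU interrupts cannot be requested
 * with a threaded handler, and the CPU hotplug callbacks near the top of
 * this file enable/disable the interrupt as CPUs come and go.
 */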
1043static irqreturn_t notif_pcpu_irq_handler(int irq, void *dev_id)
1044{
1045	struct optee_pcpu *pcpu = dev_id;
1046	struct optee *optee = pcpu->optee;
1047
1048	if (irq_handler(optee) == IRQ_WAKE_THREAD)
1049		queue_work(optee->smc.notif_pcpu_wq,
1050			   &optee->smc.notif_pcpu_work);
1051
1052	return IRQ_HANDLED;
1053}
1054
1055static void notif_pcpu_irq_work_fn(struct work_struct *work)
1056{
1057	struct optee_smc *optee_smc = container_of(work, struct optee_smc,
1058						   notif_pcpu_work);
1059	struct optee *optee = container_of(optee_smc, struct optee, smc);
1060
1061	optee_do_bottom_half(optee->ctx);
1062}
1063
1064static int init_pcpu_irq(struct optee *optee, u_int irq)
1065{
1066	struct optee_pcpu __percpu *optee_pcpu;
1067	int cpu, rc;
1068
1069	optee_pcpu = alloc_percpu(struct optee_pcpu);
1070	if (!optee_pcpu)
1071		return -ENOMEM;
1072
1073	for_each_present_cpu(cpu)
1074		per_cpu_ptr(optee_pcpu, cpu)->optee = optee;
1075
1076	rc = request_percpu_irq(irq, notif_pcpu_irq_handler,
1077				"optee_pcpu_notification", optee_pcpu);
1078	if (rc)
1079		goto err_free_pcpu;
1080
1081	INIT_WORK(&optee->smc.notif_pcpu_work, notif_pcpu_irq_work_fn);
1082	optee->smc.notif_pcpu_wq = create_workqueue("optee_pcpu_notification");
1083	if (!optee->smc.notif_pcpu_wq) {
1084		rc = -EINVAL;
1085		goto err_free_pcpu_irq;
1086	}
1087
1088	optee->smc.optee_pcpu = optee_pcpu;
1089	optee->smc.notif_irq = irq;
1090
1091	pcpu_irq_num = irq;
1092	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee/pcpu-notif:starting",
1093			       optee_cpuhp_enable_pcpu_irq,
1094			       optee_cpuhp_disable_pcpu_irq);
1095	if (!rc)
1096		rc = -EINVAL;
1097	if (rc < 0)
1098		goto err_free_pcpu_irq;
1099
1100	optee->smc.notif_cpuhp_state = rc;
1101
1102	return 0;
1103
1104err_free_pcpu_irq:
1105	free_percpu_irq(irq, optee_pcpu);
1106err_free_pcpu:
1107	free_percpu(optee_pcpu);
1108
1109	return rc;
1110}
1111
1112static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
1113{
1114	if (irq_is_percpu_devid(irq))
1115		return init_pcpu_irq(optee, irq);
1116	else
1117		return init_irq(optee, irq);
1118}
1119
1120static void uninit_pcpu_irq(struct optee *optee)
1121{
1122	cpuhp_remove_state(optee->smc.notif_cpuhp_state);
1123
1124	destroy_workqueue(optee->smc.notif_pcpu_wq);
1125
1126	free_percpu_irq(optee->smc.notif_irq, optee->smc.optee_pcpu);
1127	free_percpu(optee->smc.optee_pcpu);
1128}
1129
1130static void optee_smc_notif_uninit_irq(struct optee *optee)
1131{
1132	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
1133		optee_stop_async_notif(optee->ctx);
1134		if (optee->smc.notif_irq) {
1135			if (irq_is_percpu_devid(optee->smc.notif_irq))
1136				uninit_pcpu_irq(optee);
1137			else
1138				free_irq(optee->smc.notif_irq, optee);
1139
1140			irq_dispose_mapping(optee->smc.notif_irq);
1141		}
1142	}
1143}
1144
1145/*
1146 * 6. Driver initialization
1147 *
1148 * During driver initialization the secure world is probed to find out which
1149 * features it supports so the driver can be initialized with a matching
1150 * configuration. This involves, for instance, support for dynamic shared
1151 * memory instead of a static memory carveout.
1152 */
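/*
 * In outline, optee_probe() below discovers the SMC/HVC conduit, verifies the
 * OP-TEE API UID and revision, exchanges capabilities, picks a dynamic or
 * static (reserved) shared memory pool accordingly, registers the client and
 * supplicant TEE devices, sets up notifications, and finally enumerates
 * devices exposed by secure world.
 */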
1153
1154static void optee_get_version(struct tee_device *teedev,
1155			      struct tee_ioctl_version_data *vers)
1156{
1157	struct tee_ioctl_version_data v = {
1158		.impl_id = TEE_IMPL_ID_OPTEE,
1159		.impl_caps = TEE_OPTEE_CAP_TZ,
1160		.gen_caps = TEE_GEN_CAP_GP,
1161	};
1162	struct optee *optee = tee_get_drvdata(teedev);
1163
1164	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1165		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
1166	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
1167		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
1168	*vers = v;
1169}
1170
1171static int optee_smc_open(struct tee_context *ctx)
1172{
1173	struct optee *optee = tee_get_drvdata(ctx->teedev);
1174	u32 sec_caps = optee->smc.sec_caps;
1175
1176	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
1177}
1178
1179static const struct tee_driver_ops optee_clnt_ops = {
1180	.get_version = optee_get_version,
1181	.open = optee_smc_open,
1182	.release = optee_release,
1183	.open_session = optee_open_session,
1184	.close_session = optee_close_session,
1185	.system_session = optee_system_session,
1186	.invoke_func = optee_invoke_func,
1187	.cancel_req = optee_cancel_req,
1188	.shm_register = optee_shm_register,
1189	.shm_unregister = optee_shm_unregister,
1190};
1191
1192static const struct tee_desc optee_clnt_desc = {
1193	.name = DRIVER_NAME "-clnt",
1194	.ops = &optee_clnt_ops,
1195	.owner = THIS_MODULE,
1196};
1197
1198static const struct tee_driver_ops optee_supp_ops = {
1199	.get_version = optee_get_version,
1200	.open = optee_smc_open,
1201	.release = optee_release_supp,
1202	.supp_recv = optee_supp_recv,
1203	.supp_send = optee_supp_send,
1204	.shm_register = optee_shm_register_supp,
1205	.shm_unregister = optee_shm_unregister_supp,
1206};
1207
1208static const struct tee_desc optee_supp_desc = {
1209	.name = DRIVER_NAME "-supp",
1210	.ops = &optee_supp_ops,
1211	.owner = THIS_MODULE,
1212	.flags = TEE_DESC_PRIVILEGED,
1213};
1214
1215static const struct optee_ops optee_ops = {
1216	.do_call_with_arg = optee_smc_do_call_with_arg,
1217	.to_msg_param = optee_to_msg_param,
1218	.from_msg_param = optee_from_msg_param,
1219};
1220
1221static int enable_async_notif(optee_invoke_fn *invoke_fn)
1222{
1223	struct arm_smccc_res res;
1224
1225	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
1226
1227	if (res.a0)
1228		return -EINVAL;
1229	return 0;
1230}
1231
1232static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
1233{
1234	struct arm_smccc_res res;
1235
1236	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
1237
1238	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
1239	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
1240		return true;
1241	return false;
1242}
1243
1244#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
1245static bool optee_msg_api_uid_is_optee_image_load(optee_invoke_fn *invoke_fn)
1246{
1247	struct arm_smccc_res res;
1248
1249	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
1250
1251	if (res.a0 == OPTEE_MSG_IMAGE_LOAD_UID_0 &&
1252	    res.a1 == OPTEE_MSG_IMAGE_LOAD_UID_1 &&
1253	    res.a2 == OPTEE_MSG_IMAGE_LOAD_UID_2 &&
1254	    res.a3 == OPTEE_MSG_IMAGE_LOAD_UID_3)
1255		return true;
1256	return false;
1257}
1258#endif
1259
1260static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
1261{
1262	union {
1263		struct arm_smccc_res smccc;
1264		struct optee_smc_call_get_os_revision_result result;
1265	} res = {
1266		.result = {
1267			.build_id = 0
1268		}
1269	};
1270
1271	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
1272		  &res.smccc);
1273
1274	if (res.result.build_id)
1275		pr_info("revision %lu.%lu (%08lx)", res.result.major,
1276			res.result.minor, res.result.build_id);
1277	else
1278		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
1279}
1280
1281static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
1282{
1283	union {
1284		struct arm_smccc_res smccc;
1285		struct optee_smc_calls_revision_result result;
1286	} res;
1287
1288	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1289
1290	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
1291	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
1292		return true;
1293	return false;
1294}
1295
1296static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
1297					    u32 *sec_caps, u32 *max_notif_value,
1298					    unsigned int *rpc_param_count)
1299{
1300	union {
1301		struct arm_smccc_res smccc;
1302		struct optee_smc_exchange_capabilities_result result;
1303	} res;
1304	u32 a1 = 0;
1305
1306	/*
1307	 * TODO This isn't enough to tell if it's a UP system (from the kernel's
1308	 * point of view) or not; is_smp() returns the information
1309	 * needed, but can't be called directly from here.
1310	 */
1311	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
1312		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
1313
1314	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
1315		  &res.smccc);
1316
1317	if (res.result.status != OPTEE_SMC_RETURN_OK)
1318		return false;
1319
1320	*sec_caps = res.result.capabilities;
1321	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
1322		*max_notif_value = res.result.max_notif_value;
1323	else
1324		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
1325	if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
1326		*rpc_param_count = (u8)res.result.data;
1327	else
1328		*rpc_param_count = 0;
1329
1330	return true;
1331}
1332
1333static unsigned int optee_msg_get_thread_count(optee_invoke_fn *invoke_fn)
1334{
1335	struct arm_smccc_res res;
1336
1337	invoke_fn(OPTEE_SMC_GET_THREAD_COUNT, 0, 0, 0, 0, 0, 0, 0, &res);
1338	if (res.a0)
1339		return 0;
1340	return res.a1;
1341}
1342
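/*
 * Probe the location of the static (reserved) shared memory carveout with
 * OPTEE_SMC_GET_SHM_CONFIG, map it write-back cacheable with memremap() and
 * wrap it in a reserved-memory pool using the 512-byte minimum alignment
 * defined above.
 */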
1343static struct tee_shm_pool *
1344optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
1345{
1346	union {
1347		struct arm_smccc_res smccc;
1348		struct optee_smc_get_shm_config_result result;
1349	} res;
1350	unsigned long vaddr;
1351	phys_addr_t paddr;
1352	size_t size;
1353	phys_addr_t begin;
1354	phys_addr_t end;
1355	void *va;
1356	void *rc;
1357
1358	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
1359	if (res.result.status != OPTEE_SMC_RETURN_OK) {
1360		pr_err("static shm service not available\n");
1361		return ERR_PTR(-ENOENT);
1362	}
1363
1364	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
1365		pr_err("only normal cached shared memory supported\n");
1366		return ERR_PTR(-EINVAL);
1367	}
1368
1369	begin = roundup(res.result.start, PAGE_SIZE);
1370	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
1371	paddr = begin;
1372	size = end - begin;
1373
1374	va = memremap(paddr, size, MEMREMAP_WB);
1375	if (!va) {
1376		pr_err("shared memory ioremap failed\n");
1377		return ERR_PTR(-EINVAL);
1378	}
1379	vaddr = (unsigned long)va;
1380
1381	rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
1382					OPTEE_MIN_STATIC_POOL_ALIGN);
1383	if (IS_ERR(rc))
1384		memunmap(va);
1385	else
1386		*memremaped_shm = va;
1387
1388	return rc;
1389}
1390
1391/* Simple wrapper functions to be able to use a function pointer */
1392static void optee_smccc_smc(unsigned long a0, unsigned long a1,
1393			    unsigned long a2, unsigned long a3,
1394			    unsigned long a4, unsigned long a5,
1395			    unsigned long a6, unsigned long a7,
1396			    struct arm_smccc_res *res)
1397{
1398	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1399}
1400
1401static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
1402			    unsigned long a2, unsigned long a3,
1403			    unsigned long a4, unsigned long a5,
1404			    unsigned long a6, unsigned long a7,
1405			    struct arm_smccc_res *res)
1406{
1407	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
1408}
1409
1410static optee_invoke_fn *get_invoke_func(struct device *dev)
1411{
1412	const char *method;
1413
1414	pr_info("probing for conduit method.\n");
1415
1416	if (device_property_read_string(dev, "method", &method)) {
1417		pr_warn("missing \"method\" property\n");
1418		return ERR_PTR(-ENXIO);
1419	}
1420
1421	if (!strcmp("hvc", method))
1422		return optee_smccc_hvc;
1423	else if (!strcmp("smc", method))
1424		return optee_smccc_smc;
1425
1426	pr_warn("invalid \"method\" property: %s\n", method);
1427	return ERR_PTR(-EINVAL);
1428}
1429
1430/* optee_smc_remove - Device Removal Routine
1431 * @pdev: platform device information struct
1432 *
1433 * optee_smc_remove is called by the platform subsystem to alert the driver
1434 * that it should release the device
1435 */
1436static int optee_smc_remove(struct platform_device *pdev)
1437{
1438	struct optee *optee = platform_get_drvdata(pdev);
1439
1440	/*
1441	 * Ask OP-TEE to free all cached shared memory objects to decrease
1442	 * reference counters and also avoid wild pointers in secure world
1443	 * into the old shared memory range.
1444	 */
1445	if (!optee->rpc_param_count)
1446		optee_disable_shm_cache(optee);
1447
1448	optee_smc_notif_uninit_irq(optee);
1449
1450	optee_remove_common(optee);
1451
1452	if (optee->smc.memremaped_shm)
1453		memunmap(optee->smc.memremaped_shm);
1454
1455	kfree(optee);
1456
1457	return 0;
1458}
1459
1460/* optee_shutdown - Device Shutdown Routine
1461 * @pdev: platform device information struct
1462 *
1463 * optee_shutdown is called by the platform subsystem to alert
1464 * the driver that a shutdown, reboot, or kexec is happening and
1465 * the device must be disabled.
1466 */
1467static void optee_shutdown(struct platform_device *pdev)
1468{
1469	struct optee *optee = platform_get_drvdata(pdev);
1470
1471	if (!optee->rpc_param_count)
1472		optee_disable_shm_cache(optee);
1473}
1474
1475#ifdef CONFIG_OPTEE_INSECURE_LOAD_IMAGE
1476
1477#define OPTEE_FW_IMAGE "optee/tee.bin"
1478
1479static optee_invoke_fn *cpuhp_invoke_fn;
1480
1481static int optee_cpuhp_probe(unsigned int cpu)
1482{
1483	/*
1484	 * Invoking a call on a CPU will cause OP-TEE to perform the required
1485	 * setup for that CPU. Just invoke the call to get the UID since that
1486	 * has no side effects.
1487	 */
1488	if (optee_msg_api_uid_is_optee_api(cpuhp_invoke_fn))
1489		return 0;
1490	else
1491		return -EINVAL;
1492}
1493
1494static int optee_load_fw(struct platform_device *pdev,
1495			 optee_invoke_fn *invoke_fn)
1496{
1497	const struct firmware *fw = NULL;
1498	struct arm_smccc_res res;
1499	phys_addr_t data_pa;
1500	u8 *data_buf = NULL;
1501	u64 data_size;
1502	u32 data_pa_high, data_pa_low;
1503	u32 data_size_high, data_size_low;
1504	int rc;
1505	int hp_state;
1506
1507	if (!optee_msg_api_uid_is_optee_image_load(invoke_fn))
1508		return 0;
1509
1510	rc = request_firmware(&fw, OPTEE_FW_IMAGE, &pdev->dev);
1511	if (rc) {
1512		/*
1513		 * The firmware in the rootfs will not be accessible until we
1514		 * are in the SYSTEM_RUNNING state, so return EPROBE_DEFER until
1515		 * that point.
1516		 */
1517		if (system_state < SYSTEM_RUNNING)
1518			return -EPROBE_DEFER;
1519		goto fw_err;
1520	}
1521
1522	data_size = fw->size;
1523	/*
1524	 * This uses the GFP_DMA flag to ensure the memory is allocated in the
1525	 * 32-bit address space since TF-A cannot map memory beyond 32 bits.
1526	 */
1527	data_buf = kmemdup(fw->data, fw->size, GFP_KERNEL | GFP_DMA);
1528	if (!data_buf) {
1529		rc = -ENOMEM;
1530		goto fw_err;
1531	}
1532	data_pa = virt_to_phys(data_buf);
1533	reg_pair_from_64(&data_pa_high, &data_pa_low, data_pa);
1534	reg_pair_from_64(&data_size_high, &data_size_low, data_size);
1535	goto fw_load;
1536
1537fw_err:
1538	pr_warn("image loading failed\n");
1539	data_pa_high = 0;
1540	data_pa_low = 0;
1541	data_size_high = 0;
1542	data_size_low = 0;
1543
1544fw_load:
1545	/*
1546	 * Always invoke the SMC, even if loading the image fails, to indicate
1547	 * to EL3 that we have passed the point where it should allow invoking
1548	 * this SMC.
1549	 */
1550	pr_warn("OP-TEE image loaded from kernel, this can be insecure");
1551	invoke_fn(OPTEE_SMC_CALL_LOAD_IMAGE, data_size_high, data_size_low,
1552		  data_pa_high, data_pa_low, 0, 0, 0, &res);
1553	if (!rc)
1554		rc = res.a0;
1555	if (fw)
1556		release_firmware(fw);
1557	kfree(data_buf);
1558
1559	if (!rc) {
1560		/*
1561		 * We need to initialize OP-TEE on all other running cores as
1562		 * well. Any cores that aren't running yet will get initialized
1563		 * when they are brought up by the power management functions in
1564		 * TF-A which are registered by the OP-TEE SPD. Due to that we
1565		 * can un-register the callback right after registering it.
1566		 */
1567		cpuhp_invoke_fn = invoke_fn;
1568		hp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "optee:probe",
1569					     optee_cpuhp_probe, NULL);
1570		if (hp_state < 0) {
1571			pr_warn("Failed with CPU hotplug setup for OP-TEE");
1572			return -EINVAL;
1573		}
1574		cpuhp_remove_state(hp_state);
1575		cpuhp_invoke_fn = NULL;
1576	}
1577
1578	return rc;
1579}
1580#else
1581static inline int optee_load_fw(struct platform_device *pdev,
1582				optee_invoke_fn *invoke_fn)
1583{
1584	return 0;
1585}
1586#endif
1587
1588static int optee_probe(struct platform_device *pdev)
1589{
1590	optee_invoke_fn *invoke_fn;
1591	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
1592	struct optee *optee = NULL;
1593	void *memremaped_shm = NULL;
1594	unsigned int rpc_param_count;
1595	unsigned int thread_count;
1596	struct tee_device *teedev;
1597	struct tee_context *ctx;
1598	u32 max_notif_value;
1599	u32 arg_cache_flags;
1600	u32 sec_caps;
1601	int rc;
1602
1603	invoke_fn = get_invoke_func(&pdev->dev);
1604	if (IS_ERR(invoke_fn))
1605		return PTR_ERR(invoke_fn);
1606
1607	rc = optee_load_fw(pdev, invoke_fn);
1608	if (rc)
1609		return rc;
1610
1611	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
1612		pr_warn("api uid mismatch\n");
1613		return -EINVAL;
1614	}
1615
1616	optee_msg_get_os_revision(invoke_fn);
1617
1618	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
1619		pr_warn("api revision mismatch\n");
1620		return -EINVAL;
1621	}
1622
1623	thread_count = optee_msg_get_thread_count(invoke_fn);
1624	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
1625					     &max_notif_value,
1626					     &rpc_param_count)) {
1627		pr_warn("capabilities mismatch\n");
1628		return -EINVAL;
1629	}
1630
1631	/*
1632	 * Try to use dynamic shared memory if possible
1633	 */
1634	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
1635		/*
1636		 * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
1637		 * optee_get_msg_arg() to pre-register (by having
1638		 * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
1639		 * an argument struct.
1640		 *
1641		 * With the page pre-registered we can use a non-zero
1642		 * offset for the argument struct; this is indicated with
1643		 * OPTEE_SHM_ARG_SHARED.
1644		 *
1645		 * This means that optee_smc_do_call_with_arg() will use
1646		 * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
1647		 */
1648		if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
1649			arg_cache_flags = OPTEE_SHM_ARG_SHARED;
1650		else
1651			arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
1652
1653		pool = optee_shm_pool_alloc_pages();
1654	}
1655
1656	/*
1657	 * If dynamic shared memory is not available or failed, try the static one
1658	 */
1659	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
1660		/*
1661		 * The static memory pool can use non-zero page offsets so
1662		 * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
1663		 *
1664		 * optee_get_msg_arg() should not pre-register the
1665		 * allocated page used to pass an argument struct, this is
1666		 * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
1667		 *
1668		 * This means that optee_smc_do_call_with_arg() will use
1669		 * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
1670		 * OPTEE_SMC_CALL_WITH_RPC_ARG.
1671		 */
1672		arg_cache_flags = OPTEE_SHM_ARG_SHARED |
1673				  OPTEE_SHM_ARG_ALLOC_PRIV;
1674		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
1675	}
1676
1677	if (IS_ERR(pool))
1678		return PTR_ERR(pool);
1679
1680	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
1681	if (!optee) {
1682		rc = -ENOMEM;
1683		goto err_free_pool;
1684	}
1685
1686	optee->ops = &optee_ops;
1687	optee->smc.invoke_fn = invoke_fn;
1688	optee->smc.sec_caps = sec_caps;
1689	optee->rpc_param_count = rpc_param_count;
1690
1691	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
1692	if (IS_ERR(teedev)) {
1693		rc = PTR_ERR(teedev);
1694		goto err_free_optee;
1695	}
1696	optee->teedev = teedev;
1697
1698	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
1699	if (IS_ERR(teedev)) {
1700		rc = PTR_ERR(teedev);
1701		goto err_unreg_teedev;
1702	}
1703	optee->supp_teedev = teedev;
1704
1705	rc = tee_device_register(optee->teedev);
1706	if (rc)
1707		goto err_unreg_supp_teedev;
1708
1709	rc = tee_device_register(optee->supp_teedev);
1710	if (rc)
1711		goto err_unreg_supp_teedev;
1712
1713	optee_cq_init(&optee->call_queue, thread_count);
1714	optee_supp_init(&optee->supp);
1715	optee->smc.memremaped_shm = memremaped_shm;
1716	optee->pool = pool;
1717	optee_shm_arg_cache_init(optee, arg_cache_flags);
1718
1719	platform_set_drvdata(pdev, optee);
1720	ctx = teedev_open(optee->teedev);
1721	if (IS_ERR(ctx)) {
1722		rc = PTR_ERR(ctx);
1723		goto err_supp_uninit;
1724	}
1725	optee->ctx = ctx;
1726	rc = optee_notif_init(optee, max_notif_value);
1727	if (rc)
1728		goto err_close_ctx;
1729
1730	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
1731		unsigned int irq;
1732
1733		rc = platform_get_irq(pdev, 0);
1734		if (rc < 0) {
1735			pr_err("platform_get_irq: ret %d\n", rc);
1736			goto err_notif_uninit;
1737		}
1738		irq = rc;
1739
1740		rc = optee_smc_notif_init_irq(optee, irq);
1741		if (rc) {
1742			irq_dispose_mapping(irq);
1743			goto err_notif_uninit;
1744		}
1745		enable_async_notif(optee->smc.invoke_fn);
1746		pr_info("Asynchronous notifications enabled\n");
1747	}
1748
1749	/*
1750	 * Ensure that there are no pre-existing shm objects before enabling
1751	 * the shm cache so that there's no chance of receiving an invalid
1752	 * address during shutdown. This could occur, for example, if we're
1753	 * kexec booting from an older kernel that did not properly clean up the
1754	 * shm cache.
1755	 */
1756	optee_disable_unmapped_shm_cache(optee);
1757
1758	/*
1759	 * Only enable the shm cache in case we're not able to pass the RPC
1760	 * arg struct right after the normal arg struct.
1761	 */
1762	if (!optee->rpc_param_count)
1763		optee_enable_shm_cache(optee);
1764
1765	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
1766		pr_info("dynamic shared memory is enabled\n");
1767
1768	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
1769	if (rc)
1770		goto err_disable_shm_cache;
1771
1772	pr_info("initialized driver\n");
1773	return 0;
1774
1775err_disable_shm_cache:
1776	if (!optee->rpc_param_count)
1777		optee_disable_shm_cache(optee);
1778	optee_smc_notif_uninit_irq(optee);
1779	optee_unregister_devices();
1780err_notif_uninit:
1781	optee_notif_uninit(optee);
1782err_close_ctx:
1783	teedev_close_context(ctx);
1784err_supp_uninit:
1785	optee_shm_arg_cache_uninit(optee);
1786	optee_supp_uninit(&optee->supp);
1787	mutex_destroy(&optee->call_queue.mutex);
1788err_unreg_supp_teedev:
1789	tee_device_unregister(optee->supp_teedev);
1790err_unreg_teedev:
1791	tee_device_unregister(optee->teedev);
1792err_free_optee:
1793	kfree(optee);
1794err_free_pool:
1795	tee_shm_pool_free(pool);
1796	if (memremaped_shm)
1797		memunmap(memremaped_shm);
1798	return rc;
1799}
1800
1801static const struct of_device_id optee_dt_match[] = {
1802	{ .compatible = "linaro,optee-tz" },
1803	{},
1804};
1805MODULE_DEVICE_TABLE(of, optee_dt_match);
1806
1807static struct platform_driver optee_driver = {
1808	.probe  = optee_probe,
1809	.remove = optee_smc_remove,
1810	.shutdown = optee_shutdown,
1811	.driver = {
1812		.name = "optee",
1813		.of_match_table = optee_dt_match,
1814	},
1815};
1816
1817int optee_smc_abi_register(void)
1818{
1819	return platform_driver_register(&optee_driver);
1820}
1821
1822void optee_smc_abi_unregister(void)
1823{
1824	platform_driver_unregister(&optee_driver);
1825}