// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

static u32 download_mode;

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;

	struct qcom_tzmem_pool *mempool;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64
#define SHMBRIDGE_RESULT_NOTSUPP		4

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)

#define QCOM_DLOAD_MASK		GENMASK(5, 4)
#define QCOM_DLOAD_NODUMP	0
#define QCOM_DLOAD_FULLDUMP	1
#define QCOM_DLOAD_MINIDUMP	2
#define QCOM_DLOAD_BOTHDUMP	3

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static const char * const download_mode_name[] = {
	[QCOM_DLOAD_NODUMP]	= "off",
	[QCOM_DLOAD_FULLDUMP]	= "full",
	[QCOM_DLOAD_MINIDUMP]	= "mini",
	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (!__scm->path)
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
{
	if (!qcom_scm_is_available())
		return NULL;

	return __scm->mempool;
}
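
/*
 * Illustrative sketch (not part of this driver): how a client might borrow
 * the SCM TZ memory pool for a firmware-visible buffer. The function name is
 * made up for the example; qcom_tzmem_alloc() is the real allocator entry
 * point and the pool may be NULL before SCM has probed.
 */
static __maybe_unused void *example_alloc_tz_buffer(size_t size)
{
	struct qcom_tzmem_pool *pool = qcom_scm_get_tzmem_pool();

	if (!pool)
		return NULL;

	return qcom_tzmem_alloc(pool, size, GFP_KERNEL);
}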

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit, otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on these firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the terminal call to power down the CPU. If an interrupt is
 * pending, control returns from this function; otherwise the CPU jumps to
 * the warm boot entry point set for this CPU upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
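
/*
 * Illustrative sketch (assumed caller, not part of this driver): a cpuidle
 * back-end would typically register the resume trampoline once and then
 * terminate power collapse through qcom_scm_cpu_power_down(). The
 * QCOM_SCM_CPU_PWR_DOWN_L2_OFF flag is taken from
 * <linux/firmware/qcom/qcom_scm.h>.
 */
static __maybe_unused int example_enter_power_collapse(void *resume_entry)
{
	int ret;

	ret = qcom_scm_set_warm_boot_addr(resume_entry);
	if (ret)
		return ret;

	/* Only returns here if an interrupt was already pending. */
	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_OFF);

	return 0;
}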

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
{
	unsigned int old;
	unsigned int new;
	int ret;

	ret = qcom_scm_io_readl(addr, &old);
	if (ret)
		return ret;

	new = (old & ~mask) | (val & mask);

	return qcom_scm_io_writel(addr, new);
}

static void qcom_scm_set_download_mode(u32 dload_mode)
{
	int ret = 0;

	if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
	} else if (dload_mode) {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
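
/*
 * Worked example for the register path above: with QCOM_DLOAD_MASK being
 * GENMASK(5, 4), FIELD_PREP(QCOM_DLOAD_MASK, QCOM_DLOAD_FULLDUMP) evaluates
 * to 0x10, so qcom_scm_io_rmw() rewrites only bits [5:4] of the TCSR cookie
 * register and leaves the neighbouring bits untouched.
 */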

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 *
	 * For PIL calls the hypervisor creates SHM Bridges for the blob
	 * buffers on behalf of Linux so we must not do it ourselves hence
	 * not using the TZMem allocator here.
	 *
	 * If we pass a buffer that is already part of an SHM Bridge to this
	 * call, it will fail.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf)
		return -ENOMEM;

	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
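
/*
 * Illustrative sketch of the usual PAS sequence (simplified; the helper name
 * and parameters are assumptions for the example, loosely following what the
 * Qualcomm remoteproc drivers do). "metadata" stands for the blob that a
 * helper such as qcom_mdt_read_metadata() would extract from the firmware.
 */
static __maybe_unused int example_pas_boot(u32 pas_id, const void *metadata,
					   size_t metadata_len,
					   phys_addr_t mem_phys, size_t mem_size)
{
	struct qcom_scm_pas_metadata ctx = {};
	int ret;

	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len, &ctx);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (ret)
		goto release;

	/* ... copy the firmware segments into the carveout here ... */

	ret = qcom_scm_pas_auth_and_reset(pas_id);
release:
	qcom_scm_pas_metadata_release(&ctx);
	return ret;
}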

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)

bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);

int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
		.arginfo = QCOM_SCM_ARGS(4),
		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
		.args[1] = 0xffffffff,
		.args[2] = 0xffffffff,
		.args[3] = 0xffffffff,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the page table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: memory region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners; each set bit
 *            indicates a unique owner
 * @newvm:    array of new owners and their corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return: negative errno on failure or 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							ptr_sz, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ptr_phys = qcom_tzmem_to_phys(ptr);

	/* Fill in the source vmid details */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill in the details of the memory buffer to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill in the details of the destination vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
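
/*
 * Illustrative sketch (assumed caller): handing a physically contiguous
 * buffer from HLOS to the modem while keeping it readable/writable by both,
 * roughly the way the rmtfs memory driver does. The VMID and permission
 * macros come from <linux/firmware/qcom/qcom_scm.h>.
 */
static __maybe_unused int example_share_with_modem(phys_addr_t addr, size_t size)
{
	struct qcom_scm_vmperm perms[] = {
		{ QCOM_SCM_VMID_HLOS, QCOM_SCM_PERM_RW },
		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
	};
	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);

	return qcom_scm_assign_mem(addr, size, &srcvm, perms, ARRAY_SIZE(perms));
}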

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	int ret;

	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							   key_size,
							   GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = qcom_tzmem_to_phys(keybuf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
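
/*
 * Illustrative sketch (assumed caller): programming a 64-byte AES-256-XTS
 * key into keyslot 0 for 4096-byte crypto data units (8 x 512 bytes),
 * roughly what the UFS/eMMC inline-crypto glue does on these SoCs.
 */
static __maybe_unused int example_program_ice_key(const u8 *raw_key)
{
	return qcom_scm_ice_set_key(0, raw_key, 64,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
}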

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_shm_bridge_enable(void)
{
	int ret;

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
		.owner = ARM_SMCCC_OWNER_SIP
	};

	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
		return -EOPNOTSUPP;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (ret)
		return ret;

	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
		return -EOPNOTSUPP;

	return res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);

int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
			       u64 ns_vmids, u64 *handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = pfn_and_ns_perm_flags,
		.args[1] = ipfn_and_s_perm_flags,
		.args[2] = size_and_flags,
		.args[3] = ns_vmids,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
	};

	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (handle && !ret)
		*handle = res.result[1];

	return ret ?: res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
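
/*
 * Sketch of how a caller packs these arguments (treat the exact bit layout
 * as an assumption here; the authoritative encoding lives with the TZMem
 * allocator that creates bridges over its pools): the physical address is
 * OR'ed with the permission bits, and the number of non-secure VMs is folded
 * into the size word.
 */
static __maybe_unused int example_bridge_create(struct device *dev,
						phys_addr_t paddr, size_t size,
						u64 *handle)
{
	u64 pfn_and_ns_perm = (u64)paddr | QCOM_SCM_PERM_RW;
	u64 ipfn_and_s_perm = (u64)paddr | QCOM_SCM_PERM_RW;
	u64 size_and_flags = size | (1ULL << 9);	/* one NS VM (assumed shift) */

	return qcom_scm_shm_bridge_create(dev, pfn_and_ns_perm, ipfn_and_s_perm,
					  size_and_flags, QCOM_SCM_VMID_HLOS,
					  handle);
}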

int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
		.owner = ARM_SMCCC_OWNER_SIP,
		.args[0] = handle,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							       payload_size,
							       GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = qcom_tzmem_to_phys(payload_buf);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

int qcom_scm_gpu_init_regs(u32 gpu_req)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_GPU,
		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = gpu_req,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
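
/*
 * Illustrative device tree wiring for the lookup above (the offset value is
 * an assumption for the example):
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 *
 * dload_mode_addr then becomes the TCSR node's res.start plus the 0x13000
 * cell, i.e. the physical address of the download mode cookie register.
 */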

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	return status;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls must not be executed at the same
	 * time, so serialize them here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs the QSEECOM SCM call that queries the QSEECOM version currently
 * running in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
							     name_buf_size,
							     GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = qcom_tzmem_to_phys(name_buf);
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be TZ memory)
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be TZ memory)
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two buffers allocated from the TZ
 * memory pool, one for the request and one for the response, and fill out the
 * @req region with the respective (app-specific) request data. The QSEE app
 * reads this and returns its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
			      void *rsp, size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	phys_addr_t req_phys;
	phys_addr_t rsp_phys;
	int status;

	req_phys = qcom_tzmem_to_phys(req);
	rsp_phys = qcom_tzmem_to_phys(rsp);

	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	status = qcom_scm_qseecom_call(&desc, &res);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
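
/*
 * Illustrative round trip (assumed caller): look up a well-known QSEE app
 * and send it a request. "qcom.tz.uefisecapp" is the service used by the
 * in-tree uefisecapp client; req/rsp must come from the TZ memory pool
 * (e.g. via qcom_tzmem_alloc()), as noted in the kernel-doc above.
 */
static __maybe_unused int example_qseecom_roundtrip(void *req, size_t req_size,
						    void *rsp, size_t rsp_size)
{
	u32 app_id;
	int ret;

	ret = qcom_scm_qseecom_app_get_id("qcom.tz.uefisecapp", &app_id);
	if (ret)
		return ret;

	return qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
}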

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
	{ .compatible = "dell,xps13-9345" },
	{ .compatible = "lenovo,flex-5g" },
	{ .compatible = "lenovo,thinkpad-t14s" },
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ .compatible = "lenovo,yoga-slim7x" },
	{ .compatible = "microsoft,arcata", },
	{ .compatible = "microsoft,romulus13", },
	{ .compatible = "microsoft,romulus15", },
	{ .compatible = "qcom,sc8180x-primus" },
	{ .compatible = "qcom,x1e001de-devkit" },
	{ .compatible = "qcom,x1e80100-crd" },
	{ .compatible = "qcom,x1e80100-qcp" },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	/* Paired with smp_store_release() in qcom_scm_probe */
	return !!smp_load_acquire(&__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int get_download_mode(char *buffer, const struct kernel_param *kp)
{
	if (download_mode >= ARRAY_SIZE(download_mode_name))
		return sysfs_emit(buffer, "unknown mode\n");

	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
}

static int set_download_mode(const char *val, const struct kernel_param *kp)
{
	bool tmp;
	int ret;

	ret = sysfs_match_string(download_mode_name, val);
	if (ret < 0) {
		ret = kstrtobool(val, &tmp);
		if (ret < 0) {
			pr_err("qcom_scm: err: %d\n", ret);
			return ret;
		}

		ret = tmp ? 1 : 0;
	}

	download_mode = ret;
	if (__scm)
		qcom_scm_set_download_mode(download_mode);

	return 0;
}

static const struct kernel_param_ops download_mode_param_ops = {
	.get = get_download_mode,
	.set = set_download_mode,
};

module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump, full/on/1/Y for full dump, mini for minidump, full,mini for both full dump and minidump");
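
/*
 * Example (illustrative): select minidump collection from the kernel command
 * line with qcom_scm.download_mode=mini, or at runtime through
 * /sys/module/qcom_scm/parameters/download_mode; reading that file back
 * reports the currently selected mode.
 */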

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_tzmem_pool_config pool_config;
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO) {
			ret = irq;
			goto err;
		}
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0) {
			dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
			goto err;
		}
	}

	__get_convention();

	/*
	 * If "download mode" is requested, from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	qcom_scm_set_download_mode(download_mode);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
		qcom_scm_disable_sdi();

	ret = of_reserved_mem_device_init(__scm->dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(__scm->dev, ret,
			      "Failed to setup the reserved memory region for TZ mem\n");
		goto err;
	}

	ret = qcom_tzmem_enable(__scm->dev);
	if (ret) {
		dev_err_probe(__scm->dev, ret,
			      "Failed to enable the TrustZone memory allocator\n");
		goto err;
	}

	memset(&pool_config, 0, sizeof(pool_config));
	pool_config.initial_size = 0;
	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
	pool_config.max_size = SZ_256K;

	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
	if (IS_ERR(__scm->mempool)) {
		dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
			      "Failed to create the SCM memory pool\n");
		goto err;
	}

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;

err:
	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
	smp_store_release(&__scm, NULL);

	return ret;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");