// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/types.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
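
/*
 * Note: as a module parameter this can also be requested on the kernel
 * command line, e.g. "qcom_scm.download_mode=1"; qcom_scm_probe() below acts
 * on it once the SCM device comes up.
 */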

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/**
 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
 * @data:      Response data. The type of this data is given in @resp_type.
 */
struct qcom_scm_qseecom_resp {
	u64 result;
	u64 resp_type;
	u64 data;
};

enum qcom_scm_qseecom_result {
	QSEECOM_RESULT_SUCCESS			= 0,
	QSEECOM_RESULT_INCOMPLETE		= 1,
	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
};

enum qcom_scm_qseecom_resp_type {
	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
};

enum qcom_scm_qseecom_tz_owner {
	QSEECOM_TZ_OWNER_SIP			= 2,
	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
	QSEECOM_TZ_OWNER_QSEE_OS		= 50
};

enum qcom_scm_qseecom_tz_svc {
	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
	QSEECOM_TZ_SVC_APP_MGR			= 1,
	QSEECOM_TZ_SVC_INFO			= 6,
};

enum qcom_scm_qseecom_tz_cmd_app {
	QSEECOM_TZ_CMD_APP_SEND			= 1,
	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
};

enum qcom_scm_qseecom_tz_cmd_info {
	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
};

#define QSEECOM_MAX_APP_NAME_SIZE		64

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	if (IS_ERR(__scm->path))
		return -EINVAL;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (IS_ERR_OR_NULL(__scm->path))
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention may only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on those firmwares. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. A CPU power down may be triggered by cpuidle or CPU
 * hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
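
/*
 * For illustration only: a cpuidle or CPU hotplug path would typically point
 * the warm boot vector at the kernel's resume trampoline before allowing a
 * power collapse, e.g. (the exact entry point is the caller's choice):
 *
 *	ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm);
 */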

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is an endpoint to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise, the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int qcom_scm_disable_sdi(void)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
		.args[0] = 1, /* Disable watchdog debug */
		.args[1] = 0, /* Disable SDI */
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;
	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	/*
	 * Returning directly here would leak mdata_buf and leave the clocks
	 * enabled; unwind through the labels below instead.
	 */
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	/* Don't return with the clocks still enabled on failure */
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	/* Don't return with the clocks still enabled on failure */
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	/* Don't return with the clocks still enabled on failure */
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
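
/*
 * Illustrative sketch of how a remoteproc driver typically sequences the PAS
 * calls above when booting a peripheral (error handling omitted; pas_id, fw
 * and the carveout are placeholders owned by the caller):
 *
 *	qcom_scm_pas_init_image(pas_id, fw->data, fw->size, &ctx);
 *	qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... load firmware segments into the prepared carveout ...
 *	qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * qcom_scm_pas_shutdown(pas_id) later tears the peripheral down again.
 */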

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
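
/*
 * Illustrative read-modify-write of a secure-world-owned register using the
 * two helpers above (the physical address and bit are platform-specific
 * placeholders):
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(addr, &val))
 *		qcom_scm_io_writel(addr, val | enable_bit);
 */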

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners, each set bit in
 *            flag indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		/* srcvm_bits is a u64, so use BIT_ULL(); BIT() would overflow on 32-bit */
		if (srcvm_bits & BIT_ULL(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next vmid set */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
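
/*
 * Illustrative handover of a carveout from HLOS to the modem, roughly as a
 * remoteproc driver might do it (this sketch assumes the VMID/permission
 * constants from <linux/firmware/qcom/qcom_scm.h>):
 *
 *	u64 perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm next = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &perms, &next, 1);
 */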

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
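
/*
 * For illustration: a UFS/eMMC host driver programming a 64-byte AES-256-XTS
 * key for 4096-byte crypto data units might issue (cipher constant assumed
 * from <linux/firmware/qcom/qcom_scm.h>):
 *
 *	err = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 */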

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	/* This function returns bool, so don't leak an errno as "true" */
	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

#ifdef CONFIG_QCOM_QSEECOM

/* Lock for QSEECOM SCM call executions */
static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);

static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				   struct qcom_scm_qseecom_resp *res)
{
	struct qcom_scm_res scm_res = {};
	int status;

	/*
	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
	 * require the respective call lock to be held.
	 */
	lockdep_assert_held(&qcom_scm_qseecom_call_lock);

	status = qcom_scm_call(__scm->dev, desc, &scm_res);

	res->result = scm_res.result[0];
	res->resp_type = scm_res.result[1];
	res->data = scm_res.result[2];

	return status;
}

/**
 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
 * @desc: SCM call descriptor.
 * @res:  SCM call response (output).
 *
 * Performs the QSEECOM SCM call described by @desc, returning the response in
 * @res.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
				 struct qcom_scm_qseecom_resp *res)
{
	int status;

	/*
	 * Note: Multiple QSEECOM SCM calls should not be executed at the same
	 * time, so lock things here. This needs to be extended to
	 * callback/listener handling when support for that is implemented.
	 */

	mutex_lock(&qcom_scm_qseecom_call_lock);
	status = __qcom_scm_qseecom_call(desc, res);
	mutex_unlock(&qcom_scm_qseecom_call_lock);

	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
		__func__, desc->owner, desc->svc, desc->cmd, res->result,
		res->resp_type, res->data);

	if (status) {
		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
		return status;
	}

	/*
	 * TODO: Handle incomplete and blocked calls:
	 *
	 * Incomplete and blocked calls are not supported yet. Some devices
	 * and/or commands require those, some don't. Let's warn about them
	 * prominently in case someone attempts to try these commands with a
	 * device/command combination that isn't supported yet.
	 */
	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);

	return 0;
}

/**
 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
 * @version: Pointer where the QSEECOM version will be stored.
 *
 * Performs a QSEECOM SCM call to query the QSEECOM version currently running
 * in the TrustZone.
 *
 * Return: Zero on success, nonzero on failure.
 */
static int qcom_scm_qseecom_get_version(u32 *version)
{
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	u32 feature = 10;
	int ret;

	desc.owner = QSEECOM_TZ_OWNER_SIP;
	desc.svc = QSEECOM_TZ_SVC_INFO;
	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
	desc.args[0] = feature;

	ret = qcom_scm_qseecom_call(&desc, &res);
	if (ret)
		return ret;

	*version = res.result;
	return 0;
}

/**
 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
 * @app_name: The name of the app.
 * @app_id:   The returned app ID.
 *
 * Query and return the application ID of the QSEE app identified by the given
 * name. The returned ID is the unique identifier of the app required for
 * subsequent communication.
 *
 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
 * loaded or could not be found.
 */
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
{
	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
	unsigned long app_name_len = strlen(app_name);
	struct qcom_scm_desc desc = {};
	struct qcom_scm_qseecom_resp res = {};
	dma_addr_t name_buf_phys;
	char *name_buf;
	int status;

	if (app_name_len >= name_buf_size)
		return -EINVAL;

	name_buf = kzalloc(name_buf_size, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	memcpy(name_buf, app_name, app_name_len);

	name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
	status = dma_mapping_error(__scm->dev, name_buf_phys);
	if (status) {
		kfree(name_buf);
		dev_err(__scm->dev, "qseecom: failed to map dma address\n");
		return status;
	}

	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = name_buf_phys;
	desc.args[1] = app_name_len;

	status = qcom_scm_qseecom_call(&desc, &res);
	dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
	kfree(name_buf);

	if (status)
		return status;

	if (res.result == QSEECOM_RESULT_FAILURE)
		return -ENOENT;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EINVAL;

	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
		return -EINVAL;

	*app_id = res.data;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
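
/*
 * Illustrative lookup as a QSEECOM client might perform it (the app name is
 * an assumed example):
 *
 *	u32 app_id;
 *
 *	ret = qcom_scm_qseecom_app_get_id("qcom.tz.uefisecapp", &app_id);
 */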

/**
 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
 * @app_id:   The ID of the target app.
 * @req:      Request buffer sent to the app (must be DMA-mappable).
 * @req_size: Size of the request buffer.
 * @rsp:      Response buffer, written to by the app (must be DMA-mappable).
 * @rsp_size: Size of the response buffer.
 *
 * Sends a request to the QSEE app associated with the given ID and reads back
 * its response. The caller must provide two DMA memory regions, one for the
 * request and one for the response, and fill out the @req region with the
 * respective (app-specific) request data. The QSEE app reads this and returns
 * its response in the @rsp region.
 *
 * Return: Zero on success, nonzero on failure.
 */
int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
			      size_t rsp_size)
{
	struct qcom_scm_qseecom_resp res = {};
	struct qcom_scm_desc desc = {};
	dma_addr_t req_phys;
	dma_addr_t rsp_phys;
	int status;

	/* Map request buffer */
	req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
	status = dma_mapping_error(__scm->dev, req_phys);
	if (status) {
		dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
		return status;
	}

	/* Map response buffer */
	rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
	status = dma_mapping_error(__scm->dev, rsp_phys);
	if (status) {
		dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
		dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
		return status;
	}

	/* Set up SCM call data */
	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL,
				     QCOM_SCM_RW, QCOM_SCM_VAL);
	desc.args[0] = app_id;
	desc.args[1] = req_phys;
	desc.args[2] = req_size;
	desc.args[3] = rsp_phys;
	desc.args[4] = rsp_size;

	/* Perform call */
	status = qcom_scm_qseecom_call(&desc, &res);

	/* Unmap buffers */
	dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
	dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);

	if (status)
		return status;

	if (res.result != QSEECOM_RESULT_SUCCESS)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
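
/*
 * Illustrative round trip with DMA-mappable (e.g. kmalloc'ed) buffers; the
 * request/response layout is defined by the individual QSEE app, so req and
 * rsp here are placeholders:
 *
 *	req = kzalloc(req_size, GFP_KERNEL);
 *	rsp = kzalloc(rsp_size, GFP_KERNEL);
 *	...
 *	ret = qcom_scm_qseecom_app_send(app_id, req, req_size, rsp, rsp_size);
 */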

/*
 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 * any potential issues with this, only allow validated machines for now.
 */
static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
	{ .compatible = "lenovo,thinkpad-x13s", },
	{ }
};

static bool qcom_scm_qseecom_machine_is_allowed(void)
{
	struct device_node *np;
	bool match;

	np = of_find_node_by_path("/");
	if (!np)
		return false;

	match = of_match_node(qcom_scm_qseecom_allowlist, np);
	of_node_put(np);

	return match;
}

static void qcom_scm_qseecom_free(void *data)
{
	struct platform_device *qseecom_dev = data;

	platform_device_del(qseecom_dev);
	platform_device_put(qseecom_dev);
}

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	struct platform_device *qseecom_dev;
	u32 version;
	int ret;

	/*
	 * Note: We do two steps of validation here: First, we try to query the
	 * QSEECOM version as a check to see if the interface exists on this
	 * device. Second, we check against known good devices due to current
	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
	 *
	 * Note that we deliberately do the machine check after the version
	 * check so that we can log potentially supported devices. This should
	 * be safe as downstream sources indicate that the version query is
	 * neither blocking nor reentrant.
	 */
	ret = qcom_scm_qseecom_get_version(&version);
	if (ret)
		return 0;

	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);

	if (!qcom_scm_qseecom_machine_is_allowed()) {
		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
		return 0;
	}

	/*
	 * Set up QSEECOM interface device. All application clients will be
	 * set up and managed by the corresponding driver for it.
	 */
	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
	if (!qseecom_dev)
		return -ENOMEM;

	qseecom_dev->dev.parent = scm->dev;

	ret = platform_device_add(qseecom_dev);
	if (ret) {
		platform_device_put(qseecom_dev);
		return ret;
	}

	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
}

#else /* CONFIG_QCOM_QSEECOM */

static int qcom_scm_qseecom_init(struct qcom_scm *scm)
{
	return 0;
}

#endif /* CONFIG_QCOM_QSEECOM */

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	init_completion(&__scm->waitq_comp);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If requested, enable "download mode". From this point on, a warm
	 * boot will cause the boot stages to enter download mode, unless
	 * this is disabled again below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	/*
	 * Disable SDI if indicated by DT that it is enabled by default.
	 */
	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
		qcom_scm_disable_sdi();

	/*
	 * Initialize the QSEECOM interface.
	 *
	 * Note: QSEECOM is fairly self-contained and this only adds the
	 * interface device (the driver of which does most of the heavy
	 * lifting). So any errors returned here should be either -ENOMEM or
	 * -EINVAL (with the latter only in case there's a bug in our code).
	 * This means that there is no need to bring down the whole SCM driver.
	 * Just log the error instead and let SCM live.
	 */
	ret = qcom_scm_qseecom_init(scm);
	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");