v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
   3 * Copyright (C) 2015 Linaro Ltd.
   4 */
   5
   6#include <linux/arm-smccc.h>
   7#include <linux/clk.h>
   8#include <linux/completion.h>
   9#include <linux/cpumask.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/export.h>
  12#include <linux/firmware/qcom/qcom_scm.h>
  13#include <linux/init.h>
  14#include <linux/interconnect.h>
  15#include <linux/interrupt.h>
  16#include <linux/module.h>
  17#include <linux/of.h>
  18#include <linux/of_address.h>
  19#include <linux/of_irq.h>
  20#include <linux/of_platform.h>
  21#include <linux/platform_device.h>
  22#include <linux/reset-controller.h>
  23#include <linux/types.h>
  24
  25#include "qcom_scm.h"
  26
  27static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
  28module_param(download_mode, bool, 0);
  29
  30struct qcom_scm {
  31	struct device *dev;
  32	struct clk *core_clk;
  33	struct clk *iface_clk;
  34	struct clk *bus_clk;
  35	struct icc_path *path;
  36	struct completion waitq_comp;
  37	struct reset_controller_dev reset;
  38
  39	/* control access to the interconnect path */
  40	struct mutex scm_bw_lock;
  41	int scm_vote_count;
  42
  43	u64 dload_mode_addr;
  44};
  45
  46struct qcom_scm_current_perm_info {
  47	__le32 vmid;
  48	__le32 perm;
  49	__le64 ctx;
  50	__le32 ctx_size;
  51	__le32 unused;
  52};
  53
  54struct qcom_scm_mem_map_info {
  55	__le64 mem_addr;
  56	__le64 mem_size;
  57};
  58
  59/**
  60 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
  61 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
  62 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
  63 * @data:      Response data. The type of this data is given in @resp_type.
  64 */
  65struct qcom_scm_qseecom_resp {
  66	u64 result;
  67	u64 resp_type;
  68	u64 data;
  69};
  70
  71enum qcom_scm_qseecom_result {
  72	QSEECOM_RESULT_SUCCESS			= 0,
  73	QSEECOM_RESULT_INCOMPLETE		= 1,
  74	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
  75	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
  76};
  77
  78enum qcom_scm_qseecom_resp_type {
  79	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
  80	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
  81};
  82
  83enum qcom_scm_qseecom_tz_owner {
  84	QSEECOM_TZ_OWNER_SIP			= 2,
  85	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
  86	QSEECOM_TZ_OWNER_QSEE_OS		= 50
  87};
  88
  89enum qcom_scm_qseecom_tz_svc {
  90	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
  91	QSEECOM_TZ_SVC_APP_MGR			= 1,
  92	QSEECOM_TZ_SVC_INFO			= 6,
  93};
  94
  95enum qcom_scm_qseecom_tz_cmd_app {
  96	QSEECOM_TZ_CMD_APP_SEND			= 1,
  97	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
  98};
  99
 100enum qcom_scm_qseecom_tz_cmd_info {
 101	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
 102};
 103
 104#define QSEECOM_MAX_APP_NAME_SIZE		64
 105
 106/* Each bit configures cold/warm boot address for one of the 4 CPUs */
 107static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
 108	0, BIT(0), BIT(3), BIT(5)
 109};
 110static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
 111	BIT(2), BIT(1), BIT(4), BIT(6)
 112};
 113
 114#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
 115#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)
 116
 117static const char * const qcom_scm_convention_names[] = {
 118	[SMC_CONVENTION_UNKNOWN] = "unknown",
 119	[SMC_CONVENTION_ARM_32] = "smc arm 32",
 120	[SMC_CONVENTION_ARM_64] = "smc arm 64",
 121	[SMC_CONVENTION_LEGACY] = "smc legacy",
 122};
 123
 124static struct qcom_scm *__scm;
 125
 126static int qcom_scm_clk_enable(void)
 127{
 128	int ret;
 129
 130	ret = clk_prepare_enable(__scm->core_clk);
 131	if (ret)
 132		goto bail;
 133
 134	ret = clk_prepare_enable(__scm->iface_clk);
 135	if (ret)
 136		goto disable_core;
 137
 138	ret = clk_prepare_enable(__scm->bus_clk);
 139	if (ret)
 140		goto disable_iface;
 141
 142	return 0;
 143
 144disable_iface:
 145	clk_disable_unprepare(__scm->iface_clk);
 146disable_core:
 147	clk_disable_unprepare(__scm->core_clk);
 148bail:
 149	return ret;
 150}
 151
 152static void qcom_scm_clk_disable(void)
 153{
 154	clk_disable_unprepare(__scm->core_clk);
 155	clk_disable_unprepare(__scm->iface_clk);
 156	clk_disable_unprepare(__scm->bus_clk);
 157}
 158
 159static int qcom_scm_bw_enable(void)
 160{
 161	int ret = 0;
 162
 163	if (!__scm->path)
 164		return 0;
 165
 166	if (IS_ERR(__scm->path))
 167		return -EINVAL;
 168
 169	mutex_lock(&__scm->scm_bw_lock);
 170	if (!__scm->scm_vote_count) {
 171		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
 172		if (ret < 0) {
 173			dev_err(__scm->dev, "failed to set bandwidth request\n");
 174			goto err_bw;
 175		}
 176	}
 177	__scm->scm_vote_count++;
 178err_bw:
 179	mutex_unlock(&__scm->scm_bw_lock);
 180
 181	return ret;
 182}
 183
 184static void qcom_scm_bw_disable(void)
 185{
 186	if (IS_ERR_OR_NULL(__scm->path))
 187		return;
 188
 189	mutex_lock(&__scm->scm_bw_lock);
 190	if (__scm->scm_vote_count-- == 1)
 191		icc_set_bw(__scm->path, 0, 0);
 192	mutex_unlock(&__scm->scm_bw_lock);
 193}
 194
 195enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
 196static DEFINE_SPINLOCK(scm_query_lock);
 197
 198static enum qcom_scm_convention __get_convention(void)
 199{
 200	unsigned long flags;
 201	struct qcom_scm_desc desc = {
 202		.svc = QCOM_SCM_SVC_INFO,
 203		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 204		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
 205					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
 206			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
 207		.arginfo = QCOM_SCM_ARGS(1),
 208		.owner = ARM_SMCCC_OWNER_SIP,
 209	};
 210	struct qcom_scm_res res;
 211	enum qcom_scm_convention probed_convention;
 212	int ret;
 213	bool forced = false;
 214
 215	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
 216		return qcom_scm_convention;
 217
 218	/*
 219	 * Per the "SMC calling convention specification", the 64-bit calling
 220	 * convention can only be used when the client is 64-bit, otherwise
  221	 * the system will encounter undefined behaviour.
 222	 */
 223#if IS_ENABLED(CONFIG_ARM64)
 224	/*
 225	 * Device isn't required as there is only one argument - no device
 226	 * needed to dma_map_single to secure world
 227	 */
 228	probed_convention = SMC_CONVENTION_ARM_64;
 229	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 230	if (!ret && res.result[0] == 1)
 231		goto found;
 232
 233	/*
 234	 * Some SC7180 firmwares didn't implement the
  235	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
 236	 * calling conventions on these firmwares. Luckily we don't make any
 237	 * early calls into the firmware on these SoCs so the device pointer
 238	 * will be valid here to check if the compatible matches.
 239	 */
 240	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
 241		forced = true;
 242		goto found;
 243	}
 244#endif
 245
 246	probed_convention = SMC_CONVENTION_ARM_32;
 247	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 248	if (!ret && res.result[0] == 1)
 249		goto found;
 250
 251	probed_convention = SMC_CONVENTION_LEGACY;
 252found:
 253	spin_lock_irqsave(&scm_query_lock, flags);
 254	if (probed_convention != qcom_scm_convention) {
 255		qcom_scm_convention = probed_convention;
 256		pr_info("qcom_scm: convention: %s%s\n",
 257			qcom_scm_convention_names[qcom_scm_convention],
 258			forced ? " (forced)" : "");
 259	}
 260	spin_unlock_irqrestore(&scm_query_lock, flags);
 261
 262	return qcom_scm_convention;
 263}
 264
 265/**
 266 * qcom_scm_call() - Invoke a syscall in the secure world
 267 * @dev:	device
 268 * @desc:	Descriptor structure containing arguments and return values
 269 * @res:        Structure containing results from SMC/HVC call
 270 *
 271 * Sends a command to the SCM and waits for the command to finish processing.
 272 * This should *only* be called in pre-emptible context.
 273 */
 274static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
 275			 struct qcom_scm_res *res)
 276{
 277	might_sleep();
 278	switch (__get_convention()) {
 279	case SMC_CONVENTION_ARM_32:
 280	case SMC_CONVENTION_ARM_64:
 281		return scm_smc_call(dev, desc, res, false);
 282	case SMC_CONVENTION_LEGACY:
 283		return scm_legacy_call(dev, desc, res);
 284	default:
 285		pr_err("Unknown current SCM calling convention.\n");
 286		return -EINVAL;
 287	}
 288}
 289
 290/**
 291 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 292 * @dev:	device
 293 * @desc:	Descriptor structure containing arguments and return values
 294 * @res:	Structure containing results from SMC/HVC call
 295 *
 296 * Sends a command to the SCM and waits for the command to finish processing.
 297 * This can be called in atomic context.
 298 */
 299static int qcom_scm_call_atomic(struct device *dev,
 300				const struct qcom_scm_desc *desc,
 301				struct qcom_scm_res *res)
 302{
 303	switch (__get_convention()) {
 304	case SMC_CONVENTION_ARM_32:
 305	case SMC_CONVENTION_ARM_64:
 306		return scm_smc_call(dev, desc, res, true);
 307	case SMC_CONVENTION_LEGACY:
 308		return scm_legacy_call_atomic(dev, desc, res);
 309	default:
 310		pr_err("Unknown current SCM calling convention.\n");
 311		return -EINVAL;
 312	}
 313}
 314
 315static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
 316					 u32 cmd_id)
 317{
 318	int ret;
 319	struct qcom_scm_desc desc = {
 320		.svc = QCOM_SCM_SVC_INFO,
 321		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 322		.owner = ARM_SMCCC_OWNER_SIP,
 323	};
 324	struct qcom_scm_res res;
 325
 326	desc.arginfo = QCOM_SCM_ARGS(1);
 327	switch (__get_convention()) {
 328	case SMC_CONVENTION_ARM_32:
 329	case SMC_CONVENTION_ARM_64:
 330		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
 331				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
 332		break;
 333	case SMC_CONVENTION_LEGACY:
 334		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
 335		break;
 336	default:
 337		pr_err("Unknown SMC convention being used\n");
 338		return false;
 339	}
 340
 341	ret = qcom_scm_call(dev, &desc, &res);
 342
 343	return ret ? false : !!res.result[0];
 344}
 345
 346static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
 347{
 348	int cpu;
 349	unsigned int flags = 0;
 350	struct qcom_scm_desc desc = {
 351		.svc = QCOM_SCM_SVC_BOOT,
 352		.cmd = QCOM_SCM_BOOT_SET_ADDR,
 353		.arginfo = QCOM_SCM_ARGS(2),
 354		.owner = ARM_SMCCC_OWNER_SIP,
 355	};
 356
 357	for_each_present_cpu(cpu) {
 358		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
 359			return -EINVAL;
 360		flags |= cpu_bits[cpu];
 361	}
 362
 363	desc.args[0] = flags;
 364	desc.args[1] = virt_to_phys(entry);
 365
 366	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 367}
 368
 369static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
 370{
 371	struct qcom_scm_desc desc = {
 372		.svc = QCOM_SCM_SVC_BOOT,
 373		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
 374		.owner = ARM_SMCCC_OWNER_SIP,
 375		.arginfo = QCOM_SCM_ARGS(6),
 376		.args = {
 377			virt_to_phys(entry),
 378			/* Apply to all CPUs in all affinity levels */
 379			~0ULL, ~0ULL, ~0ULL, ~0ULL,
 380			flags,
 381		},
 382	};
 383
 384	/* Need a device for DMA of the additional arguments */
 385	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
 386		return -EOPNOTSUPP;
 387
 388	return qcom_scm_call(__scm->dev, &desc, NULL);
 389}
 390
 391/**
  392 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all CPUs
  393 * @entry: Entry point function for the CPUs
  394 *
  395 * Set the Linux entry point for the SCM to transfer control to when coming
  396 * out of a power down. CPU power down may occur during cpuidle or hotplug.
 397 */
 398int qcom_scm_set_warm_boot_addr(void *entry)
 399{
 400	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
 401		/* Fallback to old SCM call */
 402		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
 403	return 0;
 404}
 405EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
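/*
 * Example (illustrative sketch, not part of this listing): a platform's CPU
 * hotplug/idle setup would typically register its resume entry point once at
 * boot. `cpu_resume` stands in for the platform's actual assembly entry:
 *
 *	if (qcom_scm_set_warm_boot_addr(cpu_resume))
 *		pr_warn("qcom_scm: failed to set warm boot address\n");
 */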
 406
 407/**
  408 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all CPUs
  409 * @entry: Entry point function for the CPUs
 410 */
 411int qcom_scm_set_cold_boot_addr(void *entry)
 412{
 413	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
 414		/* Fallback to old SCM call */
 415		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
 416	return 0;
 417}
 418EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
 419
 420/**
  421 * qcom_scm_cpu_power_down() - Power down the CPU
  422 * @flags:	Flags to flush cache
  423 *
  424 * This is the endpoint for powering down the CPU. If there is a pending
  425 * interrupt, control returns from this function; otherwise the CPU jumps
  426 * to the warm boot entry point set for this CPU upon reset.
 427 */
 428void qcom_scm_cpu_power_down(u32 flags)
 429{
 430	struct qcom_scm_desc desc = {
 431		.svc = QCOM_SCM_SVC_BOOT,
 432		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
 433		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
 434		.arginfo = QCOM_SCM_ARGS(1),
 435		.owner = ARM_SMCCC_OWNER_SIP,
 436	};
 437
 438	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 439}
 440EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
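/*
 * Example (sketch): a cpuidle back-end would terminate power collapse from
 * its idle-enter callback. QCOM_SCM_CPU_PWR_DOWN_L2_ON is the conservative
 * cache-flush flag from <linux/firmware/qcom/qcom_scm.h>:
 *
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *	// Reached only if a pending interrupt aborted the power-down.
 */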
 441
 442int qcom_scm_set_remote_state(u32 state, u32 id)
 443{
 444	struct qcom_scm_desc desc = {
 445		.svc = QCOM_SCM_SVC_BOOT,
 446		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
 447		.arginfo = QCOM_SCM_ARGS(2),
 448		.args[0] = state,
 449		.args[1] = id,
 450		.owner = ARM_SMCCC_OWNER_SIP,
 451	};
 452	struct qcom_scm_res res;
 453	int ret;
 454
 455	ret = qcom_scm_call(__scm->dev, &desc, &res);
 456
 457	return ret ? : res.result[0];
 458}
 459EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
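/*
 * Example (sketch): the MSM GPU driver uses this call to bring the GPU zap
 * shader out of secure mode after loading it via PAS. Both values below are
 * illustrative placeholders, not authoritative firmware IDs:
 *
 *	#define GPU_ZAP_SHADER_RESUME	0	// hypothetical state value
 *	#define GPU_PAS_ID		13	// hypothetical peripheral id
 *
 *	ret = qcom_scm_set_remote_state(GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
 */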
 460
 461static int qcom_scm_disable_sdi(void)
 462{
 463	int ret;
 464	struct qcom_scm_desc desc = {
 465		.svc = QCOM_SCM_SVC_BOOT,
 466		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
 467		.args[0] = 1, /* Disable watchdog debug */
 468		.args[1] = 0, /* Disable SDI */
 469		.arginfo = QCOM_SCM_ARGS(2),
 470		.owner = ARM_SMCCC_OWNER_SIP,
 471	};
 472	struct qcom_scm_res res;
 473
 474	ret = qcom_scm_clk_enable();
 475	if (ret)
 476		return ret;
 477	ret = qcom_scm_call(__scm->dev, &desc, &res);
 478
 479	qcom_scm_clk_disable();
 480
 481	return ret ? : res.result[0];
 482}
 483
 484static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
 485{
 486	struct qcom_scm_desc desc = {
 487		.svc = QCOM_SCM_SVC_BOOT,
 488		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 489		.arginfo = QCOM_SCM_ARGS(2),
 490		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 491		.owner = ARM_SMCCC_OWNER_SIP,
 492	};
 493
 494	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
 495
 496	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 497}
 498
 499static void qcom_scm_set_download_mode(bool enable)
 500{
 501	bool avail;
 502	int ret = 0;
 503
 504	avail = __qcom_scm_is_call_available(__scm->dev,
 505					     QCOM_SCM_SVC_BOOT,
 506					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
 507	if (avail) {
 508		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
 509	} else if (__scm->dload_mode_addr) {
 510		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
 511				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
 512	} else {
 513		dev_err(__scm->dev,
 514			"No available mechanism for setting download mode\n");
 515	}
 516
 517	if (ret)
 518		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
 519}
 520
 521/**
 522 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 523 *			       state machine for a given peripheral, using the
 524 *			       metadata
 525 * @peripheral: peripheral id
 526 * @metadata:	pointer to memory containing ELF header, program header table
 527 *		and optional blob of data used for authenticating the metadata
 528 *		and the rest of the firmware
 529 * @size:	size of the metadata
 530 * @ctx:	optional metadata context
 531 *
 532 * Return: 0 on success.
 533 *
 534 * Upon successful return, the PAS metadata context (@ctx) will be used to
 535 * track the metadata allocation, this needs to be released by invoking
 536 * qcom_scm_pas_metadata_release() by the caller.
 537 */
 538int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 539			    struct qcom_scm_pas_metadata *ctx)
 540{
 541	dma_addr_t mdata_phys;
 542	void *mdata_buf;
 543	int ret;
 544	struct qcom_scm_desc desc = {
 545		.svc = QCOM_SCM_SVC_PIL,
 546		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
 547		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
 548		.args[0] = peripheral,
 549		.owner = ARM_SMCCC_OWNER_SIP,
 550	};
 551	struct qcom_scm_res res;
 552
 553	/*
 554	 * During the scm call memory protection will be enabled for the meta
 555	 * data blob, so make sure it's physically contiguous, 4K aligned and
 556	 * non-cachable to avoid XPU violations.
 557	 */
 558	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
 559				       GFP_KERNEL);
 560	if (!mdata_buf) {
 561		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
 562		return -ENOMEM;
 563	}
 564	memcpy(mdata_buf, metadata, size);
 565
 566	ret = qcom_scm_clk_enable();
 567	if (ret)
 568		goto out;
 569
 570	ret = qcom_scm_bw_enable();
 571	if (ret)
  572		goto disable_clk;
 573
 574	desc.args[1] = mdata_phys;
 575
 576	ret = qcom_scm_call(__scm->dev, &desc, &res);
 577
  578	qcom_scm_bw_disable();
disable_clk:
  579	qcom_scm_clk_disable();
 580
 581out:
 582	if (ret < 0 || !ctx) {
 583		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 584	} else if (ctx) {
 585		ctx->ptr = mdata_buf;
 586		ctx->phys = mdata_phys;
 587		ctx->size = size;
 588	}
 589
 590	return ret ? : res.result[0];
 591}
 592EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
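/*
 * Example (sketch): a remoteproc driver hands the firmware metadata to PAS
 * before loading segments and releases the context once authentication has
 * completed. `PAS_ID` and the firmware pointers are placeholders:
 *
 *	struct qcom_scm_pas_metadata pas_ctx = {};
 *	int ret;
 *
 *	ret = qcom_scm_pas_init_image(PAS_ID, fw->data, fw->size, &pas_ctx);
 *	if (ret)
 *		return ret;
 *	// ... qcom_scm_pas_mem_setup(), load segments, auth_and_reset() ...
 *	qcom_scm_pas_metadata_release(&pas_ctx);
 */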
 593
 594/**
 595 * qcom_scm_pas_metadata_release() - release metadata context
 596 * @ctx:	metadata context
 597 */
 598void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
 599{
 600	if (!ctx->ptr)
 601		return;
 602
 603	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
 604
 605	ctx->ptr = NULL;
 606	ctx->phys = 0;
 607	ctx->size = 0;
 608}
 609EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
 610
 611/**
 612 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 613 *			      for firmware loading
 614 * @peripheral:	peripheral id
 615 * @addr:	start address of memory area to prepare
 616 * @size:	size of the memory area to prepare
 617 *
 618 * Returns 0 on success.
 619 */
 620int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 621{
 622	int ret;
 623	struct qcom_scm_desc desc = {
 624		.svc = QCOM_SCM_SVC_PIL,
 625		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
 626		.arginfo = QCOM_SCM_ARGS(3),
 627		.args[0] = peripheral,
 628		.args[1] = addr,
 629		.args[2] = size,
 630		.owner = ARM_SMCCC_OWNER_SIP,
 631	};
 632	struct qcom_scm_res res;
 633
 634	ret = qcom_scm_clk_enable();
 635	if (ret)
 636		return ret;
 637
 638	ret = qcom_scm_bw_enable();
 639	if (ret)
  640		goto disable_clk;
 641
 642	ret = qcom_scm_call(__scm->dev, &desc, &res);
  643	qcom_scm_bw_disable();
disable_clk:
  644	qcom_scm_clk_disable();
 645
 646	return ret ? : res.result[0];
 647}
 648EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
 649
 650/**
 651 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 652 *				   and reset the remote processor
 653 * @peripheral:	peripheral id
 654 *
 655 * Return 0 on success.
 656 */
 657int qcom_scm_pas_auth_and_reset(u32 peripheral)
 658{
 659	int ret;
 660	struct qcom_scm_desc desc = {
 661		.svc = QCOM_SCM_SVC_PIL,
 662		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
 663		.arginfo = QCOM_SCM_ARGS(1),
 664		.args[0] = peripheral,
 665		.owner = ARM_SMCCC_OWNER_SIP,
 666	};
 667	struct qcom_scm_res res;
 668
 669	ret = qcom_scm_clk_enable();
 670	if (ret)
 671		return ret;
 672
 673	ret = qcom_scm_bw_enable();
 674	if (ret)
  675		goto disable_clk;
 676
 677	ret = qcom_scm_call(__scm->dev, &desc, &res);
  678	qcom_scm_bw_disable();
disable_clk:
  679	qcom_scm_clk_disable();
 680
 681	return ret ? : res.result[0];
 682}
 683EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
 684
 685/**
 686 * qcom_scm_pas_shutdown() - Shut down the remote processor
 687 * @peripheral: peripheral id
 688 *
 689 * Returns 0 on success.
 690 */
 691int qcom_scm_pas_shutdown(u32 peripheral)
 692{
 693	int ret;
 694	struct qcom_scm_desc desc = {
 695		.svc = QCOM_SCM_SVC_PIL,
 696		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
 697		.arginfo = QCOM_SCM_ARGS(1),
 698		.args[0] = peripheral,
 699		.owner = ARM_SMCCC_OWNER_SIP,
 700	};
 701	struct qcom_scm_res res;
 702
 703	ret = qcom_scm_clk_enable();
 704	if (ret)
 705		return ret;
 706
 707	ret = qcom_scm_bw_enable();
 708	if (ret)
  709		goto disable_clk;
 710
 711	ret = qcom_scm_call(__scm->dev, &desc, &res);
 712
  713	qcom_scm_bw_disable();
disable_clk:
  714	qcom_scm_clk_disable();
 715
 716	return ret ? : res.result[0];
 717}
 718EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
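/*
 * Example (sketch): taken together, the PAS calls above form the secure
 * firmware-boot sequence that remoteproc drivers follow. All identifiers
 * are placeholders and error handling is elided:
 *
 *	ret = qcom_scm_pas_init_image(PAS_ID, metadata, metadata_len, &pas_ctx);
 *	ret = qcom_scm_pas_mem_setup(PAS_ID, mem_phys, mem_size);
 *	// ... copy authenticated firmware segments into the prepared region ...
 *	ret = qcom_scm_pas_auth_and_reset(PAS_ID);
 *	qcom_scm_pas_metadata_release(&pas_ctx);
 *
 *	// And on stop:
 *	ret = qcom_scm_pas_shutdown(PAS_ID);
 */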
 719
 720/**
 721 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
  722 *			      available for the given peripheral
 723 * @peripheral:	peripheral id
 724 *
 725 * Returns true if PAS is supported for this peripheral, otherwise false.
 726 */
 727bool qcom_scm_pas_supported(u32 peripheral)
 728{
 729	int ret;
 730	struct qcom_scm_desc desc = {
 731		.svc = QCOM_SCM_SVC_PIL,
 732		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
 733		.arginfo = QCOM_SCM_ARGS(1),
 734		.args[0] = peripheral,
 735		.owner = ARM_SMCCC_OWNER_SIP,
 736	};
 737	struct qcom_scm_res res;
 738
 739	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
 740					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
 741		return false;
 742
 743	ret = qcom_scm_call(__scm->dev, &desc, &res);
 744
 745	return ret ? false : !!res.result[0];
 746}
 747EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
 748
 749static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
 750{
 751	struct qcom_scm_desc desc = {
 752		.svc = QCOM_SCM_SVC_PIL,
 753		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
 754		.arginfo = QCOM_SCM_ARGS(2),
 755		.args[0] = reset,
 756		.args[1] = 0,
 757		.owner = ARM_SMCCC_OWNER_SIP,
 758	};
 759	struct qcom_scm_res res;
 760	int ret;
 761
 762	ret = qcom_scm_call(__scm->dev, &desc, &res);
 763
 764	return ret ? : res.result[0];
 765}
 766
 767static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 768				     unsigned long idx)
 769{
 770	if (idx != 0)
 771		return -EINVAL;
 772
 773	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
 774}
 775
 776static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
 777				       unsigned long idx)
 778{
 779	if (idx != 0)
 780		return -EINVAL;
 781
 782	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
 783}
 784
 785static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 786	.assert = qcom_scm_pas_reset_assert,
 787	.deassert = qcom_scm_pas_reset_deassert,
 788};
 789
 790int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 791{
 792	struct qcom_scm_desc desc = {
 793		.svc = QCOM_SCM_SVC_IO,
 794		.cmd = QCOM_SCM_IO_READ,
 795		.arginfo = QCOM_SCM_ARGS(1),
 796		.args[0] = addr,
 797		.owner = ARM_SMCCC_OWNER_SIP,
 798	};
 799	struct qcom_scm_res res;
 800	int ret;
 801
 803	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
 804	if (ret >= 0)
 805		*val = res.result[0];
 806
 807	return ret < 0 ? ret : 0;
 808}
 809EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
 810
 811int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 812{
 813	struct qcom_scm_desc desc = {
 814		.svc = QCOM_SCM_SVC_IO,
 815		.cmd = QCOM_SCM_IO_WRITE,
 816		.arginfo = QCOM_SCM_ARGS(2),
 817		.args[0] = addr,
 818		.args[1] = val,
 819		.owner = ARM_SMCCC_OWNER_SIP,
 820	};
 821
 822	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 823}
 824EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
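/*
 * Example (sketch): the IO read/write calls cover registers that only the
 * secure world may access. A read-modify-write of a hypothetical secure
 * register could look like:
 *
 *	unsigned int val;
 *
 *	ret = qcom_scm_io_readl(reg_phys, &val);
 *	if (!ret)
 *		ret = qcom_scm_io_writel(reg_phys, val | BIT(0));
 */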
 825
 826/**
 827 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 828 * supports restore security config interface.
 829 *
 830 * Return true if restore-cfg interface is supported, false if not.
 831 */
 832bool qcom_scm_restore_sec_cfg_available(void)
 833{
 834	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
 835					    QCOM_SCM_MP_RESTORE_SEC_CFG);
 836}
 837EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
 838
 839int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 840{
 841	struct qcom_scm_desc desc = {
 842		.svc = QCOM_SCM_SVC_MP,
 843		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
 844		.arginfo = QCOM_SCM_ARGS(2),
 845		.args[0] = device_id,
 846		.args[1] = spare,
 847		.owner = ARM_SMCCC_OWNER_SIP,
 848	};
 849	struct qcom_scm_res res;
 850	int ret;
 851
 852	ret = qcom_scm_call(__scm->dev, &desc, &res);
 853
 854	return ret ? : res.result[0];
 855}
 856EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
 857
 858int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 859{
 860	struct qcom_scm_desc desc = {
 861		.svc = QCOM_SCM_SVC_MP,
 862		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
 863		.arginfo = QCOM_SCM_ARGS(1),
 864		.args[0] = spare,
 865		.owner = ARM_SMCCC_OWNER_SIP,
 866	};
 867	struct qcom_scm_res res;
 868	int ret;
 869
 870	ret = qcom_scm_call(__scm->dev, &desc, &res);
 871
 872	if (size)
 873		*size = res.result[0];
 874
 875	return ret ? : res.result[1];
 876}
 877EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
 878
 879int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 880{
 881	struct qcom_scm_desc desc = {
 882		.svc = QCOM_SCM_SVC_MP,
 883		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
 884		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
 885					 QCOM_SCM_VAL),
 886		.args[0] = addr,
 887		.args[1] = size,
 888		.args[2] = spare,
 889		.owner = ARM_SMCCC_OWNER_SIP,
 890	};
 891	int ret;
 892
 893	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 894
 895	/* the pg table has been initialized already, ignore the error */
 896	if (ret == -EPERM)
 897		ret = 0;
 898
 899	return ret;
 900}
 901EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
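/*
 * Example (sketch): the two secure page-table calls are used as a pair -
 * query the size the firmware expects, then hand over a buffer of that
 * size. Allocation details (alignment, attributes) are simplified here:
 *
 *	size_t psize;
 *	void *tbl;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	if (ret)
 *		return ret;
 *	tbl = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 *				       get_order(psize));
 *	if (!tbl)
 *		return -ENOMEM;
 *	ret = qcom_scm_iommu_secure_ptbl_init(virt_to_phys(tbl), psize, 0);
 */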
 902
 903int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
 904{
 905	struct qcom_scm_desc desc = {
 906		.svc = QCOM_SCM_SVC_MP,
 907		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
 908		.arginfo = QCOM_SCM_ARGS(2),
 909		.args[0] = size,
 910		.args[1] = spare,
 911		.owner = ARM_SMCCC_OWNER_SIP,
 912	};
 913
 914	return qcom_scm_call(__scm->dev, &desc, NULL);
 915}
 916EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
 917
 918int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
 919				   u32 cp_nonpixel_start,
 920				   u32 cp_nonpixel_size)
 921{
 922	int ret;
 923	struct qcom_scm_desc desc = {
 924		.svc = QCOM_SCM_SVC_MP,
 925		.cmd = QCOM_SCM_MP_VIDEO_VAR,
 926		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
 927					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 928		.args[0] = cp_start,
 929		.args[1] = cp_size,
 930		.args[2] = cp_nonpixel_start,
 931		.args[3] = cp_nonpixel_size,
 932		.owner = ARM_SMCCC_OWNER_SIP,
 933	};
 934	struct qcom_scm_res res;
 935
 936	ret = qcom_scm_call(__scm->dev, &desc, &res);
 937
 938	return ret ? : res.result[0];
 939}
 940EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
 941
 942static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
 943				 size_t mem_sz, phys_addr_t src, size_t src_sz,
 944				 phys_addr_t dest, size_t dest_sz)
 945{
 946	int ret;
 947	struct qcom_scm_desc desc = {
 948		.svc = QCOM_SCM_SVC_MP,
 949		.cmd = QCOM_SCM_MP_ASSIGN,
 950		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
 951					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
 952					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 953		.args[0] = mem_region,
 954		.args[1] = mem_sz,
 955		.args[2] = src,
 956		.args[3] = src_sz,
 957		.args[4] = dest,
 958		.args[5] = dest_sz,
 959		.args[6] = 0,
 960		.owner = ARM_SMCCC_OWNER_SIP,
 961	};
 962	struct qcom_scm_res res;
 963
 964	ret = qcom_scm_call(dev, &desc, &res);
 965
 966	return ret ? : res.result[0];
 967}
 968
 969/**
 970 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
  971 * @mem_addr: mem region whose ownership needs to be reassigned
  972 * @mem_sz:   size of the region.
  973 * @srcvm:    vmid for current set of owners, each set bit in
  974 *            flag indicates a unique owner
 975 * @newvm:    array having new owners and corresponding permission
 976 *            flags
 977 * @dest_cnt: number of owners in next set.
 978 *
 979 * Return negative errno on failure or 0 on success with @srcvm updated.
 980 */
 981int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 982			u64 *srcvm,
 983			const struct qcom_scm_vmperm *newvm,
 984			unsigned int dest_cnt)
 985{
 986	struct qcom_scm_current_perm_info *destvm;
 987	struct qcom_scm_mem_map_info *mem_to_map;
 988	phys_addr_t mem_to_map_phys;
 989	phys_addr_t dest_phys;
 990	dma_addr_t ptr_phys;
 991	size_t mem_to_map_sz;
 992	size_t dest_sz;
 993	size_t src_sz;
 994	size_t ptr_sz;
 995	int next_vm;
 996	__le32 *src;
 997	void *ptr;
 998	int ret, i, b;
 999	u64 srcvm_bits = *srcvm;
1000
1001	src_sz = hweight64(srcvm_bits) * sizeof(*src);
1002	mem_to_map_sz = sizeof(*mem_to_map);
1003	dest_sz = dest_cnt * sizeof(*destvm);
1004	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1005			ALIGN(dest_sz, SZ_64);
1006
1007	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
1008	if (!ptr)
1009		return -ENOMEM;
1010
1011	/* Fill source vmid detail */
1012	src = ptr;
1013	i = 0;
1014	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1015		if (srcvm_bits & BIT(b))
1016			src[i++] = cpu_to_le32(b);
1017	}
1018
1019	/* Fill details of mem buff to map */
1020	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1021	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
1022	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1023	mem_to_map->mem_size = cpu_to_le64(mem_sz);
1024
1025	next_vm = 0;
1026	/* Fill details of next vmid detail */
1027	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1028	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1029	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1030		destvm->vmid = cpu_to_le32(newvm->vmid);
1031		destvm->perm = cpu_to_le32(newvm->perm);
1032		destvm->ctx = 0;
1033		destvm->ctx_size = 0;
1034		next_vm |= BIT(newvm->vmid);
1035	}
1036
1037	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1038				    ptr_phys, src_sz, dest_phys, dest_sz);
1039	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
1040	if (ret) {
1041		dev_err(__scm->dev,
1042			"Assign memory protection call failed %d\n", ret);
1043		return -EINVAL;
1044	}
1045
1046	*srcvm = next_vm;
1047	return 0;
1048}
1049EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
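/*
 * Example (sketch): hand a buffer over to another VM; note that @srcvm is
 * updated so the same variable can be reused to give the memory back. The
 * VMID/permission macros come from <linux/firmware/qcom/qcom_scm.h>; the
 * MSS destination is just an example:
 *
 *	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *
 *	ret = qcom_scm_assign_mem(buf_phys, buf_size, &srcvm, newvm,
 *				  ARRAY_SIZE(newvm));
 */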
1050
1051/**
1052 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
1053 */
1054bool qcom_scm_ocmem_lock_available(void)
1055{
1056	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
1057					    QCOM_SCM_OCMEM_LOCK_CMD);
1058}
1059EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
1060
1061/**
1062 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
1063 * region to the specified initiator
1064 *
1065 * @id:     tz initiator id
1066 * @offset: OCMEM offset
1067 * @size:   OCMEM size
1068 * @mode:   access mode (WIDE/NARROW)
1069 */
1070int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
1071			u32 mode)
1072{
1073	struct qcom_scm_desc desc = {
1074		.svc = QCOM_SCM_SVC_OCMEM,
1075		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1076		.args[0] = id,
1077		.args[1] = offset,
1078		.args[2] = size,
1079		.args[3] = mode,
1080		.arginfo = QCOM_SCM_ARGS(4),
1081	};
1082
1083	return qcom_scm_call(__scm->dev, &desc, NULL);
1084}
1085EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
1086
1087/**
1088 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1089 * region from the specified initiator
1090 *
1091 * @id:     tz initiator id
1092 * @offset: OCMEM offset
1093 * @size:   OCMEM size
1094 */
1095int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1096{
1097	struct qcom_scm_desc desc = {
1098		.svc = QCOM_SCM_SVC_OCMEM,
1099		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1100		.args[0] = id,
1101		.args[1] = offset,
1102		.args[2] = size,
1103		.arginfo = QCOM_SCM_ARGS(3),
1104	};
1105
1106	return qcom_scm_call(__scm->dev, &desc, NULL);
1107}
1108EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
1109
1110/**
1111 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1112 *
1113 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1114 *	   qcom_scm_ice_set_key() are available.
1115 */
1116bool qcom_scm_ice_available(void)
1117{
1118	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1119					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1120		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1121					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1122}
1123EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
1124
1125/**
1126 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1127 * @index: the keyslot to invalidate
1128 *
1129 * The UFSHCI and eMMC standards define a standard way to do this, but it
1130 * doesn't work on these SoCs; only this SCM call does.
1131 *
1132 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1133 * call doesn't specify which ICE instance the keyslot belongs to.
1134 *
1135 * Return: 0 on success; -errno on failure.
1136 */
1137int qcom_scm_ice_invalidate_key(u32 index)
1138{
1139	struct qcom_scm_desc desc = {
1140		.svc = QCOM_SCM_SVC_ES,
1141		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1142		.arginfo = QCOM_SCM_ARGS(1),
1143		.args[0] = index,
1144		.owner = ARM_SMCCC_OWNER_SIP,
1145	};
1146
1147	return qcom_scm_call(__scm->dev, &desc, NULL);
1148}
1149EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
1150
1151/**
1152 * qcom_scm_ice_set_key() - Set an inline encryption key
1153 * @index: the keyslot into which to set the key
1154 * @key: the key to program
1155 * @key_size: the size of the key in bytes
1156 * @cipher: the encryption algorithm the key is for
1157 * @data_unit_size: the encryption data unit size, i.e. the size of each
1158 *		    individual plaintext and ciphertext.  Given in 512-byte
1159 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1160 *
1161 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1162 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1163 *
1164 * The UFSHCI and eMMC standards define a standard way to do this, but it
1165 * doesn't work on these SoCs; only this SCM call does.
1166 *
1167 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1168 * call doesn't specify which ICE instance the keyslot belongs to.
1169 *
1170 * Return: 0 on success; -errno on failure.
1171 */
1172int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1173			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1174{
1175	struct qcom_scm_desc desc = {
1176		.svc = QCOM_SCM_SVC_ES,
1177		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1178		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1179					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1180					 QCOM_SCM_VAL),
1181		.args[0] = index,
1182		.args[2] = key_size,
1183		.args[3] = cipher,
1184		.args[4] = data_unit_size,
1185		.owner = ARM_SMCCC_OWNER_SIP,
1186	};
1187	void *keybuf;
1188	dma_addr_t key_phys;
1189	int ret;
1190
1191	/*
1192	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1193	 * physical address that's been properly flushed.  The sanctioned way to
1194	 * do this is by using the DMA API.  But as is best practice for crypto
1195	 * keys, we also must wipe the key after use.  This makes kmemdup() +
1196	 * dma_map_single() not clearly correct, since the DMA API can use
1197	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
1198	 * keys is normally rare and thus not performance-critical.
1199	 */
1200
1201	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1202				    GFP_KERNEL);
1203	if (!keybuf)
1204		return -ENOMEM;
1205	memcpy(keybuf, key, key_size);
1206	desc.args[1] = key_phys;
1207
1208	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1209
1210	memzero_explicit(keybuf, key_size);
1211
1212	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1213	return ret;
1214}
1215EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
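/*
 * Example (sketch): program a 64-byte AES-256-XTS key into keyslot 0 with
 * 4096-byte data units (8 * 512 bytes), then invalidate the slot when the
 * key is no longer needed. `key` is a placeholder buffer:
 *
 *	ret = qcom_scm_ice_set_key(0, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	ret = qcom_scm_ice_invalidate_key(0);
 */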
1216
1217/**
1218 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1219 *
1220 * Return true if HDCP is supported, false if not.
1221 */
1222bool qcom_scm_hdcp_available(void)
1223{
1224	bool avail;
1225	int ret = qcom_scm_clk_enable();
1226
1227	if (ret)
 1228		return false;
1229
1230	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1231						QCOM_SCM_HDCP_INVOKE);
1232
1233	qcom_scm_clk_disable();
1234
1235	return avail;
1236}
1237EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
1238
1239/**
1240 * qcom_scm_hdcp_req() - Send HDCP request.
1241 * @req: HDCP request array
1242 * @req_cnt: HDCP request array count
1243 * @resp: response buffer passed to SCM
1244 *
1245 * Write HDCP register(s) through SCM.
1246 */
1247int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1248{
1249	int ret;
1250	struct qcom_scm_desc desc = {
1251		.svc = QCOM_SCM_SVC_HDCP,
1252		.cmd = QCOM_SCM_HDCP_INVOKE,
1253		.arginfo = QCOM_SCM_ARGS(10),
1254		.args = {
1255			req[0].addr,
1256			req[0].val,
1257			req[1].addr,
1258			req[1].val,
1259			req[2].addr,
1260			req[2].val,
1261			req[3].addr,
1262			req[3].val,
1263			req[4].addr,
1264			req[4].val
1265		},
1266		.owner = ARM_SMCCC_OWNER_SIP,
1267	};
1268	struct qcom_scm_res res;
1269
1270	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1271		return -ERANGE;
1272
1273	ret = qcom_scm_clk_enable();
1274	if (ret)
1275		return ret;
1276
1277	ret = qcom_scm_call(__scm->dev, &desc, &res);
1278	*resp = res.result[0];
1279
1280	qcom_scm_clk_disable();
1281
1282	return ret;
1283}
1284EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
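/*
 * Example (sketch): callers pass up to QCOM_SCM_HDCP_MAX_REQ_CNT (five)
 * address/value pairs and receive a single result word back. The register
 * address below is a placeholder, not a documented HDCP register:
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = 0xdeadbeef, .val = 0x1 },	// hypothetical
 *	};
 *	u32 resp;
 *
 *	ret = qcom_scm_hdcp_req(req, QCOM_SCM_HDCP_MAX_REQ_CNT, &resp);
 */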
1285
1286int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1287{
1288	struct qcom_scm_desc desc = {
1289		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1290		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1291		.arginfo = QCOM_SCM_ARGS(3),
1292		.args[0] = sec_id,
1293		.args[1] = ctx_num,
1294		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1295		.owner = ARM_SMCCC_OWNER_SIP,
1296	};
1297
1298	return qcom_scm_call(__scm->dev, &desc, NULL);
1299}
1300EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
1301
1302int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1303{
1304	struct qcom_scm_desc desc = {
1305		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1306		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1307		.arginfo = QCOM_SCM_ARGS(2),
1308		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1309		.args[1] = en,
1310		.owner = ARM_SMCCC_OWNER_SIP,
1311	};
1312
1314	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1315}
1316EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
1317
1318bool qcom_scm_lmh_dcvsh_available(void)
1319{
1320	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1321}
1322EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
1323
1324int qcom_scm_lmh_profile_change(u32 profile_id)
1325{
1326	struct qcom_scm_desc desc = {
1327		.svc = QCOM_SCM_SVC_LMH,
1328		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1329		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1330		.args[0] = profile_id,
1331		.owner = ARM_SMCCC_OWNER_SIP,
1332	};
1333
1334	return qcom_scm_call(__scm->dev, &desc, NULL);
1335}
1336EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
1337
1338int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1339		       u64 limit_node, u32 node_id, u64 version)
1340{
1341	dma_addr_t payload_phys;
1342	u32 *payload_buf;
1343	int ret, payload_size = 5 * sizeof(u32);
1344
1345	struct qcom_scm_desc desc = {
1346		.svc = QCOM_SCM_SVC_LMH,
1347		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1348		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1349					QCOM_SCM_VAL, QCOM_SCM_VAL),
1350		.args[1] = payload_size,
1351		.args[2] = limit_node,
1352		.args[3] = node_id,
1353		.args[4] = version,
1354		.owner = ARM_SMCCC_OWNER_SIP,
1355	};
1356
1357	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
1358	if (!payload_buf)
1359		return -ENOMEM;
1360
1361	payload_buf[0] = payload_fn;
1362	payload_buf[1] = 0;
1363	payload_buf[2] = payload_reg;
1364	payload_buf[3] = 1;
1365	payload_buf[4] = payload_val;
1366
1367	desc.args[0] = payload_phys;
1368
1369	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1370
1371	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
1372	return ret;
1373}
1374EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
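/*
 * Example (sketch): an LMh consumer enables a limit algorithm by sending one
 * (function, register, value) triple per call. The LMH_* identifiers are
 * defined by the consumer driver and are shown here only as illustrative
 * placeholders:
 *
 *	ret = qcom_scm_lmh_dcvsh(LMH_SUB_FN_THERMAL, LMH_ALGO_MODE_ENABLE, 1,
 *				 LMH_NODE_DCVS, node_id, LMH_VERSION);
 */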
1375
1376static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1377{
1378	struct device_node *tcsr;
1379	struct device_node *np = dev->of_node;
1380	struct resource res;
1381	u32 offset;
1382	int ret;
1383
1384	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1385	if (!tcsr)
1386		return 0;
1387
1388	ret = of_address_to_resource(tcsr, 0, &res);
1389	of_node_put(tcsr);
1390	if (ret)
1391		return ret;
1392
1393	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1394	if (ret < 0)
1395		return ret;
1396
1397	*addr = res.start + offset;
1398
1399	return 0;
1400}
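/*
 * Example (sketch): the "qcom,dload-mode" property parsed above is a phandle
 * to the TCSR syscon plus a register offset, e.g. in DT (illustrative
 * offset value):
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */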
1401
1402#ifdef CONFIG_QCOM_QSEECOM
1403
1404/* Lock for QSEECOM SCM call executions */
1405static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1406
1407static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1408				   struct qcom_scm_qseecom_resp *res)
1409{
1410	struct qcom_scm_res scm_res = {};
1411	int status;
1412
1413	/*
1414	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1415	 * require the respective call lock to be held.
1416	 */
1417	lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1418
1419	status = qcom_scm_call(__scm->dev, desc, &scm_res);
1420
1421	res->result = scm_res.result[0];
1422	res->resp_type = scm_res.result[1];
1423	res->data = scm_res.result[2];
1424
1425	if (status)
1426		return status;
1427
1428	return 0;
1429}
1430
1431/**
1432 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1433 * @desc: SCM call descriptor.
1434 * @res:  SCM call response (output).
1435 *
 1436 * Performs the QSEECOM SCM call described by @desc, returning the response in
 1437 * @res.
1438 *
1439 * Return: Zero on success, nonzero on failure.
1440 */
1441static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1442				 struct qcom_scm_qseecom_resp *res)
1443{
1444	int status;
1445
1446	/*
 1447	 * Note: Multiple QSEECOM SCM calls should not be executed at the same time,
1448	 * so lock things here. This needs to be extended to callback/listener
1449	 * handling when support for that is implemented.
1450	 */
1451
1452	mutex_lock(&qcom_scm_qseecom_call_lock);
1453	status = __qcom_scm_qseecom_call(desc, res);
1454	mutex_unlock(&qcom_scm_qseecom_call_lock);
1455
1456	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1457		__func__, desc->owner, desc->svc, desc->cmd, res->result,
1458		res->resp_type, res->data);
1459
1460	if (status) {
1461		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1462		return status;
1463	}
1464
1465	/*
1466	 * TODO: Handle incomplete and blocked calls:
1467	 *
1468	 * Incomplete and blocked calls are not supported yet. Some devices
1469	 * and/or commands require those, some don't. Let's warn about them
1470	 * prominently in case someone attempts to try these commands with a
1471	 * device/command combination that isn't supported yet.
1472	 */
1473	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1474	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1475
1476	return 0;
1477}
1478
1479/**
1480 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1481 * @version: Pointer where the QSEECOM version will be stored.
1482 *
 1483 * Performs the QSEECOM SCM call that queries the QSEECOM version currently
 1484 * running in the TrustZone.
1485 *
1486 * Return: Zero on success, nonzero on failure.
1487 */
1488static int qcom_scm_qseecom_get_version(u32 *version)
1489{
1490	struct qcom_scm_desc desc = {};
1491	struct qcom_scm_qseecom_resp res = {};
1492	u32 feature = 10;
1493	int ret;
1494
1495	desc.owner = QSEECOM_TZ_OWNER_SIP;
1496	desc.svc = QSEECOM_TZ_SVC_INFO;
1497	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1498	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1499	desc.args[0] = feature;
1500
1501	ret = qcom_scm_qseecom_call(&desc, &res);
1502	if (ret)
1503		return ret;
1504
1505	*version = res.result;
1506	return 0;
1507}
1508
1509/**
1510 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1511 * @app_name: The name of the app.
1512 * @app_id:   The returned app ID.
1513 *
 1514 * Query and return the application ID of the QSEE app identified by the given
1515 * name. This returned ID is the unique identifier of the app required for
1516 * subsequent communication.
1517 *
1518 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1519 * loaded or could not be found.
1520 */
1521int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1522{
1523	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1524	unsigned long app_name_len = strlen(app_name);
1525	struct qcom_scm_desc desc = {};
1526	struct qcom_scm_qseecom_resp res = {};
1527	dma_addr_t name_buf_phys;
1528	char *name_buf;
1529	int status;
1530
1531	if (app_name_len >= name_buf_size)
1532		return -EINVAL;
1533
1534	name_buf = kzalloc(name_buf_size, GFP_KERNEL);
1535	if (!name_buf)
1536		return -ENOMEM;
1537
1538	memcpy(name_buf, app_name, app_name_len);
1539
1540	name_buf_phys = dma_map_single(__scm->dev, name_buf, name_buf_size, DMA_TO_DEVICE);
1541	status = dma_mapping_error(__scm->dev, name_buf_phys);
1542	if (status) {
1543		kfree(name_buf);
1544		dev_err(__scm->dev, "qseecom: failed to map dma address\n");
1545		return status;
1546	}
1547
1548	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1549	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1550	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1551	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1552	desc.args[0] = name_buf_phys;
1553	desc.args[1] = app_name_len;
1554
1555	status = qcom_scm_qseecom_call(&desc, &res);
1556	dma_unmap_single(__scm->dev, name_buf_phys, name_buf_size, DMA_TO_DEVICE);
1557	kfree(name_buf);
1558
1559	if (status)
1560		return status;
1561
1562	if (res.result == QSEECOM_RESULT_FAILURE)
1563		return -ENOENT;
1564
1565	if (res.result != QSEECOM_RESULT_SUCCESS)
1566		return -EINVAL;
1567
1568	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1569		return -EINVAL;
1570
1571	*app_id = res.data;
1572	return 0;
1573}
1574EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
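/*
 * Example (sketch): resolve an app ID by name before sending it commands.
 * The app name is a placeholder; real names are firmware-specific:
 *
 *	u32 app_id;
 *
 *	ret = qcom_scm_qseecom_app_get_id("example.tz.app", &app_id);
 *	if (ret)
 *		return ret;
 */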
1575
1576/**
1577 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1578 * @app_id:   The ID of the target app.
1579 * @req:      Request buffer sent to the app (must be DMA-mappable).
1580 * @req_size: Size of the request buffer.
1581 * @rsp:      Response buffer, written to by the app (must be DMA-mappable).
1582 * @rsp_size: Size of the response buffer.
1583 *
 1584 * Sends a request to the QSEE app associated with the given ID and reads back
1585 * its response. The caller must provide two DMA memory regions, one for the
1586 * request and one for the response, and fill out the @req region with the
1587 * respective (app-specific) request data. The QSEE app reads this and returns
1588 * its response in the @rsp region.
1589 *
1590 * Return: Zero on success, nonzero on failure.
1591 */
1592int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
1593			      size_t rsp_size)
1594{
1595	struct qcom_scm_qseecom_resp res = {};
1596	struct qcom_scm_desc desc = {};
1597	dma_addr_t req_phys;
1598	dma_addr_t rsp_phys;
1599	int status;
1600
1601	/* Map request buffer */
1602	req_phys = dma_map_single(__scm->dev, req, req_size, DMA_TO_DEVICE);
1603	status = dma_mapping_error(__scm->dev, req_phys);
1604	if (status) {
1605		dev_err(__scm->dev, "qseecom: failed to map request buffer\n");
1606		return status;
1607	}
1608
1609	/* Map response buffer */
1610	rsp_phys = dma_map_single(__scm->dev, rsp, rsp_size, DMA_FROM_DEVICE);
1611	status = dma_mapping_error(__scm->dev, rsp_phys);
1612	if (status) {
1613		dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
1614		dev_err(__scm->dev, "qseecom: failed to map response buffer\n");
1615		return status;
1616	}
1617
1618	/* Set up SCM call data */
1619	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1620	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1621	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1622	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1623				     QCOM_SCM_RW, QCOM_SCM_VAL,
1624				     QCOM_SCM_RW, QCOM_SCM_VAL);
1625	desc.args[0] = app_id;
1626	desc.args[1] = req_phys;
1627	desc.args[2] = req_size;
1628	desc.args[3] = rsp_phys;
1629	desc.args[4] = rsp_size;
1630
1631	/* Perform call */
1632	status = qcom_scm_qseecom_call(&desc, &res);
1633
1634	/* Unmap buffers */
1635	dma_unmap_single(__scm->dev, rsp_phys, rsp_size, DMA_FROM_DEVICE);
1636	dma_unmap_single(__scm->dev, req_phys, req_size, DMA_TO_DEVICE);
1637
1638	if (status)
1639		return status;
1640
1641	if (res.result != QSEECOM_RESULT_SUCCESS)
1642		return -EIO;
1643
1644	return 0;
1645}
1646EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
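/*
 * Example (sketch): exchange one request/response pair with an app. The
 * buffers must be DMA-mappable, so kmalloc memory (not stack or vmalloc) is
 * used; the request/response layouts are app-specific placeholders:
 *
 *	struct my_app_req *req = kzalloc(sizeof(*req), GFP_KERNEL);
 *	struct my_app_rsp *rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 *	int ret = -ENOMEM;
 *
 *	if (req && rsp) {
 *		req->cmd = MY_APP_CMD_EXAMPLE;	// hypothetical command
 *		ret = qcom_scm_qseecom_app_send(app_id, req, sizeof(*req),
 *						rsp, sizeof(*rsp));
 *	}
 *	kfree(rsp);
 *	kfree(req);
 */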
1647
1648/*
1649 * We do not yet support re-entrant calls via the qseecom interface. To prevent
 1650 * any potential issues with this, only allow validated machines for now.
1651 */
1652static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
1653	{ .compatible = "lenovo,thinkpad-x13s", },
1654	{ }
1655};
1656
1657static bool qcom_scm_qseecom_machine_is_allowed(void)
1658{
1659	struct device_node *np;
1660	bool match;
1661
1662	np = of_find_node_by_path("/");
1663	if (!np)
1664		return false;
1665
1666	match = of_match_node(qcom_scm_qseecom_allowlist, np);
1667	of_node_put(np);
1668
1669	return match;
1670}
1671
1672static void qcom_scm_qseecom_free(void *data)
1673{
1674	struct platform_device *qseecom_dev = data;
1675
1676	platform_device_del(qseecom_dev);
1677	platform_device_put(qseecom_dev);
1678}
1679
1680static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1681{
1682	struct platform_device *qseecom_dev;
1683	u32 version;
1684	int ret;
1685
1686	/*
1687	 * Note: We do two steps of validation here: First, we try to query the
1688	 * QSEECOM version as a check to see if the interface exists on this
1689	 * device. Second, we check against known good devices due to current
1690	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
1691	 *
1692	 * Note that we deliberately do the machine check after the version
1693	 * check so that we can log potentially supported devices. This should
1694	 * be safe as downstream sources indicate that the version query is
1695	 * neither blocking nor reentrant.
1696	 */
1697	ret = qcom_scm_qseecom_get_version(&version);
1698	if (ret)
1699		return 0;
1700
1701	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
1702
1703	if (!qcom_scm_qseecom_machine_is_allowed()) {
1704		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
1705		return 0;
1706	}
1707
1708	/*
1709	 * Set up QSEECOM interface device. All application clients will be
1710	 * set up and managed by the corresponding driver for it.
1711	 */
1712	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
1713	if (!qseecom_dev)
1714		return -ENOMEM;
1715
1716	qseecom_dev->dev.parent = scm->dev;
1717
1718	ret = platform_device_add(qseecom_dev);
1719	if (ret) {
1720		platform_device_put(qseecom_dev);
1721		return ret;
1722	}
1723
1724	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
1725}
1726
1727#else /* CONFIG_QCOM_QSEECOM */
1728
1729static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1730{
1731	return 0;
1732}
1733
1734#endif /* CONFIG_QCOM_QSEECOM */
1735
1736/**
1737 * qcom_scm_is_available() - Checks if SCM is available
1738 */
1739bool qcom_scm_is_available(void)
1740{
1741	return !!__scm;
1742}
1743EXPORT_SYMBOL_GPL(qcom_scm_is_available);
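/*
 * Example (sketch): consumers that may probe before the SCM device has bound
 * should defer rather than fail hard:
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */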
1744
1745static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1746{
1747	/* FW currently only supports a single wq_ctx (zero).
1748	 * TODO: Update this logic to include dynamic allocation and lookup of
1749	 * completion structs when FW supports more wq_ctx values.
1750	 */
1751	if (wq_ctx != 0) {
1752		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1753		return -EINVAL;
1754	}
1755
1756	return 0;
1757}
1758
1759int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
1760{
1761	int ret;
1762
1763	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1764	if (ret)
1765		return ret;
1766
1767	wait_for_completion(&__scm->waitq_comp);
1768
1769	return 0;
1770}
1771
1772static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
1773{
1774	int ret;
1775
1776	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1777	if (ret)
1778		return ret;
1779
1780	complete(&__scm->waitq_comp);
1781
1782	return 0;
1783}
1784
1785static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
1786{
1787	int ret;
1788	struct qcom_scm *scm = data;
1789	u32 wq_ctx, flags, more_pending = 0;
1790
1791	do {
1792		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
1793		if (ret) {
1794			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
1795			goto out;
1796		}
1797
1798		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
1799		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
1800			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
1801			goto out;
1802		}
1803
1804		ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
1805		if (ret)
1806			goto out;
1807	} while (more_pending);
1808
1809out:
1810	return IRQ_HANDLED;
1811}
1812
1813static int qcom_scm_probe(struct platform_device *pdev)
1814{
1815	struct qcom_scm *scm;
1816	int irq, ret;
1817
1818	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1819	if (!scm)
1820		return -ENOMEM;
1821
1822	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1823	if (ret < 0)
1824		return ret;
1825
1826	mutex_init(&scm->scm_bw_lock);
1827
1828	scm->path = devm_of_icc_get(&pdev->dev, NULL);
1829	if (IS_ERR(scm->path))
1830		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
1831				     "failed to acquire interconnect path\n");
1832
1833	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
1834	if (IS_ERR(scm->core_clk))
1835		return PTR_ERR(scm->core_clk);
1836
1837	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
1838	if (IS_ERR(scm->iface_clk))
1839		return PTR_ERR(scm->iface_clk);
1840
1841	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
1842	if (IS_ERR(scm->bus_clk))
1843		return PTR_ERR(scm->bus_clk);
1844
1845	scm->reset.ops = &qcom_scm_pas_reset_ops;
1846	scm->reset.nr_resets = 1;
1847	scm->reset.of_node = pdev->dev.of_node;
1848	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1849	if (ret)
1850		return ret;
1851
1852	/* vote for max clk rate for highest performance */
1853	ret = clk_set_rate(scm->core_clk, INT_MAX);
1854	if (ret)
1855		return ret;
1856
1857	__scm = scm;
1858	__scm->dev = &pdev->dev;
1859
1860	init_completion(&__scm->waitq_comp);
1861
1862	irq = platform_get_irq_optional(pdev, 0);
1863	if (irq < 0) {
1864		if (irq != -ENXIO)
1865			return irq;
1866	} else {
1867		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
1868						IRQF_ONESHOT, "qcom-scm", __scm);
1869		if (ret < 0)
1870			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
1871	}
1872
1873	__get_convention();
1874
1875	/*
 1876	 * If requested, enable "download mode": from this point on, a warm boot
 1877	 * will cause the boot stages to enter download mode, unless disabled
 1878	 * below by a clean shutdown/reboot.
1879	 */
1880	if (download_mode)
1881		qcom_scm_set_download_mode(true);
1882
1884	/*
1885	 * Disable SDI if indicated by DT that it is enabled by default.
1886	 */
1887	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled"))
1888		qcom_scm_disable_sdi();
1889
1890	/*
1891	 * Initialize the QSEECOM interface.
1892	 *
1893	 * Note: QSEECOM is fairly self-contained and this only adds the
1894	 * interface device (the driver of which does most of the heavy
1895	 * lifting). So any errors returned here should be either -ENOMEM or
1896	 * -EINVAL (the latter only if there's a bug in our code).
1897	 * This means that there is no need to bring down the whole SCM driver.
1898	 * Just log the error instead and let SCM live.
1899	 */
1900	ret = qcom_scm_qseecom_init(scm);
1901	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
1902
1903	return 0;
1904}
1905
1906static void qcom_scm_shutdown(struct platform_device *pdev)
1907{
1908	/* Clean shutdown, disable download mode to allow normal restart */
1909	qcom_scm_set_download_mode(false);
1910}
1911
1912static const struct of_device_id qcom_scm_dt_match[] = {
1913	{ .compatible = "qcom,scm" },
1914
1915	/* Legacy entries kept for backwards compatibility */
1916	{ .compatible = "qcom,scm-apq8064" },
1917	{ .compatible = "qcom,scm-apq8084" },
1918	{ .compatible = "qcom,scm-ipq4019" },
1919	{ .compatible = "qcom,scm-msm8953" },
1920	{ .compatible = "qcom,scm-msm8974" },
1921	{ .compatible = "qcom,scm-msm8996" },
1922	{}
1923};
1924MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
1925
1926static struct platform_driver qcom_scm_driver = {
1927	.driver = {
1928		.name	= "qcom_scm",
1929		.of_match_table = qcom_scm_dt_match,
1930		.suppress_bind_attrs = true,
1931	},
1932	.probe = qcom_scm_probe,
1933	.shutdown = qcom_scm_shutdown,
1934};
1935
1936static int __init qcom_scm_init(void)
1937{
1938	return platform_driver_register(&qcom_scm_driver);
1939}
1940subsys_initcall(qcom_scm_init);
1941
1942MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1943MODULE_LICENSE("GPL v2");
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
   3 * Copyright (C) 2015 Linaro Ltd.
   4 */
   5
   6#include <linux/arm-smccc.h>
   7#include <linux/bitfield.h>
   8#include <linux/bits.h>
   9#include <linux/cleanup.h>
  10#include <linux/clk.h>
  11#include <linux/completion.h>
  12#include <linux/cpumask.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/export.h>
  16#include <linux/firmware/qcom/qcom_scm.h>
  17#include <linux/firmware/qcom/qcom_tzmem.h>
  18#include <linux/init.h>
  19#include <linux/interconnect.h>
  20#include <linux/interrupt.h>
  21#include <linux/kstrtox.h>
  22#include <linux/module.h>
  23#include <linux/of.h>
  24#include <linux/of_address.h>
  25#include <linux/of_irq.h>
  26#include <linux/of_platform.h>
  27#include <linux/of_reserved_mem.h>
  28#include <linux/platform_device.h>
  29#include <linux/reset-controller.h>
  30#include <linux/sizes.h>
  31#include <linux/types.h>
  32
  33#include "qcom_scm.h"
  34#include "qcom_tzmem.h"
  35
  36static u32 download_mode;
  37
  38struct qcom_scm {
  39	struct device *dev;
  40	struct clk *core_clk;
  41	struct clk *iface_clk;
  42	struct clk *bus_clk;
  43	struct icc_path *path;
  44	struct completion waitq_comp;
  45	struct reset_controller_dev reset;
  46
  47	/* control access to the interconnect path */
  48	struct mutex scm_bw_lock;
  49	int scm_vote_count;
  50
  51	u64 dload_mode_addr;
  52
  53	struct qcom_tzmem_pool *mempool;
  54};
  55
  56struct qcom_scm_current_perm_info {
  57	__le32 vmid;
  58	__le32 perm;
  59	__le64 ctx;
  60	__le32 ctx_size;
  61	__le32 unused;
  62};
  63
  64struct qcom_scm_mem_map_info {
  65	__le64 mem_addr;
  66	__le64 mem_size;
  67};
  68
  69/**
  70 * struct qcom_scm_qseecom_resp - QSEECOM SCM call response.
  71 * @result:    Result or status of the SCM call. See &enum qcom_scm_qseecom_result.
  72 * @resp_type: Type of the response. See &enum qcom_scm_qseecom_resp_type.
  73 * @data:      Response data. The type of this data is given in @resp_type.
  74 */
  75struct qcom_scm_qseecom_resp {
  76	u64 result;
  77	u64 resp_type;
  78	u64 data;
  79};
  80
  81enum qcom_scm_qseecom_result {
  82	QSEECOM_RESULT_SUCCESS			= 0,
  83	QSEECOM_RESULT_INCOMPLETE		= 1,
  84	QSEECOM_RESULT_BLOCKED_ON_LISTENER	= 2,
  85	QSEECOM_RESULT_FAILURE			= 0xFFFFFFFF,
  86};
  87
  88enum qcom_scm_qseecom_resp_type {
  89	QSEECOM_SCM_RES_APP_ID			= 0xEE01,
  90	QSEECOM_SCM_RES_QSEOS_LISTENER_ID	= 0xEE02,
  91};
  92
  93enum qcom_scm_qseecom_tz_owner {
  94	QSEECOM_TZ_OWNER_SIP			= 2,
  95	QSEECOM_TZ_OWNER_TZ_APPS		= 48,
  96	QSEECOM_TZ_OWNER_QSEE_OS		= 50
  97};
  98
  99enum qcom_scm_qseecom_tz_svc {
 100	QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER	= 0,
 101	QSEECOM_TZ_SVC_APP_MGR			= 1,
 102	QSEECOM_TZ_SVC_INFO			= 6,
 103};
 104
 105enum qcom_scm_qseecom_tz_cmd_app {
 106	QSEECOM_TZ_CMD_APP_SEND			= 1,
 107	QSEECOM_TZ_CMD_APP_LOOKUP		= 3,
 108};
 109
 110enum qcom_scm_qseecom_tz_cmd_info {
 111	QSEECOM_TZ_CMD_INFO_VERSION		= 3,
 112};
 113
 114#define QSEECOM_MAX_APP_NAME_SIZE		64
 115#define SHMBRIDGE_RESULT_NOTSUPP		4
 116
 117/* Each bit configures cold/warm boot address for one of the 4 CPUs */
 118static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
 119	0, BIT(0), BIT(3), BIT(5)
 120};
 121static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
 122	BIT(2), BIT(1), BIT(4), BIT(6)
 123};
 124
 125#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
 126
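/*
 * Layout of the download-mode cookie in the TCSR register (see
 * qcom_scm_find_dload_address()): bits [5:4] select which dump is taken
 * on reboot - none, full, mini or both.
 */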
 127#define QCOM_DLOAD_MASK		GENMASK(5, 4)
 128#define QCOM_DLOAD_NODUMP	0
 129#define QCOM_DLOAD_FULLDUMP	1
 130#define QCOM_DLOAD_MINIDUMP	2
 131#define QCOM_DLOAD_BOTHDUMP	3
 132
 133static const char * const qcom_scm_convention_names[] = {
 134	[SMC_CONVENTION_UNKNOWN] = "unknown",
 135	[SMC_CONVENTION_ARM_32] = "smc arm 32",
 136	[SMC_CONVENTION_ARM_64] = "smc arm 64",
 137	[SMC_CONVENTION_LEGACY] = "smc legacy",
 138};
 139
 140static const char * const download_mode_name[] = {
 141	[QCOM_DLOAD_NODUMP]	= "off",
 142	[QCOM_DLOAD_FULLDUMP]	= "full",
 143	[QCOM_DLOAD_MINIDUMP]	= "mini",
 144	[QCOM_DLOAD_BOTHDUMP]	= "full,mini",
 145};
 146
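/*
 * Single global SCM instance: published by qcom_scm_probe() with
 * smp_store_release() and read via smp_load_acquire() in
 * qcom_scm_is_available(). Most helpers below assume it is non-NULL.
 */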
 147static struct qcom_scm *__scm;
 148
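/*
 * Enable the core/iface/bus clocks around SCM calls on older platforms.
 * All three clocks are optional and may be NULL, in which case the
 * clk_* calls below are no-ops.
 */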
 149static int qcom_scm_clk_enable(void)
 150{
 151	int ret;
 152
 153	ret = clk_prepare_enable(__scm->core_clk);
 154	if (ret)
 155		goto bail;
 156
 157	ret = clk_prepare_enable(__scm->iface_clk);
 158	if (ret)
 159		goto disable_core;
 160
 161	ret = clk_prepare_enable(__scm->bus_clk);
 162	if (ret)
 163		goto disable_iface;
 164
 165	return 0;
 166
 167disable_iface:
 168	clk_disable_unprepare(__scm->iface_clk);
 169disable_core:
 170	clk_disable_unprepare(__scm->core_clk);
 171bail:
 172	return ret;
 173}
 174
 175static void qcom_scm_clk_disable(void)
 176{
 177	clk_disable_unprepare(__scm->core_clk);
 178	clk_disable_unprepare(__scm->iface_clk);
 179	clk_disable_unprepare(__scm->bus_clk);
 180}
 181
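/*
 * Interconnect bandwidth votes are reference-counted under scm_bw_lock:
 * the first enable requests bandwidth, the last disable drops it.
 */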
 182static int qcom_scm_bw_enable(void)
 183{
 184	int ret = 0;
 185
 186	if (!__scm->path)
 187		return 0;
 188
 189	mutex_lock(&__scm->scm_bw_lock);
 190	if (!__scm->scm_vote_count) {
 191		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
 192		if (ret < 0) {
 193			dev_err(__scm->dev, "failed to set bandwidth request\n");
 194			goto err_bw;
 195		}
 196	}
 197	__scm->scm_vote_count++;
 198err_bw:
 199	mutex_unlock(&__scm->scm_bw_lock);
 200
 201	return ret;
 202}
 203
 204static void qcom_scm_bw_disable(void)
 205{
 206	if (!__scm->path)
 207		return;
 208
 209	mutex_lock(&__scm->scm_bw_lock);
 210	if (__scm->scm_vote_count-- == 1)
 211		icc_set_bw(__scm->path, 0, 0);
 212	mutex_unlock(&__scm->scm_bw_lock);
 213}
 214
 215enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
 216static DEFINE_SPINLOCK(scm_query_lock);
 217
 218struct qcom_tzmem_pool *qcom_scm_get_tzmem_pool(void)
 219{
 220	if (!qcom_scm_is_available())
 221		return NULL;
 222
 223	return __scm->mempool;
 224}
 225
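/*
 * Probe which SMC calling convention the firmware implements. The result
 * is cached in qcom_scm_convention so the probing calls are issued once.
 */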
 226static enum qcom_scm_convention __get_convention(void)
 227{
 228	unsigned long flags;
 229	struct qcom_scm_desc desc = {
 230		.svc = QCOM_SCM_SVC_INFO,
 231		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 232		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
 233					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
 234			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
 235		.arginfo = QCOM_SCM_ARGS(1),
 236		.owner = ARM_SMCCC_OWNER_SIP,
 237	};
 238	struct qcom_scm_res res;
 239	enum qcom_scm_convention probed_convention;
 240	int ret;
 241	bool forced = false;
 242
 243	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
 244		return qcom_scm_convention;
 245
 246	/*
 247	 * Per the "SMC calling convention specification", the 64-bit calling
  248	 * convention can only be used when the client is 64-bit; otherwise
  249	 * the system will encounter undefined behaviour.
 250	 */
 251#if IS_ENABLED(CONFIG_ARM64)
 252	/*
 253	 * Device isn't required as there is only one argument - no device
 254	 * needed to dma_map_single to secure world
 255	 */
 256	probed_convention = SMC_CONVENTION_ARM_64;
 257	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 258	if (!ret && res.result[0] == 1)
 259		goto found;
 260
 261	/*
 262	 * Some SC7180 firmwares didn't implement the
  263	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
  264	 * ARM_64 calling convention on these firmwares. Luckily we don't make any
 265	 * early calls into the firmware on these SoCs so the device pointer
 266	 * will be valid here to check if the compatible matches.
 267	 */
 268	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
 269		forced = true;
 270		goto found;
 271	}
 272#endif
 273
 274	probed_convention = SMC_CONVENTION_ARM_32;
 275	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 276	if (!ret && res.result[0] == 1)
 277		goto found;
 278
 279	probed_convention = SMC_CONVENTION_LEGACY;
 280found:
 281	spin_lock_irqsave(&scm_query_lock, flags);
 282	if (probed_convention != qcom_scm_convention) {
 283		qcom_scm_convention = probed_convention;
 284		pr_info("qcom_scm: convention: %s%s\n",
 285			qcom_scm_convention_names[qcom_scm_convention],
 286			forced ? " (forced)" : "");
 287	}
 288	spin_unlock_irqrestore(&scm_query_lock, flags);
 289
 290	return qcom_scm_convention;
 291}
 292
 293/**
 294 * qcom_scm_call() - Invoke a syscall in the secure world
 295 * @dev:	device
 296 * @desc:	Descriptor structure containing arguments and return values
 297 * @res:        Structure containing results from SMC/HVC call
 298 *
 299 * Sends a command to the SCM and waits for the command to finish processing.
 300 * This should *only* be called in pre-emptible context.
 301 */
 302static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
 303			 struct qcom_scm_res *res)
 304{
 305	might_sleep();
 306	switch (__get_convention()) {
 307	case SMC_CONVENTION_ARM_32:
 308	case SMC_CONVENTION_ARM_64:
 309		return scm_smc_call(dev, desc, res, false);
 310	case SMC_CONVENTION_LEGACY:
 311		return scm_legacy_call(dev, desc, res);
 312	default:
 313		pr_err("Unknown current SCM calling convention.\n");
 314		return -EINVAL;
 315	}
 316}
 317
 318/**
 319 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 320 * @dev:	device
 321 * @desc:	Descriptor structure containing arguments and return values
 322 * @res:	Structure containing results from SMC/HVC call
 323 *
 324 * Sends a command to the SCM and waits for the command to finish processing.
 325 * This can be called in atomic context.
 326 */
 327static int qcom_scm_call_atomic(struct device *dev,
 328				const struct qcom_scm_desc *desc,
 329				struct qcom_scm_res *res)
 330{
 331	switch (__get_convention()) {
 332	case SMC_CONVENTION_ARM_32:
 333	case SMC_CONVENTION_ARM_64:
 334		return scm_smc_call(dev, desc, res, true);
 335	case SMC_CONVENTION_LEGACY:
 336		return scm_legacy_call_atomic(dev, desc, res);
 337	default:
 338		pr_err("Unknown current SCM calling convention.\n");
 339		return -EINVAL;
 340	}
 341}
 342
 343static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
 344					 u32 cmd_id)
 345{
 346	int ret;
 347	struct qcom_scm_desc desc = {
 348		.svc = QCOM_SCM_SVC_INFO,
 349		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 350		.owner = ARM_SMCCC_OWNER_SIP,
 351	};
 352	struct qcom_scm_res res;
 353
 354	desc.arginfo = QCOM_SCM_ARGS(1);
 355	switch (__get_convention()) {
 356	case SMC_CONVENTION_ARM_32:
 357	case SMC_CONVENTION_ARM_64:
 358		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
 359				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
 360		break;
 361	case SMC_CONVENTION_LEGACY:
 362		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
 363		break;
 364	default:
 365		pr_err("Unknown SMC convention being used\n");
 366		return false;
 367	}
 368
 369	ret = qcom_scm_call(dev, &desc, &res);
 370
 371	return ret ? false : !!res.result[0];
 372}
 373
 374static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
 375{
 376	int cpu;
 377	unsigned int flags = 0;
 378	struct qcom_scm_desc desc = {
 379		.svc = QCOM_SCM_SVC_BOOT,
 380		.cmd = QCOM_SCM_BOOT_SET_ADDR,
 381		.arginfo = QCOM_SCM_ARGS(2),
 382		.owner = ARM_SMCCC_OWNER_SIP,
 383	};
 384
 385	for_each_present_cpu(cpu) {
 386		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
 387			return -EINVAL;
 388		flags |= cpu_bits[cpu];
 389	}
 390
 391	desc.args[0] = flags;
 392	desc.args[1] = virt_to_phys(entry);
 393
 394	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 395}
 396
 397static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
 398{
 399	struct qcom_scm_desc desc = {
 400		.svc = QCOM_SCM_SVC_BOOT,
 401		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
 402		.owner = ARM_SMCCC_OWNER_SIP,
 403		.arginfo = QCOM_SCM_ARGS(6),
 404		.args = {
 405			virt_to_phys(entry),
 406			/* Apply to all CPUs in all affinity levels */
 407			~0ULL, ~0ULL, ~0ULL, ~0ULL,
 408			flags,
 409		},
 410	};
 411
 412	/* Need a device for DMA of the additional arguments */
 413	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
 414		return -EOPNOTSUPP;
 415
 416	return qcom_scm_call(__scm->dev, &desc, NULL);
 417}
 418
 419/**
 420 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 421 * @entry: Entry point function for the cpus
 422 *
 423 * Set the Linux entry point for the SCM to transfer control to when coming
 424 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 425 */
 426int qcom_scm_set_warm_boot_addr(void *entry)
 427{
 428	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
 429		/* Fallback to old SCM call */
 430		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
 431	return 0;
 432}
 433EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);
 434
 435/**
 436 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 437 * @entry: Entry point function for the cpus
 438 */
 439int qcom_scm_set_cold_boot_addr(void *entry)
 440{
 441	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
 442		/* Fallback to old SCM call */
 443		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
 444	return 0;
 445}
 446EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
 447
 448/**
 449 * qcom_scm_cpu_power_down() - Power down the cpu
 450 * @flags:	Flags to flush cache
 451 *
  452 * This is an end point to power down the cpu. If there was a pending
  453 * interrupt, control returns from this function; otherwise the cpu jumps
  454 * to the warm boot entry point set for this cpu upon reset.
 455 */
 456void qcom_scm_cpu_power_down(u32 flags)
 457{
 458	struct qcom_scm_desc desc = {
 459		.svc = QCOM_SCM_SVC_BOOT,
 460		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
 461		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
 462		.arginfo = QCOM_SCM_ARGS(1),
 463		.owner = ARM_SMCCC_OWNER_SIP,
 464	};
 465
 466	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 467}
 468EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);
 469
 470int qcom_scm_set_remote_state(u32 state, u32 id)
 471{
 472	struct qcom_scm_desc desc = {
 473		.svc = QCOM_SCM_SVC_BOOT,
 474		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
 475		.arginfo = QCOM_SCM_ARGS(2),
 476		.args[0] = state,
 477		.args[1] = id,
 478		.owner = ARM_SMCCC_OWNER_SIP,
 479	};
 480	struct qcom_scm_res res;
 481	int ret;
 482
 483	ret = qcom_scm_call(__scm->dev, &desc, &res);
 484
 485	return ret ? : res.result[0];
 486}
 487EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);
 488
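/*
 * Ask the firmware to disable watchdog debug and SDI. SDI is understood
 * (from downstream sources) to be the system debug image that would
 * otherwise take over the reset path.
 */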
 489static int qcom_scm_disable_sdi(void)
 490{
 491	int ret;
 492	struct qcom_scm_desc desc = {
 493		.svc = QCOM_SCM_SVC_BOOT,
 494		.cmd = QCOM_SCM_BOOT_SDI_CONFIG,
 495		.args[0] = 1, /* Disable watchdog debug */
 496		.args[1] = 0, /* Disable SDI */
 497		.arginfo = QCOM_SCM_ARGS(2),
 498		.owner = ARM_SMCCC_OWNER_SIP,
 499	};
 500	struct qcom_scm_res res;
 501
 502	ret = qcom_scm_clk_enable();
 503	if (ret)
 504		return ret;
 505	ret = qcom_scm_call(__scm->dev, &desc, &res);
 506
 507	qcom_scm_clk_disable();
 508
 509	return ret ? : res.result[0];
 510}
 511
 512static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
 513{
 514	struct qcom_scm_desc desc = {
 515		.svc = QCOM_SCM_SVC_BOOT,
 516		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 517		.arginfo = QCOM_SCM_ARGS(2),
 518		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 519		.owner = ARM_SMCCC_OWNER_SIP,
 520	};
 521
 522	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
 523
 524	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 525}
 526
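/*
 * Read-modify-write of a secure IO register using two separate SCM
 * calls; note this is not atomic with respect to other writers.
 */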
 527static int qcom_scm_io_rmw(phys_addr_t addr, unsigned int mask, unsigned int val)
 528{
 529	unsigned int old;
 530	unsigned int new;
 531	int ret;
 532
 533	ret = qcom_scm_io_readl(addr, &old);
 534	if (ret)
 535		return ret;
 536
 537	new = (old & ~mask) | (val & mask);
 538
 539	return qcom_scm_io_writel(addr, new);
 540}
 541
 542static void qcom_scm_set_download_mode(u32 dload_mode)
 543{
 544	int ret = 0;
 545
 546	if (__scm->dload_mode_addr) {
 547		ret = qcom_scm_io_rmw(__scm->dload_mode_addr, QCOM_DLOAD_MASK,
 548				      FIELD_PREP(QCOM_DLOAD_MASK, dload_mode));
 549	} else if (__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_BOOT,
 550						QCOM_SCM_BOOT_SET_DLOAD_MODE)) {
 551		ret = __qcom_scm_set_dload_mode(__scm->dev, !!dload_mode);
 552	} else if (dload_mode) {
 553		dev_err(__scm->dev,
 554			"No available mechanism for setting download mode\n");
 555	}
 556
 557	if (ret)
 558		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
 559}
 560
 561/**
 562 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 563 *			       state machine for a given peripheral, using the
 564 *			       metadata
 565 * @peripheral: peripheral id
 566 * @metadata:	pointer to memory containing ELF header, program header table
 567 *		and optional blob of data used for authenticating the metadata
 568 *		and the rest of the firmware
 569 * @size:	size of the metadata
 570 * @ctx:	optional metadata context
 571 *
 572 * Return: 0 on success.
 573 *
 574 * Upon successful return, the PAS metadata context (@ctx) will be used to
 575 * track the metadata allocation, this needs to be released by invoking
 576 * qcom_scm_pas_metadata_release() by the caller.
 577 */
 578int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 579			    struct qcom_scm_pas_metadata *ctx)
 580{
 581	dma_addr_t mdata_phys;
 582	void *mdata_buf;
 583	int ret;
 584	struct qcom_scm_desc desc = {
 585		.svc = QCOM_SCM_SVC_PIL,
 586		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
 587		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
 588		.args[0] = peripheral,
 589		.owner = ARM_SMCCC_OWNER_SIP,
 590	};
 591	struct qcom_scm_res res;
 592
 593	/*
  594	 * During the scm call memory protection will be enabled for the
  595	 * metadata blob, so make sure it's physically contiguous, 4K aligned
  596	 * and non-cacheable to avoid XPU violations.
  597	 *
  598	 * For PIL calls the hypervisor creates SHM Bridges for the blob
  599	 * buffers on behalf of Linux, so we must not do it ourselves and
  600	 * therefore don't use the TZMem allocator here.
 601	 *
 602	 * If we pass a buffer that is already part of an SHM Bridge to this
 603	 * call, it will fail.
 604	 */
 605	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
 606				       GFP_KERNEL);
 607	if (!mdata_buf)
 608		return -ENOMEM;
 609
 610	memcpy(mdata_buf, metadata, size);
 611
 612	ret = qcom_scm_clk_enable();
 613	if (ret)
 614		goto out;
 615
 616	ret = qcom_scm_bw_enable();
 617	if (ret)
 618		goto disable_clk;
 619
 620	desc.args[1] = mdata_phys;
 621
 622	ret = qcom_scm_call(__scm->dev, &desc, &res);
 623	qcom_scm_bw_disable();
 624
 625disable_clk:
 626	qcom_scm_clk_disable();
 627
 628out:
 629	if (ret < 0 || !ctx) {
 630		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
  631	} else {
 632		ctx->ptr = mdata_buf;
 633		ctx->phys = mdata_phys;
 634		ctx->size = size;
 635	}
 636
 637	return ret ? : res.result[0];
 638}
 639EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);
 640
 641/**
 642 * qcom_scm_pas_metadata_release() - release metadata context
 643 * @ctx:	metadata context
 644 */
 645void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
 646{
 647	if (!ctx->ptr)
 648		return;
 649
 650	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
 651
 652	ctx->ptr = NULL;
 653	ctx->phys = 0;
 654	ctx->size = 0;
 655}
 656EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);
 657
 658/**
 659 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 660 *			      for firmware loading
 661 * @peripheral:	peripheral id
 662 * @addr:	start address of memory area to prepare
 663 * @size:	size of the memory area to prepare
 664 *
 665 * Returns 0 on success.
 666 */
 667int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 668{
 669	int ret;
 670	struct qcom_scm_desc desc = {
 671		.svc = QCOM_SCM_SVC_PIL,
 672		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
 673		.arginfo = QCOM_SCM_ARGS(3),
 674		.args[0] = peripheral,
 675		.args[1] = addr,
 676		.args[2] = size,
 677		.owner = ARM_SMCCC_OWNER_SIP,
 678	};
 679	struct qcom_scm_res res;
 680
 681	ret = qcom_scm_clk_enable();
 682	if (ret)
 683		return ret;
 684
 685	ret = qcom_scm_bw_enable();
 686	if (ret)
 687		goto disable_clk;
 688
 689	ret = qcom_scm_call(__scm->dev, &desc, &res);
 690	qcom_scm_bw_disable();
 691
 692disable_clk:
 693	qcom_scm_clk_disable();
 694
 695	return ret ? : res.result[0];
 696}
 697EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
 698
 699/**
 700 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 701 *				   and reset the remote processor
 702 * @peripheral:	peripheral id
 703 *
 704 * Return 0 on success.
 705 */
 706int qcom_scm_pas_auth_and_reset(u32 peripheral)
 707{
 708	int ret;
 709	struct qcom_scm_desc desc = {
 710		.svc = QCOM_SCM_SVC_PIL,
 711		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
 712		.arginfo = QCOM_SCM_ARGS(1),
 713		.args[0] = peripheral,
 714		.owner = ARM_SMCCC_OWNER_SIP,
 715	};
 716	struct qcom_scm_res res;
 717
 718	ret = qcom_scm_clk_enable();
 719	if (ret)
 720		return ret;
 721
 722	ret = qcom_scm_bw_enable();
 723	if (ret)
 724		goto disable_clk;
 725
 726	ret = qcom_scm_call(__scm->dev, &desc, &res);
 727	qcom_scm_bw_disable();
 728
 729disable_clk:
 730	qcom_scm_clk_disable();
 731
 732	return ret ? : res.result[0];
 733}
 734EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);
 735
 736/**
 737 * qcom_scm_pas_shutdown() - Shut down the remote processor
 738 * @peripheral: peripheral id
 739 *
 740 * Returns 0 on success.
 741 */
 742int qcom_scm_pas_shutdown(u32 peripheral)
 743{
 744	int ret;
 745	struct qcom_scm_desc desc = {
 746		.svc = QCOM_SCM_SVC_PIL,
 747		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
 748		.arginfo = QCOM_SCM_ARGS(1),
 749		.args[0] = peripheral,
 750		.owner = ARM_SMCCC_OWNER_SIP,
 751	};
 752	struct qcom_scm_res res;
 753
 754	ret = qcom_scm_clk_enable();
 755	if (ret)
 756		return ret;
 757
 758	ret = qcom_scm_bw_enable();
 759	if (ret)
 760		goto disable_clk;
 761
 762	ret = qcom_scm_call(__scm->dev, &desc, &res);
 763	qcom_scm_bw_disable();
 764
 765disable_clk:
 766	qcom_scm_clk_disable();
 767
 768	return ret ? : res.result[0];
 769}
 770EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);
 771
 772/**
 773 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
  774 *			      available for the given peripheral
 775 * @peripheral:	peripheral id
 776 *
 777 * Returns true if PAS is supported for this peripheral, otherwise false.
 778 */
 779bool qcom_scm_pas_supported(u32 peripheral)
 780{
 781	int ret;
 782	struct qcom_scm_desc desc = {
 783		.svc = QCOM_SCM_SVC_PIL,
 784		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
 785		.arginfo = QCOM_SCM_ARGS(1),
 786		.args[0] = peripheral,
 787		.owner = ARM_SMCCC_OWNER_SIP,
 788	};
 789	struct qcom_scm_res res;
 790
 791	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
 792					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
 793		return false;
 794
 795	ret = qcom_scm_call(__scm->dev, &desc, &res);
 796
 797	return ret ? false : !!res.result[0];
 798}
 799EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);
 800
 801static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
 802{
 803	struct qcom_scm_desc desc = {
 804		.svc = QCOM_SCM_SVC_PIL,
 805		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
 806		.arginfo = QCOM_SCM_ARGS(2),
 807		.args[0] = reset,
 808		.args[1] = 0,
 809		.owner = ARM_SMCCC_OWNER_SIP,
 810	};
 811	struct qcom_scm_res res;
 812	int ret;
 813
 814	ret = qcom_scm_call(__scm->dev, &desc, &res);
 815
 816	return ret ? : res.result[0];
 817}
 818
 819static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 820				     unsigned long idx)
 821{
 822	if (idx != 0)
 823		return -EINVAL;
 824
 825	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
 826}
 827
 828static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
 829				       unsigned long idx)
 830{
 831	if (idx != 0)
 832		return -EINVAL;
 833
 834	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
 835}
 836
 837static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 838	.assert = qcom_scm_pas_reset_assert,
 839	.deassert = qcom_scm_pas_reset_deassert,
 840};
 841
 842int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 843{
 844	struct qcom_scm_desc desc = {
 845		.svc = QCOM_SCM_SVC_IO,
 846		.cmd = QCOM_SCM_IO_READ,
 847		.arginfo = QCOM_SCM_ARGS(1),
 848		.args[0] = addr,
 849		.owner = ARM_SMCCC_OWNER_SIP,
 850	};
 851	struct qcom_scm_res res;
 852	int ret;
  853
 855	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
 856	if (ret >= 0)
 857		*val = res.result[0];
 858
 859	return ret < 0 ? ret : 0;
 860}
 861EXPORT_SYMBOL_GPL(qcom_scm_io_readl);
 862
 863int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 864{
 865	struct qcom_scm_desc desc = {
 866		.svc = QCOM_SCM_SVC_IO,
 867		.cmd = QCOM_SCM_IO_WRITE,
 868		.arginfo = QCOM_SCM_ARGS(2),
 869		.args[0] = addr,
 870		.args[1] = val,
 871		.owner = ARM_SMCCC_OWNER_SIP,
 872	};
 873
 874	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 875}
 876EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
 877
 878/**
 879 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
  880 * supports the restore security config interface.
  881 *
  882 * Return true if the restore-cfg interface is supported, false if not.
 883 */
 884bool qcom_scm_restore_sec_cfg_available(void)
 885{
 886	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
 887					    QCOM_SCM_MP_RESTORE_SEC_CFG);
 888}
 889EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);
 890
 891int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 892{
 893	struct qcom_scm_desc desc = {
 894		.svc = QCOM_SCM_SVC_MP,
 895		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
 896		.arginfo = QCOM_SCM_ARGS(2),
 897		.args[0] = device_id,
 898		.args[1] = spare,
 899		.owner = ARM_SMCCC_OWNER_SIP,
 900	};
 901	struct qcom_scm_res res;
 902	int ret;
 903
 904	ret = qcom_scm_call(__scm->dev, &desc, &res);
 905
 906	return ret ? : res.result[0];
 907}
 908EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);
 909
 910#define QCOM_SCM_CP_APERTURE_CONTEXT_MASK	GENMASK(7, 0)
 911
 912bool qcom_scm_set_gpu_smmu_aperture_is_available(void)
 913{
 914	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
 915					    QCOM_SCM_MP_CP_SMMU_APERTURE_ID);
 916}
 917EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture_is_available);
 918
 919int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank)
 920{
 921	struct qcom_scm_desc desc = {
 922		.svc = QCOM_SCM_SVC_MP,
 923		.cmd = QCOM_SCM_MP_CP_SMMU_APERTURE_ID,
 924		.arginfo = QCOM_SCM_ARGS(4),
 925		.args[0] = 0xffff0000 | FIELD_PREP(QCOM_SCM_CP_APERTURE_CONTEXT_MASK, context_bank),
 926		.args[1] = 0xffffffff,
 927		.args[2] = 0xffffffff,
 928		.args[3] = 0xffffffff,
 929		.owner = ARM_SMCCC_OWNER_SIP
 930	};
 931
 932	return qcom_scm_call(__scm->dev, &desc, NULL);
 933}
 934EXPORT_SYMBOL_GPL(qcom_scm_set_gpu_smmu_aperture);
 935
 936int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 937{
 938	struct qcom_scm_desc desc = {
 939		.svc = QCOM_SCM_SVC_MP,
 940		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
 941		.arginfo = QCOM_SCM_ARGS(1),
 942		.args[0] = spare,
 943		.owner = ARM_SMCCC_OWNER_SIP,
 944	};
 945	struct qcom_scm_res res;
 946	int ret;
 947
 948	ret = qcom_scm_call(__scm->dev, &desc, &res);
 949
 950	if (size)
 951		*size = res.result[0];
 952
 953	return ret ? : res.result[1];
 954}
 955EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);
 956
 957int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 958{
 959	struct qcom_scm_desc desc = {
 960		.svc = QCOM_SCM_SVC_MP,
 961		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
 962		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
 963					 QCOM_SCM_VAL),
 964		.args[0] = addr,
 965		.args[1] = size,
 966		.args[2] = spare,
 967		.owner = ARM_SMCCC_OWNER_SIP,
 968	};
 969	int ret;
 970
 971	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 972
  973	/* the page table has already been initialized, ignore the error */
 974	if (ret == -EPERM)
 975		ret = 0;
 976
 977	return ret;
 978}
 979EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);
 980
 981int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
 982{
 983	struct qcom_scm_desc desc = {
 984		.svc = QCOM_SCM_SVC_MP,
 985		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
 986		.arginfo = QCOM_SCM_ARGS(2),
 987		.args[0] = size,
 988		.args[1] = spare,
 989		.owner = ARM_SMCCC_OWNER_SIP,
 990	};
 991
 992	return qcom_scm_call(__scm->dev, &desc, NULL);
 993}
 994EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);
 995
 996int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
 997				   u32 cp_nonpixel_start,
 998				   u32 cp_nonpixel_size)
 999{
1000	int ret;
1001	struct qcom_scm_desc desc = {
1002		.svc = QCOM_SCM_SVC_MP,
1003		.cmd = QCOM_SCM_MP_VIDEO_VAR,
1004		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1005					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1006		.args[0] = cp_start,
1007		.args[1] = cp_size,
1008		.args[2] = cp_nonpixel_start,
1009		.args[3] = cp_nonpixel_size,
1010		.owner = ARM_SMCCC_OWNER_SIP,
1011	};
1012	struct qcom_scm_res res;
1013
1014	ret = qcom_scm_call(__scm->dev, &desc, &res);
1015
1016	return ret ? : res.result[0];
1017}
1018EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);
1019
1020static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
1021				 size_t mem_sz, phys_addr_t src, size_t src_sz,
1022				 phys_addr_t dest, size_t dest_sz)
1023{
1024	int ret;
1025	struct qcom_scm_desc desc = {
1026		.svc = QCOM_SCM_SVC_MP,
1027		.cmd = QCOM_SCM_MP_ASSIGN,
1028		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
1029					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
1030					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1031		.args[0] = mem_region,
1032		.args[1] = mem_sz,
1033		.args[2] = src,
1034		.args[3] = src_sz,
1035		.args[4] = dest,
1036		.args[5] = dest_sz,
1037		.args[6] = 0,
1038		.owner = ARM_SMCCC_OWNER_SIP,
1039	};
1040	struct qcom_scm_res res;
1041
1042	ret = qcom_scm_call(dev, &desc, &res);
1043
1044	return ret ? : res.result[0];
1045}
1046
1047/**
1048 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
1049 * @mem_addr: mem region whose ownership needs to be reassigned
1050 * @mem_sz:   size of the region.
1051 * @srcvm:    vmid bitmap for the current set of owners, each set bit
1052 *            indicates a unique owner
1053 * @newvm:    array of new owners and their corresponding permission
1054 *            flags
1055 * @dest_cnt: number of owners in the next set.
1056 *
1057 * Return negative errno on failure or 0 on success with @srcvm updated.
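 *
 * A usage sketch (not taken from an in-tree caller; assumes the region is
 * currently owned by HLOS and is handed over to the modem's MSA VM with
 * read/write permission):
 *
 *	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(addr, size, &srcvm, &newvm, 1);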
1058 */
1059int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1060			u64 *srcvm,
1061			const struct qcom_scm_vmperm *newvm,
1062			unsigned int dest_cnt)
1063{
1064	struct qcom_scm_current_perm_info *destvm;
1065	struct qcom_scm_mem_map_info *mem_to_map;
1066	phys_addr_t mem_to_map_phys;
1067	phys_addr_t dest_phys;
1068	phys_addr_t ptr_phys;
1069	size_t mem_to_map_sz;
1070	size_t dest_sz;
1071	size_t src_sz;
1072	size_t ptr_sz;
1073	int next_vm;
1074	__le32 *src;
1075	int ret, i, b;
1076	u64 srcvm_bits = *srcvm;
1077
1078	src_sz = hweight64(srcvm_bits) * sizeof(*src);
1079	mem_to_map_sz = sizeof(*mem_to_map);
1080	dest_sz = dest_cnt * sizeof(*destvm);
1081	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1082			ALIGN(dest_sz, SZ_64);
1083
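	/*
	 * A single TZ memory buffer carries, in order, the source vmid
	 * list, the memory map info and the destination permissions, with
	 * each part aligned to a 64-byte boundary.
	 */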
1084	void *ptr __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1085							ptr_sz, GFP_KERNEL);
1086	if (!ptr)
1087		return -ENOMEM;
1088
1089	ptr_phys = qcom_tzmem_to_phys(ptr);
1090
1091	/* Fill in the source vmid details */
1092	src = ptr;
1093	i = 0;
1094	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
1095		if (srcvm_bits & BIT(b))
1096			src[i++] = cpu_to_le32(b);
1097	}
1098
1099	/* Fill in the details of the memory buffer to map */
1100	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
1101	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
1102	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
1103	mem_to_map->mem_size = cpu_to_le64(mem_sz);
1104
1105	next_vm = 0;
1106	/* Fill in the details of the next set of vmids */
1107	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1108	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
1109	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
1110		destvm->vmid = cpu_to_le32(newvm->vmid);
1111		destvm->perm = cpu_to_le32(newvm->perm);
1112		destvm->ctx = 0;
1113		destvm->ctx_size = 0;
1114		next_vm |= BIT(newvm->vmid);
1115	}
1116
1117	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1118				    ptr_phys, src_sz, dest_phys, dest_sz);
1119	if (ret) {
1120		dev_err(__scm->dev,
1121			"Assign memory protection call failed %d\n", ret);
1122		return -EINVAL;
1123	}
1124
1125	*srcvm = next_vm;
1126	return 0;
1127}
1128EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
1129
1130/**
1131 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
1132 */
1133bool qcom_scm_ocmem_lock_available(void)
1134{
1135	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
1136					    QCOM_SCM_OCMEM_LOCK_CMD);
1137}
1138EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);
1139
1140/**
1141 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
1142 * region to the specified initiator
1143 *
1144 * @id:     tz initiator id
1145 * @offset: OCMEM offset
1146 * @size:   OCMEM size
1147 * @mode:   access mode (WIDE/NARROW)
1148 */
1149int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
1150			u32 mode)
1151{
1152	struct qcom_scm_desc desc = {
1153		.svc = QCOM_SCM_SVC_OCMEM,
1154		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
1155		.args[0] = id,
1156		.args[1] = offset,
1157		.args[2] = size,
1158		.args[3] = mode,
1159		.arginfo = QCOM_SCM_ARGS(4),
1160	};
1161
1162	return qcom_scm_call(__scm->dev, &desc, NULL);
1163}
1164EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);
1165
1166/**
1167 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1168 * region from the specified initiator
1169 *
1170 * @id:     tz initiator id
1171 * @offset: OCMEM offset
1172 * @size:   OCMEM size
1173 */
1174int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1175{
1176	struct qcom_scm_desc desc = {
1177		.svc = QCOM_SCM_SVC_OCMEM,
1178		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1179		.args[0] = id,
1180		.args[1] = offset,
1181		.args[2] = size,
1182		.arginfo = QCOM_SCM_ARGS(3),
1183	};
1184
1185	return qcom_scm_call(__scm->dev, &desc, NULL);
1186}
1187EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);
1188
1189/**
1190 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1191 *
1192 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1193 *	   qcom_scm_ice_set_key() are available.
1194 */
1195bool qcom_scm_ice_available(void)
1196{
1197	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1198					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1199		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1200					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1201}
1202EXPORT_SYMBOL_GPL(qcom_scm_ice_available);
1203
1204/**
1205 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1206 * @index: the keyslot to invalidate
1207 *
1208 * The UFSHCI and eMMC standards define a standard way to do this, but it
1209 * doesn't work on these SoCs; only this SCM call does.
1210 *
1211 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1212 * call doesn't specify which ICE instance the keyslot belongs to.
1213 *
1214 * Return: 0 on success; -errno on failure.
1215 */
1216int qcom_scm_ice_invalidate_key(u32 index)
1217{
1218	struct qcom_scm_desc desc = {
1219		.svc = QCOM_SCM_SVC_ES,
1220		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1221		.arginfo = QCOM_SCM_ARGS(1),
1222		.args[0] = index,
1223		.owner = ARM_SMCCC_OWNER_SIP,
1224	};
1225
1226	return qcom_scm_call(__scm->dev, &desc, NULL);
1227}
1228EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);
1229
1230/**
1231 * qcom_scm_ice_set_key() - Set an inline encryption key
1232 * @index: the keyslot into which to set the key
1233 * @key: the key to program
1234 * @key_size: the size of the key in bytes
1235 * @cipher: the encryption algorithm the key is for
1236 * @data_unit_size: the encryption data unit size, i.e. the size of each
1237 *		    individual plaintext and ciphertext.  Given in 512-byte
1238 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1239 *
1240 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1241 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1242 *
1243 * The UFSHCI and eMMC standards define a standard way to do this, but it
1244 * doesn't work on these SoCs; only this SCM call does.
1245 *
1246 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1247 * call doesn't specify which ICE instance the keyslot belongs to.
1248 *
1249 * Return: 0 on success; -errno on failure.
1250 */
1251int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1252			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1253{
1254	struct qcom_scm_desc desc = {
1255		.svc = QCOM_SCM_SVC_ES,
1256		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1257		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1258					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1259					 QCOM_SCM_VAL),
1260		.args[0] = index,
1261		.args[2] = key_size,
1262		.args[3] = cipher,
1263		.args[4] = data_unit_size,
1264		.owner = ARM_SMCCC_OWNER_SIP,
1265	};
1266
1267	int ret;
1268
1269	void *keybuf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1270							   key_size,
1271							   GFP_KERNEL);
1272	if (!keybuf)
1273		return -ENOMEM;
1274	memcpy(keybuf, key, key_size);
1275	desc.args[1] = qcom_tzmem_to_phys(keybuf);
1276
1277	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1278
1279	memzero_explicit(keybuf, key_size);
1280
1281	return ret;
1282}
1283EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
1284
1285/**
1286 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1287 *
1288 * Return true if HDCP is supported, false if not.
1289 */
1290bool qcom_scm_hdcp_available(void)
1291{
1292	bool avail;
1293	int ret = qcom_scm_clk_enable();
1294
1295	if (ret)
1296		return false;
1297
1298	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1299						QCOM_SCM_HDCP_INVOKE);
1300
1301	qcom_scm_clk_disable();
1302
1303	return avail;
1304}
1305EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);
1306
1307/**
1308 * qcom_scm_hdcp_req() - Send HDCP request.
1309 * @req: HDCP request array
1310 * @req_cnt: HDCP request array count
1311 * @resp: response buffer passed to SCM
1312 *
1313 * Write HDCP register(s) through SCM.
1314 */
1315int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1316{
1317	int ret;
1318	struct qcom_scm_desc desc = {
1319		.svc = QCOM_SCM_SVC_HDCP,
1320		.cmd = QCOM_SCM_HDCP_INVOKE,
1321		.arginfo = QCOM_SCM_ARGS(10),
1322		.args = {
1323			req[0].addr,
1324			req[0].val,
1325			req[1].addr,
1326			req[1].val,
1327			req[2].addr,
1328			req[2].val,
1329			req[3].addr,
1330			req[3].val,
1331			req[4].addr,
1332			req[4].val
1333		},
1334		.owner = ARM_SMCCC_OWNER_SIP,
1335	};
1336	struct qcom_scm_res res;
1337
1338	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1339		return -ERANGE;
1340
1341	ret = qcom_scm_clk_enable();
1342	if (ret)
1343		return ret;
1344
1345	ret = qcom_scm_call(__scm->dev, &desc, &res);
1346	*resp = res.result[0];
1347
1348	qcom_scm_clk_disable();
1349
1350	return ret;
1351}
1352EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);
1353
1354int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1355{
1356	struct qcom_scm_desc desc = {
1357		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1358		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1359		.arginfo = QCOM_SCM_ARGS(3),
1360		.args[0] = sec_id,
1361		.args[1] = ctx_num,
1362		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1363		.owner = ARM_SMCCC_OWNER_SIP,
1364	};
1365
1366	return qcom_scm_call(__scm->dev, &desc, NULL);
1367}
1368EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);
1369
1370int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1371{
1372	struct qcom_scm_desc desc = {
1373		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1374		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1375		.arginfo = QCOM_SCM_ARGS(2),
1376		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1377		.args[1] = en,
1378		.owner = ARM_SMCCC_OWNER_SIP,
1379	};
1380
1382	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1383}
1384EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);
1385
1386bool qcom_scm_lmh_dcvsh_available(void)
1387{
1388	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1389}
1390EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);
1391
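/*
 * Ask the firmware to enable SHM bridge support. A missing SCM call and
 * a SHMBRIDGE_RESULT_NOTSUPP result both map to -EOPNOTSUPP, so callers
 * only have to handle a single "unsupported" error code.
 */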
1392int qcom_scm_shm_bridge_enable(void)
1393{
1394	int ret;
1395
1396	struct qcom_scm_desc desc = {
1397		.svc = QCOM_SCM_SVC_MP,
1398		.cmd = QCOM_SCM_MP_SHM_BRIDGE_ENABLE,
1399		.owner = ARM_SMCCC_OWNER_SIP
1400	};
1401
1402	struct qcom_scm_res res;
1403
1404	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
1405					  QCOM_SCM_MP_SHM_BRIDGE_ENABLE))
1406		return -EOPNOTSUPP;
1407
1408	ret = qcom_scm_call(__scm->dev, &desc, &res);
1409
1410	if (ret)
1411		return ret;
1412
1413	if (res.result[0] == SHMBRIDGE_RESULT_NOTSUPP)
1414		return -EOPNOTSUPP;
1415
1416	return res.result[0];
1417}
1418EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_enable);
1419
1420int qcom_scm_shm_bridge_create(struct device *dev, u64 pfn_and_ns_perm_flags,
1421			       u64 ipfn_and_s_perm_flags, u64 size_and_flags,
1422			       u64 ns_vmids, u64 *handle)
1423{
1424	struct qcom_scm_desc desc = {
1425		.svc = QCOM_SCM_SVC_MP,
1426		.cmd = QCOM_SCM_MP_SHM_BRIDGE_CREATE,
1427		.owner = ARM_SMCCC_OWNER_SIP,
1428		.args[0] = pfn_and_ns_perm_flags,
1429		.args[1] = ipfn_and_s_perm_flags,
1430		.args[2] = size_and_flags,
1431		.args[3] = ns_vmids,
1432		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
1433					 QCOM_SCM_VAL, QCOM_SCM_VAL),
1434	};
1435
1436	struct qcom_scm_res res;
1437	int ret;
1438
1439	ret = qcom_scm_call(__scm->dev, &desc, &res);
1440
1441	if (handle && !ret)
1442		*handle = res.result[1];
1443
1444	return ret ?: res.result[0];
1445}
1446EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_create);
1447
1448int qcom_scm_shm_bridge_delete(struct device *dev, u64 handle)
1449{
1450	struct qcom_scm_desc desc = {
1451		.svc = QCOM_SCM_SVC_MP,
1452		.cmd = QCOM_SCM_MP_SHM_BRIDGE_DELETE,
1453		.owner = ARM_SMCCC_OWNER_SIP,
1454		.args[0] = handle,
1455		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1456	};
1457
1458	return qcom_scm_call(__scm->dev, &desc, NULL);
1459}
1460EXPORT_SYMBOL_GPL(qcom_scm_shm_bridge_delete);
1461
1462int qcom_scm_lmh_profile_change(u32 profile_id)
1463{
1464	struct qcom_scm_desc desc = {
1465		.svc = QCOM_SCM_SVC_LMH,
1466		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1467		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1468		.args[0] = profile_id,
1469		.owner = ARM_SMCCC_OWNER_SIP,
1470	};
1471
1472	return qcom_scm_call(__scm->dev, &desc, NULL);
1473}
1474EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);
1475
1476int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1477		       u64 limit_node, u32 node_id, u64 version)
1478{
1479	int ret, payload_size = 5 * sizeof(u32);
1480
1481	struct qcom_scm_desc desc = {
1482		.svc = QCOM_SCM_SVC_LMH,
1483		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1484		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1485					QCOM_SCM_VAL, QCOM_SCM_VAL),
1486		.args[1] = payload_size,
1487		.args[2] = limit_node,
1488		.args[3] = node_id,
1489		.args[4] = version,
1490		.owner = ARM_SMCCC_OWNER_SIP,
1491	};
1492
1493	u32 *payload_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1494							       payload_size,
1495							       GFP_KERNEL);
1496	if (!payload_buf)
1497		return -ENOMEM;
1498
1499	payload_buf[0] = payload_fn;
1500	payload_buf[1] = 0;
1501	payload_buf[2] = payload_reg;
1502	payload_buf[3] = 1;
1503	payload_buf[4] = payload_val;
1504
1505	desc.args[0] = qcom_tzmem_to_phys(payload_buf);
1506
1507	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1508
1509	return ret;
1510}
1511EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
1512
1513int qcom_scm_gpu_init_regs(u32 gpu_req)
1514{
1515	struct qcom_scm_desc desc = {
1516		.svc = QCOM_SCM_SVC_GPU,
1517		.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
1518		.arginfo = QCOM_SCM_ARGS(1),
1519		.args[0] = gpu_req,
1520		.owner = ARM_SMCCC_OWNER_SIP,
1521	};
1522
1523	return qcom_scm_call(__scm->dev, &desc, NULL);
1524}
1525EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
1526
1527static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1528{
1529	struct device_node *tcsr;
1530	struct device_node *np = dev->of_node;
1531	struct resource res;
1532	u32 offset;
1533	int ret;
1534
1535	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1536	if (!tcsr)
1537		return 0;
1538
1539	ret = of_address_to_resource(tcsr, 0, &res);
1540	of_node_put(tcsr);
1541	if (ret)
1542		return ret;
1543
1544	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1545	if (ret < 0)
1546		return ret;
1547
1548	*addr = res.start + offset;
1549
1550	return 0;
1551}
1552
1553#ifdef CONFIG_QCOM_QSEECOM
1554
1555/* Lock for QSEECOM SCM call executions */
1556static DEFINE_MUTEX(qcom_scm_qseecom_call_lock);
1557
1558static int __qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1559				   struct qcom_scm_qseecom_resp *res)
1560{
1561	struct qcom_scm_res scm_res = {};
1562	int status;
1563
1564	/*
1565	 * QSEECOM SCM calls should not be executed concurrently. Therefore, we
1566	 * require the respective call lock to be held.
1567	 */
1568	lockdep_assert_held(&qcom_scm_qseecom_call_lock);
1569
1570	status = qcom_scm_call(__scm->dev, desc, &scm_res);
1571
1572	res->result = scm_res.result[0];
1573	res->resp_type = scm_res.result[1];
1574	res->data = scm_res.result[2];
1575
1576	if (status)
1577		return status;
1578
1579	return 0;
1580}
1581
1582/**
1583 * qcom_scm_qseecom_call() - Perform a QSEECOM SCM call.
1584 * @desc: SCM call descriptor.
1585 * @res:  SCM call response (output).
1586 *
1587 * Performs the QSEECOM SCM call described by @desc, returning the response in
1588 * @res.
1589 *
1590 * Return: Zero on success, nonzero on failure.
1591 */
1592static int qcom_scm_qseecom_call(const struct qcom_scm_desc *desc,
1593				 struct qcom_scm_qseecom_resp *res)
1594{
1595	int status;
1596
1597	/*
1598	 * Note: Multiple QSEECOM SCM calls should not be executed at the same time,
1599	 * so lock things here. This needs to be extended to callback/listener
1600	 * handling when support for that is implemented.
1601	 */
1602
1603	mutex_lock(&qcom_scm_qseecom_call_lock);
1604	status = __qcom_scm_qseecom_call(desc, res);
1605	mutex_unlock(&qcom_scm_qseecom_call_lock);
1606
1607	dev_dbg(__scm->dev, "%s: owner=%x, svc=%x, cmd=%x, result=%lld, type=%llx, data=%llx\n",
1608		__func__, desc->owner, desc->svc, desc->cmd, res->result,
1609		res->resp_type, res->data);
1610
1611	if (status) {
1612		dev_err(__scm->dev, "qseecom: scm call failed with error %d\n", status);
1613		return status;
1614	}
1615
1616	/*
1617	 * TODO: Handle incomplete and blocked calls:
1618	 *
1619	 * Incomplete and blocked calls are not supported yet. Some devices
1620	 * and/or commands require those, some don't. Let's warn about them
1621	 * prominently in case someone attempts to use these commands with a
1622	 * device/command combination that isn't supported yet.
1623	 */
1624	WARN_ON(res->result == QSEECOM_RESULT_INCOMPLETE);
1625	WARN_ON(res->result == QSEECOM_RESULT_BLOCKED_ON_LISTENER);
1626
1627	return 0;
1628}
1629
1630/**
1631 * qcom_scm_qseecom_get_version() - Query the QSEECOM version.
1632 * @version: Pointer where the QSEECOM version will be stored.
1633 *
1634 * Performs the QSEECOM SCM call querying the QSEECOM version currently
1635 * running in the TrustZone.
1636 *
1637 * Return: Zero on success, nonzero on failure.
1638 */
1639static int qcom_scm_qseecom_get_version(u32 *version)
1640{
1641	struct qcom_scm_desc desc = {};
1642	struct qcom_scm_qseecom_resp res = {};
1643	u32 feature = 10;
1644	int ret;
1645
1646	desc.owner = QSEECOM_TZ_OWNER_SIP;
1647	desc.svc = QSEECOM_TZ_SVC_INFO;
1648	desc.cmd = QSEECOM_TZ_CMD_INFO_VERSION;
1649	desc.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL);
1650	desc.args[0] = feature;
1651
1652	ret = qcom_scm_qseecom_call(&desc, &res);
1653	if (ret)
1654		return ret;
1655
1656	*version = res.result;
1657	return 0;
1658}
1659
1660/**
1661 * qcom_scm_qseecom_app_get_id() - Query the app ID for a given QSEE app name.
1662 * @app_name: The name of the app.
1663 * @app_id:   The returned app ID.
1664 *
1665 * Query and return the application ID of the QSEE app identified by the given
1666 * name. This returned ID is the unique identifier of the app required for
1667 * subsequent communication.
1668 *
1669 * Return: Zero on success, nonzero on failure, -ENOENT if the app has not been
1670 * loaded or could not be found.
1671 */
1672int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
1673{
1674	unsigned long name_buf_size = QSEECOM_MAX_APP_NAME_SIZE;
1675	unsigned long app_name_len = strlen(app_name);
1676	struct qcom_scm_desc desc = {};
1677	struct qcom_scm_qseecom_resp res = {};
1678	int status;
1679
1680	if (app_name_len >= name_buf_size)
1681		return -EINVAL;
1682
1683	char *name_buf __free(qcom_tzmem) = qcom_tzmem_alloc(__scm->mempool,
1684							     name_buf_size,
1685							     GFP_KERNEL);
1686	if (!name_buf)
1687		return -ENOMEM;
1688
1689	memcpy(name_buf, app_name, app_name_len);
1690
1691	desc.owner = QSEECOM_TZ_OWNER_QSEE_OS;
1692	desc.svc = QSEECOM_TZ_SVC_APP_MGR;
1693	desc.cmd = QSEECOM_TZ_CMD_APP_LOOKUP;
1694	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);
1695	desc.args[0] = qcom_tzmem_to_phys(name_buf);
1696	desc.args[1] = app_name_len;
1697
1698	status = qcom_scm_qseecom_call(&desc, &res);
1699
1700	if (status)
1701		return status;
1702
1703	if (res.result == QSEECOM_RESULT_FAILURE)
1704		return -ENOENT;
1705
1706	if (res.result != QSEECOM_RESULT_SUCCESS)
1707		return -EINVAL;
1708
1709	if (res.resp_type != QSEECOM_SCM_RES_APP_ID)
1710		return -EINVAL;
1711
1712	*app_id = res.data;
1713	return 0;
1714}
1715EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_get_id);
1716
1717/**
1718 * qcom_scm_qseecom_app_send() - Send to and receive data from a given QSEE app.
1719 * @app_id:   The ID of the target app.
1720 * @req:      Request buffer sent to the app (must be TZ memory)
1721 * @req_size: Size of the request buffer.
1722 * @rsp:      Response buffer, written to by the app (must be TZ memory)
1723 * @rsp_size: Size of the response buffer.
1724 *
1725 * Sends a request to the QSEE app associated with the given ID and reads back
1726 * its response. The caller must provide two DMA memory regions, one for the
1727 * request and one for the response, and fill out the @req region with the
1728 * respective (app-specific) request data. The QSEE app reads this and returns
1729 * its response in the @rsp region.
1730 *
1731 * Return: Zero on success, nonzero on failure.
1732 */
1733int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size,
1734			      void *rsp, size_t rsp_size)
1735{
1736	struct qcom_scm_qseecom_resp res = {};
1737	struct qcom_scm_desc desc = {};
1738	phys_addr_t req_phys;
1739	phys_addr_t rsp_phys;
1740	int status;
1741
1742	req_phys = qcom_tzmem_to_phys(req);
1743	rsp_phys = qcom_tzmem_to_phys(rsp);
1744
1745	desc.owner = QSEECOM_TZ_OWNER_TZ_APPS;
1746	desc.svc = QSEECOM_TZ_SVC_APP_ID_PLACEHOLDER;
1747	desc.cmd = QSEECOM_TZ_CMD_APP_SEND;
1748	desc.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL,
1749				     QCOM_SCM_RW, QCOM_SCM_VAL,
1750				     QCOM_SCM_RW, QCOM_SCM_VAL);
1751	desc.args[0] = app_id;
1752	desc.args[1] = req_phys;
1753	desc.args[2] = req_size;
1754	desc.args[3] = rsp_phys;
1755	desc.args[4] = rsp_size;
1756
1757	status = qcom_scm_qseecom_call(&desc, &res);
1758
1759	if (status)
1760		return status;
1761
1762	if (res.result != QSEECOM_RESULT_SUCCESS)
1763		return -EIO;
1764
1765	return 0;
1766}
1767EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
1768
1769/*
1770 * We do not yet support re-entrant calls via the qseecom interface. To prevent
1771 * any potential issues with this, only allow validated machines for now.
1772 */
1773static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
1774	{ .compatible = "dell,xps13-9345" },
1775	{ .compatible = "lenovo,flex-5g" },
1776	{ .compatible = "lenovo,thinkpad-t14s" },
1777	{ .compatible = "lenovo,thinkpad-x13s", },
1778	{ .compatible = "lenovo,yoga-slim7x" },
1779	{ .compatible = "microsoft,arcata", },
1780	{ .compatible = "microsoft,romulus13", },
1781	{ .compatible = "microsoft,romulus15", },
1782	{ .compatible = "qcom,sc8180x-primus" },
1783	{ .compatible = "qcom,x1e001de-devkit" },
1784	{ .compatible = "qcom,x1e80100-crd" },
1785	{ .compatible = "qcom,x1e80100-qcp" },
1786	{ }
1787};
1788
1789static bool qcom_scm_qseecom_machine_is_allowed(void)
1790{
1791	struct device_node *np;
1792	bool match;
1793
1794	np = of_find_node_by_path("/");
1795	if (!np)
1796		return false;
1797
1798	match = of_match_node(qcom_scm_qseecom_allowlist, np);
1799	of_node_put(np);
1800
1801	return match;
1802}
1803
1804static void qcom_scm_qseecom_free(void *data)
1805{
1806	struct platform_device *qseecom_dev = data;
1807
1808	platform_device_del(qseecom_dev);
1809	platform_device_put(qseecom_dev);
1810}
1811
1812static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1813{
1814	struct platform_device *qseecom_dev;
1815	u32 version;
1816	int ret;
1817
1818	/*
1819	 * Note: We do two steps of validation here: First, we try to query the
1820	 * QSEECOM version as a check to see if the interface exists on this
1821	 * device. Second, we check against known good devices due to current
1822	 * driver limitations (see comment in qcom_scm_qseecom_allowlist).
1823	 *
1824	 * Note that we deliberately do the machine check after the version
1825	 * check so that we can log potentially supported devices. This should
1826	 * be safe as downstream sources indicate that the version query is
1827	 * neither blocking nor reentrant.
1828	 */
1829	ret = qcom_scm_qseecom_get_version(&version);
1830	if (ret)
1831		return 0;
1832
1833	dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
1834
1835	if (!qcom_scm_qseecom_machine_is_allowed()) {
1836		dev_info(scm->dev, "qseecom: untested machine, skipping\n");
1837		return 0;
1838	}
1839
1840	/*
1841	 * Set up QSEECOM interface device. All application clients will be
1842	 * set up and managed by the corresponding driver for it.
1843	 */
1844	qseecom_dev = platform_device_alloc("qcom_qseecom", -1);
1845	if (!qseecom_dev)
1846		return -ENOMEM;
1847
1848	qseecom_dev->dev.parent = scm->dev;
1849
1850	ret = platform_device_add(qseecom_dev);
1851	if (ret) {
1852		platform_device_put(qseecom_dev);
1853		return ret;
1854	}
1855
1856	return devm_add_action_or_reset(scm->dev, qcom_scm_qseecom_free, qseecom_dev);
1857}
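A note on the cleanup pattern above: devm_add_action_or_reset() ties the auxiliary device's lifetime to the SCM device, so on SCM unbind (or if registering the action itself fails) qcom_scm_qseecom_free() deletes the platform device and drops the last reference to it.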
1858
1859#else /* CONFIG_QCOM_QSEECOM */
1860
1861static int qcom_scm_qseecom_init(struct qcom_scm *scm)
1862{
1863	return 0;
1864}
1865
1866#endif /* CONFIG_QCOM_QSEECOM */
1867
1868/**
1869 * qcom_scm_is_available() - Checks if SCM is available
1870 */
1871bool qcom_scm_is_available(void)
1872{
1873	/* Paired with smp_store_release() in qcom_scm_probe */
1874	return !!smp_load_acquire(&__scm);
1875}
1876EXPORT_SYMBOL_GPL(qcom_scm_is_available);
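Since this is the probe-ordering primitive for every SCM consumer, a sketch of the usual pattern follows (foo_probe() is hypothetical, not from this file): callers test availability in their own probe and defer until the release store in qcom_scm_probe() is visible.

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* SCM may not have probed yet; ask the driver core to retry us. */
	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* ... qcom_scm_*() calls are safe from here on ... */
	return 0;
}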
1877
1878static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
1879{
1880	/* FW currently only supports a single wq_ctx (zero).
1881	 * TODO: Update this logic to include dynamic allocation and lookup of
1882	 * completion structs when FW supports more wq_ctx values.
1883	 */
1884	if (wq_ctx != 0) {
1885		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
1886		return -EINVAL;
1887	}
1888
1889	return 0;
1890}
1891
1892int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
1893{
1894	int ret;
1895
1896	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1897	if (ret)
1898		return ret;
1899
1900	wait_for_completion(&__scm->waitq_comp);
1901
1902	return 0;
1903}
1904
1905static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
1906{
1907	int ret;
1908
1909	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
1910	if (ret)
1911		return ret;
1912
1913	complete(&__scm->waitq_comp);
1914
1915	return 0;
1916}
1917
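/*
 * Wait-queue interrupt: firmware raises this when a previously parked
 * SCM call may resume. Drain contexts via GET_WQ_CTX, waking the
 * matching completion for each, until firmware reports nothing more
 * pending. Only wq_ctx zero and WAKE_ONE are handled today; see
 * qcom_scm_assert_valid_wq_ctx() above.
 */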
1918static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
1919{
1920	int ret;
1921	struct qcom_scm *scm = data;
1922	u32 wq_ctx, flags, more_pending = 0;
1923
1924	do {
1925		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
1926		if (ret) {
1927			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
1928			goto out;
1929		}
1930
1931		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE) {
1932			dev_err(scm->dev, "Invalid flags received for wq_ctx: %u\n", flags);
1933			goto out;
1934		}
1935
1936		ret = qcom_scm_waitq_wakeup(wq_ctx);
1937		if (ret)
1938			goto out;
1939	} while (more_pending);
1940
1941out:
1942	return IRQ_HANDLED;
1943}
1944
1945static int get_download_mode(char *buffer, const struct kernel_param *kp)
1946{
1947	if (download_mode >= ARRAY_SIZE(download_mode_name))
1948		return sysfs_emit(buffer, "unknown mode\n");
1949
1950	return sysfs_emit(buffer, "%s\n", download_mode_name[download_mode]);
1951}
1952
1953static int set_download_mode(const char *val, const struct kernel_param *kp)
1954{
1955	bool tmp;
1956	int ret;
1957
1958	ret = sysfs_match_string(download_mode_name, val);
1959	if (ret < 0) {
1960		ret = kstrtobool(val, &tmp);
1961		if (ret < 0) {
1962			pr_err("qcom_scm: err: %d\n", ret);
1963			return ret;
1964		}
1965
1966		ret = tmp ? 1 : 0;
1967	}
1968
1969	download_mode = ret;
1970	if (__scm)
1971		qcom_scm_set_download_mode(download_mode);
1972
1973	return 0;
1974}
1975
1976static const struct kernel_param_ops download_mode_param_ops = {
1977	.get = get_download_mode,
1978	.set = set_download_mode,
1979};
1980
1981module_param_cb(download_mode, &download_mode_param_ops, NULL, 0644);
1982	MODULE_PARM_DESC(download_mode, "download mode: off/0/N for no dump mode, full/on/1/Y for full dump mode, mini for minidump mode, and full,mini for both full and minidump mode together");
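As a usage note (not part of the source): because the parameter is registered with permissions 0644, the mode can be chosen at boot, e.g. qcom_scm.download_mode=mini on the kernel command line, or changed at runtime through /sys/module/qcom_scm/parameters/download_mode; set_download_mode() above pushes the new value to firmware immediately once SCM has probed.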
1983
1984static int qcom_scm_probe(struct platform_device *pdev)
1985{
1986	struct qcom_tzmem_pool_config pool_config;
1987	struct qcom_scm *scm;
1988	int irq, ret;
1989
1990	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1991	if (!scm)
1992		return -ENOMEM;
1993
1994	scm->dev = &pdev->dev;
1995	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1996	if (ret < 0)
1997		return ret;
1998
1999	init_completion(&scm->waitq_comp);
2000	mutex_init(&scm->scm_bw_lock);
2001
2002	scm->path = devm_of_icc_get(&pdev->dev, NULL);
2003	if (IS_ERR(scm->path))
2004		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
2005				     "failed to acquire interconnect path\n");
2006
2007	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
2008	if (IS_ERR(scm->core_clk))
2009		return PTR_ERR(scm->core_clk);
2010
2011	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
2012	if (IS_ERR(scm->iface_clk))
2013		return PTR_ERR(scm->iface_clk);
2014
2015	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
2016	if (IS_ERR(scm->bus_clk))
2017		return PTR_ERR(scm->bus_clk);
2018
2019	scm->reset.ops = &qcom_scm_pas_reset_ops;
2020	scm->reset.nr_resets = 1;
2021	scm->reset.of_node = pdev->dev.of_node;
2022	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
2023	if (ret)
2024		return ret;
2025
2026	/* vote for max clk rate for highest performance */
2027	ret = clk_set_rate(scm->core_clk, INT_MAX);
2028	if (ret)
2029		return ret;
2030
2031	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
2032	smp_store_release(&__scm, scm);
2033
2034	irq = platform_get_irq_optional(pdev, 0);
2035	if (irq < 0) {
2036		if (irq != -ENXIO) {
2037			ret = irq;
2038			goto err;
2039		}
2040	} else {
2041		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
2042						IRQF_ONESHOT, "qcom-scm", __scm);
2043		if (ret < 0) {
2044			dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
2045			goto err;
2046		}
2047	}
2048
2049	__get_convention();
2050
2051	/*
2052	 * If "download mode" is requested, from this point on warmboot
2053	 * will cause the boot stages to enter download mode, unless
2054	 * disabled below by a clean shutdown/reboot.
2055	 */
2056	qcom_scm_set_download_mode(download_mode);
2057
2058	/*
2059	 * Disable SDI if the DT indicates that it is enabled by default.
2060	 */
2061	if (of_property_read_bool(pdev->dev.of_node, "qcom,sdi-enabled") || !download_mode)
2062		qcom_scm_disable_sdi();
2063
2064	ret = of_reserved_mem_device_init(__scm->dev);
2065	if (ret && ret != -ENODEV) {
2066		dev_err_probe(__scm->dev, ret,
2067			      "Failed to setup the reserved memory region for TZ mem\n");
2068		goto err;
2069	}
2070
2071	ret = qcom_tzmem_enable(__scm->dev);
2072	if (ret) {
2073		dev_err_probe(__scm->dev, ret,
2074			      "Failed to enable the TrustZone memory allocator\n");
2075		goto err;
2076	}
2077
2078	memset(&pool_config, 0, sizeof(pool_config));
2079	pool_config.initial_size = 0;
2080	pool_config.policy = QCOM_TZMEM_POLICY_ON_DEMAND;
2081	pool_config.max_size = SZ_256K;
2082
2083	__scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config);
2084	if (IS_ERR(__scm->mempool)) {
2085		dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool),
2086			      "Failed to create the SCM memory pool\n");
2087		goto err;
2088	}
2089
2090	/*
2091	 * Initialize the QSEECOM interface.
2092	 *
2093	 * Note: QSEECOM is fairly self-contained and this only adds the
2094	 * interface device (the driver of which does most of the heavy
2095	 * lifting). So any errors returned here should be either -ENOMEM or
2096	 * -EINVAL (with the latter only in case there's a bug in our code).
2097	 * This means that there is no need to bring down the whole SCM driver.
2098	 * Just log the error instead and let SCM live.
2099	 */
2100	ret = qcom_scm_qseecom_init(scm);
2101	WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
2102
2103	return 0;
2104
2105err:
2106	/* Paired with smp_load_acquire() in qcom_scm_is_available(). */
2107	smp_store_release(&__scm, NULL);
2108
2109	return ret;
2110}
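The pool created near the end of probe uses the on-demand policy: nothing is carved out up front (initial_size is zero) and the pool grows as callers allocate, capped at max_size. Below is a minimal sketch of the same pattern in a hypothetical consumer; example_tzmem_setup() and the SZ_64K cap are illustrative, not from this file.

#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/sizes.h>

static void *example_tzmem_setup(struct device *dev, size_t size)
{
	struct qcom_tzmem_pool_config config = {
		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
		.initial_size = 0,
		.max_size = SZ_64K,	/* illustrative cap */
	};
	struct qcom_tzmem_pool *pool;

	/* Pool lifetime is tied to @dev via devres. */
	pool = devm_qcom_tzmem_pool_new(dev, &config);
	if (IS_ERR(pool))
		return NULL;

	/* Buffer is shareable with TZ; release with qcom_tzmem_free(). */
	return qcom_tzmem_alloc(pool, size, GFP_KERNEL);
}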
2111
2112static void qcom_scm_shutdown(struct platform_device *pdev)
2113{
2114	/* Clean shutdown, disable download mode to allow normal restart */
2115	qcom_scm_set_download_mode(QCOM_DLOAD_NODUMP);
2116}
2117
2118static const struct of_device_id qcom_scm_dt_match[] = {
2119	{ .compatible = "qcom,scm" },
2120
2121	/* Legacy entries kept for backwards compatibility */
2122	{ .compatible = "qcom,scm-apq8064" },
2123	{ .compatible = "qcom,scm-apq8084" },
2124	{ .compatible = "qcom,scm-ipq4019" },
2125	{ .compatible = "qcom,scm-msm8953" },
2126	{ .compatible = "qcom,scm-msm8974" },
2127	{ .compatible = "qcom,scm-msm8996" },
2128	{}
2129};
2130MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
2131
2132static struct platform_driver qcom_scm_driver = {
2133	.driver = {
2134		.name	= "qcom_scm",
2135		.of_match_table = qcom_scm_dt_match,
2136		.suppress_bind_attrs = true,
2137	},
2138	.probe = qcom_scm_probe,
2139	.shutdown = qcom_scm_shutdown,
2140};
2141
2142static int __init qcom_scm_init(void)
2143{
2144	return platform_driver_register(&qcom_scm_driver);
2145}
2146subsys_initcall(qcom_scm_init);
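Registering at subsys_initcall() time, rather than via the usual module_platform_driver() helper, gets the SCM device onto the bus before the bulk of device_initcall()-level consumers probe; anything that still races it can fall back on the qcom_scm_is_available() check above.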
2147
2148MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
2149MODULE_LICENSE("GPL v2");