v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
   3 * Copyright (C) 2015 Linaro Ltd.
   4 */
   5#include <linux/platform_device.h>
   6#include <linux/init.h>
   7#include <linux/cpumask.h>
   8#include <linux/export.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/interconnect.h>
  11#include <linux/module.h>
  12#include <linux/types.h>
  13#include <linux/qcom_scm.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16#include <linux/of_platform.h>
  17#include <linux/clk.h>
  18#include <linux/reset-controller.h>
  19#include <linux/arm-smccc.h>
  20
  21#include "qcom_scm.h"
  22
  23static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
  24module_param(download_mode, bool, 0);
  25
  26#define SCM_HAS_CORE_CLK	BIT(0)
  27#define SCM_HAS_IFACE_CLK	BIT(1)
  28#define SCM_HAS_BUS_CLK		BIT(2)
  29
  30struct qcom_scm {
  31	struct device *dev;
  32	struct clk *core_clk;
  33	struct clk *iface_clk;
  34	struct clk *bus_clk;
  35	struct icc_path *path;
  36	struct reset_controller_dev reset;
  37
  38	/* control access to the interconnect path */
  39	struct mutex scm_bw_lock;
  40	int scm_vote_count;
  41
  42	u64 dload_mode_addr;
  43};
  44
  45struct qcom_scm_current_perm_info {
  46	__le32 vmid;
  47	__le32 perm;
  48	__le64 ctx;
  49	__le32 ctx_size;
  50	__le32 unused;
  51};
  52
  53struct qcom_scm_mem_map_info {
  54	__le64 mem_addr;
  55	__le64 mem_size;
  56};
  57
  58/* Each bit configures cold/warm boot address for one of the 4 CPUs */
  59static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
  60	0, BIT(0), BIT(3), BIT(5)
  61};
  62static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
  63	BIT(2), BIT(1), BIT(4), BIT(6)
  64};
  65
  66static const char * const qcom_scm_convention_names[] = {
  67	[SMC_CONVENTION_UNKNOWN] = "unknown",
  68	[SMC_CONVENTION_ARM_32] = "smc arm 32",
  69	[SMC_CONVENTION_ARM_64] = "smc arm 64",
  70	[SMC_CONVENTION_LEGACY] = "smc legacy",
  71};
  72
  73static struct qcom_scm *__scm;
  74
  75static int qcom_scm_clk_enable(void)
  76{
  77	int ret;
  78
  79	ret = clk_prepare_enable(__scm->core_clk);
  80	if (ret)
  81		goto bail;
  82
  83	ret = clk_prepare_enable(__scm->iface_clk);
  84	if (ret)
  85		goto disable_core;
  86
  87	ret = clk_prepare_enable(__scm->bus_clk);
  88	if (ret)
  89		goto disable_iface;
  90
  91	return 0;
  92
  93disable_iface:
  94	clk_disable_unprepare(__scm->iface_clk);
  95disable_core:
  96	clk_disable_unprepare(__scm->core_clk);
  97bail:
  98	return ret;
  99}
 100
 101static void qcom_scm_clk_disable(void)
 102{
 103	clk_disable_unprepare(__scm->core_clk);
 104	clk_disable_unprepare(__scm->iface_clk);
 105	clk_disable_unprepare(__scm->bus_clk);
 106}
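/*
 * Editorial note (not in the original file): the enable path above unwinds
 * any clocks it already prepared when a later clk_prepare_enable() fails,
 * so callers only need to balance a *successful* enable, e.g.:
 *
 *	ret = qcom_scm_clk_enable();
 *	if (ret)
 *		return ret;
 *	...make the SCM call...
 *	qcom_scm_clk_disable();
 */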
 107
 108static int qcom_scm_bw_enable(void)
 109{
 110	int ret = 0;
 111
 112	if (!__scm->path)
 113		return 0;
 114
 115	if (IS_ERR(__scm->path))
 116		return -EINVAL;
 117
 118	mutex_lock(&__scm->scm_bw_lock);
 119	if (!__scm->scm_vote_count) {
 120		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
 121		if (ret < 0) {
 122			dev_err(__scm->dev, "failed to set bandwidth request\n");
 123			goto err_bw;
 124		}
 125	}
 126	__scm->scm_vote_count++;
 127err_bw:
 128	mutex_unlock(&__scm->scm_bw_lock);
 129
 130	return ret;
 131}
 132
 133static void qcom_scm_bw_disable(void)
 134{
 135	if (IS_ERR_OR_NULL(__scm->path))
 136		return;
 137
 138	mutex_lock(&__scm->scm_bw_lock);
 139	if (__scm->scm_vote_count-- == 1)
 140		icc_set_bw(__scm->path, 0, 0);
 141	mutex_unlock(&__scm->scm_bw_lock);
 142}
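/*
 * Editorial note: qcom_scm_bw_enable()/qcom_scm_bw_disable() keep a
 * reference count under scm_bw_lock, so the interconnect bandwidth vote is
 * raised for the first concurrent caller and dropped only when the last one
 * finishes. Calls must therefore be strictly paired, mirroring the clock
 * helpers above:
 *
 *	ret = qcom_scm_bw_enable();
 *	if (ret)
 *		return ret;
 *	...make the SCM call...
 *	qcom_scm_bw_disable();
 */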
 143
 144enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
 145static DEFINE_SPINLOCK(scm_query_lock);
 146
 147static enum qcom_scm_convention __get_convention(void)
 148{
 149	unsigned long flags;
 150	struct qcom_scm_desc desc = {
 151		.svc = QCOM_SCM_SVC_INFO,
 152		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 153		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
 154					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
 155			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
 156		.arginfo = QCOM_SCM_ARGS(1),
 157		.owner = ARM_SMCCC_OWNER_SIP,
 158	};
 159	struct qcom_scm_res res;
 160	enum qcom_scm_convention probed_convention;
 161	int ret;
 162	bool forced = false;
 163
 164	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
 165		return qcom_scm_convention;
 166
 167	/*
 168	 * Device isn't required as there is only one argument - no device
 169	 * needed to dma_map_single to secure world
 170	 */
 171	probed_convention = SMC_CONVENTION_ARM_64;
 172	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 173	if (!ret && res.result[0] == 1)
 174		goto found;
 175
 176	/*
 177	 * Some SC7180 firmwares didn't implement the
 178	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
 179	 * calling conventions on these firmwares. Luckily we don't make any
 180	 * early calls into the firmware on these SoCs so the device pointer
 181	 * will be valid here to check if the compatible matches.
 182	 */
 183	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
 184		forced = true;
 185		goto found;
 186	}
 187
 188	probed_convention = SMC_CONVENTION_ARM_32;
 189	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 190	if (!ret && res.result[0] == 1)
 191		goto found;
 192
 193	probed_convention = SMC_CONVENTION_LEGACY;
 194found:
 195	spin_lock_irqsave(&scm_query_lock, flags);
 196	if (probed_convention != qcom_scm_convention) {
 197		qcom_scm_convention = probed_convention;
 198		pr_info("qcom_scm: convention: %s%s\n",
 199			qcom_scm_convention_names[qcom_scm_convention],
 200			forced ? " (forced)" : "");
 201	}
 202	spin_unlock_irqrestore(&scm_query_lock, flags);
 203
 204	return qcom_scm_convention;
 205}
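/*
 * Editorial note: the probe order above is ARM SMC 64-bit first, then the
 * SC7180 compatible quirk (whose firmware lacks QCOM_SCM_INFO_IS_CALL_AVAIL
 * but still speaks the 64-bit convention), then ARM SMC 32-bit, and finally
 * the legacy convention as the fallback. The result is cached in
 * qcom_scm_convention, so the probe runs at most once.
 */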
 206
 207/**
 208 * qcom_scm_call() - Invoke a syscall in the secure world
 209 * @dev:	device
 210 * @desc:	Descriptor structure containing arguments and return values
 211 * @res:	Structure containing results from SMC/HVC call
 212 *
 213 * Sends a command to the SCM and waits for the command to finish processing.
 214 * This should *only* be called in pre-emptible context.
 215 */
 216static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
 217			 struct qcom_scm_res *res)
 218{
 219	might_sleep();
 220	switch (__get_convention()) {
 221	case SMC_CONVENTION_ARM_32:
 222	case SMC_CONVENTION_ARM_64:
 223		return scm_smc_call(dev, desc, res, false);
 224	case SMC_CONVENTION_LEGACY:
 225		return scm_legacy_call(dev, desc, res);
 226	default:
 227		pr_err("Unknown current SCM calling convention.\n");
 228		return -EINVAL;
 229	}
 230}
 231
 232/**
 233 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 234 * @dev:	device
 235 * @desc:	Descriptor structure containing arguments and return values
 236 * @res:	Structure containing results from SMC/HVC call
 237 *
 238 * Sends a command to the SCM and waits for the command to finish processing.
 239 * This can be called in atomic context.
 240 */
 241static int qcom_scm_call_atomic(struct device *dev,
 242				const struct qcom_scm_desc *desc,
 243				struct qcom_scm_res *res)
 244{
 245	switch (__get_convention()) {
 246	case SMC_CONVENTION_ARM_32:
 247	case SMC_CONVENTION_ARM_64:
 248		return scm_smc_call(dev, desc, res, true);
 249	case SMC_CONVENTION_LEGACY:
 250		return scm_legacy_call_atomic(dev, desc, res);
 251	default:
 252		pr_err("Unknown current SCM calling convention.\n");
 253		return -EINVAL;
 254	}
 255}
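/*
 * Editorial note: qcom_scm_call() may sleep and is only valid in
 * preemptible context, while qcom_scm_call_atomic() may be used with
 * interrupts disabled. A hypothetical helper (not in this file) choosing
 * between the two could look like:
 *
 *	static int scm_invoke(const struct qcom_scm_desc *desc,
 *			      struct qcom_scm_res *res, bool atomic)
 *	{
 *		if (atomic)
 *			return qcom_scm_call_atomic(__scm->dev, desc, res);
 *		return qcom_scm_call(__scm->dev, desc, res);
 *	}
 */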
 256
 257static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
 258					 u32 cmd_id)
 259{
 260	int ret;
 261	struct qcom_scm_desc desc = {
 262		.svc = QCOM_SCM_SVC_INFO,
 263		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 264		.owner = ARM_SMCCC_OWNER_SIP,
 265	};
 266	struct qcom_scm_res res;
 267
 268	desc.arginfo = QCOM_SCM_ARGS(1);
 269	switch (__get_convention()) {
 270	case SMC_CONVENTION_ARM_32:
 271	case SMC_CONVENTION_ARM_64:
 272		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
 273				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
 274		break;
 275	case SMC_CONVENTION_LEGACY:
 276		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
 277		break;
 278	default:
 279		pr_err("Unknown SMC convention being used\n");
 280		return false;
 281	}
 282
 283	ret = qcom_scm_call(dev, &desc, &res);
 284
 285	return ret ? false : !!res.result[0];
 286}
 287
 288static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
 289{
 290	int cpu;
 291	unsigned int flags = 0;
 292	struct qcom_scm_desc desc = {
 293		.svc = QCOM_SCM_SVC_BOOT,
 294		.cmd = QCOM_SCM_BOOT_SET_ADDR,
 295		.arginfo = QCOM_SCM_ARGS(2),
 296		.owner = ARM_SMCCC_OWNER_SIP,
 297	};
 298
 299	for_each_present_cpu(cpu) {
 300		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
 301			return -EINVAL;
 302		flags |= cpu_bits[cpu];
 303	}
 304
 305	desc.args[0] = flags;
 306	desc.args[1] = virt_to_phys(entry);
 307
 308	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 309}
 310
 311static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
 312{
 313	struct qcom_scm_desc desc = {
 314		.svc = QCOM_SCM_SVC_BOOT,
 315		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
 316		.owner = ARM_SMCCC_OWNER_SIP,
 317		.arginfo = QCOM_SCM_ARGS(6),
 318		.args = {
 319			virt_to_phys(entry),
 320			/* Apply to all CPUs in all affinity levels */
 321			~0ULL, ~0ULL, ~0ULL, ~0ULL,
 322			flags,
 323		},
 324	};
 325
 326	/* Need a device for DMA of the additional arguments */
 327	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
 328		return -EOPNOTSUPP;
 329
 330	return qcom_scm_call(__scm->dev, &desc, NULL);
 331}
 332
 333/**
 334 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 335 * @entry: Entry point function for the cpus
 336 *
 337 * Set the Linux entry point for the SCM to transfer control to when coming
 338 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 339 */
 340int qcom_scm_set_warm_boot_addr(void *entry)
 341{
 342	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
 343		/* Fallback to old SCM call */
 344		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
 345	return 0;
 346}
 347EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
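/*
 * Illustrative usage (an assumption, not taken from this file): a cpuidle
 * or hotplug path would register the kernel's resume trampoline once during
 * boot:
 *
 *	ret = qcom_scm_set_warm_boot_addr(cpu_resume);
 *	if (ret)
 *		pr_err("qcom_scm: failed to set warm boot address: %d\n", ret);
 *
 * where cpu_resume is assumed to be the platform's warm-boot entry point.
 */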
 348
 349/**
 350 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 351 * @entry: Entry point function for the cpus
 352 */
 353int qcom_scm_set_cold_boot_addr(void *entry)
 354{
 355	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
 356		/* Fallback to old SCM call */
 357		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
 358	return 0;
 359}
 360EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 361
 362/**
 363 * qcom_scm_cpu_power_down() - Power down the cpu
 364 * @flags:	Flags to flush cache
 365 *
 366 * This is an end point to power down cpu. If there was a pending interrupt,
 367 * the control would return from this function, otherwise, the cpu jumps to the
 368 * warm boot entry point set for this cpu upon reset.
 369 */
 370void qcom_scm_cpu_power_down(u32 flags)
 371{
 372	struct qcom_scm_desc desc = {
 373		.svc = QCOM_SCM_SVC_BOOT,
 374		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
 375		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
 376		.arginfo = QCOM_SCM_ARGS(1),
 377		.owner = ARM_SMCCC_OWNER_SIP,
 378	};
 379
 380	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 381}
 382EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 383
 384int qcom_scm_set_remote_state(u32 state, u32 id)
 385{
 386	struct qcom_scm_desc desc = {
 387		.svc = QCOM_SCM_SVC_BOOT,
 388		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
 389		.arginfo = QCOM_SCM_ARGS(2),
 390		.args[0] = state,
 391		.args[1] = id,
 392		.owner = ARM_SMCCC_OWNER_SIP,
 393	};
 394	struct qcom_scm_res res;
 395	int ret;
 396
 397	ret = qcom_scm_call(__scm->dev, &desc, &res);
 398
 399	return ret ? : res.result[0];
 400}
 401EXPORT_SYMBOL(qcom_scm_set_remote_state);
 402
 403static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
 404{
 405	struct qcom_scm_desc desc = {
 406		.svc = QCOM_SCM_SVC_BOOT,
 407		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 408		.arginfo = QCOM_SCM_ARGS(2),
 409		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 410		.owner = ARM_SMCCC_OWNER_SIP,
 411	};
 412
 413	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
 414
 415	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 416}
 417
 418static void qcom_scm_set_download_mode(bool enable)
 419{
 420	bool avail;
 421	int ret = 0;
 422
 423	avail = __qcom_scm_is_call_available(__scm->dev,
 424					     QCOM_SCM_SVC_BOOT,
 425					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
 426	if (avail) {
 427		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
 428	} else if (__scm->dload_mode_addr) {
 429		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
 430				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
 431	} else {
 432		dev_err(__scm->dev,
 433			"No available mechanism for setting download mode\n");
 434	}
 435
 436	if (ret)
 437		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
 438}
 439
 440/**
 441 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 442 *			       state machine for a given peripheral, using the
 443 *			       metadata
 444 * @peripheral: peripheral id
 445 * @metadata:	pointer to memory containing ELF header, program header table
 446 *		and optional blob of data used for authenticating the metadata
 447 *		and the rest of the firmware
 448 * @size:	size of the metadata
 449 * @ctx:	optional metadata context
 450 *
 451 * Return: 0 on success.
 452 *
 453 * Upon successful return, the PAS metadata context (@ctx) will be used to
 454 * track the metadata allocation, this needs to be released by invoking
 455 * qcom_scm_pas_metadata_release() by the caller.
 456 */
 457int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
 458			    struct qcom_scm_pas_metadata *ctx)
 459{
 460	dma_addr_t mdata_phys;
 461	void *mdata_buf;
 462	int ret;
 463	struct qcom_scm_desc desc = {
 464		.svc = QCOM_SCM_SVC_PIL,
 465		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
 466		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
 467		.args[0] = peripheral,
 468		.owner = ARM_SMCCC_OWNER_SIP,
 469	};
 470	struct qcom_scm_res res;
 471
 472	/*
 473	 * During the scm call memory protection will be enabled for the meta
 474	 * data blob, so make sure it's physically contiguous, 4K aligned and
 475	 * non-cachable to avoid XPU violations.
 476	 */
 477	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
 478				       GFP_KERNEL);
 479	if (!mdata_buf) {
 480		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
 481		return -ENOMEM;
 482	}
 483	memcpy(mdata_buf, metadata, size);
 484
 485	ret = qcom_scm_clk_enable();
 486	if (ret)
 487		goto out;
 488
 489	ret = qcom_scm_bw_enable();
 490	if (ret)
  491		goto disable_clk;
 492
 493	desc.args[1] = mdata_phys;
 494
 495	ret = qcom_scm_call(__scm->dev, &desc, &res);
 496
  497	qcom_scm_bw_disable();
disable_clk:
  498	qcom_scm_clk_disable();
 499
 500out:
 501	if (ret < 0 || !ctx) {
 502		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 503	} else if (ctx) {
 504		ctx->ptr = mdata_buf;
 505		ctx->phys = mdata_phys;
 506		ctx->size = size;
 507	}
 508
 509	return ret ? : res.result[0];
 510}
 511EXPORT_SYMBOL(qcom_scm_pas_init_image);
 512
 513/**
 514 * qcom_scm_pas_metadata_release() - release metadata context
 515 * @ctx:	metadata context
 516 */
 517void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
 518{
 519	if (!ctx->ptr)
 520		return;
 521
 522	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
 523
 524	ctx->ptr = NULL;
 525	ctx->phys = 0;
 526	ctx->size = 0;
 527}
 528EXPORT_SYMBOL(qcom_scm_pas_metadata_release);
 529
 530/**
 531 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 532 *			      for firmware loading
 533 * @peripheral:	peripheral id
 534 * @addr:	start address of memory area to prepare
 535 * @size:	size of the memory area to prepare
 536 *
 537 * Returns 0 on success.
 538 */
 539int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 540{
 541	int ret;
 542	struct qcom_scm_desc desc = {
 543		.svc = QCOM_SCM_SVC_PIL,
 544		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
 545		.arginfo = QCOM_SCM_ARGS(3),
 546		.args[0] = peripheral,
 547		.args[1] = addr,
 548		.args[2] = size,
 549		.owner = ARM_SMCCC_OWNER_SIP,
 550	};
 551	struct qcom_scm_res res;
 552
 553	ret = qcom_scm_clk_enable();
 554	if (ret)
 555		return ret;
 556
 557	ret = qcom_scm_bw_enable();
 558	if (ret)
  559		goto disable_clk;
 560
 561	ret = qcom_scm_call(__scm->dev, &desc, &res);
  562	ret = qcom_scm_call(__scm->dev, &desc, &res);
  563	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
 564
 565	return ret ? : res.result[0];
 566}
 567EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 568
 569/**
 570 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 571 *				   and reset the remote processor
 572 * @peripheral:	peripheral id
 573 *
 574 * Return 0 on success.
 575 */
 576int qcom_scm_pas_auth_and_reset(u32 peripheral)
 577{
 578	int ret;
 579	struct qcom_scm_desc desc = {
 580		.svc = QCOM_SCM_SVC_PIL,
 581		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
 582		.arginfo = QCOM_SCM_ARGS(1),
 583		.args[0] = peripheral,
 584		.owner = ARM_SMCCC_OWNER_SIP,
 585	};
 586	struct qcom_scm_res res;
 587
 588	ret = qcom_scm_clk_enable();
 589	if (ret)
 590		return ret;
 591
 592	ret = qcom_scm_bw_enable();
 593	if (ret)
  594		goto disable_clk;
 595
 596	ret = qcom_scm_call(__scm->dev, &desc, &res);
  597	qcom_scm_bw_disable();
disable_clk:
  598	qcom_scm_clk_disable();
 599
 600	return ret ? : res.result[0];
 601}
 602EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 603
 604/**
 605 * qcom_scm_pas_shutdown() - Shut down the remote processor
 606 * @peripheral: peripheral id
 607 *
 608 * Returns 0 on success.
 609 */
 610int qcom_scm_pas_shutdown(u32 peripheral)
 611{
 612	int ret;
 613	struct qcom_scm_desc desc = {
 614		.svc = QCOM_SCM_SVC_PIL,
 615		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
 616		.arginfo = QCOM_SCM_ARGS(1),
 617		.args[0] = peripheral,
 618		.owner = ARM_SMCCC_OWNER_SIP,
 619	};
 620	struct qcom_scm_res res;
 621
 622	ret = qcom_scm_clk_enable();
 623	if (ret)
 624		return ret;
 625
 626	ret = qcom_scm_bw_enable();
 627	if (ret)
  628		goto disable_clk;
 629
 630	ret = qcom_scm_call(__scm->dev, &desc, &res);
 631
  632	qcom_scm_bw_disable();
disable_clk:
  633	qcom_scm_clk_disable();
 634
 635	return ret ? : res.result[0];
 636}
 637EXPORT_SYMBOL(qcom_scm_pas_shutdown);
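/*
 * Illustrative PAS flow (a sketch, not part of this file): a remoteproc
 * driver loading firmware into a peripheral typically calls, in order:
 *
 *	qcom_scm_pas_init_image(pas_id, metadata, size, &ctx);
 *	qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	...copy the firmware segments into the carveout...
 *	qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * and qcom_scm_pas_shutdown(pas_id) when stopping the remote processor.
 */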
 638
 639/**
 640 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
  641 *			      available for the given peripheral
 642 * @peripheral:	peripheral id
 643 *
 644 * Returns true if PAS is supported for this peripheral, otherwise false.
 645 */
 646bool qcom_scm_pas_supported(u32 peripheral)
 647{
 648	int ret;
 649	struct qcom_scm_desc desc = {
 650		.svc = QCOM_SCM_SVC_PIL,
 651		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
 652		.arginfo = QCOM_SCM_ARGS(1),
 653		.args[0] = peripheral,
 654		.owner = ARM_SMCCC_OWNER_SIP,
 655	};
 656	struct qcom_scm_res res;
 657
 658	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
 659					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
 660		return false;
 661
 662	ret = qcom_scm_call(__scm->dev, &desc, &res);
 663
 664	return ret ? false : !!res.result[0];
 665}
 666EXPORT_SYMBOL(qcom_scm_pas_supported);
 667
 668static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
 669{
 670	struct qcom_scm_desc desc = {
 671		.svc = QCOM_SCM_SVC_PIL,
 672		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
 673		.arginfo = QCOM_SCM_ARGS(2),
 674		.args[0] = reset,
 675		.args[1] = 0,
 676		.owner = ARM_SMCCC_OWNER_SIP,
 677	};
 678	struct qcom_scm_res res;
 679	int ret;
 680
 681	ret = qcom_scm_call(__scm->dev, &desc, &res);
 682
 683	return ret ? : res.result[0];
 684}
 685
 686static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 687				     unsigned long idx)
 688{
 689	if (idx != 0)
 690		return -EINVAL;
 691
 692	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
 693}
 694
 695static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
 696				       unsigned long idx)
 697{
 698	if (idx != 0)
 699		return -EINVAL;
 700
 701	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
 702}
 703
 704static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 705	.assert = qcom_scm_pas_reset_assert,
 706	.deassert = qcom_scm_pas_reset_deassert,
 707};
 708
 709int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 710{
 711	struct qcom_scm_desc desc = {
 712		.svc = QCOM_SCM_SVC_IO,
 713		.cmd = QCOM_SCM_IO_READ,
 714		.arginfo = QCOM_SCM_ARGS(1),
 715		.args[0] = addr,
 716		.owner = ARM_SMCCC_OWNER_SIP,
 717	};
 718	struct qcom_scm_res res;
 719	int ret;
 720
 721
 722	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
 723	if (ret >= 0)
 724		*val = res.result[0];
 725
 726	return ret < 0 ? ret : 0;
 727}
 728EXPORT_SYMBOL(qcom_scm_io_readl);
 729
 730int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 731{
 732	struct qcom_scm_desc desc = {
 733		.svc = QCOM_SCM_SVC_IO,
 734		.cmd = QCOM_SCM_IO_WRITE,
 735		.arginfo = QCOM_SCM_ARGS(2),
 736		.args[0] = addr,
 737		.args[1] = val,
 738		.owner = ARM_SMCCC_OWNER_SIP,
 739	};
 740
 741	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 742}
 743EXPORT_SYMBOL(qcom_scm_io_writel);
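/*
 * Illustrative usage (assumption): these helpers let drivers access
 * secure-only registers by physical address, e.g.:
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(reg_phys, &val))
 *		qcom_scm_io_writel(reg_phys, val | BIT(0));
 */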
 744
 745/**
 746 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 747 * supports restore security config interface.
 748 *
 749 * Return true if restore-cfg interface is supported, false if not.
 750 */
 751bool qcom_scm_restore_sec_cfg_available(void)
 752{
 753	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
 754					    QCOM_SCM_MP_RESTORE_SEC_CFG);
 755}
 756EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
 757
 758int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 759{
 760	struct qcom_scm_desc desc = {
 761		.svc = QCOM_SCM_SVC_MP,
 762		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
 763		.arginfo = QCOM_SCM_ARGS(2),
 764		.args[0] = device_id,
 765		.args[1] = spare,
 766		.owner = ARM_SMCCC_OWNER_SIP,
 767	};
 768	struct qcom_scm_res res;
 769	int ret;
 770
 771	ret = qcom_scm_call(__scm->dev, &desc, &res);
 772
 773	return ret ? : res.result[0];
 774}
 775EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
 776
 777int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 778{
 779	struct qcom_scm_desc desc = {
 780		.svc = QCOM_SCM_SVC_MP,
 781		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
 782		.arginfo = QCOM_SCM_ARGS(1),
 783		.args[0] = spare,
 784		.owner = ARM_SMCCC_OWNER_SIP,
 785	};
 786	struct qcom_scm_res res;
 787	int ret;
 788
 789	ret = qcom_scm_call(__scm->dev, &desc, &res);
 790
 791	if (size)
 792		*size = res.result[0];
 793
 794	return ret ? : res.result[1];
 795}
 796EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
 797
 798int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 799{
 800	struct qcom_scm_desc desc = {
 801		.svc = QCOM_SCM_SVC_MP,
 802		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
 803		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
 804					 QCOM_SCM_VAL),
 805		.args[0] = addr,
 806		.args[1] = size,
 807		.args[2] = spare,
 808		.owner = ARM_SMCCC_OWNER_SIP,
 809	};
 810	int ret;
 811
 812	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 813
 814	/* the pg table has been initialized already, ignore the error */
 815	if (ret == -EPERM)
 816		ret = 0;
 817
 818	return ret;
 819}
 820EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
 821
 822int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
 823{
 824	struct qcom_scm_desc desc = {
 825		.svc = QCOM_SCM_SVC_MP,
 826		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
 827		.arginfo = QCOM_SCM_ARGS(2),
 828		.args[0] = size,
 829		.args[1] = spare,
 830		.owner = ARM_SMCCC_OWNER_SIP,
 831	};
 832
 833	return qcom_scm_call(__scm->dev, &desc, NULL);
 834}
 835EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
 836
 837int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
 838				   u32 cp_nonpixel_start,
 839				   u32 cp_nonpixel_size)
 840{
 841	int ret;
 842	struct qcom_scm_desc desc = {
 843		.svc = QCOM_SCM_SVC_MP,
 844		.cmd = QCOM_SCM_MP_VIDEO_VAR,
 845		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
 846					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 847		.args[0] = cp_start,
 848		.args[1] = cp_size,
 849		.args[2] = cp_nonpixel_start,
 850		.args[3] = cp_nonpixel_size,
 851		.owner = ARM_SMCCC_OWNER_SIP,
 852	};
 853	struct qcom_scm_res res;
 854
 855	ret = qcom_scm_call(__scm->dev, &desc, &res);
 856
 857	return ret ? : res.result[0];
 858}
 859EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
 860
 861static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
 862				 size_t mem_sz, phys_addr_t src, size_t src_sz,
 863				 phys_addr_t dest, size_t dest_sz)
 864{
 865	int ret;
 866	struct qcom_scm_desc desc = {
 867		.svc = QCOM_SCM_SVC_MP,
 868		.cmd = QCOM_SCM_MP_ASSIGN,
 869		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
 870					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
 871					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 872		.args[0] = mem_region,
 873		.args[1] = mem_sz,
 874		.args[2] = src,
 875		.args[3] = src_sz,
 876		.args[4] = dest,
 877		.args[5] = dest_sz,
 878		.args[6] = 0,
 879		.owner = ARM_SMCCC_OWNER_SIP,
 880	};
 881	struct qcom_scm_res res;
 882
 883	ret = qcom_scm_call(dev, &desc, &res);
 884
 885	return ret ? : res.result[0];
 886}
 887
 888/**
 889 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 890 * @mem_addr: mem region whose ownership needs to be reassigned
 891 * @mem_sz:   size of the region.
 892 * @srcvm:    vmid for the current set of owners, each set bit in
 893 *            the flag indicates a unique owner
 894 * @newvm:    array having new owners and corresponding permission
 895 *            flags
 896 * @dest_cnt: number of owners in next set.
 897 *
 898 * Return negative errno on failure or 0 on success with @srcvm updated.
 899 */
 900int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 901			unsigned int *srcvm,
 902			const struct qcom_scm_vmperm *newvm,
 903			unsigned int dest_cnt)
 904{
 905	struct qcom_scm_current_perm_info *destvm;
 906	struct qcom_scm_mem_map_info *mem_to_map;
 907	phys_addr_t mem_to_map_phys;
 908	phys_addr_t dest_phys;
 909	dma_addr_t ptr_phys;
 910	size_t mem_to_map_sz;
 911	size_t dest_sz;
 912	size_t src_sz;
 913	size_t ptr_sz;
 914	int next_vm;
 915	__le32 *src;
 916	void *ptr;
 917	int ret, i, b;
 918	unsigned long srcvm_bits = *srcvm;
 919
 920	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
 921	mem_to_map_sz = sizeof(*mem_to_map);
 922	dest_sz = dest_cnt * sizeof(*destvm);
 923	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
 924			ALIGN(dest_sz, SZ_64);
 925
 926	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
 927	if (!ptr)
 928		return -ENOMEM;
 929
 930	/* Fill source vmid detail */
 931	src = ptr;
 932	i = 0;
 933	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
 934		src[i++] = cpu_to_le32(b);
 935
 936	/* Fill details of mem buff to map */
 937	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
 938	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
 939	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
 940	mem_to_map->mem_size = cpu_to_le64(mem_sz);
 941
 942	next_vm = 0;
 943	/* Fill details of next vmid detail */
 944	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 945	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 946	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
 947		destvm->vmid = cpu_to_le32(newvm->vmid);
 948		destvm->perm = cpu_to_le32(newvm->perm);
 949		destvm->ctx = 0;
 950		destvm->ctx_size = 0;
 951		next_vm |= BIT(newvm->vmid);
 952	}
 953
 954	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 955				    ptr_phys, src_sz, dest_phys, dest_sz);
 956	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
 957	if (ret) {
 958		dev_err(__scm->dev,
 959			"Assign memory protection call failed %d\n", ret);
 960		return -EINVAL;
 961	}
 962
 963	*srcvm = next_vm;
 964	return 0;
 965}
 966EXPORT_SYMBOL(qcom_scm_assign_mem);
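/*
 * Illustrative usage (a sketch under the assumption of a buffer shared with
 * the modem): to hand a region from HLOS to a remote VM, a caller might do:
 *
 *	struct qcom_scm_vmperm perms[] = {
 *		{ QCOM_SCM_VMID_HLOS, QCOM_SCM_PERM_RW },
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	unsigned int src = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(phys, size, &src, perms, ARRAY_SIZE(perms));
 *
 * On success, @src is updated to the new owner mask for the reverse call.
 */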
 967
 968/**
 969 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 970 */
 971bool qcom_scm_ocmem_lock_available(void)
 972{
 973	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
 974					    QCOM_SCM_OCMEM_LOCK_CMD);
 975}
 976EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
 977
 978/**
 979 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 980 * region to the specified initiator
 981 *
 982 * @id:     tz initiator id
 983 * @offset: OCMEM offset
 984 * @size:   OCMEM size
 985 * @mode:   access mode (WIDE/NARROW)
 986 */
 987int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
 988			u32 mode)
 989{
 990	struct qcom_scm_desc desc = {
 991		.svc = QCOM_SCM_SVC_OCMEM,
 992		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
 993		.args[0] = id,
 994		.args[1] = offset,
 995		.args[2] = size,
 996		.args[3] = mode,
 997		.arginfo = QCOM_SCM_ARGS(4),
 998	};
 999
1000	return qcom_scm_call(__scm->dev, &desc, NULL);
1001}
1002EXPORT_SYMBOL(qcom_scm_ocmem_lock);
1003
1004/**
1005 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1006 * region from the specified initiator
1007 *
1008 * @id:     tz initiator id
1009 * @offset: OCMEM offset
1010 * @size:   OCMEM size
1011 */
1012int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1013{
1014	struct qcom_scm_desc desc = {
1015		.svc = QCOM_SCM_SVC_OCMEM,
1016		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1017		.args[0] = id,
1018		.args[1] = offset,
1019		.args[2] = size,
1020		.arginfo = QCOM_SCM_ARGS(3),
1021	};
1022
1023	return qcom_scm_call(__scm->dev, &desc, NULL);
1024}
1025EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
1026
1027/**
1028 * qcom_scm_ice_available() - Is the ICE key programming interface available?
1029 *
1030 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1031 *	   qcom_scm_ice_set_key() are available.
1032 */
1033bool qcom_scm_ice_available(void)
1034{
1035	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1036					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1037		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1038					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1039}
1040EXPORT_SYMBOL(qcom_scm_ice_available);
1041
1042/**
1043 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1044 * @index: the keyslot to invalidate
1045 *
1046 * The UFSHCI and eMMC standards define a standard way to do this, but it
1047 * doesn't work on these SoCs; only this SCM call does.
1048 *
1049 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1050 * call doesn't specify which ICE instance the keyslot belongs to.
1051 *
1052 * Return: 0 on success; -errno on failure.
1053 */
1054int qcom_scm_ice_invalidate_key(u32 index)
1055{
1056	struct qcom_scm_desc desc = {
1057		.svc = QCOM_SCM_SVC_ES,
1058		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1059		.arginfo = QCOM_SCM_ARGS(1),
1060		.args[0] = index,
1061		.owner = ARM_SMCCC_OWNER_SIP,
1062	};
1063
1064	return qcom_scm_call(__scm->dev, &desc, NULL);
1065}
1066EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
1067
1068/**
1069 * qcom_scm_ice_set_key() - Set an inline encryption key
1070 * @index: the keyslot into which to set the key
1071 * @key: the key to program
1072 * @key_size: the size of the key in bytes
1073 * @cipher: the encryption algorithm the key is for
1074 * @data_unit_size: the encryption data unit size, i.e. the size of each
1075 *		    individual plaintext and ciphertext.  Given in 512-byte
1076 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1077 *
1078 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1079 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1080 *
1081 * The UFSHCI and eMMC standards define a standard way to do this, but it
1082 * doesn't work on these SoCs; only this SCM call does.
1083 *
1084 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1085 * call doesn't specify which ICE instance the keyslot belongs to.
1086 *
1087 * Return: 0 on success; -errno on failure.
1088 */
1089int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1090			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1091{
1092	struct qcom_scm_desc desc = {
1093		.svc = QCOM_SCM_SVC_ES,
1094		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1095		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1096					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1097					 QCOM_SCM_VAL),
1098		.args[0] = index,
1099		.args[2] = key_size,
1100		.args[3] = cipher,
1101		.args[4] = data_unit_size,
1102		.owner = ARM_SMCCC_OWNER_SIP,
1103	};
1104	void *keybuf;
1105	dma_addr_t key_phys;
1106	int ret;
1107
1108	/*
1109	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1110	 * physical address that's been properly flushed.  The sanctioned way to
1111	 * do this is by using the DMA API.  But as is best practice for crypto
1112	 * keys, we also must wipe the key after use.  This makes kmemdup() +
1113	 * dma_map_single() not clearly correct, since the DMA API can use
1114	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
1115	 * keys is normally rare and thus not performance-critical.
1116	 */
1117
1118	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1119				    GFP_KERNEL);
1120	if (!keybuf)
1121		return -ENOMEM;
1122	memcpy(keybuf, key, key_size);
1123	desc.args[1] = key_phys;
1124
1125	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1126
1127	memzero_explicit(keybuf, key_size);
1128
1129	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1130	return ret;
1131}
1132EXPORT_SYMBOL(qcom_scm_ice_set_key);
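/*
 * Illustrative usage (assumption): a UFS or eMMC host driver programming a
 * 64-byte AES-256-XTS key into a keyslot might call:
 *
 *	err = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *
 * with data_unit_size 8 meaning 4096-byte crypto data units, and later
 * qcom_scm_ice_invalidate_key(slot) to evict the key again.
 */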
1133
1134/**
1135 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1136 *
1137 * Return true if HDCP is supported, false if not.
1138 */
1139bool qcom_scm_hdcp_available(void)
1140{
1141	bool avail;
1142	int ret = qcom_scm_clk_enable();
1143
1144	if (ret)
 1145		return false;
1146
1147	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1148						QCOM_SCM_HDCP_INVOKE);
1149
1150	qcom_scm_clk_disable();
1151
1152	return avail;
1153}
1154EXPORT_SYMBOL(qcom_scm_hdcp_available);
1155
1156/**
1157 * qcom_scm_hdcp_req() - Send HDCP request.
1158 * @req: HDCP request array
1159 * @req_cnt: HDCP request array count
1160 * @resp: response buffer passed to SCM
1161 *
1162 * Write HDCP register(s) through SCM.
1163 */
1164int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1165{
1166	int ret;
1167	struct qcom_scm_desc desc = {
1168		.svc = QCOM_SCM_SVC_HDCP,
1169		.cmd = QCOM_SCM_HDCP_INVOKE,
1170		.arginfo = QCOM_SCM_ARGS(10),
1171		.args = {
1172			req[0].addr,
1173			req[0].val,
1174			req[1].addr,
1175			req[1].val,
1176			req[2].addr,
1177			req[2].val,
1178			req[3].addr,
1179			req[3].val,
1180			req[4].addr,
1181			req[4].val
1182		},
1183		.owner = ARM_SMCCC_OWNER_SIP,
1184	};
1185	struct qcom_scm_res res;
1186
1187	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1188		return -ERANGE;
1189
1190	ret = qcom_scm_clk_enable();
1191	if (ret)
1192		return ret;
1193
1194	ret = qcom_scm_call(__scm->dev, &desc, &res);
1195	*resp = res.result[0];
1196
1197	qcom_scm_clk_disable();
1198
1199	return ret;
1200}
1201EXPORT_SYMBOL(qcom_scm_hdcp_req);
1202
1203int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1204{
1205	struct qcom_scm_desc desc = {
1206		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1207		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1208		.arginfo = QCOM_SCM_ARGS(3),
1209		.args[0] = sec_id,
1210		.args[1] = ctx_num,
1211		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1212		.owner = ARM_SMCCC_OWNER_SIP,
1213	};
1214
1215	return qcom_scm_call(__scm->dev, &desc, NULL);
1216}
1217EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);
1218
1219int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1220{
1221	struct qcom_scm_desc desc = {
1222		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1223		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1224		.arginfo = QCOM_SCM_ARGS(2),
1225		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1226		.args[1] = en,
1227		.owner = ARM_SMCCC_OWNER_SIP,
1228	};
1229
1230
1231	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1232}
1233EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1234
1235bool qcom_scm_lmh_dcvsh_available(void)
1236{
1237	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1238}
1239EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
1240
1241int qcom_scm_lmh_profile_change(u32 profile_id)
1242{
1243	struct qcom_scm_desc desc = {
1244		.svc = QCOM_SCM_SVC_LMH,
1245		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1246		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1247		.args[0] = profile_id,
1248		.owner = ARM_SMCCC_OWNER_SIP,
1249	};
1250
1251	return qcom_scm_call(__scm->dev, &desc, NULL);
1252}
1253EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
1254
1255int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1256		       u64 limit_node, u32 node_id, u64 version)
1257{
1258	dma_addr_t payload_phys;
1259	u32 *payload_buf;
1260	int ret, payload_size = 5 * sizeof(u32);
1261
1262	struct qcom_scm_desc desc = {
1263		.svc = QCOM_SCM_SVC_LMH,
1264		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1265		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1266					QCOM_SCM_VAL, QCOM_SCM_VAL),
1267		.args[1] = payload_size,
1268		.args[2] = limit_node,
1269		.args[3] = node_id,
1270		.args[4] = version,
1271		.owner = ARM_SMCCC_OWNER_SIP,
1272	};
1273
1274	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
1275	if (!payload_buf)
1276		return -ENOMEM;
1277
1278	payload_buf[0] = payload_fn;
1279	payload_buf[1] = 0;
1280	payload_buf[2] = payload_reg;
1281	payload_buf[3] = 1;
1282	payload_buf[4] = payload_val;
1283
1284	desc.args[0] = payload_phys;
1285
1286	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1287
1288	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
1289	return ret;
1290}
1291EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
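/*
 * Editorial note: the payload DMA'd above is a fixed five-word message laid
 * out as { payload_fn, 0, payload_reg, 1, payload_val }; the descriptor
 * passes its physical address and size to the LMh firmware along with the
 * limit-node, node-id and version identifiers.
 */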
1292
1293static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1294{
1295	struct device_node *tcsr;
1296	struct device_node *np = dev->of_node;
1297	struct resource res;
1298	u32 offset;
1299	int ret;
1300
1301	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1302	if (!tcsr)
1303		return 0;
1304
1305	ret = of_address_to_resource(tcsr, 0, &res);
1306	of_node_put(tcsr);
1307	if (ret)
1308		return ret;
1309
1310	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1311	if (ret < 0)
1312		return ret;
1313
1314	*addr = res.start + offset;
1315
1316	return 0;
1317}
1318
1319/**
1320 * qcom_scm_is_available() - Checks if SCM is available
1321 */
1322bool qcom_scm_is_available(void)
1323{
1324	return !!__scm;
1325}
1326EXPORT_SYMBOL(qcom_scm_is_available);
1327
1328static int qcom_scm_probe(struct platform_device *pdev)
1329{
1330	struct qcom_scm *scm;
1331	unsigned long clks;
1332	int ret;
1333
1334	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1335	if (!scm)
1336		return -ENOMEM;
1337
1338	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1339	if (ret < 0)
1340		return ret;
1341
1342	mutex_init(&scm->scm_bw_lock);
1343
1344	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
1345
1346	scm->path = devm_of_icc_get(&pdev->dev, NULL);
1347	if (IS_ERR(scm->path))
1348		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
1349				     "failed to acquire interconnect path\n");
1350
1351	scm->core_clk = devm_clk_get(&pdev->dev, "core");
1352	if (IS_ERR(scm->core_clk)) {
1353		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
1354			return PTR_ERR(scm->core_clk);
1355
1356		if (clks & SCM_HAS_CORE_CLK) {
1357			dev_err(&pdev->dev, "failed to acquire core clk\n");
1358			return PTR_ERR(scm->core_clk);
1359		}
1360
1361		scm->core_clk = NULL;
1362	}
1363
1364	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
1365	if (IS_ERR(scm->iface_clk)) {
1366		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
1367			return PTR_ERR(scm->iface_clk);
1368
1369		if (clks & SCM_HAS_IFACE_CLK) {
1370			dev_err(&pdev->dev, "failed to acquire iface clk\n");
1371			return PTR_ERR(scm->iface_clk);
1372		}
1373
1374		scm->iface_clk = NULL;
1375	}
1376
1377	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
1378	if (IS_ERR(scm->bus_clk)) {
1379		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
1380			return PTR_ERR(scm->bus_clk);
1381
1382		if (clks & SCM_HAS_BUS_CLK) {
1383			dev_err(&pdev->dev, "failed to acquire bus clk\n");
1384			return PTR_ERR(scm->bus_clk);
1385		}
1386
1387		scm->bus_clk = NULL;
1388	}
1389
1390	scm->reset.ops = &qcom_scm_pas_reset_ops;
1391	scm->reset.nr_resets = 1;
1392	scm->reset.of_node = pdev->dev.of_node;
1393	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1394	if (ret)
1395		return ret;
1396
1397	/* vote for max clk rate for highest performance */
1398	ret = clk_set_rate(scm->core_clk, INT_MAX);
1399	if (ret)
1400		return ret;
1401
1402	__scm = scm;
1403	__scm->dev = &pdev->dev;
1404
1405	__get_convention();
1406
1407	/*
 1408	 * If requested, enable "download mode": from this point on, a warm
 1409	 * boot will cause the boot stages to enter download mode, unless it
 1410	 * is disabled below by a clean shutdown/reboot.
1411	 */
1412	if (download_mode)
1413		qcom_scm_set_download_mode(true);
1414
1415	return 0;
1416}
1417
1418static void qcom_scm_shutdown(struct platform_device *pdev)
1419{
1420	/* Clean shutdown, disable download mode to allow normal restart */
1421	if (download_mode)
1422		qcom_scm_set_download_mode(false);
1423}
1424
1425static const struct of_device_id qcom_scm_dt_match[] = {
1426	{ .compatible = "qcom,scm-apq8064",
1427	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1428	},
1429	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
1430							     SCM_HAS_IFACE_CLK |
1431							     SCM_HAS_BUS_CLK)
1432	},
1433	{ .compatible = "qcom,scm-ipq4019" },
1434	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
1435							     SCM_HAS_IFACE_CLK |
1436							     SCM_HAS_BUS_CLK) },
1437	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
1438	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
1439	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
1440							     SCM_HAS_IFACE_CLK |
1441							     SCM_HAS_BUS_CLK)
1442	},
1443	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
1444							     SCM_HAS_IFACE_CLK |
1445							     SCM_HAS_BUS_CLK)
1446	},
1447	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
1448							     SCM_HAS_IFACE_CLK |
1449							     SCM_HAS_BUS_CLK)
1450	},
1451	{ .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK |
1452							     SCM_HAS_IFACE_CLK |
1453							     SCM_HAS_BUS_CLK)
1454	},
1455	{ .compatible = "qcom,scm-msm8994" },
1456	{ .compatible = "qcom,scm-msm8996" },
1457	{ .compatible = "qcom,scm" },
1458	{}
1459};
1460MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
1461
1462static struct platform_driver qcom_scm_driver = {
1463	.driver = {
1464		.name	= "qcom_scm",
1465		.of_match_table = qcom_scm_dt_match,
1466		.suppress_bind_attrs = true,
1467	},
1468	.probe = qcom_scm_probe,
1469	.shutdown = qcom_scm_shutdown,
1470};
1471
1472static int __init qcom_scm_init(void)
1473{
1474	return platform_driver_register(&qcom_scm_driver);
1475}
1476subsys_initcall(qcom_scm_init);
1477
1478MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1479MODULE_LICENSE("GPL v2");
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
   3 * Copyright (C) 2015 Linaro Ltd.
   4 */
   5#include <linux/platform_device.h>
   6#include <linux/init.h>
   7#include <linux/cpumask.h>
   8#include <linux/export.h>
   9#include <linux/dma-mapping.h>
 
  10#include <linux/module.h>
  11#include <linux/types.h>
  12#include <linux/qcom_scm.h>
  13#include <linux/of.h>
  14#include <linux/of_address.h>
  15#include <linux/of_platform.h>
  16#include <linux/clk.h>
  17#include <linux/reset-controller.h>
  18#include <linux/arm-smccc.h>
  19
  20#include "qcom_scm.h"
  21
  22static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
  23module_param(download_mode, bool, 0);
  24
  25#define SCM_HAS_CORE_CLK	BIT(0)
  26#define SCM_HAS_IFACE_CLK	BIT(1)
  27#define SCM_HAS_BUS_CLK		BIT(2)
  28
  29struct qcom_scm {
  30	struct device *dev;
  31	struct clk *core_clk;
  32	struct clk *iface_clk;
  33	struct clk *bus_clk;
 
  34	struct reset_controller_dev reset;
  35
 
 
 
 
  36	u64 dload_mode_addr;
  37};
  38
  39struct qcom_scm_current_perm_info {
  40	__le32 vmid;
  41	__le32 perm;
  42	__le64 ctx;
  43	__le32 ctx_size;
  44	__le32 unused;
  45};
  46
  47struct qcom_scm_mem_map_info {
  48	__le64 mem_addr;
  49	__le64 mem_size;
  50};
  51
  52#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
  53#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
  54#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
  55#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
  56
  57#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
  58#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
  59#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
  60#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
  61
  62struct qcom_scm_wb_entry {
  63	int flag;
  64	void *entry;
  65};
  66
  67static struct qcom_scm_wb_entry qcom_scm_wb[] = {
  68	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
  69	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
  70	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
  71	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
  72};
  73
  74static const char *qcom_scm_convention_names[] = {
  75	[SMC_CONVENTION_UNKNOWN] = "unknown",
  76	[SMC_CONVENTION_ARM_32] = "smc arm 32",
  77	[SMC_CONVENTION_ARM_64] = "smc arm 64",
  78	[SMC_CONVENTION_LEGACY] = "smc legacy",
  79};
  80
  81static struct qcom_scm *__scm;
  82
  83static int qcom_scm_clk_enable(void)
  84{
  85	int ret;
  86
  87	ret = clk_prepare_enable(__scm->core_clk);
  88	if (ret)
  89		goto bail;
  90
  91	ret = clk_prepare_enable(__scm->iface_clk);
  92	if (ret)
  93		goto disable_core;
  94
  95	ret = clk_prepare_enable(__scm->bus_clk);
  96	if (ret)
  97		goto disable_iface;
  98
  99	return 0;
 100
 101disable_iface:
 102	clk_disable_unprepare(__scm->iface_clk);
 103disable_core:
 104	clk_disable_unprepare(__scm->core_clk);
 105bail:
 106	return ret;
 107}
 108
 109static void qcom_scm_clk_disable(void)
 110{
 111	clk_disable_unprepare(__scm->core_clk);
 112	clk_disable_unprepare(__scm->iface_clk);
 113	clk_disable_unprepare(__scm->bus_clk);
 114}
 115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
 117static DEFINE_SPINLOCK(scm_query_lock);
 118
 119static enum qcom_scm_convention __get_convention(void)
 120{
 121	unsigned long flags;
 122	struct qcom_scm_desc desc = {
 123		.svc = QCOM_SCM_SVC_INFO,
 124		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 125		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
 126					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
 127			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
 128		.arginfo = QCOM_SCM_ARGS(1),
 129		.owner = ARM_SMCCC_OWNER_SIP,
 130	};
 131	struct qcom_scm_res res;
 132	enum qcom_scm_convention probed_convention;
 133	int ret;
 134	bool forced = false;
 135
 136	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
 137		return qcom_scm_convention;
 138
 139	/*
 140	 * Device isn't required as there is only one argument - no device
 141	 * needed to dma_map_single to secure world
 142	 */
 143	probed_convention = SMC_CONVENTION_ARM_64;
 144	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 145	if (!ret && res.result[0] == 1)
 146		goto found;
 147
 148	/*
 149	 * Some SC7180 firmwares didn't implement the
 150	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
 151	 * calling conventions on these firmwares. Luckily we don't make any
 152	 * early calls into the firmware on these SoCs so the device pointer
 153	 * will be valid here to check if the compatible matches.
 154	 */
 155	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
 156		forced = true;
 157		goto found;
 158	}
 159
 160	probed_convention = SMC_CONVENTION_ARM_32;
 161	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
 162	if (!ret && res.result[0] == 1)
 163		goto found;
 164
 165	probed_convention = SMC_CONVENTION_LEGACY;
 166found:
 167	spin_lock_irqsave(&scm_query_lock, flags);
 168	if (probed_convention != qcom_scm_convention) {
 169		qcom_scm_convention = probed_convention;
 170		pr_info("qcom_scm: convention: %s%s\n",
 171			qcom_scm_convention_names[qcom_scm_convention],
 172			forced ? " (forced)" : "");
 173	}
 174	spin_unlock_irqrestore(&scm_query_lock, flags);
 175
 176	return qcom_scm_convention;
 177}
 178
 179/**
 180 * qcom_scm_call() - Invoke a syscall in the secure world
 181 * @dev:	device
 182 * @svc_id:	service identifier
 183 * @cmd_id:	command identifier
 184 * @desc:	Descriptor structure containing arguments and return values
 
 185 *
 186 * Sends a command to the SCM and waits for the command to finish processing.
 187 * This should *only* be called in pre-emptible context.
 188 */
 189static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
 190			 struct qcom_scm_res *res)
 191{
 192	might_sleep();
 193	switch (__get_convention()) {
 194	case SMC_CONVENTION_ARM_32:
 195	case SMC_CONVENTION_ARM_64:
 196		return scm_smc_call(dev, desc, res, false);
 197	case SMC_CONVENTION_LEGACY:
 198		return scm_legacy_call(dev, desc, res);
 199	default:
 200		pr_err("Unknown current SCM calling convention.\n");
 201		return -EINVAL;
 202	}
 203}
 204
 205/**
 206 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 207 * @dev:	device
 208 * @svc_id:	service identifier
 209 * @cmd_id:	command identifier
 210 * @desc:	Descriptor structure containing arguments and return values
 211 * @res:	Structure containing results from SMC/HVC call
 212 *
 213 * Sends a command to the SCM and waits for the command to finish processing.
 214 * This can be called in atomic context.
 215 */
 216static int qcom_scm_call_atomic(struct device *dev,
 217				const struct qcom_scm_desc *desc,
 218				struct qcom_scm_res *res)
 219{
 220	switch (__get_convention()) {
 221	case SMC_CONVENTION_ARM_32:
 222	case SMC_CONVENTION_ARM_64:
 223		return scm_smc_call(dev, desc, res, true);
 224	case SMC_CONVENTION_LEGACY:
 225		return scm_legacy_call_atomic(dev, desc, res);
 226	default:
 227		pr_err("Unknown current SCM calling convention.\n");
 228		return -EINVAL;
 229	}
 230}
 231
 232static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
 233					 u32 cmd_id)
 234{
 235	int ret;
 236	struct qcom_scm_desc desc = {
 237		.svc = QCOM_SCM_SVC_INFO,
 238		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
 239		.owner = ARM_SMCCC_OWNER_SIP,
 240	};
 241	struct qcom_scm_res res;
 242
 243	desc.arginfo = QCOM_SCM_ARGS(1);
 244	switch (__get_convention()) {
 245	case SMC_CONVENTION_ARM_32:
 246	case SMC_CONVENTION_ARM_64:
 247		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
 248				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
 249		break;
 250	case SMC_CONVENTION_LEGACY:
 251		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
 252		break;
 253	default:
 254		pr_err("Unknown SMC convention being used\n");
 255		return -EINVAL;
 256	}
 257
 258	ret = qcom_scm_call(dev, &desc, &res);
 259
 260	return ret ? false : !!res.result[0];
 261}
 262
 263/**
 264 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 265 * @entry: Entry point function for the cpus
 266 * @cpus: The cpumask of cpus that will use the entry point
 267 *
 268 * Set the Linux entry point for the SCM to transfer control to when coming
 269 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 270 */
 271int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 272{
 273	int ret;
 274	int flags = 0;
 275	int cpu;
 
 276	struct qcom_scm_desc desc = {
 277		.svc = QCOM_SCM_SVC_BOOT,
 278		.cmd = QCOM_SCM_BOOT_SET_ADDR,
 279		.arginfo = QCOM_SCM_ARGS(2),
 
 280	};
 281
 282	/*
 283	 * Reassign only if we are switching from hotplug entry point
 284	 * to cpuidle entry point or vice versa.
 285	 */
 286	for_each_cpu(cpu, cpus) {
 287		if (entry == qcom_scm_wb[cpu].entry)
 288			continue;
 289		flags |= qcom_scm_wb[cpu].flag;
 290	}
 291
 292	/* No change in entry function */
 293	if (!flags)
 294		return 0;
 295
 296	desc.args[0] = flags;
 297	desc.args[1] = virt_to_phys(entry);
 298
 299	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 300	if (!ret) {
 301		for_each_cpu(cpu, cpus)
 302			qcom_scm_wb[cpu].entry = entry;
 303	}
 304
 305	return ret;
 306}
 307EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
 308
 309/**
 310 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 311 * @entry: Entry point function for the cpus
 312 * @cpus: The cpumask of cpus that will use the entry point
 313 *
 314 * Set the cold boot address of the cpus. Any cpu outside the supported
 315 * range would be removed from the cpu present mask.
 316 */
 317int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
 318{
 319	int flags = 0;
 320	int cpu;
 321	int scm_cb_flags[] = {
 322		QCOM_SCM_FLAG_COLDBOOT_CPU0,
 323		QCOM_SCM_FLAG_COLDBOOT_CPU1,
 324		QCOM_SCM_FLAG_COLDBOOT_CPU2,
 325		QCOM_SCM_FLAG_COLDBOOT_CPU3,
 326	};
 327	struct qcom_scm_desc desc = {
 328		.svc = QCOM_SCM_SVC_BOOT,
 329		.cmd = QCOM_SCM_BOOT_SET_ADDR,
 330		.arginfo = QCOM_SCM_ARGS(2),
 331		.owner = ARM_SMCCC_OWNER_SIP,
 
 
 
 
 
 
 
 332	};
 333
 334	if (!cpus || (cpus && cpumask_empty(cpus)))
 335		return -EINVAL;
 
 336
 337	for_each_cpu(cpu, cpus) {
 338		if (cpu < ARRAY_SIZE(scm_cb_flags))
 339			flags |= scm_cb_flags[cpu];
 340		else
 341			set_cpu_present(cpu, false);
 342	}
 343
 344	desc.args[0] = flags;
 345	desc.args[1] = virt_to_phys(entry);
 
 
 
 
 
 
 
 
 
 
 
 
 
 346
 347	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 
 
 
 
 
 
 
 
 
 348}
 349EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 350
 351/**
 352 * qcom_scm_cpu_power_down() - Power down the cpu
 353 * @flags - Flags to flush cache
 354 *
 355 * This is an end point to power down cpu. If there was a pending interrupt,
 356 * the control would return from this function, otherwise, the cpu jumps to the
 357 * warm boot entry point set for this cpu upon reset.
 358 */
 359void qcom_scm_cpu_power_down(u32 flags)
 360{
 361	struct qcom_scm_desc desc = {
 362		.svc = QCOM_SCM_SVC_BOOT,
 363		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
 364		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
 365		.arginfo = QCOM_SCM_ARGS(1),
 366		.owner = ARM_SMCCC_OWNER_SIP,
 367	};
 368
 369	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 370}
 371EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 372
 373int qcom_scm_set_remote_state(u32 state, u32 id)
 374{
 375	struct qcom_scm_desc desc = {
 376		.svc = QCOM_SCM_SVC_BOOT,
 377		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
 378		.arginfo = QCOM_SCM_ARGS(2),
 379		.args[0] = state,
 380		.args[1] = id,
 381		.owner = ARM_SMCCC_OWNER_SIP,
 382	};
 383	struct qcom_scm_res res;
 384	int ret;
 385
 386	ret = qcom_scm_call(__scm->dev, &desc, &res);
 387
 388	return ret ? : res.result[0];
 389}
 390EXPORT_SYMBOL(qcom_scm_set_remote_state);
 391
 392static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
 393{
 394	struct qcom_scm_desc desc = {
 395		.svc = QCOM_SCM_SVC_BOOT,
 396		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 397		.arginfo = QCOM_SCM_ARGS(2),
 398		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 399		.owner = ARM_SMCCC_OWNER_SIP,
 400	};
 401
 402	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
 403
 404	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 405}
 406
 407static void qcom_scm_set_download_mode(bool enable)
 408{
 409	bool avail;
 410	int ret = 0;
 411
 412	avail = __qcom_scm_is_call_available(__scm->dev,
 413					     QCOM_SCM_SVC_BOOT,
 414					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
 415	if (avail) {
 416		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
 417	} else if (__scm->dload_mode_addr) {
 418		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
 419				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
 420	} else {
 421		dev_err(__scm->dev,
 422			"No available mechanism for setting download mode\n");
 423	}
 424
 425	if (ret)
 426		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
 427}
 428
 429/**
 430 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 431 *			       state machine for a given peripheral, using the
 432 *			       metadata
 433 * @peripheral: peripheral id
 434 * @metadata:	pointer to memory containing ELF header, program header table
 435 *		and optional blob of data used for authenticating the metadata
 436 *		and the rest of the firmware
 437 * @size:	size of the metadata
 438 *
 439 * Returns 0 on success.
 440 */
 441int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
 442{
 443	dma_addr_t mdata_phys;
 444	void *mdata_buf;
 445	int ret;
 446	struct qcom_scm_desc desc = {
 447		.svc = QCOM_SCM_SVC_PIL,
 448		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
 449		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
 450		.args[0] = peripheral,
 451		.owner = ARM_SMCCC_OWNER_SIP,
 452	};
 453	struct qcom_scm_res res;
 454
 455	/*
 456	 * During the scm call, memory protection will be enabled for the
 457	 * metadata blob, so make sure it's physically contiguous, 4K aligned
 458	 * and non-cacheable to avoid XPU violations.
 459	 */
 460	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
 461				       GFP_KERNEL);
 462	if (!mdata_buf) {
 463		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
 464		return -ENOMEM;
 465	}
 466	memcpy(mdata_buf, metadata, size);
 467
 468	ret = qcom_scm_clk_enable();
 469	if (ret)
 470		goto free_metadata;
 471
 472	desc.args[1] = mdata_phys;
 473
 474	ret = qcom_scm_call(__scm->dev, &desc, &res);
 475
 476	qcom_scm_clk_disable();
 477
 478free_metadata:
 479	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 480
 481	return ret ? : res.result[0];
 482}
 483EXPORT_SYMBOL(qcom_scm_pas_init_image);
 484
 485/**
 486 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 487 *			      for firmware loading
 488 * @peripheral:	peripheral id
 489 * @addr:	start address of memory area to prepare
 490 * @size:	size of the memory area to prepare
 491 *
 492 * Returns 0 on success.
 493 */
 494int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 495{
 496	int ret;
 497	struct qcom_scm_desc desc = {
 498		.svc = QCOM_SCM_SVC_PIL,
 499		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
 500		.arginfo = QCOM_SCM_ARGS(3),
 501		.args[0] = peripheral,
 502		.args[1] = addr,
 503		.args[2] = size,
 504		.owner = ARM_SMCCC_OWNER_SIP,
 505	};
 506	struct qcom_scm_res res;
 507
 508	ret = qcom_scm_clk_enable();
 509	if (ret)
 510		return ret;
 511
 512	ret = qcom_scm_call(__scm->dev, &desc, &res);
 513	qcom_scm_clk_disable();
 514
 515	return ret ? : res.result[0];
 516}
 517EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 518
 519/**
 520 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 521 *				   and reset the remote processor
 522 * @peripheral:	peripheral id
 523 *
 524 * Returns 0 on success.
 525 */
 526int qcom_scm_pas_auth_and_reset(u32 peripheral)
 527{
 528	int ret;
 529	struct qcom_scm_desc desc = {
 530		.svc = QCOM_SCM_SVC_PIL,
 531		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
 532		.arginfo = QCOM_SCM_ARGS(1),
 533		.args[0] = peripheral,
 534		.owner = ARM_SMCCC_OWNER_SIP,
 535	};
 536	struct qcom_scm_res res;
 537
 538	ret = qcom_scm_clk_enable();
 539	if (ret)
 540		return ret;
 541
 542	ret = qcom_scm_call(__scm->dev, &desc, &res);
 543	qcom_scm_clk_disable();
 544
 545	return ret ? : res.result[0];
 546}
 547EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 548
 549/**
 550 * qcom_scm_pas_shutdown() - Shut down the remote processor
 551 * @peripheral: peripheral id
 552 *
 553 * Returns 0 on success.
 554 */
 555int qcom_scm_pas_shutdown(u32 peripheral)
 556{
 557	int ret;
 558	struct qcom_scm_desc desc = {
 559		.svc = QCOM_SCM_SVC_PIL,
 560		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
 561		.arginfo = QCOM_SCM_ARGS(1),
 562		.args[0] = peripheral,
 563		.owner = ARM_SMCCC_OWNER_SIP,
 564	};
 565	struct qcom_scm_res res;
 566
 567	ret = qcom_scm_clk_enable();
 568	if (ret)
 569		return ret;
 570
 571	ret = qcom_scm_call(__scm->dev, &desc, &res);
 572
 573	qcom_scm_clk_disable();
 574
 575	return ret ? : res.result[0];
 576}
 577EXPORT_SYMBOL(qcom_scm_pas_shutdown);
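
/*
 * Sketch of the PAS call sequence a remoteproc driver runs (hypothetical
 * helper; real drivers such as qcom_q6v5_pas add metadata parsing and
 * richer error handling). @pas_id, @mem_phys and @mem_size would come
 * from DT and the firmware header.
 */
static int example_pas_boot(u32 pas_id, const void *metadata, size_t meta_size,
			    phys_addr_t mem_phys, phys_addr_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(pas_id))
		return -EOPNOTSUPP;

	ret = qcom_scm_pas_init_image(pas_id, metadata, meta_size);
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (ret)
		goto err_shutdown;

	/* ...copy the firmware segments into the region at mem_phys... */

	ret = qcom_scm_pas_auth_and_reset(pas_id);
	if (ret)
		goto err_shutdown;

	return 0;

err_shutdown:
	qcom_scm_pas_shutdown(pas_id);
	return ret;
}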
 578
 579/**
 580 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 581 *			      available for the given peripheral
 582 * @peripheral:	peripheral id
 583 *
 584 * Returns true if PAS is supported for this peripheral, otherwise false.
 585 */
 586bool qcom_scm_pas_supported(u32 peripheral)
 587{
 588	int ret;
 589	struct qcom_scm_desc desc = {
 590		.svc = QCOM_SCM_SVC_PIL,
 591		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
 592		.arginfo = QCOM_SCM_ARGS(1),
 593		.args[0] = peripheral,
 594		.owner = ARM_SMCCC_OWNER_SIP,
 595	};
 596	struct qcom_scm_res res;
 597
 598	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
 599					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
 600		return false;
 601
 602	ret = qcom_scm_call(__scm->dev, &desc, &res);
 603
 604	return ret ? false : !!res.result[0];
 605}
 606EXPORT_SYMBOL(qcom_scm_pas_supported);
 607
 608static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
 609{
 610	struct qcom_scm_desc desc = {
 611		.svc = QCOM_SCM_SVC_PIL,
 612		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
 613		.arginfo = QCOM_SCM_ARGS(2),
 614		.args[0] = reset,
 615		.args[1] = 0,
 616		.owner = ARM_SMCCC_OWNER_SIP,
 617	};
 618	struct qcom_scm_res res;
 619	int ret;
 620
 621	ret = qcom_scm_call(__scm->dev, &desc, &res);
 622
 623	return ret ? : res.result[0];
 624}
 625
 626static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 627				     unsigned long idx)
 628{
 629	if (idx != 0)
 630		return -EINVAL;
 631
 632	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
 633}
 634
 635static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
 636				       unsigned long idx)
 637{
 638	if (idx != 0)
 639		return -EINVAL;
 640
 641	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
 642}
 643
 644static const struct reset_control_ops qcom_scm_pas_reset_ops = {
 645	.assert = qcom_scm_pas_reset_assert,
 646	.deassert = qcom_scm_pas_reset_deassert,
 647};
 648
 649int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 650{
 651	struct qcom_scm_desc desc = {
 652		.svc = QCOM_SCM_SVC_IO,
 653		.cmd = QCOM_SCM_IO_READ,
 654		.arginfo = QCOM_SCM_ARGS(1),
 655		.args[0] = addr,
 656		.owner = ARM_SMCCC_OWNER_SIP,
 657	};
 658	struct qcom_scm_res res;
 659	int ret;
 660
 662	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
 663	if (ret >= 0)
 664		*val = res.result[0];
 665
 666	return ret < 0 ? ret : 0;
 667}
 668EXPORT_SYMBOL(qcom_scm_io_readl);
 669
 670int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 671{
 672	struct qcom_scm_desc desc = {
 673		.svc = QCOM_SCM_SVC_IO,
 674		.cmd = QCOM_SCM_IO_WRITE,
 675		.arginfo = QCOM_SCM_ARGS(2),
 676		.args[0] = addr,
 677		.args[1] = val,
 678		.owner = ARM_SMCCC_OWNER_SIP,
 679	};
 680
 681	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 682}
 683EXPORT_SYMBOL(qcom_scm_io_writel);
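
/*
 * Usage sketch: read-modify-write of a secure-only register through the
 * SCM I/O helpers. The register address and bit below are placeholders.
 */
#define EXAMPLE_SEC_REG		0x01fd3000	/* hypothetical address */
#define EXAMPLE_SEC_ENABLE	BIT(0)

static int example_set_secure_bit(void)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(EXAMPLE_SEC_REG, &val);
	if (ret)
		return ret;

	return qcom_scm_io_writel(EXAMPLE_SEC_REG, val | EXAMPLE_SEC_ENABLE);
}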
 684
 685/**
 686 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 687 * supports restore security config interface.
 688 *
 689 * Return true if restore-cfg interface is supported, false if not.
 690 */
 691bool qcom_scm_restore_sec_cfg_available(void)
 692{
 693	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
 694					    QCOM_SCM_MP_RESTORE_SEC_CFG);
 695}
 696EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
 697
 698int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
 699{
 700	struct qcom_scm_desc desc = {
 701		.svc = QCOM_SCM_SVC_MP,
 702		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
 703		.arginfo = QCOM_SCM_ARGS(2),
 704		.args[0] = device_id,
 705		.args[1] = spare,
 706		.owner = ARM_SMCCC_OWNER_SIP,
 707	};
 708	struct qcom_scm_res res;
 709	int ret;
 710
 711	ret = qcom_scm_call(__scm->dev, &desc, &res);
 712
 713	return ret ? : res.result[0];
 714}
 715EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
 716
 717int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
 718{
 719	struct qcom_scm_desc desc = {
 720		.svc = QCOM_SCM_SVC_MP,
 721		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
 722		.arginfo = QCOM_SCM_ARGS(1),
 723		.args[0] = spare,
 724		.owner = ARM_SMCCC_OWNER_SIP,
 725	};
 726	struct qcom_scm_res res;
 727	int ret;
 728
 729	ret = qcom_scm_call(__scm->dev, &desc, &res);
 730
 731	if (!ret && size)
 732		*size = res.result[0];
 733
 734	return ret ? : res.result[1];
 735}
 736EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
 737
 738int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
 739{
 740	struct qcom_scm_desc desc = {
 741		.svc = QCOM_SCM_SVC_MP,
 742		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
 743		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
 744					 QCOM_SCM_VAL),
 745		.args[0] = addr,
 746		.args[1] = size,
 747		.args[2] = spare,
 748		.owner = ARM_SMCCC_OWNER_SIP,
 749	};
 750	int ret;
 751
 757
 758	ret = qcom_scm_call(__scm->dev, &desc, NULL);
 759
 760	/* the page table has already been initialized, ignore the error */
 761	if (ret == -EPERM)
 762		ret = 0;
 763
 764	return ret;
 765}
 766EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
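
/*
 * Sketch of the size-then-init handshake an IOMMU driver performs
 * (cf. qcom_iommu); the allocation is simplified to dma_alloc_coherent()
 * and the "spare" argument is left at 0.
 */
static int example_secure_ptbl_setup(struct device *dev)
{
	dma_addr_t paddr;
	size_t psize = 0;
	void *vaddr;
	int ret;

	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
	if (ret)
		return ret;

	vaddr = dma_alloc_coherent(dev, psize, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	return qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
}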
 767
 768int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
 769				   u32 cp_nonpixel_start,
 770				   u32 cp_nonpixel_size)
 771{
 772	int ret;
 773	struct qcom_scm_desc desc = {
 774		.svc = QCOM_SCM_SVC_MP,
 775		.cmd = QCOM_SCM_MP_VIDEO_VAR,
 776		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
 777					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 778		.args[0] = cp_start,
 779		.args[1] = cp_size,
 780		.args[2] = cp_nonpixel_start,
 781		.args[3] = cp_nonpixel_size,
 782		.owner = ARM_SMCCC_OWNER_SIP,
 783	};
 784	struct qcom_scm_res res;
 785
 786	ret = qcom_scm_call(__scm->dev, &desc, &res);
 787
 788	return ret ? : res.result[0];
 789}
 790EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
 791
 792static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
 793				 size_t mem_sz, phys_addr_t src, size_t src_sz,
 794				 phys_addr_t dest, size_t dest_sz)
 795{
 796	int ret;
 797	struct qcom_scm_desc desc = {
 798		.svc = QCOM_SCM_SVC_MP,
 799		.cmd = QCOM_SCM_MP_ASSIGN,
 800		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
 801					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
 802					 QCOM_SCM_VAL, QCOM_SCM_VAL),
 803		.args[0] = mem_region,
 804		.args[1] = mem_sz,
 805		.args[2] = src,
 806		.args[3] = src_sz,
 807		.args[4] = dest,
 808		.args[5] = dest_sz,
 809		.args[6] = 0,
 810		.owner = ARM_SMCCC_OWNER_SIP,
 811	};
 812	struct qcom_scm_res res;
 813
 814	ret = qcom_scm_call(dev, &desc, &res);
 815
 816	return ret ? : res.result[0];
 817}
 818
 819/**
 820 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 821 * @mem_addr: mem region whose ownership needs to be reassigned
 822 * @mem_sz:   size of the region.
 823 * @srcvm:    vmid bitmask of the current set of owners; each set bit
 824 *            indicates a unique owner
 825 * @newvm:    array having new owners and corresponding permission
 826 *            flags
 827 * @dest_cnt: number of owners in next set.
 828 *
 829 * Return negative errno on failure or 0 on success with @srcvm updated.
 830 */
 831int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
 832			unsigned int *srcvm,
 833			const struct qcom_scm_vmperm *newvm,
 834			unsigned int dest_cnt)
 835{
 836	struct qcom_scm_current_perm_info *destvm;
 837	struct qcom_scm_mem_map_info *mem_to_map;
 838	phys_addr_t mem_to_map_phys;
 839	phys_addr_t dest_phys;
 840	dma_addr_t ptr_phys;
 841	size_t mem_to_map_sz;
 842	size_t dest_sz;
 843	size_t src_sz;
 844	size_t ptr_sz;
 845	int next_vm;
 846	__le32 *src;
 847	void *ptr;
 848	int ret, i, b;
 849	unsigned long srcvm_bits = *srcvm;
 850
 851	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
 852	mem_to_map_sz = sizeof(*mem_to_map);
 853	dest_sz = dest_cnt * sizeof(*destvm);
 854	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
 855			ALIGN(dest_sz, SZ_64);
 856
 857	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
 858	if (!ptr)
 859		return -ENOMEM;
 860
 861	/* Fill source vmid detail */
 862	src = ptr;
 863	i = 0;
 864	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
 865		src[i++] = cpu_to_le32(b);
 866
 867	/* Fill details of mem buff to map */
 868	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
 869	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
 870	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
 871	mem_to_map->mem_size = cpu_to_le64(mem_sz);
 872
 873	next_vm = 0;
 874	/* Fill details of the next set of vmids */
 875	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 876	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
 877	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
 878		destvm->vmid = cpu_to_le32(newvm->vmid);
 879		destvm->perm = cpu_to_le32(newvm->perm);
 880		destvm->ctx = 0;
 881		destvm->ctx_size = 0;
 882		next_vm |= BIT(newvm->vmid);
 883	}
 884
 885	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
 886				    ptr_phys, src_sz, dest_phys, dest_sz);
 887	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
 888	if (ret) {
 889		dev_err(__scm->dev,
 890			"Assign memory protection call failed %d\n", ret);
 891		return -EINVAL;
 892	}
 893
 894	*srcvm = next_vm;
 895	return 0;
 896}
 897EXPORT_SYMBOL(qcom_scm_assign_mem);
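
/*
 * Usage sketch: hand a carveout from HLOS to the modem VM and record the
 * new owner mask so the region can be reclaimed later. The vmid/perm
 * macros come from <linux/qcom_scm.h>; the region itself is a
 * placeholder supplied by the caller.
 */
static int example_assign_to_mss(phys_addr_t addr, size_t size,
				 unsigned int *owner_mask)
{
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};

	*owner_mask = BIT(QCOM_SCM_VMID_HLOS);

	/* on success *owner_mask is updated to the new owner bitmask */
	return qcom_scm_assign_mem(addr, size, owner_mask, &next, 1);
}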
 898
 899/**
 900 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 901 */
 902bool qcom_scm_ocmem_lock_available(void)
 903{
 904	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
 905					    QCOM_SCM_OCMEM_LOCK_CMD);
 906}
 907EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
 908
 909/**
 910 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 911 * region to the specified initiator
 912 *
 913 * @id:     tz initiator id
 914 * @offset: OCMEM offset
 915 * @size:   OCMEM size
 916 * @mode:   access mode (WIDE/NARROW)
 917 */
 918int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
 919			u32 mode)
 920{
 921	struct qcom_scm_desc desc = {
 922		.svc = QCOM_SCM_SVC_OCMEM,
 923		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
 924		.args[0] = id,
 925		.args[1] = offset,
 926		.args[2] = size,
 927		.args[3] = mode,
 928		.arginfo = QCOM_SCM_ARGS(4),
 929	};
 930
 931	return qcom_scm_call(__scm->dev, &desc, NULL);
 932}
 933EXPORT_SYMBOL(qcom_scm_ocmem_lock);
 934
 935/**
 936 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 937 * region from the specified initiator
 938 *
 939 * @id:     tz initiator id
 940 * @offset: OCMEM offset
 941 * @size:   OCMEM size
 942 */
 943int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
 944{
 945	struct qcom_scm_desc desc = {
 946		.svc = QCOM_SCM_SVC_OCMEM,
 947		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
 948		.args[0] = id,
 949		.args[1] = offset,
 950		.args[2] = size,
 951		.arginfo = QCOM_SCM_ARGS(3),
 952	};
 953
 954	return qcom_scm_call(__scm->dev, &desc, NULL);
 955}
 956EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
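
/*
 * Usage sketch: grant the GPU an OCMEM range and release it again. The
 * client id comes from enum qcom_scm_ocmem_client; mode 0 is assumed
 * here to mean "narrow" and is a placeholder.
 */
static int example_ocmem_use(u32 offset, u32 size)
{
	int ret;

	if (!qcom_scm_ocmem_lock_available())
		return -ENODEV;

	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size, 0);
	if (ret)
		return ret;

	/* ...the initiator may now access the region... */

	return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
}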
 957
 958/**
 959 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 960 *
 961 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 962 *	   qcom_scm_ice_set_key() are available.
 963 */
 964bool qcom_scm_ice_available(void)
 965{
 966	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
 967					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
 968		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
 969					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
 970}
 971EXPORT_SYMBOL(qcom_scm_ice_available);
 972
 973/**
 974 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 975 * @index: the keyslot to invalidate
 976 *
 977 * The UFSHCI and eMMC standards define a standard way to do this, but it
 978 * doesn't work on these SoCs; only this SCM call does.
 979 *
 980 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 981 * call doesn't specify which ICE instance the keyslot belongs to.
 982 *
 983 * Return: 0 on success; -errno on failure.
 984 */
 985int qcom_scm_ice_invalidate_key(u32 index)
 986{
 987	struct qcom_scm_desc desc = {
 988		.svc = QCOM_SCM_SVC_ES,
 989		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
 990		.arginfo = QCOM_SCM_ARGS(1),
 991		.args[0] = index,
 992		.owner = ARM_SMCCC_OWNER_SIP,
 993	};
 994
 995	return qcom_scm_call(__scm->dev, &desc, NULL);
 996}
 997EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
 998
 999/**
1000 * qcom_scm_ice_set_key() - Set an inline encryption key
1001 * @index: the keyslot into which to set the key
1002 * @key: the key to program
1003 * @key_size: the size of the key in bytes
1004 * @cipher: the encryption algorithm the key is for
1005 * @data_unit_size: the encryption data unit size, i.e. the size of each
1006 *		    individual plaintext and ciphertext.  Given in 512-byte
1007 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1008 *
1009 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1010 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1011 *
1012 * The UFSHCI and eMMC standards define a standard way to do this, but it
1013 * doesn't work on these SoCs; only this SCM call does.
1014 *
1015 * It is assumed that the SoC has only one ICE instance being used, as this SCM
1016 * call doesn't specify which ICE instance the keyslot belongs to.
1017 *
1018 * Return: 0 on success; -errno on failure.
1019 */
1020int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1021			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1022{
1023	struct qcom_scm_desc desc = {
1024		.svc = QCOM_SCM_SVC_ES,
1025		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1026		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1027					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1028					 QCOM_SCM_VAL),
1029		.args[0] = index,
1030		.args[2] = key_size,
1031		.args[3] = cipher,
1032		.args[4] = data_unit_size,
1033		.owner = ARM_SMCCC_OWNER_SIP,
1034	};
1035	void *keybuf;
1036	dma_addr_t key_phys;
1037	int ret;
1038
1039	/*
1040	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1041	 * physical address that's been properly flushed.  The sanctioned way to
1042	 * do this is by using the DMA API.  But as is best practice for crypto
1043	 * keys, we also must wipe the key after use.  This makes kmemdup() +
1044	 * dma_map_single() not clearly correct, since the DMA API can use
1045	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
1046	 * keys is normally rare and thus not performance-critical.
1047	 */
1048
1049	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1050				    GFP_KERNEL);
1051	if (!keybuf)
1052		return -ENOMEM;
1053	memcpy(keybuf, key, key_size);
1054	desc.args[1] = key_phys;
1055
1056	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1057
1058	memzero_explicit(keybuf, key_size);
1059
1060	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1061	return ret;
1062}
1063EXPORT_SYMBOL(qcom_scm_ice_set_key);
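
/*
 * Sketch of a storage driver's keyslot handling (cf. ufs-qcom): program
 * an AES-256-XTS key for 4096-byte data units, do the I/O, then evict
 * the key. The 64-byte raw key is caller-provided.
 */
static int example_ice_program_key(u32 slot, const u8 raw_key[64])
{
	int ret;

	if (!qcom_scm_ice_available())
		return -ENODEV;

	/* data_unit_size is in 512-byte units: 8 * 512 = 4096 bytes */
	ret = qcom_scm_ice_set_key(slot, raw_key, 64,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
	if (ret)
		return ret;

	/* ...issue inline-encrypted I/O against this keyslot... */

	return qcom_scm_ice_invalidate_key(slot);
}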
1064
1065/**
1066 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1067 *
1068 * Return true if HDCP is supported, false if not.
1069 */
1070bool qcom_scm_hdcp_available(void)
1071{
1072	bool avail;
1073	int ret = qcom_scm_clk_enable();
1074
1075	if (ret)
1076		return false;
1077
1078	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1079						QCOM_SCM_HDCP_INVOKE);
1080
1081	qcom_scm_clk_disable();
1082
1083	return avail;
1084}
1085EXPORT_SYMBOL(qcom_scm_hdcp_available);
1086
1087/**
1088 * qcom_scm_hdcp_req() - Send HDCP request.
1089 * @req: HDCP request array
1090 * @req_cnt: HDCP request array count
1091 * @resp: response buffer passed to SCM
1092 *
1093 * Write HDCP register(s) through SCM.
1094 */
1095int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1096{
1097	int ret;
1098	struct qcom_scm_desc desc = {
1099		.svc = QCOM_SCM_SVC_HDCP,
1100		.cmd = QCOM_SCM_HDCP_INVOKE,
1101		.arginfo = QCOM_SCM_ARGS(10),
1102		.args = {
1103			req[0].addr,
1104			req[0].val,
1105			req[1].addr,
1106			req[1].val,
1107			req[2].addr,
1108			req[2].val,
1109			req[3].addr,
1110			req[3].val,
1111			req[4].addr,
1112			req[4].val
1113		},
1114		.owner = ARM_SMCCC_OWNER_SIP,
1115	};
1116	struct qcom_scm_res res;
1117
1118	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1119		return -ERANGE;
1120
1121	ret = qcom_scm_clk_enable();
1122	if (ret)
1123		return ret;
1124
1125	ret = qcom_scm_call(__scm->dev, &desc, &res);
1126	if (!ret)
1127		*resp = res.result[0];
1128	qcom_scm_clk_disable();
1129
1130	return ret;
1131}
1132EXPORT_SYMBOL(qcom_scm_hdcp_req);
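
/*
 * Usage sketch: write two HDCP registers in one SCM invocation. The
 * addresses and values are placeholders; the fixed-size request array
 * is zero-padded because the call always marshals five address/value
 * pairs.
 */
static int example_hdcp_write(void)
{
	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
		{ .addr = 0x1000, .val = 0x1 },	/* hypothetical register */
		{ .addr = 0x1004, .val = 0x0 },	/* hypothetical register */
	};
	u32 resp = 0;
	int ret;

	if (!qcom_scm_hdcp_available())
		return -ENODEV;

	ret = qcom_scm_hdcp_req(req, 2, &resp);
	if (ret)
		return ret;

	/* how @resp is interpreted is device-specific */
	return 0;
}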
1133
1134int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1135{
1136	struct qcom_scm_desc desc = {
1137		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1138		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1139		.arginfo = QCOM_SCM_ARGS(2),
1140		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1141		.args[1] = en,
1142		.owner = ARM_SMCCC_OWNER_SIP,
1143	};
1144
1146	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1147}
1148EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1149
1150static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1151{
1152	struct device_node *tcsr;
1153	struct device_node *np = dev->of_node;
1154	struct resource res;
1155	u32 offset;
1156	int ret;
1157
1158	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1159	if (!tcsr)
1160		return 0;
1161
1162	ret = of_address_to_resource(tcsr, 0, &res);
1163	of_node_put(tcsr);
1164	if (ret)
1165		return ret;
1166
1167	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1168	if (ret < 0)
1169		return ret;
1170
1171	*addr = res.start + offset;
1172
1173	return 0;
1174}
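
/*
 * The DT wiring this parser expects, as a sketch (the offset value is a
 * placeholder): a phandle to the TCSR node followed by the register
 * offset of the download-mode cookie.
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */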
1175
1176/**
1177 * qcom_scm_is_available() - Checks if SCM is available
1178 */
1179bool qcom_scm_is_available(void)
1180{
1181	return !!__scm;
1182}
1183EXPORT_SYMBOL(qcom_scm_is_available);
1184
1185static int qcom_scm_probe(struct platform_device *pdev)
1186{
1187	struct qcom_scm *scm;
1188	unsigned long clks;
1189	int ret;
1190
1191	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1192	if (!scm)
1193		return -ENOMEM;
1194
1195	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1196	if (ret < 0)
1197		return ret;
1198
1199	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
1200
1201	scm->core_clk = devm_clk_get(&pdev->dev, "core");
1202	if (IS_ERR(scm->core_clk)) {
1203		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
1204			return PTR_ERR(scm->core_clk);
1205
1206		if (clks & SCM_HAS_CORE_CLK) {
1207			dev_err(&pdev->dev, "failed to acquire core clk\n");
1208			return PTR_ERR(scm->core_clk);
1209		}
1210
1211		scm->core_clk = NULL;
1212	}
1213
1214	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
1215	if (IS_ERR(scm->iface_clk)) {
1216		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
1217			return PTR_ERR(scm->iface_clk);
1218
1219		if (clks & SCM_HAS_IFACE_CLK) {
1220			dev_err(&pdev->dev, "failed to acquire iface clk\n");
1221			return PTR_ERR(scm->iface_clk);
1222		}
1223
1224		scm->iface_clk = NULL;
1225	}
1226
1227	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
1228	if (IS_ERR(scm->bus_clk)) {
1229		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
1230			return PTR_ERR(scm->bus_clk);
1231
1232		if (clks & SCM_HAS_BUS_CLK) {
1233			dev_err(&pdev->dev, "failed to acquire bus clk\n");
1234			return PTR_ERR(scm->bus_clk);
1235		}
1236
1237		scm->bus_clk = NULL;
1238	}
1239
1240	scm->reset.ops = &qcom_scm_pas_reset_ops;
1241	scm->reset.nr_resets = 1;
1242	scm->reset.of_node = pdev->dev.of_node;
1243	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1244	if (ret)
1245		return ret;
1246
1247	/* vote for max clk rate for highest performance */
1248	ret = clk_set_rate(scm->core_clk, INT_MAX);
1249	if (ret)
1250		return ret;
1251
1252	__scm = scm;
1253	__scm->dev = &pdev->dev;
1254
1255	__get_convention();
1256
1257	/*
1258	 * If requested, enable "download mode": from this point on a warm boot
1259	 * will cause the boot stages to enter download mode, unless
1260	 * disabled below by a clean shutdown/reboot.
1261	 */
1262	if (download_mode)
1263		qcom_scm_set_download_mode(true);
1264
1265	return 0;
1266}
1267
1268static void qcom_scm_shutdown(struct platform_device *pdev)
1269{
1270	/* Clean shutdown, disable download mode to allow normal restart */
1271	if (download_mode)
1272		qcom_scm_set_download_mode(false);
1273}
1274
1275static const struct of_device_id qcom_scm_dt_match[] = {
1276	{ .compatible = "qcom,scm-apq8064",
1277	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1278	},
1279	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
1280							     SCM_HAS_IFACE_CLK |
1281							     SCM_HAS_BUS_CLK)
1282	},
1283	{ .compatible = "qcom,scm-ipq4019" },
1284	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
1285							     SCM_HAS_IFACE_CLK |
1286							     SCM_HAS_BUS_CLK) },
1287	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
1288	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
1289	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
1290							     SCM_HAS_IFACE_CLK |
1291							     SCM_HAS_BUS_CLK)
1292	},
1293	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
1294							     SCM_HAS_IFACE_CLK |
1295							     SCM_HAS_BUS_CLK)
1296	},
1297	{ .compatible = "qcom,scm-msm8994" },
1298	{ .compatible = "qcom,scm-msm8996" },
1299	{ .compatible = "qcom,scm" },
1300	{}
1301};
1302
1303static struct platform_driver qcom_scm_driver = {
1304	.driver = {
1305		.name	= "qcom_scm",
1306		.of_match_table = qcom_scm_dt_match,
1307		.suppress_bind_attrs = true,
1308	},
1309	.probe = qcom_scm_probe,
1310	.shutdown = qcom_scm_shutdown,
1311};
1312
1313static int __init qcom_scm_init(void)
1314{
1315	return platform_driver_register(&qcom_scm_driver);
1316}
1317subsys_initcall(qcom_scm_init);