// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char *qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					u32 cmd_id);

enum qcom_scm_convention qcom_scm_convention;
static bool has_queried __read_mostly;
static DEFINE_SPINLOCK(query_lock);

static void __query_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	spin_lock_irqsave(&query_lock, flags);
	if (has_queried)
		goto out;

	qcom_scm_convention = SMC_CONVENTION_ARM_64;
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	ret = scm_smc_call(NULL, &desc, &res, true);
	if (!ret && res.result[0] == 1)
		goto out;

	qcom_scm_convention = SMC_CONVENTION_ARM_32;
	ret = scm_smc_call(NULL, &desc, &res, true);
	if (!ret && res.result[0] == 1)
		goto out;

	qcom_scm_convention = SMC_CONVENTION_LEGACY;
out:
	has_queried = true;
	spin_unlock_irqrestore(&query_lock, flags);
	pr_info("qcom_scm: convention: %s\n",
		qcom_scm_convention_names[qcom_scm_convention]);
}

static inline enum qcom_scm_convention __get_convention(void)
{
	if (unlikely(!has_queried))
		__query_convention();
	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return -EINVAL;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
	};

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
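
/*
 * Example (illustrative sketch, not part of this driver): a cpuidle or SMP
 * back-end would typically register the resume stub before allowing
 * power-collapse states; "cpu_resume_arm" is assumed to be the platform's
 * warm-boot entry point:
 *
 *	qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
 */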

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
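
/*
 * Example (illustrative sketch): an SMP ops implementation bringing up
 * secondary cores might point all present CPUs at the common secondary
 * startup trampoline; "secondary_startup_arm" is assumed here:
 *
 *	qcom_scm_set_cold_boot_addr(secondary_startup_arm, cpu_present_mask);
 */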

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
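
/*
 * Example (illustrative): a cpuidle enter hook terminating the power
 * collapse sequence while keeping the L2 cache on; the flag constant is
 * from <linux/qcom_scm.h>:
 *
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 */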

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
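
/*
 * Example (illustrative, hypothetical values): a video or GPU driver asking
 * the secure world to move its remote core into a given state; "state" and
 * "core_id" are whatever the firmware contract defines for that client:
 *
 *	ret = qcom_scm_set_remote_state(state, core_id);
 */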

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
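
/*
 * Example (illustrative sketch): a remoteproc driver booting a peripheral
 * through PAS typically chains the calls below; "pas_id", "mem_phys" and
 * "mem_size" are the caller's values and error handling is trimmed:
 *
 *	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_len);
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... copy the authenticated firmware segments into the region ...
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 */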

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PIL_PAS_IS_SUPPORTED);
	if (ret <= 0)
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
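
/*
 * Example (illustrative): a read-modify-write of a secure-only register,
 * assuming "reg_phys" is a physical address the firmware permits access to:
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(reg_phys, &val))
 *		qcom_scm_io_writel(reg_phys, val | BIT(0));
 */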

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config
 *					  interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
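
/*
 * Example (illustrative sketch): an IOMMU driver pairs the two page-table
 * calls, first asking the secure world how much memory it needs and then
 * handing over the allocation (allocation details elided):
 *
 *	size_t psize;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	... allocate psize bytes of physically contiguous memory at paddr ...
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */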

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   the flag indicates a unique owner
 * @newvm: array having the new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
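
/*
 * Example (illustrative): handing a carveout from HLOS to the modem VM and
 * recording the new owner set in "src"; the vmid/perm constants are from
 * <linux/qcom_scm.h>:
 *
 *	struct qcom_scm_vmperm perm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	unsigned int src = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &src, &perm, 1);
 */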

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
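
/*
 * Example (illustrative): granting the graphics initiator access to an
 * OCMEM range and releasing it again ("offset", "len" and "mode" are the
 * caller's values):
 *
 *	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, len,
 *				  mode);
 *	...
 *	ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, len);
 */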

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
 * these SoCs; only this SCM call does.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS I/O requests inline.
 *
 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
 * these SoCs; only this SCM call does.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
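
/*
 * Example (illustrative): programming a 64-byte AES-256-XTS key into
 * keyslot "slot" for 4096-byte data units (8 * 512), then invalidating it;
 * the cipher constant is from <linux/qcom_scm.h>:
 *
 *	err = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */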

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
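
/*
 * Example (illustrative): writing a single HDCP register through the secure
 * world; unused slots in the request array stay zero-initialized:
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = reg_phys, .val = value },
 *	};
 *	u32 resp;
 *
 *	ret = qcom_scm_hdcp_req(req, 1, &resp);
 */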

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__query_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2015 Linaro Ltd.
4 */
5#include <linux/platform_device.h>
6#include <linux/init.h>
7#include <linux/cpumask.h>
8#include <linux/export.h>
9#include <linux/dma-mapping.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/qcom_scm.h>
13#include <linux/of.h>
14#include <linux/of_address.h>
15#include <linux/of_platform.h>
16#include <linux/clk.h>
17#include <linux/reset-controller.h>
18#include <linux/arm-smccc.h>
19
20#include "qcom_scm.h"
21
22static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
23module_param(download_mode, bool, 0);
24
25#define SCM_HAS_CORE_CLK BIT(0)
26#define SCM_HAS_IFACE_CLK BIT(1)
27#define SCM_HAS_BUS_CLK BIT(2)
28
29struct qcom_scm {
30 struct device *dev;
31 struct clk *core_clk;
32 struct clk *iface_clk;
33 struct clk *bus_clk;
34 struct reset_controller_dev reset;
35
36 u64 dload_mode_addr;
37};
38
39struct qcom_scm_current_perm_info {
40 __le32 vmid;
41 __le32 perm;
42 __le64 ctx;
43 __le32 ctx_size;
44 __le32 unused;
45};
46
47struct qcom_scm_mem_map_info {
48 __le64 mem_addr;
49 __le64 mem_size;
50};
51
52#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
53#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
54#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
55#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
56
57#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
58#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
59#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
60#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
61
62struct qcom_scm_wb_entry {
63 int flag;
64 void *entry;
65};
66
67static struct qcom_scm_wb_entry qcom_scm_wb[] = {
68 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
69 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
70 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
71 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
72};
73
74static const char *qcom_scm_convention_names[] = {
75 [SMC_CONVENTION_UNKNOWN] = "unknown",
76 [SMC_CONVENTION_ARM_32] = "smc arm 32",
77 [SMC_CONVENTION_ARM_64] = "smc arm 64",
78 [SMC_CONVENTION_LEGACY] = "smc legacy",
79};
80
81static struct qcom_scm *__scm;
82
83static int qcom_scm_clk_enable(void)
84{
85 int ret;
86
87 ret = clk_prepare_enable(__scm->core_clk);
88 if (ret)
89 goto bail;
90
91 ret = clk_prepare_enable(__scm->iface_clk);
92 if (ret)
93 goto disable_core;
94
95 ret = clk_prepare_enable(__scm->bus_clk);
96 if (ret)
97 goto disable_iface;
98
99 return 0;
100
101disable_iface:
102 clk_disable_unprepare(__scm->iface_clk);
103disable_core:
104 clk_disable_unprepare(__scm->core_clk);
105bail:
106 return ret;
107}
108
109static void qcom_scm_clk_disable(void)
110{
111 clk_disable_unprepare(__scm->core_clk);
112 clk_disable_unprepare(__scm->iface_clk);
113 clk_disable_unprepare(__scm->bus_clk);
114}
115
116enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
117static DEFINE_SPINLOCK(scm_query_lock);
118
119static enum qcom_scm_convention __get_convention(void)
120{
121 unsigned long flags;
122 struct qcom_scm_desc desc = {
123 .svc = QCOM_SCM_SVC_INFO,
124 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
125 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
126 QCOM_SCM_INFO_IS_CALL_AVAIL) |
127 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
128 .arginfo = QCOM_SCM_ARGS(1),
129 .owner = ARM_SMCCC_OWNER_SIP,
130 };
131 struct qcom_scm_res res;
132 enum qcom_scm_convention probed_convention;
133 int ret;
134 bool forced = false;
135
136 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
137 return qcom_scm_convention;
138
139 /*
140 * Device isn't required as there is only one argument - no device
141 * needed to dma_map_single to secure world
142 */
143 probed_convention = SMC_CONVENTION_ARM_64;
144 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
145 if (!ret && res.result[0] == 1)
146 goto found;
147
148 /*
149 * Some SC7180 firmwares didn't implement the
150 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
151 * calling conventions on these firmwares. Luckily we don't make any
152 * early calls into the firmware on these SoCs so the device pointer
153 * will be valid here to check if the compatible matches.
154 */
155 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
156 forced = true;
157 goto found;
158 }
159
160 probed_convention = SMC_CONVENTION_ARM_32;
161 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
162 if (!ret && res.result[0] == 1)
163 goto found;
164
165 probed_convention = SMC_CONVENTION_LEGACY;
166found:
167 spin_lock_irqsave(&scm_query_lock, flags);
168 if (probed_convention != qcom_scm_convention) {
169 qcom_scm_convention = probed_convention;
170 pr_info("qcom_scm: convention: %s%s\n",
171 qcom_scm_convention_names[qcom_scm_convention],
172 forced ? " (forced)" : "");
173 }
174 spin_unlock_irqrestore(&scm_query_lock, flags);
175
176 return qcom_scm_convention;
177}
178
179/**
180 * qcom_scm_call() - Invoke a syscall in the secure world
181 * @dev: device
182 * @svc_id: service identifier
183 * @cmd_id: command identifier
184 * @desc: Descriptor structure containing arguments and return values
185 *
186 * Sends a command to the SCM and waits for the command to finish processing.
187 * This should *only* be called in pre-emptible context.
188 */
189static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
190 struct qcom_scm_res *res)
191{
192 might_sleep();
193 switch (__get_convention()) {
194 case SMC_CONVENTION_ARM_32:
195 case SMC_CONVENTION_ARM_64:
196 return scm_smc_call(dev, desc, res, false);
197 case SMC_CONVENTION_LEGACY:
198 return scm_legacy_call(dev, desc, res);
199 default:
200 pr_err("Unknown current SCM calling convention.\n");
201 return -EINVAL;
202 }
203}
204
205/**
206 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
207 * @dev: device
208 * @svc_id: service identifier
209 * @cmd_id: command identifier
210 * @desc: Descriptor structure containing arguments and return values
211 * @res: Structure containing results from SMC/HVC call
212 *
213 * Sends a command to the SCM and waits for the command to finish processing.
214 * This can be called in atomic context.
215 */
216static int qcom_scm_call_atomic(struct device *dev,
217 const struct qcom_scm_desc *desc,
218 struct qcom_scm_res *res)
219{
220 switch (__get_convention()) {
221 case SMC_CONVENTION_ARM_32:
222 case SMC_CONVENTION_ARM_64:
223 return scm_smc_call(dev, desc, res, true);
224 case SMC_CONVENTION_LEGACY:
225 return scm_legacy_call_atomic(dev, desc, res);
226 default:
227 pr_err("Unknown current SCM calling convention.\n");
228 return -EINVAL;
229 }
230}
231
232static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
233 u32 cmd_id)
234{
235 int ret;
236 struct qcom_scm_desc desc = {
237 .svc = QCOM_SCM_SVC_INFO,
238 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
239 .owner = ARM_SMCCC_OWNER_SIP,
240 };
241 struct qcom_scm_res res;
242
243 desc.arginfo = QCOM_SCM_ARGS(1);
244 switch (__get_convention()) {
245 case SMC_CONVENTION_ARM_32:
246 case SMC_CONVENTION_ARM_64:
247 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
248 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
249 break;
250 case SMC_CONVENTION_LEGACY:
251 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
252 break;
253 default:
254 pr_err("Unknown SMC convention being used\n");
255 return -EINVAL;
256 }
257
258 ret = qcom_scm_call(dev, &desc, &res);
259
260 return ret ? false : !!res.result[0];
261}
262
263/**
264 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
265 * @entry: Entry point function for the cpus
266 * @cpus: The cpumask of cpus that will use the entry point
267 *
268 * Set the Linux entry point for the SCM to transfer control to when coming
269 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
270 */
271int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
272{
273 int ret;
274 int flags = 0;
275 int cpu;
276 struct qcom_scm_desc desc = {
277 .svc = QCOM_SCM_SVC_BOOT,
278 .cmd = QCOM_SCM_BOOT_SET_ADDR,
279 .arginfo = QCOM_SCM_ARGS(2),
280 };
281
282 /*
283 * Reassign only if we are switching from hotplug entry point
284 * to cpuidle entry point or vice versa.
285 */
286 for_each_cpu(cpu, cpus) {
287 if (entry == qcom_scm_wb[cpu].entry)
288 continue;
289 flags |= qcom_scm_wb[cpu].flag;
290 }
291
292 /* No change in entry function */
293 if (!flags)
294 return 0;
295
296 desc.args[0] = flags;
297 desc.args[1] = virt_to_phys(entry);
298
299 ret = qcom_scm_call(__scm->dev, &desc, NULL);
300 if (!ret) {
301 for_each_cpu(cpu, cpus)
302 qcom_scm_wb[cpu].entry = entry;
303 }
304
305 return ret;
306}
307EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
308
309/**
310 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
311 * @entry: Entry point function for the cpus
312 * @cpus: The cpumask of cpus that will use the entry point
313 *
314 * Set the cold boot address of the cpus. Any cpu outside the supported
315 * range would be removed from the cpu present mask.
316 */
317int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
318{
319 int flags = 0;
320 int cpu;
321 int scm_cb_flags[] = {
322 QCOM_SCM_FLAG_COLDBOOT_CPU0,
323 QCOM_SCM_FLAG_COLDBOOT_CPU1,
324 QCOM_SCM_FLAG_COLDBOOT_CPU2,
325 QCOM_SCM_FLAG_COLDBOOT_CPU3,
326 };
327 struct qcom_scm_desc desc = {
328 .svc = QCOM_SCM_SVC_BOOT,
329 .cmd = QCOM_SCM_BOOT_SET_ADDR,
330 .arginfo = QCOM_SCM_ARGS(2),
331 .owner = ARM_SMCCC_OWNER_SIP,
332 };
333
334 if (!cpus || (cpus && cpumask_empty(cpus)))
335 return -EINVAL;
336
337 for_each_cpu(cpu, cpus) {
338 if (cpu < ARRAY_SIZE(scm_cb_flags))
339 flags |= scm_cb_flags[cpu];
340 else
341 set_cpu_present(cpu, false);
342 }
343
344 desc.args[0] = flags;
345 desc.args[1] = virt_to_phys(entry);
346
347 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
348}
349EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
350
351/**
352 * qcom_scm_cpu_power_down() - Power down the cpu
353 * @flags - Flags to flush cache
354 *
355 * This is an end point to power down cpu. If there was a pending interrupt,
356 * the control would return from this function, otherwise, the cpu jumps to the
357 * warm boot entry point set for this cpu upon reset.
358 */
359void qcom_scm_cpu_power_down(u32 flags)
360{
361 struct qcom_scm_desc desc = {
362 .svc = QCOM_SCM_SVC_BOOT,
363 .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
364 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
365 .arginfo = QCOM_SCM_ARGS(1),
366 .owner = ARM_SMCCC_OWNER_SIP,
367 };
368
369 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
370}
371EXPORT_SYMBOL(qcom_scm_cpu_power_down);
372
373int qcom_scm_set_remote_state(u32 state, u32 id)
374{
375 struct qcom_scm_desc desc = {
376 .svc = QCOM_SCM_SVC_BOOT,
377 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
378 .arginfo = QCOM_SCM_ARGS(2),
379 .args[0] = state,
380 .args[1] = id,
381 .owner = ARM_SMCCC_OWNER_SIP,
382 };
383 struct qcom_scm_res res;
384 int ret;
385
386 ret = qcom_scm_call(__scm->dev, &desc, &res);
387
388 return ret ? : res.result[0];
389}
390EXPORT_SYMBOL(qcom_scm_set_remote_state);
391
392static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
393{
394 struct qcom_scm_desc desc = {
395 .svc = QCOM_SCM_SVC_BOOT,
396 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
397 .arginfo = QCOM_SCM_ARGS(2),
398 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
399 .owner = ARM_SMCCC_OWNER_SIP,
400 };
401
402 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
403
404 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
405}
406
407static void qcom_scm_set_download_mode(bool enable)
408{
409 bool avail;
410 int ret = 0;
411
412 avail = __qcom_scm_is_call_available(__scm->dev,
413 QCOM_SCM_SVC_BOOT,
414 QCOM_SCM_BOOT_SET_DLOAD_MODE);
415 if (avail) {
416 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
417 } else if (__scm->dload_mode_addr) {
418 ret = qcom_scm_io_writel(__scm->dload_mode_addr,
419 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
420 } else {
421 dev_err(__scm->dev,
422 "No available mechanism for setting download mode\n");
423 }
424
425 if (ret)
426 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
427}
428
429/**
430 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
431 * state machine for a given peripheral, using the
432 * metadata
433 * @peripheral: peripheral id
434 * @metadata: pointer to memory containing ELF header, program header table
435 * and optional blob of data used for authenticating the metadata
436 * and the rest of the firmware
437 * @size: size of the metadata
438 *
439 * Returns 0 on success.
440 */
441int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
442{
443 dma_addr_t mdata_phys;
444 void *mdata_buf;
445 int ret;
446 struct qcom_scm_desc desc = {
447 .svc = QCOM_SCM_SVC_PIL,
448 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
449 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
450 .args[0] = peripheral,
451 .owner = ARM_SMCCC_OWNER_SIP,
452 };
453 struct qcom_scm_res res;
454
455 /*
456 * During the scm call memory protection will be enabled for the meta
457 * data blob, so make sure it's physically contiguous, 4K aligned and
458 * non-cachable to avoid XPU violations.
459 */
460 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
461 GFP_KERNEL);
462 if (!mdata_buf) {
463 dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
464 return -ENOMEM;
465 }
466 memcpy(mdata_buf, metadata, size);
467
468 ret = qcom_scm_clk_enable();
469 if (ret)
470 goto free_metadata;
471
472 desc.args[1] = mdata_phys;
473
474 ret = qcom_scm_call(__scm->dev, &desc, &res);
475
476 qcom_scm_clk_disable();
477
478free_metadata:
479 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
480
481 return ret ? : res.result[0];
482}
483EXPORT_SYMBOL(qcom_scm_pas_init_image);
484
485/**
486 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
487 * for firmware loading
488 * @peripheral: peripheral id
489 * @addr: start address of memory area to prepare
490 * @size: size of the memory area to prepare
491 *
492 * Returns 0 on success.
493 */
494int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
495{
496 int ret;
497 struct qcom_scm_desc desc = {
498 .svc = QCOM_SCM_SVC_PIL,
499 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
500 .arginfo = QCOM_SCM_ARGS(3),
501 .args[0] = peripheral,
502 .args[1] = addr,
503 .args[2] = size,
504 .owner = ARM_SMCCC_OWNER_SIP,
505 };
506 struct qcom_scm_res res;
507
508 ret = qcom_scm_clk_enable();
509 if (ret)
510 return ret;
511
512 ret = qcom_scm_call(__scm->dev, &desc, &res);
513 qcom_scm_clk_disable();
514
515 return ret ? : res.result[0];
516}
517EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
518
519/**
520 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
521 * and reset the remote processor
522 * @peripheral: peripheral id
523 *
524 * Return 0 on success.
525 */
526int qcom_scm_pas_auth_and_reset(u32 peripheral)
527{
528 int ret;
529 struct qcom_scm_desc desc = {
530 .svc = QCOM_SCM_SVC_PIL,
531 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
532 .arginfo = QCOM_SCM_ARGS(1),
533 .args[0] = peripheral,
534 .owner = ARM_SMCCC_OWNER_SIP,
535 };
536 struct qcom_scm_res res;
537
538 ret = qcom_scm_clk_enable();
539 if (ret)
540 return ret;
541
542 ret = qcom_scm_call(__scm->dev, &desc, &res);
543 qcom_scm_clk_disable();
544
545 return ret ? : res.result[0];
546}
547EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
548
549/**
550 * qcom_scm_pas_shutdown() - Shut down the remote processor
551 * @peripheral: peripheral id
552 *
553 * Returns 0 on success.
554 */
555int qcom_scm_pas_shutdown(u32 peripheral)
556{
557 int ret;
558 struct qcom_scm_desc desc = {
559 .svc = QCOM_SCM_SVC_PIL,
560 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
561 .arginfo = QCOM_SCM_ARGS(1),
562 .args[0] = peripheral,
563 .owner = ARM_SMCCC_OWNER_SIP,
564 };
565 struct qcom_scm_res res;
566
567 ret = qcom_scm_clk_enable();
568 if (ret)
569 return ret;
570
571 ret = qcom_scm_call(__scm->dev, &desc, &res);
572
573 qcom_scm_clk_disable();
574
575 return ret ? : res.result[0];
576}
577EXPORT_SYMBOL(qcom_scm_pas_shutdown);
578
579/**
580 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
581 * available for the given peripherial
582 * @peripheral: peripheral id
583 *
584 * Returns true if PAS is supported for this peripheral, otherwise false.
585 */
586bool qcom_scm_pas_supported(u32 peripheral)
587{
588 int ret;
589 struct qcom_scm_desc desc = {
590 .svc = QCOM_SCM_SVC_PIL,
591 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
592 .arginfo = QCOM_SCM_ARGS(1),
593 .args[0] = peripheral,
594 .owner = ARM_SMCCC_OWNER_SIP,
595 };
596 struct qcom_scm_res res;
597
598 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
599 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
600 return false;
601
602 ret = qcom_scm_call(__scm->dev, &desc, &res);
603
604 return ret ? false : !!res.result[0];
605}
606EXPORT_SYMBOL(qcom_scm_pas_supported);
607
608static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
609{
610 struct qcom_scm_desc desc = {
611 .svc = QCOM_SCM_SVC_PIL,
612 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
613 .arginfo = QCOM_SCM_ARGS(2),
614 .args[0] = reset,
615 .args[1] = 0,
616 .owner = ARM_SMCCC_OWNER_SIP,
617 };
618 struct qcom_scm_res res;
619 int ret;
620
621 ret = qcom_scm_call(__scm->dev, &desc, &res);
622
623 return ret ? : res.result[0];
624}
625
626static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
627 unsigned long idx)
628{
629 if (idx != 0)
630 return -EINVAL;
631
632 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
633}
634
635static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
636 unsigned long idx)
637{
638 if (idx != 0)
639 return -EINVAL;
640
641 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
642}
643
644static const struct reset_control_ops qcom_scm_pas_reset_ops = {
645 .assert = qcom_scm_pas_reset_assert,
646 .deassert = qcom_scm_pas_reset_deassert,
647};
648
649int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
650{
651 struct qcom_scm_desc desc = {
652 .svc = QCOM_SCM_SVC_IO,
653 .cmd = QCOM_SCM_IO_READ,
654 .arginfo = QCOM_SCM_ARGS(1),
655 .args[0] = addr,
656 .owner = ARM_SMCCC_OWNER_SIP,
657 };
658 struct qcom_scm_res res;
659 int ret;
660
661
662 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
663 if (ret >= 0)
664 *val = res.result[0];
665
666 return ret < 0 ? ret : 0;
667}
668EXPORT_SYMBOL(qcom_scm_io_readl);
669
670int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
671{
672 struct qcom_scm_desc desc = {
673 .svc = QCOM_SCM_SVC_IO,
674 .cmd = QCOM_SCM_IO_WRITE,
675 .arginfo = QCOM_SCM_ARGS(2),
676 .args[0] = addr,
677 .args[1] = val,
678 .owner = ARM_SMCCC_OWNER_SIP,
679 };
680
681 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
682}
683EXPORT_SYMBOL(qcom_scm_io_writel);
684
685/**
686 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
687 * supports restore security config interface.
688 *
689 * Return true if restore-cfg interface is supported, false if not.
690 */
691bool qcom_scm_restore_sec_cfg_available(void)
692{
693 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
694 QCOM_SCM_MP_RESTORE_SEC_CFG);
695}
696EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
697
698int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
699{
700 struct qcom_scm_desc desc = {
701 .svc = QCOM_SCM_SVC_MP,
702 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
703 .arginfo = QCOM_SCM_ARGS(2),
704 .args[0] = device_id,
705 .args[1] = spare,
706 .owner = ARM_SMCCC_OWNER_SIP,
707 };
708 struct qcom_scm_res res;
709 int ret;
710
711 ret = qcom_scm_call(__scm->dev, &desc, &res);
712
713 return ret ? : res.result[0];
714}
715EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
716
717int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
718{
719 struct qcom_scm_desc desc = {
720 .svc = QCOM_SCM_SVC_MP,
721 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
722 .arginfo = QCOM_SCM_ARGS(1),
723 .args[0] = spare,
724 .owner = ARM_SMCCC_OWNER_SIP,
725 };
726 struct qcom_scm_res res;
727 int ret;
728
729 ret = qcom_scm_call(__scm->dev, &desc, &res);
730
731 if (size)
732 *size = res.result[0];
733
734 return ret ? : res.result[1];
735}
736EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
737
738int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
739{
740 struct qcom_scm_desc desc = {
741 .svc = QCOM_SCM_SVC_MP,
742 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
743 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
744 QCOM_SCM_VAL),
745 .args[0] = addr,
746 .args[1] = size,
747 .args[2] = spare,
748 .owner = ARM_SMCCC_OWNER_SIP,
749 };
750 int ret;
751
752 desc.args[0] = addr;
753 desc.args[1] = size;
754 desc.args[2] = spare;
755 desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
756 QCOM_SCM_VAL);
757
758 ret = qcom_scm_call(__scm->dev, &desc, NULL);
759
760 /* the pg table has been initialized already, ignore the error */
761 if (ret == -EPERM)
762 ret = 0;
763
764 return ret;
765}
766EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: memory region whose ownership needs to be reassigned
 * @mem_sz: size of the region
 * @srcvm: vmid bitmap for the current set of owners, each set bit
 *         indicates a unique owner
 * @newvm: array of new owners and their corresponding permission flags
 * @dest_cnt: number of owners in the next set
 *
 * Return negative errno on failure or 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill in the source vmid details */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill in the details of the memory buffer to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill in the destination vmid details */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
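
/*
 * Illustrative sketch: reassigning a carveout from HLOS to the modem and
 * noting the updated owner mask for the reverse call at teardown. The VMID
 * and permission values follow include/linux/qcom_scm.h, but this pairing
 * is an assumption about a typical caller, not code from this driver.
 *
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(phys, size, &srcvm, &newvm, 1);
 *	if (ret)
 *		return ret;
 *	// 'srcvm' now holds the owner mask to pass back when returning
 *	// the region to HLOS
 */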

/**
 * qcom_scm_ocmem_lock_available() - is the OCMEM lock/unlock interface
 * available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
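
/*
 * Illustrative sketch: locking an OCMEM range for the graphics initiator
 * and releasing it again. The offset and size values are assumptions;
 * per the kernel-doc above, 'mode' selects WIDE or NARROW access.
 *
 *	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_256K,
 *				  mode);
 *	if (ret)
 *		return ret;
 *	// ... use the region ...
 *	ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, SZ_256K);
 */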

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where
 * it can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way
	 * to do this is by using the DMA API. But as is best practice for
	 * crypto keys, we also must wipe the key after use. This makes
	 * kmemdup() + dma_map_single() not clearly correct, since the DMA
	 * API can use bounce buffers. Instead, just use dma_alloc_coherent().
	 * Programming keys is normally rare and thus not performance-critical.
	 */
	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
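
/*
 * Illustrative sketch: programming an AES-256-XTS key (64 raw key bytes)
 * into keyslot 0 with a 4096-byte data unit (8 * 512), then invalidating
 * it when the key is no longer needed. The slot number and key buffer are
 * assumptions.
 *
 *	ret = qcom_scm_ice_set_key(0, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	ret = qcom_scm_ice_invalidate_key(0);
 */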

/**
 * qcom_scm_hdcp_available() - Check if the secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
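
/*
 * Illustrative sketch: issuing a single HDCP register write. Unused slots
 * in the five-entry request array stay zeroed, since the call above always
 * packs all five entries into the descriptor. The register address is a
 * made-up value.
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = 0x12345678, .val = 1 },
 *	};
 *	u32 resp;
 *	int ret;
 *
 *	ret = qcom_scm_hdcp_req(req, 1, &resp);
 */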

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
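
/*
 * The parsing above expects "qcom,dload-mode" to be a phandle to the TCSR
 * node followed by the byte offset of the download-mode register within it.
 * A hypothetical device tree fragment (labels and offset are assumptions):
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */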

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__query_convention();

	/*
	 * If requested, enable "download mode"; from this point on a warm
	 * boot will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);