/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>

#include <asm/cacheflush.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

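/*
 * For illustration (this comment is not part of the original file): the boot
 * flags are simply OR'ed together, one per CPU, so cold-booting CPU0 and
 * CPU2 in a single request passes
 * QCOM_SCM_FLAG_COLDBOOT_CPU0 | QCOM_SCM_FLAG_COLDBOOT_CPU2 == 0x08.
 */
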
struct qcom_scm_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct qcom_scm_command
 *	| command header  |
 *	------------------- <--- qcom_scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct qcom_scm_response and
 *	| response header |      qcom_scm_command_to_response()
 *	------------------- <--- qcom_scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate qcom_scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct qcom_scm_command {
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];
};

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};

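/*
 * Layout example (illustrative only, derived from the structures above and
 * alloc_qcom_scm_command() below): for an 8-byte command buffer and a 4-byte
 * response buffer,
 *
 *   buf_offset      = sizeof(struct qcom_scm_command)               = 16
 *   resp_hdr_offset = buf_offset + 8                                = 24
 *   len             = 16 + sizeof(struct qcom_scm_response) + 8 + 4 = 40
 *
 * so the response header starts 24 bytes into the allocation, and the
 * response data lives at resp_hdr_offset plus the buf_offset that the secure
 * world writes into struct qcom_scm_response.
 */
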
/**
 * alloc_qcom_scm_command() - Allocate an SCM command
 * @cmd_size: size of the command buffer
 * @resp_size: size of the response buffer
 *
 * Allocate an SCM command, including enough room for the command
 * and response headers as well as the command and response buffers.
 *
 * Returns a valid &qcom_scm_command on success or %NULL if the allocation fails.
 */
static struct qcom_scm_command *alloc_qcom_scm_command(size_t cmd_size, size_t resp_size)
{
	struct qcom_scm_command *cmd;
	size_t len = sizeof(*cmd) + sizeof(struct qcom_scm_response) + cmd_size +
		resp_size;
	u32 offset;

	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
	if (cmd) {
		cmd->len = cpu_to_le32(len);
		offset = offsetof(struct qcom_scm_command, buf);
		cmd->buf_offset = cpu_to_le32(offset);
		cmd->resp_hdr_offset = cpu_to_le32(offset + cmd_size);
	}
	return cmd;
}

/**
 * free_qcom_scm_command() - Free an SCM command
 * @cmd: command to free
 *
 * Free an SCM command.
 */
static inline void free_qcom_scm_command(struct qcom_scm_command *cmd)
{
	kfree(cmd);
}

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

static int qcom_scm_remap_error(int err)
{
	pr_err("qcom_scm_call failed with error code %d\n", err);
	switch (err) {
	case QCOM_SCM_ERROR:
		return -EIO;
	case QCOM_SCM_EINVAL_ADDR:
	case QCOM_SCM_EINVAL_ARG:
		return -EINVAL;
	case QCOM_SCM_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case QCOM_SCM_ENOMEM:
		return -ENOMEM;
	}
	return -EINVAL;
}

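/*
 * Calling convention used by smc() below: r0 carries the fixed value 1,
 * r1 holds the address of a local context-id word and r2 holds the physical
 * address of the command buffer.  The call is simply retried for as long as
 * the secure world returns QCOM_SCM_INTERRUPTED.
 */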
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}

static int __qcom_scm_call(const struct qcom_scm_command *cmd)
{
	int ret;
	u32 cmd_addr = virt_to_phys(cmd);

	/*
	 * Flush the command buffer so that the secure world sees
	 * the correct data.
	 */
	secure_flush_area(cmd, cmd->len);

	ret = smc(cmd_addr);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);

	return ret;
}

static void qcom_scm_inv_range(unsigned long start, unsigned long end)
{
	u32 cacheline_size, ctr;

	/* Read CTR and derive the minimum D-cache line size in bytes. */
	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
	cacheline_size = 4 << ((ctr >> 16) & 0xf);

	start = round_down(start, cacheline_size);
	end = round_up(end, cacheline_size);
	outer_inv_range(start, end);
	while (start < end) {
		/* DCIMVAC: invalidate the data cache line containing 'start' */
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
		     : "memory");
		start += cacheline_size;
	}
	dsb();
	isb();
}

/**
 * qcom_scm_call() - Send an SCM command
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
			size_t cmd_len, void *resp_buf, size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	unsigned long start, end;

	cmd = alloc_qcom_scm_command(cmd_len, resp_len);
	if (!cmd)
		return -ENOMEM;

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	mutex_lock(&qcom_scm_lock);
	ret = __qcom_scm_call(cmd);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	rsp = qcom_scm_command_to_response(cmd);
	start = (unsigned long)rsp;

	/*
	 * Poll until the secure world marks the response complete,
	 * invalidating our cached copy of the response header on each pass.
	 */
	do {
		qcom_scm_inv_range(start, start + sizeof(*rsp));
	} while (!rsp->is_complete);

	end = (unsigned long)qcom_scm_get_response_buffer(rsp) + resp_len;
	qcom_scm_inv_range(start, end);

	if (resp_buf)
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp), resp_len);
out:
	free_qcom_scm_command(cmd);
	return ret;
}

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				(n & 0xf))

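/*
 * Worked example (illustrative only; assumes QCOM_SCM_SVC_BOOT == 0x1 and
 * QCOM_SCM_CMD_TERMINATE_PC == 0x2 as defined in qcom_scm.h):
 *
 *   SCM_ATOMIC(0x1, 0x2, 1) = (((0x1 << 10) | 0x2) << 12)
 *                             | (0x2 << 8) | BIT(5) | 1
 *                           = 0x402000 | 0x200 | 0x20 | 0x1
 *                           = 0x402221
 */
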
/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

/*
 * Set the cold/warm boot address for one of the CPU cores.
 */
static int qcom_scm_set_boot_addr(u32 addr, int flags)
{
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

	cmd.addr = cpu_to_le32(addr);
	cmd.flags = cpu_to_le32(flags);
	return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			&cmd, sizeof(cmd), NULL, 0);
}

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	return qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	ret = qcom_scm_set_boot_addr(virt_to_phys(entry), flags);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
{
	int ret;
	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
	__le32 ret_val = 0;

	ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd,
			sizeof(svc_cmd), &ret_val, sizeof(ret_val));
	if (ret)
		return ret;

	return le32_to_cpu(ret_val);
}

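/*
 * Worked example (illustrative only; assumes QCOM_SCM_SVC_INFO == 0x6 and
 * QCOM_IS_CALL_AVAIL_CMD == 0x1 as defined in qcom_scm.h): the packed
 * svc_cmd word sent above would be (0x6 << 10) | 0x1 == 0x1801.
 */
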
int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
		req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}

/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

/**
 * struct qcom_scm_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from qcom_scm_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct qcom_scm_command
 *	| command header  |
 *	------------------- <--- qcom_scm_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct qcom_scm_response and
 *	| response header |      qcom_scm_command_to_response()
 *	------------------- <--- qcom_scm_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate qcom_scm_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct qcom_scm_command {
	__le32 len;
	__le32 buf_offset;
	__le32 resp_hdr_offset;
	__le32 id;
	__le32 buf[0];
};

/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;
};

/**
 * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct qcom_scm_response *qcom_scm_command_to_response(
		const struct qcom_scm_command *cmd)
{
	return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
{
	return (void *)cmd->buf;
}

/**
 * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
{
	return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}

/**
 * qcom_scm_call() - Send an SCM command
 * @dev: device used for DMA mapping of the command buffer
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
			 size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
	dma_addr_t cmd_phys;

	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->len = cpu_to_le32(alloc_len);
	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);

	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	rsp = qcom_scm_command_to_response(cmd);

	/*
	 * Map the command buffer for the device so it is flushed out of the
	 * CPU caches before the secure world reads it.
	 */
	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cmd_phys)) {
		kfree(cmd);
		return -ENOMEM;
	}

	mutex_lock(&qcom_scm_lock);
	ret = smc(cmd_phys);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	/*
	 * Poll until the secure world marks the response complete, pulling a
	 * fresh copy of the response header out of memory on each pass.
	 */
	do {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
					sizeof(*rsp), DMA_FROM_DEVICE);
	} while (!rsp->is_complete);

	if (resp_buf) {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
					le32_to_cpu(rsp->buf_offset),
					resp_len, DMA_FROM_DEVICE);
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
		       resp_len);
	}
out:
	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
	kfree(cmd);
	return ret;
}

#define SCM_CLASS_REGISTER	(0x2 << 8)
#define SCM_MASK_IRQS		BIT(5)
#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
				SCM_CLASS_REGISTER | \
				SCM_MASK_IRQS | \
				(n & 0xf))

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3");
	return r0;
}

/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;

	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;
	register u32 r3 asm("r3") = arg2;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
		);
	return r0;
}

u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
				     flags, virt_to_phys(entry));
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: device passed on to qcom_scm_call()
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
				  const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	cmd.addr = cpu_to_le32(virt_to_phys(entry));
	cmd.flags = cpu_to_le32(flags);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			    &cmd, sizeof(cmd), NULL, 0);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
	qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
			      flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
	int ret;
	__le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
	__le32 ret_val = 0;

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
			    &svc_cmd, sizeof(svc_cmd), &ret_val,
			    sizeof(ret_val));
	if (ret)
		return ret;

	return le32_to_cpu(ret_val);
}

int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
			u32 req_cnt, u32 *resp)
{
	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
			     req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}

void __qcom_scm_init(void)
{
}

bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_IS_SUPPORTED_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? false : !!out;
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
			      dma_addr_t metadata_phys)
{
	__le32 scm_ret;
	int ret;
	struct {
		__le32 proc;
		__le32 image_addr;
	} request;

	request.proc = cpu_to_le32(peripheral);
	request.image_addr = cpu_to_le32(metadata_phys);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_INIT_IMAGE_CMD,
			    &request, sizeof(request),
			    &scm_ret, sizeof(scm_ret));

	return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
			     phys_addr_t addr, phys_addr_t size)
{
	__le32 scm_ret;
	int ret;
	struct {
		__le32 proc;
		__le32 addr;
		__le32 len;
	} request;

	request.proc = cpu_to_le32(peripheral);
	request.addr = cpu_to_le32(addr);
	request.len = cpu_to_le32(size);

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_MEM_SETUP_CMD,
			    &request, sizeof(request),
			    &scm_ret, sizeof(scm_ret));

	return ret ? : le32_to_cpu(scm_ret);
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
	__le32 out;
	__le32 in;
	int ret;

	in = cpu_to_le32(peripheral);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
			    QCOM_SCM_PAS_SHUTDOWN_CMD,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	__le32 out;
	__le32 in = cpu_to_le32(reset);
	int ret;

	ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
			    &in, sizeof(in),
			    &out, sizeof(out));

	return ret ? : le32_to_cpu(out);
}
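
/*
 * Illustrative sketch only (not part of the original file): the
 * __qcom_scm_pas_* helpers above are normally driven by the higher-level
 * qcom_scm.c wrappers and, ultimately, by a peripheral image loader.  A
 * hypothetical caller that already has the firmware metadata mapped for DMA
 * might sequence them roughly as below; the function name and parameters are
 * made up for the example.
 */
#if 0	/* example only, never compiled */
static int example_pas_boot(struct device *dev, u32 peripheral,
			    dma_addr_t metadata_phys,
			    phys_addr_t mem, phys_addr_t size)
{
	int ret;

	if (!__qcom_scm_pas_supported(dev, peripheral))
		return -EOPNOTSUPP;

	/* Pass the authentication metadata to the secure world. */
	ret = __qcom_scm_pas_init_image(dev, peripheral, metadata_phys);
	if (ret)
		return ret;

	/* Describe the memory range the firmware will be copied into. */
	ret = __qcom_scm_pas_mem_setup(dev, peripheral, mem, size);
	if (ret)
		return ret;

	/* ... caller copies the firmware segments into [mem, mem + size) ... */

	/* Authenticate the image and bring the peripheral out of reset. */
	return __qcom_scm_pas_auth_and_reset(dev, peripheral);
}
#endif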