// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI initialization and all extension implementation.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/bits.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5) __ro_after_init;

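/*
 * sbi_ecall() implements the binary SBI calling convention: the extension ID
 * is passed in a7, the function ID in a6 and up to six arguments in a0-a5.
 * After the ecall traps into the SBI implementation, the error code comes
 * back in a0 and the return value in a1.
 */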
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	struct sbiret ret;

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}
EXPORT_SYMBOL(sbi_ecall);

int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	};
}
EXPORT_SYMBOL(sbi_err_map_linux_errno);

#ifdef CONFIG_RISCV_SBI_V01
static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
{
	unsigned long cpuid, hartid;
	unsigned long hmask = 0;

	/*
	 * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
	 * associated with hartid. As SBI v0.1 is only kept for backward compatibility
	 * and will be removed in the future, there is no point in supporting hartid
	 * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
	 * should be used for platforms with hartid greater than BITS_PER_LONG.
	 */
	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hartid >= BITS_PER_LONG) {
			pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
			break;
		}
		hmask |= BIT(hartid);
	}

	return hmask;
}

/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);

/**
 * sbi_console_getchar() - Reads a byte from the console device.
 *
 * Returns the value read from the console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);

/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);

/**
 * __sbi_set_timer_v01() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	unsigned long hart_mask =
		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));
	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
		  0, 0, 0, 0, 0);
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	int result = 0;
	unsigned long hart_mask;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;
	hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);

	/* v0.2 function IDs are equivalent to v0.1 extension IDs */
	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
			  (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
			  (unsigned long)&hart_mask, start, size,
			  0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
			  (unsigned long)&hart_mask, start, size,
			  arg4, 0, 0);
		break;
	default:
		pr_err("SBI call [%d] not supported in SBI v0.1\n", fid);
		result = -EINVAL;
	}

	return result;
}

static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
#else
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static void __sbi_send_ipi_v01(unsigned int cpu)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}

static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}

static void sbi_set_power_off(void) {}
#endif /* CONFIG_RISCV_SBI_V01 */

static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}

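/*
 * SBI v0.2 IPI calls take a hart mask plus a base hartid. For a single
 * target CPU the mask is simply 1UL with hbase set to that CPU's hartid.
 */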
static void __sbi_send_ipi_v02(unsigned int cpu)
{
	int result;
	struct sbiret ret = {0};

	ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
			1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] failed (error [%d])\n",
		       __func__, cpuid_to_hartid_map(cpu), result);
	}
}

static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
				 unsigned long hbase, unsigned long start,
				 unsigned long size, unsigned long arg4,
				 unsigned long arg5)
{
	struct sbiret ret = {0};
	int ext = SBI_EXT_RFENCE;
	int result = 0;

	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;

	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		ret = sbi_ecall(ext, fid, hmask, hbase, start,
				size, arg4, 0);
		break;
	default:
		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
		       fid, ext);
		result = -EINVAL;
	}

	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
		       __func__, hbase, hmask, result);
	}

	return result;
}

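/*
 * Convert a cpumask into one or more (hmask, hbase) pairs accepted by the
 * v0.2 RFENCE calls. Hartids are accumulated into a BITS_PER_LONG wide
 * window anchored at hbase; whenever the next hartid no longer fits in the
 * current window, the accumulated batch is flushed via
 * __sbi_rfence_v02_call() and a new window is started.
 */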
static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	int result;

	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				result = __sbi_rfence_v02_call(fid, hmask,
						hbase, start, size, arg4, arg5);
				if (result)
					return result;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= BIT(hartid - hbase);
	}

	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}

/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}

/**
 * sbi_send_ipi() - Send an IPI to the given hart.
 * @cpu: Logical id of the target CPU.
 */
void sbi_send_ipi(unsigned int cpu)
{
	__sbi_send_ipi(cpu);
}
EXPORT_SYMBOL(sbi_send_ipi);

/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range belonging to a specific ASID or not.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address range.
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
 * for flushing all address spaces.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
			       unsigned long start,
			       unsigned long size,
			       unsigned long asid)
{
	if (asid == FLUSH_TLB_NO_ASID)
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
				    cpu_mask, start, size, 0, 0);
	else
		return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
				    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 * harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address range.
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);

/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address range.
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);

/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 * harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address range.
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);

/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address range.
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);

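/*
 * Ask the SBI System Reset extension to reset or shut down the system. A
 * successful SBI_EXT_SRST_RESET call does not return, so reaching the
 * pr_warn() below means the request failed.
 */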
static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}

static int sbi_srst_reboot(struct notifier_block *this,
			   unsigned long mode, void *cmd)
{
	sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
		       SBI_SRST_RESET_TYPE_WARM_REBOOT :
		       SBI_SRST_RESET_TYPE_COLD_REBOOT,
		       SBI_SRST_RESET_REASON_NONE);
	return NOTIFY_DONE;
}

static struct notifier_block sbi_srst_reboot_nb;

static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}

/**
 * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
 * @extid: The extension ID to be probed.
 *
 * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
 */
long sbi_probe_extension(int extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;

	return 0;
}
EXPORT_SYMBOL(sbi_probe_extension);

static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}

long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
EXPORT_SYMBOL_GPL(sbi_get_marchid);

long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);

bool sbi_debug_console_available;

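/*
 * The DBCN console calls take a physical address, so the buffer is translated
 * with __pa() (or via vmalloc_to_page() for vmalloc mappings) and the length
 * is clamped so the transfer never crosses the end of the containing page,
 * since only a single page is guaranteed to be physically contiguous here.
 */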
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
{
	phys_addr_t base_addr;
	struct sbiret ret;

	if (!sbi_debug_console_available)
		return -EOPNOTSUPP;

	if (is_vmalloc_addr(bytes))
		base_addr = page_to_phys(vmalloc_to_page(bytes)) +
			    offset_in_page(bytes);
	else
		base_addr = __pa(bytes);
	if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
		num_bytes = PAGE_SIZE - offset_in_page(bytes);

	if (IS_ENABLED(CONFIG_32BIT))
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, lower_32_bits(base_addr),
				upper_32_bits(base_addr), 0, 0, 0);
	else
		ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
				num_bytes, base_addr, 0, 0, 0, 0);

	if (ret.error == SBI_ERR_FAILURE)
		return -EIO;
	return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
}

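/*
 * Probe the SBI implementation at boot: detect the specification version,
 * then install the v0.2+ handlers for TIME/IPI/RFENCE when the corresponding
 * extensions are present, falling back to the legacy v0.1 (or stub) handlers
 * otherwise. SRST and DBCN are optional and only hooked up when probed.
 */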
void __init sbi_init(void)
{
	int ret;

	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME)) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI)) {
			__sbi_send_ipi = __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi = __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
			__sbi_rfence = __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence = __sbi_rfence_v01;
		}
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    sbi_probe_extension(SBI_EXT_SRST)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
		if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
		    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
			pr_info("SBI DBCN extension detected\n");
			sbi_debug_console_available = true;
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi = __sbi_send_ipi_v01;
		__sbi_rfence = __sbi_rfence_v01;
	}
}