// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/acpi.h>
#include <asm/cpufeature.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/pgtable.h>

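/*
 * Used by the generic CPU enumeration code to match a logical CPU with its
 * physical ID, which on RISC-V is the hart ID.
 */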
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/*
 * Returns the hart ID of the given device tree node, or -ENODEV if the node
 * isn't an enabled and valid RISC-V hart node.
 */
int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	int cpu;

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	cpu = riscv_hartid_to_cpuid(*hart);
	if (cpu < 0)
		return cpu;

	if (!cpu_possible(cpu))
		return -ENODEV;

	return 0;
}

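/*
 * Early boot variant of the above: validates that the DT node describes an
 * available RISC-V hart whose base ISA matches the kernel (rv32i/rv64i with
 * the I, M and A extensions), using either the "riscv,isa-base" /
 * "riscv,isa-extensions" properties or, as a fallback, the legacy
 * "riscv,isa" string.
 */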
int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
	const char *isa;

	if (!of_device_is_compatible(node, "riscv")) {
		pr_warn("Found incompatible CPU\n");
		return -ENODEV;
	}

	*hart = (unsigned long)of_get_cpu_hwid(node, 0);
	if (*hart == ~0UL) {
		pr_warn("Found CPU without hart ID\n");
		return -ENODEV;
	}

	if (!of_device_is_available(node)) {
		pr_info("CPU with hartid=%lu is not available\n", *hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa-base", &isa))
		goto old_interface;

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv32i", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
		pr_warn("CPU with hartid=%lu does not support rv64i", *hart);
		return -ENODEV;
	}

	if (!of_property_present(node, "riscv,isa-extensions"))
		return -ENODEV;

	if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
	    of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
		pr_warn("CPU with hartid=%lu does not support ima", *hart);
		return -ENODEV;
	}

	return 0;

old_interface:
	if (!riscv_isa_fallback) {
		pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"",
			*hart);
		return -ENODEV;
	}

	if (of_property_read_string(node, "riscv,isa", &isa)) {
		pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
			*hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv32ima", *hart);
		return -ENODEV;
	}

	if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
		pr_warn("CPU with hartid=%lu does not support rv64ima", *hart);
		return -ENODEV;
	}

	return 0;
}

/*
 * Find the hart ID of the CPU DT node under which the given DT node falls.
 *
 * To achieve this, we walk up the DT tree until we find an active
 * RISC-V core (HART) node and extract the hart ID from it.
 */
int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv")) {
			*hartid = (unsigned long)of_get_cpu_hwid(node, 0);
			if (*hartid == ~0UL) {
				pr_warn("Found CPU without hart ID\n");
				return -ENODEV;
			}
			return 0;
		}
	}

	return -1;
}

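/*
 * Read the boot hart's marchid early, before the CPU hotplug callback below
 * has run, and cache it in this CPU's riscv_cpuinfo entry.
 */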
unsigned long __init riscv_get_marchid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->marchid = csr_read(CSR_MARCHID);
#else
	ci->marchid = 0;
#endif
	return ci->marchid;
}

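/* As above, but for the boot hart's mvendorid. */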
unsigned long __init riscv_get_mvendorid(void)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	ci->mvendorid = csr_read(CSR_MVENDORID);
#else
	ci->mvendorid = 0;
#endif
	return ci->mvendorid;
}

DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);

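/*
 * Accessors for the per-CPU cached copies of the machine-mode ID CSRs,
 * populated by riscv_cpuinfo_starting() (and, for the boot hart, by the
 * early riscv_get_*() helpers above).
 */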
unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mvendorid;
}
EXPORT_SYMBOL(riscv_cached_mvendorid);

unsigned long riscv_cached_marchid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->marchid;
}
EXPORT_SYMBOL(riscv_cached_marchid);

unsigned long riscv_cached_mimpid(unsigned int cpu_id)
{
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);

	return ci->mimpid;
}
EXPORT_SYMBOL(riscv_cached_mimpid);

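/*
 * Runs on each CPU as it comes online: cache mvendorid, marchid and mimpid,
 * either via the SBI (when running in S-mode) or by reading the CSRs
 * directly (when running in M-mode). Values already cached by the early
 * riscv_get_*() helpers are left untouched.
 */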
static int riscv_cpuinfo_starting(unsigned int cpu)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	if (!ci->mvendorid)
		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
	if (!ci->marchid)
		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	if (!ci->mvendorid)
		ci->mvendorid = csr_read(CSR_MVENDORID);
	if (!ci->marchid)
		ci->marchid = csr_read(CSR_MARCHID);
	ci->mimpid = csr_read(CSR_MIMPID);
#else
	ci->mvendorid = 0;
	ci->marchid = 0;
	ci->mimpid = 0;
#endif

	return 0;
}

static int __init riscv_cpuinfo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting",
				riscv_cpuinfo_starting, NULL);
	if (ret < 0) {
		pr_err("cpuinfo: failed to register hotplug callbacks.\n");
		return ret;
	}

	return 0;
}
arch_initcall(riscv_cpuinfo_init);

#ifdef CONFIG_PROC_FS

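/*
 * Print an ISA string ("rv64ima..._zicsr...") built from the given extension
 * bitmap; a NULL bitmap means the set of extensions common to all harts.
 */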
static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
{
	if (IS_ENABLED(CONFIG_32BIT))
		seq_write(f, "rv32", 4);
	else
		seq_write(f, "rv64", 4);

	for (int i = 0; i < riscv_isa_ext_count; i++) {
		if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
			continue;

		/* Only multi-letter extensions are split by underscores */
		if (strnlen(riscv_isa_ext[i].name, 2) != 1)
			seq_puts(f, "_");

		seq_printf(f, "%s", riscv_isa_ext[i].name);
	}

	seq_puts(f, "\n");
}

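/* Report the virtual addressing mode the kernel is using. */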
static void print_mmu(struct seq_file *f)
{
	const char *sv_type;

#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
	sv_type = "sv32";
#elif defined(CONFIG_64BIT)
	if (pgtable_l5_enabled)
		sv_type = "sv57";
	else if (pgtable_l4_enabled)
		sv_type = "sv48";
	else
		sv_type = "sv39";
#endif
#else
	sv_type = "none";
#endif /* CONFIG_MMU */
	seq_printf(f, "mmu\t\t: %s\n", sv_type);
}

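/*
 * seq_file iterator over the online CPUs. The returned cookie is the CPU
 * number plus one so that CPU 0 is not mistaken for a NULL (end of sequence)
 * return; c_show() undoes the offset.
 */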
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == nr_cpu_ids)
		return NULL;

	*pos = cpumask_next(*pos - 1, cpu_online_mask);
	if ((*pos) < nr_cpu_ids)
		return (void *)(uintptr_t)(1 + *pos);
	return NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

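/* Emit one /proc/cpuinfo record for the CPU encoded in @v. */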
static int c_show(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
	struct device_node *node;
	const char *compat;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));

	/*
	 * For historical raisins, the isa: line is limited to the lowest common
	 * denominator of extensions supported across all harts. A true list of
	 * extensions supported on this hart is printed later in the hart isa:
	 * line.
	 */
	seq_puts(m, "isa\t\t: ");
	print_isa(m, NULL);
	print_mmu(m);

	if (acpi_disabled) {
		node = of_get_cpu_node(cpu_id, NULL);

		if (!of_property_read_string(node, "compatible", &compat) &&
		    strcmp(compat, "riscv"))
			seq_printf(m, "uarch\t\t: %s\n", compat);

		of_node_put(node);
	}

	seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
	seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
	seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);

	/*
	 * Print the ISA extensions specific to this hart, which may show
	 * additional extensions not present across all harts.
	 */
	seq_puts(m, "hart isa\t: ");
	print_isa(m, hart_isa[cpu_id].isa);
	seq_puts(m, "\n");

	return 0;
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};

#endif /* CONFIG_PROC_FS */