Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copied from arch/arm64/kernel/cpufeature.c
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Copyright (C) 2017 SiFive
7 */
8
9#include <linux/acpi.h>
10#include <linux/bitmap.h>
11#include <linux/cpu.h>
12#include <linux/cpuhotplug.h>
13#include <linux/ctype.h>
14#include <linux/jump_label.h>
15#include <linux/log2.h>
16#include <linux/memory.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <asm/acpi.h>
20#include <asm/alternative.h>
21#include <asm/cacheflush.h>
22#include <asm/cpufeature.h>
23#include <asm/hwcap.h>
24#include <asm/hwprobe.h>
25#include <asm/patch.h>
26#include <asm/processor.h>
27#include <asm/sbi.h>
28#include <asm/vector.h>
29
30#include "copy-unaligned.h"
31
/* Number of single-letter ('a'..'z') ISA extensions. */
#define NUM_ALPHA_EXTS ('z' - 'a' + 1)

/* Misaligned-access probe: each variant is timed for 2^1 jiffies. */
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
/* One page-order buffer holds both halves (dst and src) of the copy test. */
#define MISALIGNED_BUFFER_SIZE 0x4000
#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
/* Copy slightly less than half the buffer so the offset src/dst still fit. */
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)

/* HWCAP bits exposed to userspace through the ELF auxiliary vector. */
unsigned long elf_hwcap __read_mostly;

/* Host ISA bitmap: extensions common to every probed hart. */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

/* Per-cpu ISA extensions. */
struct riscv_isainfo hart_isa[NR_CPUS];

/* Performance information */
DEFINE_PER_CPU(long, misaligned_access_speed);

/* CPUs whose misaligned word copies measured faster than byte copies. */
static cpumask_t fast_misaligned_access;
51
52/**
53 * riscv_isa_extension_base() - Get base extension word
54 *
55 * @isa_bitmap: ISA bitmap to use
56 * Return: base extension word as unsigned long value
57 *
58 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
59 */
60unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
61{
62 if (!isa_bitmap)
63 return riscv_isa[0];
64 return isa_bitmap[0];
65}
66EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
67
68/**
69 * __riscv_isa_extension_available() - Check whether given extension
70 * is available or not
71 *
72 * @isa_bitmap: ISA bitmap to use
73 * @bit: bit position of the desired extension
74 * Return: true or false
75 *
76 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
77 */
78bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
79{
80 const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
81
82 if (bit >= RISCV_ISA_EXT_MAX)
83 return false;
84
85 return test_bit(bit, bmap) ? true : false;
86}
87EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
88
89static bool riscv_isa_extension_check(int id)
90{
91 switch (id) {
92 case RISCV_ISA_EXT_ZICBOM:
93 if (!riscv_cbom_block_size) {
94 pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
95 return false;
96 } else if (!is_power_of_2(riscv_cbom_block_size)) {
97 pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
98 return false;
99 }
100 return true;
101 case RISCV_ISA_EXT_ZICBOZ:
102 if (!riscv_cboz_block_size) {
103 pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
104 return false;
105 } else if (!is_power_of_2(riscv_cboz_block_size)) {
106 pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
107 return false;
108 }
109 return true;
110 case RISCV_ISA_EXT_INVALID:
111 return false;
112 }
113
114 return true;
115}
116
/*
 * Common initializer for a struct riscv_isa_ext_data entry. The extension
 * name is also used verbatim as the devicetree property string.
 */
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {	\
	.name = #_name,								\
	.property = #_name,							\
	.id = _id,								\
	.subset_ext_ids = _subset_exts,						\
	.subset_ext_size = _subset_exts_size					\
}

/* Plain extension: has its own id and no subset/bundle members. */
#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)

/* Used to declare pure "lasso" extension (Zk for instance) */
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
	_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))

/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))

/* Extensions enabled when the Zk bundle is named. */
static const unsigned int riscv_zk_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKR,
	RISCV_ISA_EXT_ZKT,
};

/* Extensions enabled when the Zkn bundle is named. */
static const unsigned int riscv_zkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKNH,
};

/* Extensions enabled when the Zks bundle is named. */
static const unsigned int riscv_zks_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZKSED,
	RISCV_ISA_EXT_ZKSH
};

/* Shared member list for the Zvkn* bundles below. */
#define RISCV_ISA_EXT_ZVKN	\
	RISCV_ISA_EXT_ZVKNED,	\
	RISCV_ISA_EXT_ZVKNHB,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN
};

static const unsigned int riscv_zvknc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvkng_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVKG
};

/* Shared member list for the Zvks* bundles below. */
#define RISCV_ISA_EXT_ZVKS	\
	RISCV_ISA_EXT_ZVKSED,	\
	RISCV_ISA_EXT_ZVKSH,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvks_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS
};

static const unsigned int riscv_zvksc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvksg_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVKG
};

/* Zvbb is a superset of Zvkb. */
static const unsigned int riscv_zvbb_exts[] = {
	RISCV_ISA_EXT_ZVKB
};

/*
 * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V
 * privileged ISA, the existence of the CSRs is implied by any extension which
 * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the
 * existence of the CSR, and treat it as a subset of those other extensions.
 */
static const unsigned int riscv_xlinuxenvcfg_exts[] = {
	RISCV_ISA_EXT_XLINUXENVCFG
};
214
215/*
216 * The canonical order of ISA extension names in the ISA string is defined in
217 * chapter 27 of the unprivileged specification.
218 *
219 * Ordinarily, for in-kernel data structures, this order is unimportant but
220 * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
221 *
222 * The specification uses vague wording, such as should, when it comes to
223 * ordering, so for our purposes the following rules apply:
224 *
225 * 1. All multi-letter extensions must be separated from other extensions by an
226 * underscore.
227 *
228 * 2. Additional standard extensions (starting with 'Z') must be sorted after
229 * single-letter extensions and before any higher-privileged extensions.
230 *
 * 3. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they must be ordered first by
 *    category, then alphabetically within a category.
 *
 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
 *    after standard unprivileged extensions. If multiple supervisor-level
 *    extensions are listed, they must be ordered alphabetically.
 *
 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
 *    after any lower-privileged, standard extensions. If multiple
 *    machine-level extensions are listed, they must be ordered
 *    alphabetically.
 *
 * 6. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. If multiple non-standard extensions are listed, they
 *    must be ordered alphabetically.
248 *
249 * An example string following the order is:
250 * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
251 *
252 * New entries to this struct should follow the ordering rules described above.
253 */
const struct riscv_isa_ext_data riscv_isa_ext[] = {
	/* Single-letter extensions, in canonical order. */
	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
	/* Additional standard 'Z' extensions. */
	__RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts),
	__RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
	__RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
	__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
	__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
	__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
	__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
	__RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
	__RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
	__RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
	__RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
	__RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
	__RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
	__RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
	__RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
	__RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
	__RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
	__RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
	__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
	__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
	__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
	__RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
	__RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
	__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
	__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
	__RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
	__RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
	__RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
	__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
	__RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
	__RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
	__RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
	__RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
	/* 'S'-prefixed privileged extensions. */
	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
	__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
};

/* Number of entries in riscv_isa_ext[]. */
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
324
325static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
326 const char *name_end, struct riscv_isainfo *isainfo)
327{
328 if ((name_end - name == strlen(ext->name)) &&
329 !strncasecmp(name, ext->name, name_end - name)) {
330 /*
331 * If this is a bundle, enable all the ISA extensions that
332 * comprise the bundle.
333 */
334 if (ext->subset_ext_size) {
335 for (int i = 0; i < ext->subset_ext_size; i++) {
336 if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
337 set_bit(ext->subset_ext_ids[i], isainfo->isa);
338 }
339 }
340
341 /*
342 * This is valid even for bundle extensions which uses the RISCV_ISA_EXT_INVALID id
343 * (rejected by riscv_isa_extension_check()).
344 */
345 if (riscv_isa_extension_check(ext->id))
346 set_bit(ext->id, isainfo->isa);
347 }
348}
349
/*
 * Parse one "riscv,isa"-style string (e.g. "rv64imafdc_zicsr_zba") and
 * record the discovered extensions.
 *
 * @this_hwcap: accumulates COMPAT_HWCAP_ISA_* bits for single-letter exts
 * @isainfo:    per-hart ISA bitmap to populate
 * @isa2hwcap:  maps an 'a'-relative single-letter index to its hwcap bit
 * @isa:        ISA string; the leading "rv32"/"rv64" was validated earlier
 */
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
					  unsigned long *isa2hwcap, const char *isa)
{
	/*
	 * For all possible cpus, we have already validated in
	 * the boot process that they at least contain "rv" and
	 * whichever of "32"/"64" this kernel supports, and so this
	 * section can be skipped.
	 */
	isa += 4;

	while (*isa) {
		const char *ext = isa++;
		const char *ext_end = isa;
		bool ext_long = false, ext_err = false;

		switch (*ext) {
		case 's':
			/*
			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
			 * No need to set the bit in riscv_isa as 's' & 'u' are
			 * not valid ISA extensions. It works unless the first
			 * multi-letter extension in the ISA string begins with
			 * "Su" and is not prefixed with an underscore.
			 */
			if (ext[-1] != '_' && ext[1] == 'u') {
				++isa;
				ext_err = true;
				break;
			}
			fallthrough;
		case 'S':
		case 'x':
		case 'X':
		case 'z':
		case 'Z':
			/*
			 * Before attempting to parse the extension itself, we find its end.
			 * As multi-letter extensions must be split from other multi-letter
			 * extensions with an "_", the end of a multi-letter extension will
			 * either be the null character or the "_" at the start of the next
			 * multi-letter extension.
			 *
			 * Next, as the extensions version is currently ignored, we
			 * eliminate that portion. This is done by parsing backwards from
			 * the end of the extension, removing any numbers. This may be a
			 * major or minor number however, so the process is repeated if a
			 * minor number was found.
			 *
			 * ext_end is intended to represent the first character *after* the
			 * name portion of an extension, but will be decremented to the last
			 * character itself while eliminating the extensions version number.
			 * A simple re-increment solves this problem.
			 */
			ext_long = true;
			for (; *isa && *isa != '_'; ++isa)
				if (unlikely(!isalnum(*isa)))
					ext_err = true;

			ext_end = isa;
			if (unlikely(ext_err))
				break;

			/* No trailing digits: the token is the bare name. */
			if (!isdigit(ext_end[-1]))
				break;

			/* Strip the trailing (major or minor) version number. */
			while (isdigit(*--ext_end))
				;

			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
				++ext_end;
				break;
			}

			/* "<major>p<minor>" form: strip the major number too. */
			while (isdigit(*--ext_end))
				;

			++ext_end;
			break;
		default:
			/*
			 * Things are a little easier for single-letter extensions, as they
			 * are parsed forwards.
			 *
			 * After checking that our starting position is valid, we need to
			 * ensure that, when isa was incremented at the start of the loop,
			 * that it arrived at the start of the next extension.
			 *
			 * If we are already on a non-digit, there is nothing to do. Either
			 * we have a multi-letter extension's _, or the start of an
			 * extension.
			 *
			 * Otherwise we have found the current extension's major version
			 * number. Parse past it, and a subsequent p/minor version number
			 * if present. The `p` extension must not appear immediately after
			 * a number, so there is no fear of missing it.
			 *
			 */
			if (unlikely(!isalpha(*ext))) {
				ext_err = true;
				break;
			}

			if (!isdigit(*isa))
				break;

			while (isdigit(*++isa))
				;

			if (tolower(*isa) != 'p')
				break;

			if (!isdigit(*++isa)) {
				--isa;
				break;
			}

			while (isdigit(*++isa))
				;

			break;
		}

		/*
		 * The parser expects that at the start of an iteration isa points to the
		 * first character of the next extension. As we stop parsing an extension
		 * on meeting a non-alphanumeric character, an extra increment is needed
		 * where the succeeding extension is a multi-letter prefixed with an "_".
		 */
		if (*isa == '_')
			++isa;

		if (unlikely(ext_err))
			continue;
		if (!ext_long) {
			int nr = tolower(*ext) - 'a';

			if (riscv_isa_extension_check(nr)) {
				*this_hwcap |= isa2hwcap[nr];
				set_bit(nr, isainfo->isa);
			}
		} else {
			/* Multi-letter token: match it against the known table. */
			for (int i = 0; i < riscv_isa_ext_count; i++)
				match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
		}
	}
}
497
/*
 * Populate hart_isa[] and elf_hwcap by parsing each possible hart's ISA
 * string, sourced from the ACPI RHCT table when ACPI is enabled, or the
 * (deprecated) devicetree "riscv,isa" property otherwise.
 */
static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
	struct device_node *node;
	const char *isa;
	int rc;
	struct acpi_table_header *rhct;
	acpi_status status;
	unsigned int cpu;

	if (!acpi_disabled) {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;
	}

	for_each_possible_cpu(cpu) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];
		unsigned long this_hwcap = 0;

		if (acpi_disabled) {
			node = of_cpu_device_node_get(cpu);
			if (!node) {
				pr_warn("Unable to find cpu node\n");
				continue;
			}

			rc = of_property_read_string(node, "riscv,isa", &isa);
			of_node_put(node);
			if (rc) {
				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
				continue;
			}
		} else {
			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
			if (rc < 0) {
				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
				continue;
			}
		}

		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);

		/*
		 * These ones were as they were part of the base ISA when the
		 * port & dt-bindings were upstreamed, and so can be set
		 * unconditionally where `i` is in riscv,isa on DT systems.
		 */
		if (acpi_disabled) {
			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

		/*
		 * "V" in ISA strings is ambiguous in practice: it should mean
		 * just the standard V-1.0 but vendors aren't well behaved.
		 * Many vendors with T-Head CPU cores which implement the 0.7.1
		 * version of the vector specification put "v" into their DTs.
		 * CPU cores with the ratified spec will contain non-zero
		 * marchid.
		 */
		if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
		    riscv_cached_marchid(cpu) == 0x0) {
			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
		}

		/*
		 * All "okay" hart should have same isa. Set HWCAP based on
		 * common capabilities of every "okay" hart, in case they don't
		 * have.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		/* Host bitmap is the intersection across all parsed harts. */
		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	/* Release the RHCT table reference taken above (ACPI path only). */
	if (!acpi_disabled && rhct)
		acpi_put_table((struct acpi_table_header *)rhct);
}
585
586static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
587{
588 unsigned int cpu;
589
590 for_each_possible_cpu(cpu) {
591 unsigned long this_hwcap = 0;
592 struct device_node *cpu_node;
593 struct riscv_isainfo *isainfo = &hart_isa[cpu];
594
595 cpu_node = of_cpu_device_node_get(cpu);
596 if (!cpu_node) {
597 pr_warn("Unable to find cpu node\n");
598 continue;
599 }
600
601 if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
602 of_node_put(cpu_node);
603 continue;
604 }
605
606 for (int i = 0; i < riscv_isa_ext_count; i++) {
607 const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
608
609 if (of_property_match_string(cpu_node, "riscv,isa-extensions",
610 ext->property) < 0)
611 continue;
612
613 if (ext->subset_ext_size) {
614 for (int j = 0; j < ext->subset_ext_size; j++) {
615 if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
616 set_bit(ext->subset_ext_ids[j], isainfo->isa);
617 }
618 }
619
620 if (riscv_isa_extension_check(ext->id)) {
621 set_bit(ext->id, isainfo->isa);
622
623 /* Only single letter extensions get set in hwcap */
624 if (strnlen(riscv_isa_ext[i].name, 2) == 1)
625 this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
626 }
627 }
628
629 of_node_put(cpu_node);
630
631 /*
632 * All "okay" harts should have same isa. Set HWCAP based on
633 * common capabilities of every "okay" hart, in case they don't.
634 */
635 if (elf_hwcap)
636 elf_hwcap &= this_hwcap;
637 else
638 elf_hwcap = this_hwcap;
639
640 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
641 bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
642 else
643 bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
644 }
645
646 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
647 return -ENOENT;
648
649 return 0;
650}
651
#ifdef CONFIG_RISCV_ISA_FALLBACK
/* Fallback to the deprecated "riscv,isa" property is on by default. */
bool __initdata riscv_isa_fallback = true;
#else
/* Opt-in only: "riscv_isa_fallback" on the kernel command line enables it. */
bool __initdata riscv_isa_fallback;
static int __init riscv_isa_fallback_setup(char *__unused)
{
	riscv_isa_fallback = true;
	return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
#endif
663
664void __init riscv_fill_hwcap(void)
665{
666 char print_str[NUM_ALPHA_EXTS + 1];
667 unsigned long isa2hwcap[26] = {0};
668 int i, j;
669
670 isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
671 isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
672 isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
673 isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
674 isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
675 isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
676 isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
677
678 if (!acpi_disabled) {
679 riscv_fill_hwcap_from_isa_string(isa2hwcap);
680 } else {
681 int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
682
683 if (ret && riscv_isa_fallback) {
684 pr_info("Falling back to deprecated \"riscv,isa\"\n");
685 riscv_fill_hwcap_from_isa_string(isa2hwcap);
686 }
687 }
688
689 /*
690 * We don't support systems with F but without D, so mask those out
691 * here.
692 */
693 if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
694 pr_info("This kernel does not support systems with F but not D\n");
695 elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
696 }
697
698 if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
699 riscv_v_setup_vsize();
700 /*
701 * ISA string in device tree might have 'v' flag, but
702 * CONFIG_RISCV_ISA_V is disabled in kernel.
703 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
704 */
705 if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
706 elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
707 }
708
709 memset(print_str, 0, sizeof(print_str));
710 for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
711 if (riscv_isa[0] & BIT_MASK(i))
712 print_str[j++] = (char)('a' + i);
713 pr_info("riscv: base ISA extensions %s\n", print_str);
714
715 memset(print_str, 0, sizeof(print_str));
716 for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
717 if (elf_hwcap & BIT_MASK(i))
718 print_str[j++] = (char)('a' + i);
719 pr_info("riscv: ELF capabilities %s\n", print_str);
720}
721
722unsigned long riscv_get_elf_hwcap(void)
723{
724 unsigned long hwcap;
725
726 hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
727
728 if (!riscv_v_vstate_ctrl_user_allowed())
729 hwcap &= ~COMPAT_HWCAP_ISA_V;
730
731 return hwcap;
732}
733
/*
 * Benchmark misaligned word copies against misaligned byte copies on the
 * current CPU and record the winner in misaligned_access_speed and the
 * fast_misaligned_access mask.
 *
 * @param: a pre-allocated page group (MISALIGNED_BUFFER_ORDER) used as the
 *         copy buffer.
 * Always returns 0 so it can be used with smp_call_on_cpu().
 */
static int check_unaligned_access(void *param)
{
	int cpu = smp_processor_id();
	u64 start_cycles, end_cycles;
	u64 word_cycles;
	u64 byte_cycles;
	int ratio;
	unsigned long start_jiffies, now;
	struct page *page = param;
	void *dst;
	void *src;
	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;

	/* Nothing to measure if firmware emulates misaligned accesses. */
	if (check_unaligned_access_emulated(cpu))
		return 0;

	/* Make an unaligned destination buffer. */
	dst = (void *)((unsigned long)page_address(page) | 0x1);
	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
	src += 2;
	word_cycles = -1ULL;
	/* Do a warmup. */
	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	preempt_disable();
	start_jiffies = jiffies;
	/* Align the measurement window to a jiffies edge. */
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	/*
	 * For a fixed amount of time, repeatedly try the function, and take
	 * the best time in cycles as the measurement.
	 */
	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		/* Ensure the CSR read can't reorder WRT to the copy. */
		mb();
		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		/* Ensure the copy ends before the end time is snapped. */
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < word_cycles)
			word_cycles = end_cycles - start_cycles;
	}

	byte_cycles = -1ULL;
	/* Warmup for the byte-wise variant, then measure the same way. */
	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
	start_jiffies = jiffies;
	while ((now = jiffies) == start_jiffies)
		cpu_relax();

	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
		start_cycles = get_cycles64();
		mb();
		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
		mb();
		end_cycles = get_cycles64();
		if ((end_cycles - start_cycles) < byte_cycles)
			byte_cycles = end_cycles - start_cycles;
	}

	preempt_enable();

	/* Don't divide by zero. */
	if (!word_cycles || !byte_cycles) {
		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
			cpu);

		return 0;
	}

	if (word_cycles < byte_cycles)
		speed = RISCV_HWPROBE_MISALIGNED_FAST;

	ratio = div_u64((byte_cycles * 100), word_cycles);
	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
		cpu,
		ratio / 100,
		ratio % 100,
		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");

	per_cpu(misaligned_access_speed, cpu) = speed;

	/*
	 * Set the value of fast_misaligned_access of a CPU. These operations
	 * are atomic to avoid race conditions.
	 */
	if (speed == RISCV_HWPROBE_MISALIGNED_FAST)
		cpumask_set_cpu(cpu, &fast_misaligned_access);
	else
		cpumask_clear_cpu(cpu, &fast_misaligned_access);

	return 0;
}
828
/*
 * on_each_cpu() helper: run the misaligned-access benchmark on every CPU
 * except the boot CPU, which is measured separately.
 */
static void check_unaligned_access_nonboot_cpu(void *param)
{
	struct page **pages = param;
	unsigned int cpu = smp_processor_id();

	if (cpu != 0)
		check_unaligned_access(pages[cpu]);
}
837
838DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
839
840static void modify_unaligned_access_branches(cpumask_t *mask, int weight)
841{
842 if (cpumask_weight(mask) == weight)
843 static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key);
844 else
845 static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key);
846}
847
848static void set_unaligned_access_static_branches_except_cpu(int cpu)
849{
850 /*
851 * Same as set_unaligned_access_static_branches, except excludes the
852 * given CPU from the result. When a CPU is hotplugged into an offline
853 * state, this function is called before the CPU is set to offline in
854 * the cpumask, and thus the CPU needs to be explicitly excluded.
855 */
856
857 cpumask_t fast_except_me;
858
859 cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask);
860 cpumask_clear_cpu(cpu, &fast_except_me);
861
862 modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1);
863}
864
865static void set_unaligned_access_static_branches(void)
866{
867 /*
868 * This will be called after check_unaligned_access_all_cpus so the
869 * result of unaligned access speed for all CPUs will be available.
870 *
871 * To avoid the number of online cpus changing between reading
872 * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be
873 * held before calling this function.
874 */
875
876 cpumask_t fast_and_online;
877
878 cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask);
879
880 modify_unaligned_access_branches(&fast_and_online, num_online_cpus());
881}
882
/*
 * Initcall wrapper: the static-branch update must see a stable online
 * mask, so take the CPUs read lock around it (see the comment on
 * set_unaligned_access_static_branches()).
 */
static int lock_and_set_unaligned_access_static_branch(void)
{
	cpus_read_lock();
	set_unaligned_access_static_branches();
	cpus_read_unlock();

	return 0;
}

arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
893
/*
 * Hotplug "online" callback: measure misaligned access speed for a
 * newly-onlined CPU (unless it was already measured) and refresh the
 * fast-access static branch.
 */
static int riscv_online_cpu(unsigned int cpu)
{
	/*
	 * NOTE(review): buf does not appear to need static storage —
	 * presumably harmless if hotplug callbacks are serialized, but
	 * confirm before relying on it.
	 */
	static struct page *buf;

	/* We are already set since the last check */
	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
		goto exit;

	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
	if (!buf) {
		pr_warn("Allocation failure, not measuring misaligned performance\n");
		return -ENOMEM;
	}

	check_unaligned_access(buf);
	__free_pages(buf, MISALIGNED_BUFFER_ORDER);

exit:
	set_unaligned_access_static_branches();

	return 0;
}
916
/*
 * Hotplug "offline" callback: re-evaluate the fast-misaligned static
 * branch as if @cpu were already gone (it is still in cpu_online_mask at
 * this point, hence the _except_cpu variant).
 */
static int riscv_offline_cpu(unsigned int cpu)
{
	set_unaligned_access_static_branches_except_cpu(cpu);

	return 0;
}
923
924/* Measure unaligned access on all CPUs present at boot in parallel. */
925static int check_unaligned_access_all_cpus(void)
926{
927 unsigned int cpu;
928 unsigned int cpu_count = num_possible_cpus();
929 struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
930 GFP_KERNEL);
931
932 if (!bufs) {
933 pr_warn("Allocation failure, not measuring misaligned performance\n");
934 return 0;
935 }
936
937 /*
938 * Allocate separate buffers for each CPU so there's no fighting over
939 * cache lines.
940 */
941 for_each_cpu(cpu, cpu_online_mask) {
942 bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
943 if (!bufs[cpu]) {
944 pr_warn("Allocation failure, not measuring misaligned performance\n");
945 goto out;
946 }
947 }
948
949 /* Check everybody except 0, who stays behind to tend jiffies. */
950 on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
951
952 /* Check core 0. */
953 smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
954
955 /*
956 * Setup hotplug callbacks for any new CPUs that come online or go
957 * offline.
958 */
959 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
960 riscv_online_cpu, riscv_offline_cpu);
961
962out:
963 unaligned_emulation_finish();
964 for_each_cpu(cpu, cpu_online_mask) {
965 if (bufs[cpu])
966 __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
967 }
968
969 kfree(bufs);
970 return 0;
971}
972
973arch_initcall(check_unaligned_access_all_cpus);
974
/*
 * Enable user-mode features gated by the envcfg CSR on the current CPU:
 * currently just ENVCFG_CBZE (cbo.zero) when Zicboz is available.
 */
void riscv_user_isa_enable(void)
{
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
		csr_set(CSR_ENVCFG, ENVCFG_CBZE);
}
980
981#ifdef CONFIG_RISCV_ALTERNATIVE
982/*
983 * Alternative patch sites consider 48 bits when determining when to patch
984 * the old instruction sequence with the new. These bits are broken into a
985 * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
986 * patch site is for an erratum, identified by the 32-bit patch ID. When
987 * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
988 * further break down patch ID into two 16-bit numbers. The lower 16 bits
989 * are the cpufeature ID and the upper 16 bits are used for a value specific
990 * to the cpufeature and patch site. If the upper 16 bits are zero, then it
991 * implies no specific value is specified. cpufeatures that want to control
992 * patching on a per-site basis will provide non-zero values and implement
993 * checks here. The checks return true when patching should be done, and
994 * false otherwise.
995 */
996static bool riscv_cpufeature_patch_check(u16 id, u16 value)
997{
998 if (!value)
999 return true;
1000
1001 switch (id) {
1002 case RISCV_ISA_EXT_ZICBOZ:
1003 /*
1004 * Zicboz alternative applications provide the maximum
1005 * supported block size order, or zero when it doesn't
1006 * matter. If the current block size exceeds the maximum,
1007 * then the alternative cannot be applied.
1008 */
1009 return riscv_cboz_block_size <= (1U << value);
1010 }
1011
1012 return false;
1013}
1014
/*
 * Apply the cpufeature alternative patch sites in [@begin, @end) for the
 * given patching @stage. Sites whose extension is absent from the common
 * ISA bitmap, or whose per-site value check fails, are left untouched.
 */
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
						  struct alt_entry *end,
						  unsigned int stage)
{
	struct alt_entry *alt;
	void *oldptr, *altptr;
	u16 id, value;

	/* cpufeature alternatives are not applied during early boot. */
	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return;

	for (alt = begin; alt < end; alt++) {
		/* A non-zero vendor ID marks an erratum site, not a cpufeature. */
		if (alt->vendor_id != 0)
			continue;

		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);

		if (id >= RISCV_ISA_EXT_MAX) {
			WARN(1, "This extension id:%d is not in ISA extension list", id);
			continue;
		}

		/* NULL bitmap selects the common (all-harts) ISA bitmap. */
		if (!__riscv_isa_extension_available(NULL, id))
			continue;

		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
		if (!riscv_cpufeature_patch_check(id, value))
			continue;

		oldptr = ALT_OLD_PTR(alt);
		altptr = ALT_ALT_PTR(alt);

		/* text_mutex serializes modifications of kernel text. */
		mutex_lock(&text_mutex);
		patch_text_nosync(oldptr, altptr, alt->alt_len);
		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
		mutex_unlock(&text_mutex);
	}
}
1053#endif
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copied from arch/arm64/kernel/cpufeature.c
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Copyright (C) 2017 SiFive
7 */
8
9#include <linux/acpi.h>
10#include <linux/bitmap.h>
11#include <linux/cpu.h>
12#include <linux/cpuhotplug.h>
13#include <linux/ctype.h>
14#include <linux/log2.h>
15#include <linux/memory.h>
16#include <linux/module.h>
17#include <linux/of.h>
18#include <asm/acpi.h>
19#include <asm/alternative.h>
20#include <asm/cacheflush.h>
21#include <asm/cpufeature.h>
22#include <asm/hwcap.h>
23#include <asm/patch.h>
24#include <asm/processor.h>
25#include <asm/sbi.h>
26#include <asm/vector.h>
27
28#define NUM_ALPHA_EXTS ('z' - 'a' + 1)
29
30unsigned long elf_hwcap __read_mostly;
31
32/* Host ISA bitmap */
33static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
34
35/* Per-cpu ISA extensions. */
36struct riscv_isainfo hart_isa[NR_CPUS];
37
38/**
39 * riscv_isa_extension_base() - Get base extension word
40 *
41 * @isa_bitmap: ISA bitmap to use
42 * Return: base extension word as unsigned long value
43 *
44 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
45 */
46unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
47{
48 if (!isa_bitmap)
49 return riscv_isa[0];
50 return isa_bitmap[0];
51}
52EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
53
54/**
55 * __riscv_isa_extension_available() - Check whether given extension
56 * is available or not
57 *
58 * @isa_bitmap: ISA bitmap to use
59 * @bit: bit position of the desired extension
60 * Return: true or false
61 *
62 * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
63 */
64bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
65{
66 const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
67
68 if (bit >= RISCV_ISA_EXT_MAX)
69 return false;
70
71 return test_bit(bit, bmap) ? true : false;
72}
73EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
74
75static bool riscv_isa_extension_check(int id)
76{
77 switch (id) {
78 case RISCV_ISA_EXT_ZICBOM:
79 if (!riscv_cbom_block_size) {
80 pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
81 return false;
82 } else if (!is_power_of_2(riscv_cbom_block_size)) {
83 pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
84 return false;
85 }
86 return true;
87 case RISCV_ISA_EXT_ZICBOZ:
88 if (!riscv_cboz_block_size) {
89 pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
90 return false;
91 } else if (!is_power_of_2(riscv_cboz_block_size)) {
92 pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
93 return false;
94 }
95 return true;
96 case RISCV_ISA_EXT_INVALID:
97 return false;
98 }
99
100 return true;
101}
102
/*
 * Common initializer for a riscv_isa_ext_data entry. The extension name is
 * stringified for both the /proc/cpuinfo name and the devicetree property.
 */
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {	\
	.name = #_name,								\
	.property = #_name,							\
	.id = _id,								\
	.subset_ext_ids = _subset_exts,						\
	.subset_ext_size = _subset_exts_size					\
}

/* Plain extension: no bundled/subset extensions. */
#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)

/* Used to declare pure "lasso" extension (Zk for instance) */
#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
	_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))

/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
120
/* Extensions enabled when "zk" is present (see match_isa_ext()). */
static const unsigned int riscv_zk_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKR,
	RISCV_ISA_EXT_ZKT,
};

/* Extensions enabled when "zkn" is present. */
static const unsigned int riscv_zkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZBKX,
	RISCV_ISA_EXT_ZKND,
	RISCV_ISA_EXT_ZKNE,
	RISCV_ISA_EXT_ZKNH,
};

/* Extensions enabled when "zks" is present. */
static const unsigned int riscv_zks_bundled_exts[] = {
	RISCV_ISA_EXT_ZBKB,
	RISCV_ISA_EXT_ZBKC,
	RISCV_ISA_EXT_ZKSED,
	RISCV_ISA_EXT_ZKSH
};

/* Common constituents of the Zvkn* bundles below. */
#define RISCV_ISA_EXT_ZVKN	\
	RISCV_ISA_EXT_ZVKNED,	\
	RISCV_ISA_EXT_ZVKNHB,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvkn_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN
};

static const unsigned int riscv_zvknc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvkng_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKN,
	RISCV_ISA_EXT_ZVKG
};

/* Common constituents of the Zvks* bundles below. */
#define RISCV_ISA_EXT_ZVKS	\
	RISCV_ISA_EXT_ZVKSED,	\
	RISCV_ISA_EXT_ZVKSH,	\
	RISCV_ISA_EXT_ZVKB,	\
	RISCV_ISA_EXT_ZVKT

static const unsigned int riscv_zvks_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS
};

static const unsigned int riscv_zvksc_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVBC
};

static const unsigned int riscv_zvksg_bundled_exts[] = {
	RISCV_ISA_EXT_ZVKS,
	RISCV_ISA_EXT_ZVKG
};

/* Zvbb is a superset of Zvkb. */
static const unsigned int riscv_zvbb_exts[] = {
	RISCV_ISA_EXT_ZVKB
};

/*
 * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V
 * privileged ISA, the existence of the CSRs is implied by any extension which
 * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the
 * existence of the CSR, and treat it as a subset of those other extensions.
 */
static const unsigned int riscv_xlinuxenvcfg_exts[] = {
	RISCV_ISA_EXT_XLINUXENVCFG
};
200
201/*
202 * The canonical order of ISA extension names in the ISA string is defined in
203 * chapter 27 of the unprivileged specification.
204 *
205 * Ordinarily, for in-kernel data structures, this order is unimportant but
206 * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
207 *
208 * The specification uses vague wording, such as should, when it comes to
209 * ordering, so for our purposes the following rules apply:
210 *
211 * 1. All multi-letter extensions must be separated from other extensions by an
212 * underscore.
213 *
214 * 2. Additional standard extensions (starting with 'Z') must be sorted after
215 * single-letter extensions and before any higher-privileged extensions.
216 *
217 * 3. The first letter following the 'Z' conventionally indicates the most
218 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
219 * If multiple 'Z' extensions are named, they must be ordered first by
220 * category, then alphabetically within a category.
221 *
 * 4. Standard supervisor-level extensions (starting with 'S') must be listed
 *    after standard unprivileged extensions. If multiple supervisor-level
 *    extensions are listed, they must be ordered alphabetically.
 *
 * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
 *    after any lower-privileged, standard extensions. If multiple
 *    machine-level extensions are listed, they must be ordered
 *    alphabetically.
 *
 * 6. Non-standard extensions (starting with 'X') must be listed after all
 *    standard extensions. If multiple non-standard extensions are listed, they
 *    must be ordered alphabetically.
234 *
235 * An example string following the order is:
236 * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
237 *
238 * New entries to this struct should follow the ordering rules described above.
239 */
const struct riscv_isa_ext_data riscv_isa_ext[] = {
	/* Single-letter extensions, in canonical order. */
	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
	/* Additional standard ('Z') extensions, ordered per the rules above. */
	__RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts),
	__RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts),
	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
	__RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
	__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
	__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
	__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
	__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
	__RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
	__RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
	__RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
	__RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
	__RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
	__RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
	__RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
	__RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
	__RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
	__RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
	__RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
	__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
	__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
	__RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
	__RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
	__RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
	__RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
	__RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
	__RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
	__RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
	__RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
	__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
	__RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
	__RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
	__RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
	__RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
	__RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
	__RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
	/* Privileged ('S'-prefixed) extensions. */
	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
	__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
	/* Non-standard ('X') extensions come last. */
	__RISCV_ISA_EXT_DATA(xandespmu, RISCV_ISA_EXT_XANDESPMU),
};

/* Number of entries in riscv_isa_ext[]. */
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
311
312static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
313 const char *name_end, struct riscv_isainfo *isainfo)
314{
315 if ((name_end - name == strlen(ext->name)) &&
316 !strncasecmp(name, ext->name, name_end - name)) {
317 /*
318 * If this is a bundle, enable all the ISA extensions that
319 * comprise the bundle.
320 */
321 if (ext->subset_ext_size) {
322 for (int i = 0; i < ext->subset_ext_size; i++) {
323 if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
324 set_bit(ext->subset_ext_ids[i], isainfo->isa);
325 }
326 }
327
328 /*
329 * This is valid even for bundle extensions which uses the RISCV_ISA_EXT_INVALID id
330 * (rejected by riscv_isa_extension_check()).
331 */
332 if (riscv_isa_extension_check(ext->id))
333 set_bit(ext->id, isainfo->isa);
334 }
335}
336
/*
 * Parse a "riscv,isa"-style string (e.g. "rv64imac_zicsr_zifencei") into
 * the per-hart ISA bitmap @isainfo, accumulating single-letter hwcap bits
 * into @this_hwcap via the @isa2hwcap translation table. Extension version
 * numbers are recognized but ignored; malformed extensions are skipped.
 */
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
					  unsigned long *isa2hwcap, const char *isa)
{
	/*
	 * For all possible cpus, we have already validated in
	 * the boot process that they at least contain "rv" and
	 * whichever of "32"/"64" this kernel supports, and so this
	 * section can be skipped.
	 */
	isa += 4;

	while (*isa) {
		const char *ext = isa++;
		const char *ext_end = isa;
		bool ext_long = false, ext_err = false;

		switch (*ext) {
		case 's':
			/*
			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
			 * No need to set the bit in riscv_isa as 's' & 'u' are
			 * not valid ISA extensions. It works unless the first
			 * multi-letter extension in the ISA string begins with
			 * "Su" and is not prefixed with an underscore.
			 */
			if (ext[-1] != '_' && ext[1] == 'u') {
				++isa;
				ext_err = true;
				break;
			}
			fallthrough;
		case 'S':
		case 'x':
		case 'X':
		case 'z':
		case 'Z':
			/*
			 * Before attempting to parse the extension itself, we find its end.
			 * As multi-letter extensions must be split from other multi-letter
			 * extensions with an "_", the end of a multi-letter extension will
			 * either be the null character or the "_" at the start of the next
			 * multi-letter extension.
			 *
			 * Next, as the extensions version is currently ignored, we
			 * eliminate that portion. This is done by parsing backwards from
			 * the end of the extension, removing any numbers. This may be a
			 * major or minor number however, so the process is repeated if a
			 * minor number was found.
			 *
			 * ext_end is intended to represent the first character *after* the
			 * name portion of an extension, but will be decremented to the last
			 * character itself while eliminating the extensions version number.
			 * A simple re-increment solves this problem.
			 */
			ext_long = true;
			for (; *isa && *isa != '_'; ++isa)
				if (unlikely(!isalnum(*isa)))
					ext_err = true;

			ext_end = isa;
			if (unlikely(ext_err))
				break;

			/* No trailing digits: the name has no version suffix. */
			if (!isdigit(ext_end[-1]))
				break;

			while (isdigit(*--ext_end))
				;

			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
				++ext_end;
				break;
			}

			/* "<major>p<minor>" form: strip the major number too. */
			while (isdigit(*--ext_end))
				;

			++ext_end;
			break;
		default:
			/*
			 * Things are a little easier for single-letter extensions, as they
			 * are parsed forwards.
			 *
			 * After checking that our starting position is valid, we need to
			 * ensure that, when isa was incremented at the start of the loop,
			 * that it arrived at the start of the next extension.
			 *
			 * If we are already on a non-digit, there is nothing to do. Either
			 * we have a multi-letter extension's _, or the start of an
			 * extension.
			 *
			 * Otherwise we have found the current extension's major version
			 * number. Parse past it, and a subsequent p/minor version number
			 * if present. The `p` extension must not appear immediately after
			 * a number, so there is no fear of missing it.
			 *
			 */
			if (unlikely(!isalpha(*ext))) {
				ext_err = true;
				break;
			}

			if (!isdigit(*isa))
				break;

			while (isdigit(*++isa))
				;

			if (tolower(*isa) != 'p')
				break;

			if (!isdigit(*++isa)) {
				--isa;
				break;
			}

			while (isdigit(*++isa))
				;

			break;
		}

		/*
		 * The parser expects that at the start of an iteration isa points to the
		 * first character of the next extension. As we stop parsing an extension
		 * on meeting a non-alphanumeric character, an extra increment is needed
		 * where the succeeding extension is a multi-letter prefixed with an "_".
		 */
		if (*isa == '_')
			++isa;

		if (unlikely(ext_err))
			continue;
		if (!ext_long) {
			int nr = tolower(*ext) - 'a';

			if (riscv_isa_extension_check(nr)) {
				*this_hwcap |= isa2hwcap[nr];
				set_bit(nr, isainfo->isa);
			}
		} else {
			/* Multi-letter: look the name up in the extension table. */
			for (int i = 0; i < riscv_isa_ext_count; i++)
				match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
		}
	}
}
484
/*
 * Populate the per-hart ISA bitmaps, the common riscv_isa bitmap and
 * elf_hwcap from "riscv,isa" strings: read from the devicetree when ACPI
 * is disabled, or from the ACPI RHCT table otherwise.
 */
static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
	struct device_node *node;
	const char *isa;
	int rc;
	struct acpi_table_header *rhct;
	acpi_status status;
	unsigned int cpu;
	u64 boot_vendorid;
	u64 boot_archid;

	if (!acpi_disabled) {
		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
		if (ACPI_FAILURE(status))
			return;
	}

	boot_vendorid = riscv_get_mvendorid();
	boot_archid = riscv_get_marchid();

	for_each_possible_cpu(cpu) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];
		unsigned long this_hwcap = 0;

		if (acpi_disabled) {
			node = of_cpu_device_node_get(cpu);
			if (!node) {
				pr_warn("Unable to find cpu node\n");
				continue;
			}

			rc = of_property_read_string(node, "riscv,isa", &isa);
			of_node_put(node);
			if (rc) {
				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
				continue;
			}
		} else {
			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
			if (rc < 0) {
				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
				continue;
			}
		}

		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);

		/*
		 * These ones were as they were part of the base ISA when the
		 * port & dt-bindings were upstreamed, and so can be set
		 * unconditionally where `i` is in riscv,isa on DT systems.
		 */
		if (acpi_disabled) {
			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
		}

		/*
		 * "V" in ISA strings is ambiguous in practice: it should mean
		 * just the standard V-1.0 but vendors aren't well behaved.
		 * Many vendors with T-Head CPU cores which implement the 0.7.1
		 * version of the vector specification put "v" into their DTs.
		 * CPU cores with the ratified spec will contain non-zero
		 * marchid.
		 */
		if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
		}

		/*
		 * All "okay" hart should have same isa. Set HWCAP based on
		 * common capabilities of every "okay" hart, in case they don't
		 * have.
		 */
		if (elf_hwcap)
			elf_hwcap &= this_hwcap;
		else
			elf_hwcap = this_hwcap;

		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
		else
			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
	}

	/* rhct is only initialized above when ACPI is enabled. */
	if (!acpi_disabled && rhct)
		acpi_put_table((struct acpi_table_header *)rhct);
}
576
577static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
578{
579 unsigned int cpu;
580
581 for_each_possible_cpu(cpu) {
582 unsigned long this_hwcap = 0;
583 struct device_node *cpu_node;
584 struct riscv_isainfo *isainfo = &hart_isa[cpu];
585
586 cpu_node = of_cpu_device_node_get(cpu);
587 if (!cpu_node) {
588 pr_warn("Unable to find cpu node\n");
589 continue;
590 }
591
592 if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
593 of_node_put(cpu_node);
594 continue;
595 }
596
597 for (int i = 0; i < riscv_isa_ext_count; i++) {
598 const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
599
600 if (of_property_match_string(cpu_node, "riscv,isa-extensions",
601 ext->property) < 0)
602 continue;
603
604 if (ext->subset_ext_size) {
605 for (int j = 0; j < ext->subset_ext_size; j++) {
606 if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
607 set_bit(ext->subset_ext_ids[j], isainfo->isa);
608 }
609 }
610
611 if (riscv_isa_extension_check(ext->id)) {
612 set_bit(ext->id, isainfo->isa);
613
614 /* Only single letter extensions get set in hwcap */
615 if (strnlen(riscv_isa_ext[i].name, 2) == 1)
616 this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
617 }
618 }
619
620 of_node_put(cpu_node);
621
622 /*
623 * All "okay" harts should have same isa. Set HWCAP based on
624 * common capabilities of every "okay" hart, in case they don't.
625 */
626 if (elf_hwcap)
627 elf_hwcap &= this_hwcap;
628 else
629 elf_hwcap = this_hwcap;
630
631 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
632 bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
633 else
634 bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
635 }
636
637 if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
638 return -ENOENT;
639
640 return 0;
641}
642
#ifdef CONFIG_RISCV_ISA_FALLBACK
/* Fall back to the deprecated "riscv,isa" property by default. */
bool __initdata riscv_isa_fallback = true;
#else
bool __initdata riscv_isa_fallback;
/* Allow opting into the "riscv,isa" fallback from the kernel command line. */
static int __init riscv_isa_fallback_setup(char *__unused)
{
	riscv_isa_fallback = true;
	return 1;
}
early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
#endif
654
/*
 * Discover the ISA of all harts and derive elf_hwcap. Uses the ACPI path
 * when ACPI is enabled; otherwise prefers the "riscv,isa-extensions" DT
 * property and optionally falls back to the deprecated "riscv,isa" string.
 */
void __init riscv_fill_hwcap(void)
{
	char print_str[NUM_ALPHA_EXTS + 1];
	unsigned long isa2hwcap[26] = {0};
	int i, j;

	/* Map single-letter extension indices ('a'-relative) to hwcap bits. */
	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
	isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;

	if (!acpi_disabled) {
		riscv_fill_hwcap_from_isa_string(isa2hwcap);
	} else {
		int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);

		if (ret && riscv_isa_fallback) {
			pr_info("Falling back to deprecated \"riscv,isa\"\n");
			riscv_fill_hwcap_from_isa_string(isa2hwcap);
		}
	}

	/*
	 * We don't support systems with F but without D, so mask those out
	 * here.
	 */
	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
		pr_info("This kernel does not support systems with F but not D\n");
		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
	}

	if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
		riscv_v_setup_vsize();
		/*
		 * ISA string in device tree might have 'v' flag, but
		 * CONFIG_RISCV_ISA_V is disabled in kernel.
		 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
		 */
		if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
	}

	/* Log the base (single-letter) extensions common to all harts. */
	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (riscv_isa[0] & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: base ISA extensions %s\n", print_str);

	/* Log the subset exposed to user space via ELF hwcap. */
	memset(print_str, 0, sizeof(print_str));
	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
		if (elf_hwcap & BIT_MASK(i))
			print_str[j++] = (char)('a' + i);
	pr_info("riscv: ELF capabilities %s\n", print_str);
}
712
713unsigned long riscv_get_elf_hwcap(void)
714{
715 unsigned long hwcap;
716
717 hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
718
719 if (!riscv_v_vstate_ctrl_user_allowed())
720 hwcap &= ~COMPAT_HWCAP_ISA_V;
721
722 return hwcap;
723}
724
725void riscv_user_isa_enable(void)
726{
727 if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
728 csr_set(CSR_ENVCFG, ENVCFG_CBZE);
729}
730
731#ifdef CONFIG_RISCV_ALTERNATIVE
732/*
733 * Alternative patch sites consider 48 bits when determining when to patch
734 * the old instruction sequence with the new. These bits are broken into a
735 * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
736 * patch site is for an erratum, identified by the 32-bit patch ID. When
737 * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
738 * further break down patch ID into two 16-bit numbers. The lower 16 bits
739 * are the cpufeature ID and the upper 16 bits are used for a value specific
740 * to the cpufeature and patch site. If the upper 16 bits are zero, then it
741 * implies no specific value is specified. cpufeatures that want to control
742 * patching on a per-site basis will provide non-zero values and implement
743 * checks here. The checks return true when patching should be done, and
744 * false otherwise.
745 */
746static bool riscv_cpufeature_patch_check(u16 id, u16 value)
747{
748 if (!value)
749 return true;
750
751 switch (id) {
752 case RISCV_ISA_EXT_ZICBOZ:
753 /*
754 * Zicboz alternative applications provide the maximum
755 * supported block size order, or zero when it doesn't
756 * matter. If the current block size exceeds the maximum,
757 * then the alternative cannot be applied.
758 */
759 return riscv_cboz_block_size <= (1U << value);
760 }
761
762 return false;
763}
764
/*
 * Apply the cpufeature alternative patch sites in [@begin, @end) for the
 * given patching @stage. Sites for extensions missing from the common ISA
 * bitmap, or failing their per-site value check, are skipped.
 */
void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
						  struct alt_entry *end,
						  unsigned int stage)
{
	struct alt_entry *alt;
	void *oldptr, *altptr;
	u16 id, value;

	/* cpufeature alternatives are not applied during early boot. */
	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return;

	for (alt = begin; alt < end; alt++) {
		/* A non-zero vendor ID marks an erratum site, not a cpufeature. */
		if (alt->vendor_id != 0)
			continue;

		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);

		if (id >= RISCV_ISA_EXT_MAX) {
			WARN(1, "This extension id:%d is not in ISA extension list", id);
			continue;
		}

		/* NULL bitmap selects the common (all-harts) ISA bitmap. */
		if (!__riscv_isa_extension_available(NULL, id))
			continue;

		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
		if (!riscv_cpufeature_patch_check(id, value))
			continue;

		oldptr = ALT_OLD_PTR(alt);
		altptr = ALT_ALT_PTR(alt);

		/* text_mutex serializes modifications of kernel text. */
		mutex_lock(&text_mutex);
		patch_text_nosync(oldptr, altptr, alt->alt_len);
		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
		mutex_unlock(&text_mutex);
	}
}
803#endif