Loading...
/*
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
13
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>
18
19static void ci_leaf_init(struct cacheinfo *this_leaf,
20 struct device_node *node,
21 enum cache_type type, unsigned int level)
22{
23 this_leaf->of_node = node;
24 this_leaf->level = level;
25 this_leaf->type = type;
26 /* not a sector cache */
27 this_leaf->physical_line_partition = 1;
28 /* TODO: Add to DTS */
29 this_leaf->attributes =
30 CACHE_WRITE_BACK
31 | CACHE_READ_ALLOCATE
32 | CACHE_WRITE_ALLOCATE;
33}
34
35static int __init_cache_level(unsigned int cpu)
36{
37 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
38 struct device_node *np = of_cpu_device_node_get(cpu);
39 int levels = 0, leaves = 0, level;
40
41 if (of_property_read_bool(np, "cache-size"))
42 ++leaves;
43 if (of_property_read_bool(np, "i-cache-size"))
44 ++leaves;
45 if (of_property_read_bool(np, "d-cache-size"))
46 ++leaves;
47 if (leaves > 0)
48 levels = 1;
49
50 while ((np = of_find_next_cache_node(np))) {
51 if (!of_device_is_compatible(np, "cache"))
52 break;
53 if (of_property_read_u32(np, "cache-level", &level))
54 break;
55 if (level <= levels)
56 break;
57 if (of_property_read_bool(np, "cache-size"))
58 ++leaves;
59 if (of_property_read_bool(np, "i-cache-size"))
60 ++leaves;
61 if (of_property_read_bool(np, "d-cache-size"))
62 ++leaves;
63 levels = level;
64 }
65
66 this_cpu_ci->num_levels = levels;
67 this_cpu_ci->num_leaves = leaves;
68 return 0;
69}
70
71static int __populate_cache_leaves(unsigned int cpu)
72{
73 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
74 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
75 struct device_node *np = of_cpu_device_node_get(cpu);
76 int levels = 1, level = 1;
77
78 if (of_property_read_bool(np, "cache-size"))
79 ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
80 if (of_property_read_bool(np, "i-cache-size"))
81 ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
82 if (of_property_read_bool(np, "d-cache-size"))
83 ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
84
85 while ((np = of_find_next_cache_node(np))) {
86 if (!of_device_is_compatible(np, "cache"))
87 break;
88 if (of_property_read_u32(np, "cache-level", &level))
89 break;
90 if (level <= levels)
91 break;
92 if (of_property_read_bool(np, "cache-size"))
93 ci_leaf_init(this_leaf++, np, CACHE_TYPE_UNIFIED, level);
94 if (of_property_read_bool(np, "i-cache-size"))
95 ci_leaf_init(this_leaf++, np, CACHE_TYPE_INST, level);
96 if (of_property_read_bool(np, "d-cache-size"))
97 ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
98 levels = level;
99 }
100
101 return 0;
102}
103
/*
 * NOTE(review): these macros (from linux/cacheinfo.h) presumably emit the
 * non-underscore init_cache_level()/populate_cache_leaves() entry points
 * that invoke the __-prefixed helpers above on the target CPU via an SMP
 * call -- confirm against the header for this kernel version.
 */
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */
5
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/cacheinfo.h>
10
/* Optional platform hook for exposing extra per-leaf sysfs attributes. */
static struct riscv_cacheinfo_ops *rv_cache_ops;

/*
 * Register platform-specific cacheinfo callbacks.  Passing NULL (or never
 * calling this) leaves the generic behaviour in place.
 */
void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
{
	rv_cache_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
18
19const struct attribute_group *
20cache_get_priv_group(struct cacheinfo *this_leaf)
21{
22 if (rv_cache_ops && rv_cache_ops->get_priv_group)
23 return rv_cache_ops->get_priv_group(this_leaf);
24 return NULL;
25}
26
27static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
28{
29 /*
30 * Using raw_smp_processor_id() elides a preemptability check, but this
31 * is really indicative of a larger problem: the cacheinfo UABI assumes
32 * that cores have a homonogenous view of the cache hierarchy. That
33 * happens to be the case for the current set of RISC-V systems, but
34 * likely won't be true in general. Since there's no way to provide
35 * correct information for these systems via the current UABI we're
36 * just eliding the check for now.
37 */
38 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
39 struct cacheinfo *this_leaf;
40 int index;
41
42 for (index = 0; index < this_cpu_ci->num_leaves; index++) {
43 this_leaf = this_cpu_ci->info_list + index;
44 if (this_leaf->level == level && this_leaf->type == type)
45 return this_leaf;
46 }
47
48 return NULL;
49}
50
51uintptr_t get_cache_size(u32 level, enum cache_type type)
52{
53 struct cacheinfo *this_leaf = get_cacheinfo(level, type);
54
55 return this_leaf ? this_leaf->size : 0;
56}
57
58uintptr_t get_cache_geometry(u32 level, enum cache_type type)
59{
60 struct cacheinfo *this_leaf = get_cacheinfo(level, type);
61
62 return this_leaf ? (this_leaf->ways_of_associativity << 16 |
63 this_leaf->coherency_line_size) :
64 0;
65}
66
67static void ci_leaf_init(struct cacheinfo *this_leaf,
68 enum cache_type type, unsigned int level)
69{
70 this_leaf->level = level;
71 this_leaf->type = type;
72}
73
/*
 * Architecture hook: count the cache levels/leaves for @cpu via the
 * generic devicetree walk.
 * NOTE(review): ACPI systems presumably have num_levels/num_leaves
 * populated by the generic/ACPI path before populate_cache_leaves()
 * consumes them below -- confirm against the common cacheinfo code.
 */
int init_cache_level(unsigned int cpu)
{
	return init_of_cache_level(cpu);
}
78
79int populate_cache_leaves(unsigned int cpu)
80{
81 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
82 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
83 struct device_node *np, *prev;
84 int levels = 1, level = 1;
85
86 if (!acpi_disabled) {
87 int ret, fw_levels, split_levels;
88
89 ret = acpi_get_cache_info(cpu, &fw_levels, &split_levels);
90 if (ret)
91 return ret;
92
93 BUG_ON((split_levels > fw_levels) ||
94 (split_levels + fw_levels > this_cpu_ci->num_leaves));
95
96 for (; level <= this_cpu_ci->num_levels; level++) {
97 if (level <= split_levels) {
98 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
99 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
100 } else {
101 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
102 }
103 }
104 return 0;
105 }
106
107 np = of_cpu_device_node_get(cpu);
108 if (!np)
109 return -ENOENT;
110
111 if (of_property_present(np, "cache-size"))
112 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
113 if (of_property_present(np, "i-cache-size"))
114 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
115 if (of_property_present(np, "d-cache-size"))
116 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
117
118 prev = np;
119 while ((np = of_find_next_cache_node(np))) {
120 of_node_put(prev);
121 prev = np;
122 if (!of_device_is_compatible(np, "cache"))
123 break;
124 if (of_property_read_u32(np, "cache-level", &level))
125 break;
126 if (level <= levels)
127 break;
128 if (of_property_present(np, "cache-size"))
129 ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level);
130 if (of_property_present(np, "i-cache-size"))
131 ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
132 if (of_property_present(np, "d-cache-size"))
133 ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
134 levels = level;
135 }
136 of_node_put(np);
137
138 return 0;
139}