// SPDX-License-Identifier: GPL-2.0
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>

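/*
 * Encodings used with the ECAG query: the cache scope and cache type
 * reported in the topology descriptor, the attribute indications that
 * select what to extract, and the type indications that address the
 * data/instruction/unified part of a cache level.
 */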
enum {
        CACHE_SCOPE_NOTEXISTS,
        CACHE_SCOPE_PRIVATE,
        CACHE_SCOPE_SHARED,
        CACHE_SCOPE_RESERVED,
};

enum {
        CTYPE_SEPARATE,
        CTYPE_DATA,
        CTYPE_INSTRUCTION,
        CTYPE_UNIFIED,
};

enum {
        EXTRACT_TOPOLOGY,
        EXTRACT_LINE_SIZE,
        EXTRACT_SIZE,
        EXTRACT_ASSOCIATIVITY,
};

enum {
        CACHE_TI_UNIFIED = 0,
        CACHE_TI_DATA = 0,
        CACHE_TI_INSTRUCTION,
};

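/*
 * One topology descriptor byte per cache level: four reserved bits,
 * a two bit scope (CACHE_SCOPE_*) and a two bit type (CTYPE_*).
 */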
struct cache_info {
        unsigned char       : 4;
        unsigned char scope : 2;
        unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8
union cache_topology {
        struct cache_info ci[CACHE_MAX_LEVEL];
        unsigned long long raw;
};

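/*
 * Indexed by the generic enum cache_type; the empty strings correspond
 * to CACHE_TYPE_NOCACHE and CACHE_TYPE_SEPARATE, which never show up
 * as the type of an individual cache leaf.
 */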
static const char * const cache_type_string[] = {
        "",
        "Instruction",
        "Data",
        "",
        "Unified",
};

static const enum cache_type cache_type_map[] = {
        [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
        [CTYPE_DATA] = CACHE_TYPE_DATA,
        [CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
        [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

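/*
 * Print one line per cache leaf, using the leaves of an arbitrary online
 * CPU; used for the cache lines in /proc/cpuinfo.
 */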
void show_cacheinfo(struct seq_file *m)
{
        struct cpu_cacheinfo *this_cpu_ci;
        struct cacheinfo *cache;
        int idx;

        if (!test_facility(34))
                return;
        this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
        for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
                cache = this_cpu_ci->info_list + idx;
                seq_printf(m, "cache%-11d: ", idx);
                seq_printf(m, "level=%d ", cache->level);
                seq_printf(m, "type=%s ", cache_type_string[cache->type]);
                seq_printf(m, "scope=%s ",
                           cache->disable_sysfs ? "Shared" : "Private");
                seq_printf(m, "size=%dK ", cache->size >> 10);
                seq_printf(m, "line_size=%u ", cache->coherency_line_size);
                seq_printf(m, "associativity=%d", cache->ways_of_associativity);
                seq_puts(m, "\n");
        }
}

static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
        if (level >= CACHE_MAX_LEVEL)
                return CACHE_TYPE_NOCACHE;
        ci += level;
        if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
                return CACHE_TYPE_NOCACHE;
        return cache_type_map[ci->type];
}

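/*
 * Query a single cache attribute: ai selects what to extract (EXTRACT_*),
 * li the zero based cache level and ti the data/instruction/unified part
 * (CACHE_TI_*); the three values are packed into the ECAG parameter.
 */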
static inline unsigned long ecag(int ai, int li, int ti)
{
        return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}

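/*
 * Fill one cacheinfo leaf with the line size, associativity and size
 * reported by ECAG for the given level; leaves of shared (non-private)
 * caches are hidden from sysfs via disable_sysfs.
 */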
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
                         enum cache_type type, unsigned int level, int cpu)
{
        int ti, num_sets;

        if (type == CACHE_TYPE_INST)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
        this_leaf->level = level + 1;
        this_leaf->type = type;
        this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
        this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
        num_sets = this_leaf->size / this_leaf->coherency_line_size;
        num_sets /= this_leaf->ways_of_associativity;
        this_leaf->number_of_sets = num_sets;
        cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
        if (!private)
                this_leaf->disable_sysfs = true;
}

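/*
 * Callback of the generic cacheinfo code: report how many cache levels
 * and leaves this CPU has; a separate (split) level contributes two leaves.
 */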
int init_cache_level(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int level = 0, leaves = 0;
        union cache_topology ct;
        enum cache_type ctype;

        if (!test_facility(34))
                return -EOPNOTSUPP;
        if (!this_cpu_ci)
                return -EINVAL;
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        do {
                ctype = get_cache_type(&ct.ci[0], level);
                if (ctype == CACHE_TYPE_NOCACHE)
                        break;
                /* Separate instruction and data caches */
                leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
        } while (++level < CACHE_MAX_LEVEL);
        this_cpu_ci->num_levels = level;
        this_cpu_ci->num_leaves = leaves;
        return 0;
}

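/*
 * Callback of the generic cacheinfo code: initialize the leaves counted
 * by init_cache_level(), splitting a separate level into a data and an
 * instruction leaf.
 */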
int populate_cache_leaves(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        unsigned int level, idx, pvt;
        union cache_topology ct;
        enum cache_type ctype;

        if (!test_facility(34))
                return -EOPNOTSUPP;
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
             idx < this_cpu_ci->num_leaves; idx++, level++) {
                if (!this_leaf)
                        return -EINVAL;
                pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
                ctype = get_cache_type(&ct.ci[0], level);
                if (ctype == CACHE_TYPE_SEPARATE) {
                        ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
                        ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
                } else {
                        ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
                }
        }
        return 0;
}
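
/*
 * What follows is an earlier variant of this file, apparently from before
 * the conversion to the generic cacheinfo code above: it keeps its own
 * cache list and implements the per-CPU sysfs directories and CPU hotplug
 * handling by hand.
 */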
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

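/*
 * One detected cache, kept on the global cache_list; the type is stored
 * zero based (CACHE_TYPE_* - 1) so that it can index cache_type_string
 * directly.
 */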
struct cache {
        unsigned long size;
        unsigned int line_size;
        unsigned int associativity;
        unsigned int nr_sets;
        unsigned int level   : 3;
        unsigned int type    : 2;
        unsigned int private : 1;
        struct list_head list;
};

struct cache_dir {
        struct kobject *kobj;
        struct cache_index_dir *index;
};

struct cache_index_dir {
        struct kobject kobj;
        int cpu;
        struct cache *cache;
        struct cache_index_dir *next;
};

enum {
        CACHE_SCOPE_NOTEXISTS,
        CACHE_SCOPE_PRIVATE,
        CACHE_SCOPE_SHARED,
        CACHE_SCOPE_RESERVED,
};

enum {
        CACHE_TYPE_SEPARATE,
        CACHE_TYPE_DATA,
        CACHE_TYPE_INSTRUCTION,
        CACHE_TYPE_UNIFIED,
};

enum {
        EXTRACT_TOPOLOGY,
        EXTRACT_LINE_SIZE,
        EXTRACT_SIZE,
        EXTRACT_ASSOCIATIVITY,
};

enum {
        CACHE_TI_UNIFIED = 0,
        CACHE_TI_DATA = 0,
        CACHE_TI_INSTRUCTION,
};

struct cache_info {
        unsigned char       : 4;
        unsigned char scope : 2;
        unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
        struct cache_info ci[CACHE_MAX_LEVEL];
        unsigned long long raw;
};

static const char * const cache_type_string[] = {
        "Data",
        "Instruction",
        "Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

void show_cacheinfo(struct seq_file *m)
{
        struct cache *cache;
        int index = 0;

        list_for_each_entry(cache, &cache_list, list) {
                seq_printf(m, "cache%-11d: ", index);
                seq_printf(m, "level=%d ", cache->level);
                seq_printf(m, "type=%s ", cache_type_string[cache->type]);
                seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
                seq_printf(m, "size=%luK ", cache->size >> 10);
                seq_printf(m, "line_size=%u ", cache->line_size);
                seq_printf(m, "associativity=%d", cache->associativity);
                seq_puts(m, "\n");
                index++;
        }
}

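/*
 * Issue the ECAG instruction directly (RSY format, opcode 0xeb...4c) with
 * the attribute/level/type selection packed into the command word.
 */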
static inline unsigned long ecag(int ai, int li, int ti)
{
        unsigned long cmd, val;

        cmd = ai << 4 | li << 1 | ti;
        asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
                     : "=d" (val) : "a" (cmd));
        return val;
}

static int __init cache_add(int level, int private, int type)
{
        struct cache *cache;
        int ti;

        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (!cache)
                return -ENOMEM;
        if (type == CACHE_TYPE_INSTRUCTION)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
        cache->size = ecag(EXTRACT_SIZE, level, ti);
        cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
        cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        cache->nr_sets = cache->size / cache->associativity;
        cache->nr_sets /= cache->line_size;
        cache->private = private;
        cache->level = level + 1;
        cache->type = type - 1;
        list_add_tail(&cache->list, &cache_list);
        return 0;
}

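/*
 * Walk the topology descriptor at boot time and add one struct cache per
 * level (two for a separate level) to cache_list; stop at the first level
 * that is neither private nor shared.
 */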
static void __init cache_build_info(void)
{
        struct cache *cache, *next;
        union cache_topology ct;
        int level, private, rc;

        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (level = 0; level < CACHE_MAX_LEVEL; level++) {
                switch (ct.ci[level].scope) {
                case CACHE_SCOPE_SHARED:
                        private = 0;
                        break;
                case CACHE_SCOPE_PRIVATE:
                        private = 1;
                        break;
                default:
                        return;
                }
                if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
                        rc = cache_add(level, private, CACHE_TYPE_DATA);
                        rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
                } else {
                        rc = cache_add(level, private, ct.ci[level].type);
                }
                if (rc)
                        goto error;
        }
        return;
error:
        list_for_each_entry_safe(cache, next, &cache_list, list) {
                list_del(&cache->list);
                kfree(cache);
        }
}

static struct cache_dir *cache_create_cache_dir(int cpu)
{
        struct cache_dir *cache_dir;
        struct kobject *kobj = NULL;
        struct device *dev;

        dev = get_cpu_device(cpu);
        if (!dev)
                goto out;
        kobj = kobject_create_and_add("cache", &dev->kobj);
        if (!kobj)
                goto out;
        cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
        if (!cache_dir)
                goto out;
        cache_dir->kobj = kobj;
        cache_dir_cpu[cpu] = cache_dir;
        return cache_dir;
out:
        kobject_put(kobj);
        return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
        return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
        struct cache_index_dir *index;

        index = kobj_to_cache_index_dir(kobj);
        kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
                                struct attribute *attr, char *buf)
{
        struct kobj_attribute *kobj_attr;

        kobj_attr = container_of(attr, struct kobj_attribute, attr);
        return kobj_attr->show(kobj, kobj_attr, buf);
}

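/*
 * Generate a read-only sysfs show function plus the matching
 * kobj_attribute for one cache property; "index" inside _value refers to
 * the local cache_index_dir resolved from the kobject.
 */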
#define DEFINE_CACHE_ATTR(_name, _format, _value)                      \
static ssize_t cache_##_name##_show(struct kobject *kobj,              \
                                    struct kobj_attribute *attr,       \
                                    char *buf)                          \
{                                                                       \
        struct cache_index_dir *index;                                  \
                                                                        \
        index = kobj_to_cache_index_dir(kobj);                          \
        return sprintf(buf, _format, _value);                           \
}                                                                       \
static struct kobj_attribute cache_##_name##_attr =                    \
        __ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

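/*
 * Only private caches get index directories (see cache_add_cpu()), so the
 * shared_cpu_map/shared_cpu_list of a leaf is simply its owning CPU.
 */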
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
        struct cache_index_dir *index;
        int len;

        index = kobj_to_cache_index_dir(kobj);
        len = type ?
                cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
                cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
        len += sprintf(&buf[len], "\n");
        return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
        __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
        __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
        &cache_type_attr.attr,
        &cache_size_attr.attr,
        &cache_number_of_sets_attr.attr,
        &cache_ways_of_associativity_attr.attr,
        &cache_level_attr.attr,
        &cache_coherency_line_size_attr.attr,
        &cache_shared_cpu_map_attr.attr,
        &cache_shared_cpu_list_attr.attr,
        NULL,
};

static const struct sysfs_ops cache_index_ops = {
        .show = cache_index_show,
};

static struct kobj_type cache_index_type = {
        .sysfs_ops = &cache_index_ops,
        .release = cache_index_release,
        .default_attrs = cache_index_default_attrs,
};

static int cache_create_index_dir(struct cache_dir *cache_dir,
                                  struct cache *cache, int index, int cpu)
{
        struct cache_index_dir *index_dir;
        int rc;

        index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
        if (!index_dir)
                return -ENOMEM;
        index_dir->cache = cache;
        index_dir->cpu = cpu;
        rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
                                  cache_dir->kobj, "index%d", index);
        if (rc)
                goto out;
        index_dir->next = cache_dir->index;
        cache_dir->index = index_dir;
        return 0;
out:
        kfree(index_dir);
        return rc;
}

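/*
 * Create the per-CPU cache directory and one indexN subdirectory per
 * private cache; the scan stops at the first non-private entry, which
 * assumes that private levels precede shared ones in cache_list.
 */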
static int cache_add_cpu(int cpu)
{
        struct cache_dir *cache_dir;
        struct cache *cache;
        int rc, index = 0;

        if (list_empty(&cache_list))
                return 0;
        cache_dir = cache_create_cache_dir(cpu);
        if (!cache_dir)
                return -ENOMEM;
        list_for_each_entry(cache, &cache_list, list) {
                if (!cache->private)
                        break;
                rc = cache_create_index_dir(cache_dir, cache, index, cpu);
                if (rc)
                        return rc;
                index++;
        }
        return 0;
}

static void cache_remove_cpu(int cpu)
{
        struct cache_index_dir *index, *next;
        struct cache_dir *cache_dir;

        cache_dir = cache_dir_cpu[cpu];
        if (!cache_dir)
                return;
        index = cache_dir->index;
        while (index) {
                next = index->next;
                kobject_put(&index->kobj);
                index = next;
        }
        kobject_put(cache_dir->kobj);
        kfree(cache_dir);
        cache_dir_cpu[cpu] = NULL;
}

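/*
 * CPU hotplug notifier: create the sysfs directories when a CPU comes
 * online and tear them down again when it goes away.
 */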
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
                         void *hcpu)
{
        int cpu = (long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = cache_add_cpu(cpu);
                if (rc)
                        cache_remove_cpu(cpu);
                break;
        case CPU_DEAD:
                cache_remove_cpu(cpu);
                break;
        }
        return rc ? NOTIFY_BAD : NOTIFY_OK;
}

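/*
 * Build the cache list and the sysfs directories for all online CPUs and
 * register the hotplug notifier; do nothing if the machine does not
 * provide the ECAG query (test_facility(34)).
 */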
static int __init cache_init(void)
{
        int cpu;

        if (!test_facility(34))
                return 0;
        cache_build_info();

        cpu_notifier_register_begin();
        for_each_online_cpu(cpu)
                cache_add_cpu(cpu);
        __hotcpu_notifier(cache_hotplug, 0);
        cpu_notifier_register_done();
        return 0;
}
device_initcall(cache_init);