/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

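/*
 * get_cpu_cacheinfo - return the per-CPU cacheinfo descriptor for @cpu,
 * holding the number of detected cache leaves and the array of per-leaf
 * entries.
 */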
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
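/*
 * Associate each cache leaf with its device tree node: level 1 leaves
 * map to the CPU node itself, higher levels follow the
 * "next-level-cache" chain via of_find_next_cache_node().
 */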
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

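/*
 * Unified caches use the first cache_type_info[] entry; instruction and
 * data caches index it directly by their enum cache_type value.
 */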
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

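/* Read the cache size (in bytes) from the type-specific DT property */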
static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

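/*
 * Derive the number of ways as size / (nr_sets * line_size). nr_sets == 1
 * denotes a fully associative cache, which is left reported as 0 ways.
 */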
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

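/*
 * Override the leaf properties detected by the architecture with
 * whatever values the device tree describes for each leaf.
 */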
static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume unique level 1 cache, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

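/*
 * Build shared_cpu_map for every leaf of @cpu: mark @cpu itself, then
 * mark each online sibling whose leaf at the same index refers to the
 * same cache, updating both CPUs' masks symmetrically.
 */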
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

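/*
 * Undo cache_shared_cpu_map_setup() for @cpu: clear @cpu from every
 * sibling's shared_cpu_map and drop the of_node references taken during
 * setup.
 */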
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		return cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

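/*
 * Allocate and populate the cacheinfo leaves for @cpu via the weak
 * architecture hooks, then fill in the sharing masks and apply any DT
 * property overrides. Everything allocated here is freed on failure.
 */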
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

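/* Generate a trivial sysfs show() method for an unsigned int leaf field */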
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

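/*
 * Expose only the attributes whose underlying value was actually
 * detected: a zero/empty field hides its sysfs file. The exception is
 * ways_of_associativity, where 0 is meaningful (fully associative), so
 * visibility is keyed on size instead.
 */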
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

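/*
 * Return the default attribute groups, extended with the architecture's
 * private group (if cache_get_priv_group() provides one) placed in the
 * reserved slot of cache_private_groups[].
 */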
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

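/*
 * Create the cpuX/cache parent device plus one indexY child device per
 * cache leaf, attaching the appropriate attribute groups to each.
 */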
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

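/*
 * CPU hotplug callbacks: detect the cache attributes and add the sysfs
 * devices when a CPU comes online; tear both down before it goes down.
 */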
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);