// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
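
/* Illustrative only -- the values below are hypothetical, not taken from
 * any real platform. A unified L2 node matching the second template
 * above might carry properties such as:
 *
 *	cache-unified;
 *	d-cache-size = <0x100000>;	(1 MiB)
 *	d-cache-line-size = <128>;
 *	d-cache-sets = <2048>;
 */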

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}
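
/* Device tree property cells are big-endian; of_read_number(..., 1)
 * converts a single __be32 cell to host byte order, so these property
 * lookups behave the same on big- and little-endian kernels.
 */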

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
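
/* Worked example of the computation above: a 32 KiB cache with 128-byte
 * lines organized into 64 sets has (32768 / 64) / 128 = 4 ways of
 * associativity.
 */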

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}
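
/* "cache-unified" is a boolean (typically zero-length) property: its mere
 * presence marks the node as a unified cache, so the code above only needs
 * to check whether the lookup succeeds.
 */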

/*
 * Unified caches can have two different sets of tags. Most embedded
 * systems use cache-size, etc. for the unified cache size, but open
 * firmware systems use d-cache-size, etc. Check on initialization for
 * which type we have, and return the appropriate structure type. Assume
 * it's embedded if it isn't open firmware. If it's yet a 3rd type, then
 * there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}
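
/* Note the fixed ordering above: the data cache is linked ahead of the
 * instruction cache via next_local, establishing the L1d -> L1i order
 * that the local-hierarchy list documented at struct cache relies on.
 */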

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
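
/* of_find_next_cache_node() resolves the next level of the hierarchy by
 * following the node's "l2-cache" (or "next-level-cache") phandle, so the
 * walk above proceeds cpu node -> L2 node -> L3 node ... until no further
 * phandle is found.
 */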

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}
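
/* Per the PowerPC processor binding, the L1 cache properties live in the
 * cpu node itself, which is why the cpu node is passed as the level-1
 * cache node above (and why struct cache's ofnode "may be cpu").
 */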

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}
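
/* The "cache" directory uses a dynamically allocated kobject (see the
 * struct cache_dir definition above) rather than an embedded one, so the
 * kobject core frees it on the final kobject_put() in remove_cache_dir(),
 * while the containing struct cache_dir is kfree()d separately.
 */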

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};
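
/* cache_index_type supplies only a ->show sysfs op (and 0444 file modes),
 * so every index attribute is read-only; ->release frees the struct
 * cache_index_dir that embeds the kobject once its last reference drops.
 */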

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}
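
/* New index directories are pushed onto the front of cache_dir->index, so
 * the list ends up in reverse creation order; that is harmless, as it is
 * only walked to kobject_put() each entry on teardown.
 */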

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}
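
/* Index directories are created in next_local order, so index0 is the
 * CPU's L1 data (or unified) cache, index1 the L1 instruction cache on
 * split designs, and higher indices the outer levels -- the layout
 * userspace expects under /sys/devices/system/cpu/cpuN/cache/.
 */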

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */