// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#define pr_fmt(fmt) "cacheinfo: " fmt

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc. */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
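
/*
 * Illustrative device tree fragment (hypothetical, not taken from any
 * real board) showing the properties the embedded "Unified" template
 * above would match:
 *
 *	l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;		// 512KiB
 *		cache-line-size = <128>;
 *		cache-sets = <512>;
 *	};
 */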

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOFP(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
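
/*
 * Worked example with hypothetical values: for a node carrying
 * d-cache-size = <0x80000> (524288 bytes), d-cache-sets = <512> and
 * d-cache-line-size = <128>, the computation above gives
 * (524288 / 512) / 128 = 8, i.e. an 8-way set-associative cache.
 */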

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can be described with two different sets of property
 * names.  Most embedded systems use cache-size, etc. for the unified
 * cache size, but Open Firmware systems use d-cache-size, etc.  Check
 * at initialization time which set is present and return the
 * appropriate cache_type_info index.  Assume the embedded flavour if
 * the Open Firmware properties are absent.  If a third flavour shows
 * up, entries will be missing from
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
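
/*
 * For example, a unified cache node carrying "d-cache-size" is
 * classified as CACHE_TYPE_UNIFIED_D, while one carrying only
 * "cache-size" falls back to CACHE_TYPE_UNIFIED.
 */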

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOFP\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOFP\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;

	/*
	 * The cache->next_local list sorts by level ascending:
	 * L1d -> L1i -> L2 -> L3 ...
	 */
	WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
		  (smaller->level > 1 && bigger->level != smaller->level + 1),
		  "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
		  smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ONCE(cache->level != 1,
		  "instantiating cache chain from L%d %s cache for "
		  "%pOFP instead of an L1\n", cache->level,
		  cache_type_string(cache), cache->ofnode);
	WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
		  "instantiating cache chain from node %pOFP of type '%s' "
		  "instead of a cpu node\n", cache->ofnode,
		  of_node_get_device_type(cache->ofnode));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
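
/*
 * Illustrative result: starting from a cpu node whose l2-cache (or
 * next-level-cache) phandle points at an L2 node that in turn points
 * at an L3 node, the loop above produces the local chain
 * L1 (level 1, cpu node) -> L2 (level 2) -> L3 (level 3), with each
 * link established by link_cache_lists().
 */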

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}
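
/*
 * Hypothetical example: on an SMT8 big core, threads 0-7 form one
 * core from the scheduler's point of view, but two "small" cores
 * (e.g. the even and the odd threads) each have a private L1.  For an
 * L1 index directory, the mask reported to userspace then covers only
 * the small-core siblings rather than all eight threads.
 */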

static ssize_t
show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, false);
}

static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	return show_shared_cpumap(k, attr, buf, true);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOFP(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOFP(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
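
/*
 * Resulting sysfs layout (illustrative, for a CPU with split L1
 * caches and a unified L2):
 *
 *	/sys/devices/system/cpu/cpuN/cache/
 *		index0/		L1 Data
 *		index1/		L1 Instruction
 *		index2/		L2 Unified
 *
 * Each index directory always exposes type, level, shared_cpu_map and
 * shared_cpu_list; size, coherency_line_size, number_of_sets and
 * ways_of_associativity appear only when the device tree provides the
 * corresponding properties.
 */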

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOFP(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */