v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 *    Copyright IBM Corp. 2012
 */

#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cacheinfo.h>
#include <asm/facility.h>

enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};

static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

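/*
 * Back end for /proc/cpuinfo: prints one line per cache leaf.  The
 * output has the following shape (the values shown are illustrative
 * only, not from any particular machine):
 *
 *   cache0          : level=1 type=Data scope=Private size=128K line_size=256 associativity=8
 */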
void show_cacheinfo(struct seq_file *m)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *cache;
	int idx;

	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		cache = this_cpu_ci->info_list + idx;
		seq_printf(m, "cache%-11d: ", idx);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ",
			   cache->disable_sysfs ? "Shared" : "Private");
		seq_printf(m, "size=%dK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
		seq_puts(m, "\n");
	}
}

static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
	if (level >= CACHE_MAX_LEVEL)
		return CACHE_TYPE_NOCACHE;
	ci += level;
	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
		return CACHE_TYPE_NOCACHE;
	return cache_type_map[ci->type];
}

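/*
 * Query one cache attribute via the EXTRACT CPU ATTRIBUTE (ECAG)
 * instruction.  The operand encodes the attribute indication (ai, one
 * of the EXTRACT_* values), the level indication (li, 0..7) and the
 * type indication (ti, one of the CACHE_TI_* values).
 */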
static inline unsigned long ecag(int ai, int li, int ti)
{
	return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}

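/*
 * Fill one cacheinfo leaf: line size, associativity and total size come
 * straight from ECAG; the number of sets is derived from those three.
 * Shared caches are marked disable_sysfs, so only CPU-private caches
 * show up in sysfs.
 */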
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
			 enum cache_type type, unsigned int level, int cpu)
{
	int ti, num_sets;

	if (type == CACHE_TYPE_INST)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	this_leaf->level = level + 1;
	this_leaf->type = type;
	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
	num_sets = this_leaf->size / this_leaf->coherency_line_size;
	num_sets /= this_leaf->ways_of_associativity;
	this_leaf->number_of_sets = num_sets;
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (!private)
		this_leaf->disable_sysfs = true;
}

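/*
 * Count cache levels and leaves from the ECAG topology word.  A level
 * with separate instruction and data caches contributes two leaves;
 * the walk stops at the first level that reports no cache.
 */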
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level = 0, leaves = 0;
	union cache_topology ct;
	enum cache_type ctype;

	if (!this_cpu_ci)
		return -EINVAL;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	do {
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_NOCACHE)
			break;
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	} while (++level < CACHE_MAX_LEVEL);
	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

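/*
 * Walk the topology word again and initialize one cacheinfo leaf per
 * cache (two for a separate I/D level), in the same order counted by
 * init_cache_level() above.
 */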
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}
v3.15
 
/*
 * Extract CPU cache information and expose them via sysfs.
 *
 *    Copyright IBM Corp. 2012
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

struct cache {
	unsigned long size;
	unsigned int line_size;
	unsigned int associativity;
	unsigned int nr_sets;
	unsigned int level   : 3;
	unsigned int type    : 2;
	unsigned int private : 1;
	struct list_head list;
};

struct cache_dir {
	struct kobject *kobj;
	struct cache_index_dir *index;
};

struct cache_index_dir {
	struct kobject kobj;
	int cpu;
	struct cache *cache;
	struct cache_index_dir *next;
};

enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CACHE_TYPE_SEPARATE,
	CACHE_TYPE_DATA,
	CACHE_TYPE_INSTRUCTION,
	CACHE_TYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

static const char * const cache_type_string[] = {
	"Data",
	"Instruction",
	"Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

void show_cacheinfo(struct seq_file *m)
{
	struct cache *cache;
	int index = 0;

	list_for_each_entry(cache, &cache_list, list) {
		seq_printf(m, "cache%-11d: ", index);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
		seq_printf(m, "size=%luK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->line_size);
		seq_printf(m, "associativity=%d", cache->associativity);
		seq_puts(m, "\n");
		index++;
	}
}

static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}

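/*
 * Allocate one struct cache for the given level/type, query its
 * geometry via ECAG and append it to the global cache_list.  Note the
 * type - 1 mapping into cache_type_string[], which has no entry for
 * CACHE_TYPE_SEPARATE.
 */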
static int __init cache_add(int level, int private, int type)
{
	struct cache *cache;
	int ti;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;
	if (type == CACHE_TYPE_INSTRUCTION)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	cache->size = ecag(EXTRACT_SIZE, level, ti);
	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	cache->nr_sets = cache->size / cache->associativity;
	cache->nr_sets /= cache->line_size;
	cache->private = private;
	cache->level = level + 1;
	cache->type = type - 1;
	list_add_tail(&cache->list, &cache_list);
	return 0;
}

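/*
 * Build the global cache_list from the ECAG topology word.  The walk
 * stops at the first level whose scope says no cache exists; on any
 * allocation failure the partially built list is torn down again.
 */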
static void __init cache_build_info(void)
{
	struct cache *cache, *next;
	union cache_topology ct;
	int level, private, rc;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
		switch (ct.ci[level].scope) {
		case CACHE_SCOPE_SHARED:
			private = 0;
			break;
		case CACHE_SCOPE_PRIVATE:
			private = 1;
			break;
		default:
			return;
		}
		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
			rc  = cache_add(level, private, CACHE_TYPE_DATA);
			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
		} else {
			rc = cache_add(level, private, ct.ci[level].type);
		}
		if (rc)
			goto error;
	}
	return;
error:
	list_for_each_entry_safe(cache, next, &cache_list, list) {
		list_del(&cache->list);
		kfree(cache);
	}
}

static struct cache_dir *cache_create_cache_dir(int cpu)
{
	struct cache_dir *cache_dir;
	struct kobject *kobj = NULL;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (!dev)
		goto out;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto out;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto out;
	cache_dir->kobj = kobj;
	cache_dir_cpu[cpu] = cache_dir;
	return cache_dir;
out:
	kobject_put(kobj);
	return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
	return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);
	kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(kobj, kobj_attr, buf);
}

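/*
 * Generate one read-only sysfs show function per cache attribute; each
 * use expands to cache_<name>_show() plus a matching kobj_attribute.
 */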
#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
static ssize_t cache_##_name##_show(struct kobject *kobj,		\
				    struct kobj_attribute *attr,	\
				    char *buf)				\
{									\
	struct cache_index_dir *index;					\
									\
	index = kobj_to_cache_index_dir(kobj);				\
	return sprintf(buf, _format, _value);				\
}									\
static struct kobj_attribute cache_##_name##_attr =			\
	__ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
	struct cache_index_dir *index;
	int len;

	index = kobj_to_cache_index_dir(kobj);
	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
	len += sprintf(&buf[len], "\n");
	return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 0, buf);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 1, buf);
}
static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_size_attr.attr,
	&cache_number_of_sets_attr.attr,
	&cache_ways_of_associativity_attr.attr,
	&cache_level_attr.attr,
	&cache_coherency_line_size_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.sysfs_ops = &cache_index_ops,
	.release = cache_index_release,
	.default_attrs = cache_index_default_attrs,
};

static int cache_create_index_dir(struct cache_dir *cache_dir,
				  struct cache *cache, int index, int cpu)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return -ENOMEM;
	index_dir->cache = cache;
	index_dir->cpu = cpu;
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto out;
	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;
	return 0;
out:
	kfree(index_dir);
	return rc;
}

static int cache_add_cpu(int cpu)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int rc, index = 0;

	if (list_empty(&cache_list))
		return 0;
	cache_dir = cache_create_cache_dir(cpu);
	if (!cache_dir)
		return -ENOMEM;
	list_for_each_entry(cache, &cache_list, list) {
		if (!cache->private)
			break;
		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
		if (rc)
			return rc;
		index++;
	}
	return 0;
}

static void cache_remove_cpu(int cpu)
{
	struct cache_index_dir *index, *next;
	struct cache_dir *cache_dir;

	cache_dir = cache_dir_cpu[cpu];
	if (!cache_dir)
		return;
	index = cache_dir->index;
	while (index) {
		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
	kobject_put(cache_dir->kobj);
	kfree(cache_dir);
	cache_dir_cpu[cpu] = NULL;
}

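/*
 * CPU hotplug callback: create the per-CPU sysfs directories when a CPU
 * comes online and tear them down again when it goes away.
 */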
static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
			 void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cache_add_cpu(cpu);
		if (rc)
			cache_remove_cpu(cpu);
		break;
	case CPU_DEAD:
		cache_remove_cpu(cpu);
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

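/*
 * Nothing is set up unless facility bit 34 is installed: without it the
 * ECAG-based cache_build_info() could not query the machine, so the
 * initcall simply bails out and no cache information is exposed.
 */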
static int __init cache_init(void)
{
	int cpu;

	if (!test_facility(34))
		return 0;
	cache_build_info();

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		cache_add_cpu(cpu);
	__hotcpu_notifier(cache_hotplug, 0);
	cpu_notifier_register_done();
	return 0;
}
device_initcall(cache_init);