/* (stray "Loading..." page artifact removed — not part of the source) */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Extract CPU cache information and expose them via sysfs.
4 *
5 * Copyright IBM Corp. 2012
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/seq_file.h>
10#include <linux/cpu.h>
11#include <linux/cacheinfo.h>
12#include <asm/facility.h>
13
/*
 * Cache scope values as reported in the per-level "scope" field of the
 * ECAG (EXTRACT_TOPOLOGY) topology word — see struct cache_info below.
 */
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};
20
/* Cache type values as reported in the ECAG topology "type" field. */
enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};
27
/* Attribute-indication (ai) codes passed to ecag(): what to extract. */
enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};
34
/*
 * Type-indication (ti) codes passed to ecag(). Unified and data caches
 * deliberately share code 0 (see ci_leaf_init()).
 */
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};
40
/* One per-level descriptor within the 64-bit ECAG topology word. */
struct cache_info {
	unsigned char : 4;	/* reserved bits */
	unsigned char scope : 2;	/* CACHE_SCOPE_* */
	unsigned char type : 2;	/* CTYPE_* */
};
46
#define CACHE_MAX_LEVEL 8
/*
 * The ECAG topology attribute packs one 8-bit descriptor per cache
 * level into a single 64-bit value; the union allows reading the raw
 * register and then indexing it per level.
 */
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};
52
/*
 * Human-readable names, indexed by the generic enum cache_type (see
 * show_cacheinfo(): cache_type_string[cache->type]); the empty slots
 * are placeholders for values never printed here.
 */
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};
60
/* Map the machine's CTYPE_* encoding to the generic enum cache_type. */
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
67
68void show_cacheinfo(struct seq_file *m)
69{
70 struct cpu_cacheinfo *this_cpu_ci;
71 struct cacheinfo *cache;
72 int idx;
73
74 if (!test_facility(34))
75 return;
76 this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
77 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
78 cache = this_cpu_ci->info_list + idx;
79 seq_printf(m, "cache%-11d: ", idx);
80 seq_printf(m, "level=%d ", cache->level);
81 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
82 seq_printf(m, "scope=%s ",
83 cache->disable_sysfs ? "Shared" : "Private");
84 seq_printf(m, "size=%dK ", cache->size >> 10);
85 seq_printf(m, "line_size=%u ", cache->coherency_line_size);
86 seq_printf(m, "associativity=%d", cache->ways_of_associativity);
87 seq_puts(m, "\n");
88 }
89}
90
91static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
92{
93 if (level >= CACHE_MAX_LEVEL)
94 return CACHE_TYPE_NOCACHE;
95 ci += level;
96 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
97 return CACHE_TYPE_NOCACHE;
98 return cache_type_map[ci->type];
99}
100
/*
 * Query one cache attribute via the EXTRACT CACHE ATTRIBUTE (ECAG)
 * instruction. @ai selects the attribute (EXTRACT_*), @li the 0-based
 * cache level and @ti the cache type indication (CACHE_TI_*); the
 * three are packed into the attribute-selection operand for __ecag().
 */
static inline unsigned long ecag(int ai, int li, int ti)
{
	return __ecag(ECAG_CACHE_ATTRIBUTE, ai << 4 | li << 1 | ti);
}
105
106static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
107 enum cache_type type, unsigned int level, int cpu)
108{
109 int ti, num_sets;
110
111 if (type == CACHE_TYPE_INST)
112 ti = CACHE_TI_INSTRUCTION;
113 else
114 ti = CACHE_TI_UNIFIED;
115 this_leaf->level = level + 1;
116 this_leaf->type = type;
117 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
118 this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
119 this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
120 num_sets = this_leaf->size / this_leaf->coherency_line_size;
121 num_sets /= this_leaf->ways_of_associativity;
122 this_leaf->number_of_sets = num_sets;
123 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
124 if (!private)
125 this_leaf->disable_sysfs = true;
126}
127
128int init_cache_level(unsigned int cpu)
129{
130 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
131 unsigned int level = 0, leaves = 0;
132 union cache_topology ct;
133 enum cache_type ctype;
134
135 if (!test_facility(34))
136 return -EOPNOTSUPP;
137 if (!this_cpu_ci)
138 return -EINVAL;
139 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
140 do {
141 ctype = get_cache_type(&ct.ci[0], level);
142 if (ctype == CACHE_TYPE_NOCACHE)
143 break;
144 /* Separate instruction and data caches */
145 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
146 } while (++level < CACHE_MAX_LEVEL);
147 this_cpu_ci->num_levels = level;
148 this_cpu_ci->num_leaves = leaves;
149 return 0;
150}
151
/*
 * Fill this cpu's cacheinfo leaf array from the ECAG topology word.
 * A level with separate instruction and data caches consumes two
 * consecutive leaves (data first, then instruction); all other levels
 * consume one. Relies on num_levels/num_leaves having been set by
 * init_cache_level() and on the core having allocated info_list.
 *
 * Returns 0 on success, -EOPNOTSUPP if facility 34 is not installed,
 * -EINVAL if the leaf array is missing.
 */
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	/* idx counts leaves, level counts cache levels; a separate-I/D
	 * level advances this_leaf twice but idx only once per loop pass.
	 */
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			/* Two leaves for this level: data, then instruction. */
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}
/*
 * NOTE(review): everything below is an older duplicate revision of the
 * same file (pre-SPDX header, inline-asm ecag, get/put_online_cpus) —
 * apparently an extraction artifact; its definitions collide with the
 * copy above and should not both be compiled.
 *
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
7
8#include <linux/seq_file.h>
9#include <linux/cpu.h>
10#include <linux/cacheinfo.h>
11#include <asm/facility.h>
12
/*
 * NOTE(review): the declarations below duplicate those near the top of
 * this file (older appended revision). Kept unchanged; comments only.
 */

/* Cache scope encoding in the ECAG topology word. */
enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

/* Cache type encoding in the ECAG topology word. */
enum {
	CTYPE_SEPARATE,
	CTYPE_DATA,
	CTYPE_INSTRUCTION,
	CTYPE_UNIFIED,
};

/* Attribute-indication codes for ecag(). */
enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

/* Type-indication codes for ecag(); unified and data share 0. */
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

/* One per-level descriptor of the 64-bit ECAG topology word. */
struct cache_info {
	unsigned char : 4;	/* reserved bits */
	unsigned char scope : 2;	/* CACHE_SCOPE_* */
	unsigned char type : 2;	/* CTYPE_* */
};

#define CACHE_MAX_LEVEL 8
/* Topology word: eight per-level descriptors, or the raw register. */
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

/* Indexed by the generic enum cache_type (hence the empty slots). */
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};

/* Map machine CTYPE_* values to the generic enum cache_type. */
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
66
/*
 * Older duplicate of show_cacheinfo(): emits one line per cache leaf of
 * an arbitrary online CPU. Unlike the newer copy above, it brackets the
 * access with get/put_online_cpus() to hold off CPU hotplug while the
 * online-mask-based CPU choice is in use.
 */
void show_cacheinfo(struct seq_file *m)
{
	struct cpu_cacheinfo *this_cpu_ci;
	struct cacheinfo *cache;
	int idx;

	/* No facility 34 — no cache information available. */
	if (!test_facility(34))
		return;
	/* Keep the chosen online CPU from going away under us. */
	get_online_cpus();
	this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		cache = this_cpu_ci->info_list + idx;
		seq_printf(m, "cache%-11d: ", idx);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ",
			   cache->disable_sysfs ? "Shared" : "Private");
		seq_printf(m, "size=%dK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->coherency_line_size);
		seq_printf(m, "associativity=%d", cache->ways_of_associativity);
		seq_puts(m, "\n");
	}
	put_online_cpus();
}
91
/*
 * Translate the ECAG-reported type of cache @level into the generic
 * enum cache_type; CACHE_TYPE_NOCACHE for out-of-range levels or
 * levels whose scope is neither private nor shared. (Duplicate of the
 * earlier copy.)
 */
static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
{
	if (level >= CACHE_MAX_LEVEL)
		return CACHE_TYPE_NOCACHE;
	ci += level;
	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
		return CACHE_TYPE_NOCACHE;
	return cache_type_map[ci->type];
}
101
/*
 * Older duplicate of ecag(): issues the ECAG instruction directly via
 * inline assembly (.insn rsy,0xeb000000004c) rather than through the
 * __ecag() helper used by the newer copy above.
 */
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	/* Pack attribute, level and type indications into the operand. */
	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}
111
/*
 * Fill in one cacheinfo leaf for @cpu: query line size, associativity
 * and total size for @level via ECAG and derive the set count from
 * them. Non-private (shared) leaves are hidden from sysfs. (Duplicate
 * of the earlier copy.)
 */
static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
			 enum cache_type type, unsigned int level, int cpu)
{
	int ti, num_sets;

	/* Instruction caches have their own type indication; data and
	 * unified caches share CACHE_TI_UNIFIED (== CACHE_TI_DATA).
	 */
	if (type == CACHE_TYPE_INST)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	this_leaf->level = level + 1;	/* sysfs levels are 1-based */
	this_leaf->type = type;
	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
	/* sets = size / line_size / ways */
	num_sets = this_leaf->size / this_leaf->coherency_line_size;
	num_sets /= this_leaf->ways_of_associativity;
	this_leaf->number_of_sets = num_sets;
	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (!private)
		this_leaf->disable_sysfs = true;
}
133
/*
 * Count cache levels and leaves for @cpu from the ECAG topology word;
 * a level with separate I/D caches counts as two leaves. Scan stops at
 * the first level reporting no cache. (Duplicate of the earlier copy.)
 */
int init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int level = 0, leaves = 0;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	if (!this_cpu_ci)
		return -EINVAL;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	do {
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_NOCACHE)
			break;
		/* Separate instruction and data caches */
		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
	} while (++level < CACHE_MAX_LEVEL);
	this_cpu_ci->num_levels = level;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}
157
/*
 * Fill this cpu's cacheinfo leaf array from the ECAG topology word;
 * separate-I/D levels consume two consecutive leaves (data first, then
 * instruction). Relies on init_cache_level() having run and info_list
 * having been allocated. (Duplicate of the earlier copy.)
 */
int populate_cache_leaves(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	unsigned int level, idx, pvt;
	union cache_topology ct;
	enum cache_type ctype;

	if (!test_facility(34))
		return -EOPNOTSUPP;
	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	/* idx counts leaves, level counts levels; a separate-I/D level
	 * advances this_leaf twice per loop pass.
	 */
	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
	     idx < this_cpu_ci->num_leaves; idx++, level++) {
		if (!this_leaf)
			return -EINVAL;
		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
		ctype = get_cache_type(&ct.ci[0], level);
		if (ctype == CACHE_TYPE_SEPARATE) {
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
		} else {
			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
		}
	}
	return 0;
}