/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/cputype.h>
#include <asm/topology.h>

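/*
 * Map a topology node's "cpu" phandle back to a logical CPU number by
 * comparing it against the DT node of every possible CPU. Returns the
 * logical CPU on success, or -1 if the phandle is missing or matches
 * no possible CPU.
 */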
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	for_each_possible_cpu(cpu) {
		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
			of_node_put(cpu_node);
			return cpu;
		}
	}

	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);

	of_node_put(cpu_node);
	return -1;
}

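/*
 * Parse one core node of the cpu-map: either a leaf core that maps
 * directly to a single CPU, or a core containing thread%d children on
 * SMT systems. Fills in cluster_id/core_id/thread_id for each CPU it
 * resolves, and rejects cores that carry both a "cpu" phandle and
 * thread children.
 */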
static int __init parse_core(struct device_node *core, int cluster_id,
			     int core_id)
{
	char name[10];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else {
				pr_err("%s: Can't get CPU for thread\n",
				       t->full_name);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%s: Core has both threads and CPU\n",
			       core->full_name);
			return -EINVAL;
		}

		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf) {
		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
		return -EINVAL;
	}

	return 0;
}

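/*
 * Recursively parse a cluster node: descend into any cluster%d children
 * first, then parse core%d children on leaf clusters. The depth is used
 * only to reject cores placed directly under the cpu-map root; leaf
 * clusters are numbered sequentially so the scheduler still sees a flat
 * list of clusters.
 */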
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[10];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int cluster_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%s: cpu-map children should be clusters\n",
				       c->full_name);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, cluster_id, core_id++);
			} else {
				pr_err("%s: Non-leaf cluster with core %s\n",
				       cluster->full_name, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%s: empty cluster\n", cluster->full_name);

	if (leaf)
		cluster_id++;

	return 0;
}

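/*
 * Parse the cpu-map node under /cpus and populate cpu_topology from it.
 * An illustrative cpu-map fragment in the shape this parser expects
 * (CPU0/CPU1 are placeholder phandles to cpu@... nodes, not taken from
 * any particular platform):
 *
 *	cpus {
 *		cpu-map {
 *			cluster0 {
 *				core0 {
 *					cpu = <&CPU0>;
 *				};
 *				core1 {
 *					cpu = <&CPU1>;
 *				};
 *			};
 *		};
 *	};
 */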
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].cluster_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

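/*
 * Return the mask of CPUs sharing a cluster with @cpu; the scheduler
 * uses this when building the multi-core (MC) domain level.
 */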
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_sibling;
}

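/*
 * Cross-link @cpuid with every other CPU in the same cluster (core
 * siblings) and, within that, every CPU in the same core (thread
 * siblings). Masks are set in both directions so already-online CPUs
 * learn about the newcomer as well.
 */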
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_possible_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		if (cpu != cpuid)
			cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

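/*
 * Record the topology of the calling CPU. If DT parsing already filled
 * in this CPU's entry, only the sibling masks need updating; otherwise
 * derive cluster/core/thread IDs from the MPIDR_EL1 affinity fields.
 */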
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->cluster_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
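	/*
	 * For example (hypothetical value, not from this file): with
	 * MPIDR_EL1 = 0x80000102 the MT bit (24) is clear, so Aff0 = 0x02
	 * becomes core_id, Aff1 = 0x01 becomes cluster_id, and thread_id
	 * stays -1.
	 */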
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

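/*
 * Reset every possible CPU to the "unknown" topology: thread and
 * cluster IDs of -1 and sibling masks containing only the CPU itself.
 */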
static void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = 0;
		cpu_topo->cluster_id = -1;

		cpumask_clear(&cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
		cpumask_clear(&cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
	}
}

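/*
 * Early boot entry point: establish default values, then try the DT.
 * On any parse error, fall back to the defaults and leave MPIDR-based
 * detection to store_cpu_topology() at secondary boot.
 */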
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}

/*
 * arch/arm64/kernel/topology.c
 *
 * Copyright (C) 2011,2013,2014 Linaro Limited.
 *
 * Based on the arm32 version written by Vincent Guittot in turn based on
 * arch/sh/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/acpi.h>
#include <linux/arch_topology.h>
#include <linux/cacheinfo.h>
#include <linux/init.h>
#include <linux/percpu.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/topology.h>

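/*
 * Record the topology of the calling CPU, deriving package/core/thread
 * IDs from the MPIDR_EL1 affinity fields when firmware (DT or ACPI)
 * has not already populated this CPU's entry.
 */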
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
	u64 mpidr;

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	mpidr = read_cpuid_mpidr();

	/* Uniprocessor systems can rely on default topology values */
	if (mpidr & MPIDR_UP_BITMASK)
		return;

	/* Create cpu topology mapping based on MPIDR. */
	if (mpidr & MPIDR_MT_BITMASK) {
		/* Multiprocessor system : Multi-threads per core */
		cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8;
	} else {
		/* Multiprocessor system : Single-thread per core */
		cpuid_topo->thread_id = -1;
		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) |
					 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 |
					 MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16;
	}

	pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id, mpidr);

topology_populated:
	update_siblings_masks(cpuid);
}

#ifdef CONFIG_ACPI
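/*
 * Decide whether @cpu should be treated as SMT, preferring the PPTT
 * answer over the local MPIDR MT bit.
 */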
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * If the PPTT doesn't have thread information, assume a
	 * homogeneous machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node
 * tree to the cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	for_each_possible_cpu(cpu) {
		int i, cache_id;

		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;

		i = acpi_find_last_cache_level(cpu);

		if (i > 0) {
			/*
			 * This is the only part of cpu_topology that has
			 * a direct relationship with the cache topology.
			 */
			cache_id = find_acpi_cpu_cache_topology(cpu, i);
			if (cache_id > 0)
				cpu_topology[cpu].llc_id = cache_id;
		}
	}

	return 0;
}
#endif