// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/units.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
DEFINE_PER_CPU(unsigned long, capacity_freq_ref) = 1;
EXPORT_PER_CPU_SYMBOL_GPL(capacity_freq_ref);

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

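/*
 * Register @data as the counter-driven frequency scale source for all CPUs
 * in @cpus. Architected counters (SCALE_FREQ_SOURCE_ARCH) take precedence:
 * an already registered source is only replaced when it is not ARCH.
 */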
void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

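/*
 * Called from the scheduler tick on the local CPU. rcu_dereference_sched()
 * relies on preemption being disabled here, which pairs with the
 * synchronize_rcu() in topology_clear_scale_freq_source() above.
 */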
void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

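/*
 * cpufreq-driven frequency invariance: record cur_freq/max_freq as a factor
 * relative to SCHED_CAPACITY_SCALE. For example, cur_freq = 1000000 and
 * max_freq = 2000000 (kHz) yield (1000000 << 10) / 2000000 = 512, i.e. half
 * of SCHED_CAPACITY_SCALE.
 */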
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPU frequency due to
 * thermal capping. It might also be a boost frequency value, which is
 * bigger than the internal 'capacity_freq_ref' max frequency. In such a
 * case the pressure value should simply be removed, since this is an
 * indication that there is no thermal throttling. The @capped_freq must
 * be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = arch_scale_freq_ref(cpu);

	/*
	 * Handle boost frequencies properly: they should simply clear the
	 * thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
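
/*
 * Illustrative use (not taken from this file): a cpufreq cooling device
 * that has just capped a policy's CPUs to 1.4 GHz could propagate it as
 *
 *	topology_update_thermal_pressure(policy->related_cpus, 1400000);
 *
 * with the frequency given in kHz, as required above.
 */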

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int cpu_capacity_sysctl_add(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_create_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int cpu_capacity_sysctl_remove(unsigned int cpu)
{
	struct device *cpu_dev = get_cpu_device(cpu);

	if (!cpu_dev)
		return -ENOENT;

	device_remove_file(cpu_dev, &dev_attr_cpu_capacity);

	return 0;
}

static int register_cpu_capacity_sysctl(void)
{
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
			  cpu_capacity_sysctl_add, cpu_capacity_sysctl_remove);

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

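/*
 * Normalize the raw (DT/CPPC provided) capacities so that the biggest CPU
 * in the system ends up at SCHED_CAPACITY_SCALE (1024). Each raw value is
 * first weighted by the CPU's reference frequency. As an illustrative
 * example: raw capacities {512, 1024} at equal reference frequencies
 * normalize to cpu_scale values {512, 1024}.
 */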
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

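/*
 * Parse the capacity-dmips-mhz property of a DT cpu node, e.g.
 *
 *	cpu@0 {
 *		...
 *		capacity-dmips-mhz = <1024>;
 *	};
 *
 * (snippet for illustration; see the cpu capacity DT bindings). Raw values
 * are collected here and normalized later, once every CPU has been parsed.
 */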
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update capacity_freq_ref for calculating early boot CPU
		 * capacities. For a non-clk CPU DVFS mechanism there is no
		 * way to get the frequency value at this point, so assume
		 * the CPUs run at the same frequency (by keeping the
		 * initial capacity_freq_ref value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(capacity_freq_ref, cpu) =
				clk_get_rate(cpu_clk) / HZ_PER_KHZ;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate)
{
}

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	u64 capacity, capacity_scale = 0;
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]);

			per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);

			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	for_each_possible_cpu(cpu) {
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);

		capacity = raw_capacity[cpu];
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}

	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

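/*
 * cpufreq policy notifier: each CPUFREQ_CREATE_POLICY event clears the
 * policy's CPUs from cpus_to_visit and records their maximum frequency.
 * Once every possible CPU has been visited, the capacities parsed from DT
 * are normalized and the notifier unregisters itself via parsing_done_work.
 */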
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;
		freq_inv_set_max_ratio(cpu,
				       per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);
	}

	if (cpumask_empty(cpus_to_visit)) {
		if (raw_capacity) {
			topology_normalize_cpu_scale();
			schedule_work(&update_topology_flags_work);
			free_raw_capacity();
		}
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 *     but there is no possible logical CPU in the kernel to match. This
 *     happens when CONFIG_NR_CPUS is configured to be smaller than the
 *     number of CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is :%*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

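/*
 * Parse a coreN node of the DT cpu-map: either a leaf with a "cpu" phandle,
 * or a container of threadN children for SMT parts, but never both.
 */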
static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

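/*
 * An illustrative cpu-map, as consumed by parse_socket()/parse_cluster()
 * (see the DT cpu topology bindings for the authoritative layout):
 *
 *	cpu-map {
 *		socket0 {
 *			cluster0 {
 *				core0 { cpu = <&cpu0>; };
 *				core1 { cpu = <&cpu1>; };
 *			};
 *		};
 *	};
 *
 * The socketN level is optional; without it the cpu-map node itself is
 * treated as a single socket.
 */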
static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_socket(map);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			ret = -EINVAL;
			break;
		}

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

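/*
 * Span of the scheduler's MC level for @cpu: the smaller of the NUMA,
 * package and LLC sibling masks, possibly widened to the cluster siblings
 * when no cpu-side LLC is shared (see below).
 */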
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() to span the same or more CPUs than
	 * cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

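/*
 * Fold the newly onlined @cpuid into the LLC/core/cluster/thread sibling
 * masks of every online CPU that shares the corresponding topology level,
 * and vice versa.
 */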
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);

	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information. But do not return yet to give
		 * arch-specific early cache level detection a chance to run.
		 */
		reset_cpu_topology();
	}

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
			continue;
		else if (ret != -ENOENT)
			pr_err("Early cacheinfo failed, ret = %d\n", ret);
		return;
	}
}

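/*
 * Fallback path for systems without usable DT/ACPI topology: make each CPU
 * its own core and group packages by NUMA node the first time the CPU comes
 * up, then (re)build the sibling masks.
 */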
void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif