// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_pressure.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

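/*
 * Illustrative sketch (not part of this file): how an architecture might
 * register a counter-driven frequency-scale source. The callback name
 * amu_scale_freq_tick and the mask amu_cpus are hypothetical; the struct
 * and enum come from <linux/arch_topology.h>.
 *
 *	static void amu_scale_freq_tick(void)
 *	{
 *		// derive a scale factor from HW counters, then publish it,
 *		// e.g.: this_cpu_write(arch_freq_scale, scale);
 *	}
 *
 *	static struct scale_freq_data amu_sfd = {
 *		.source		= SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale	= amu_scale_freq_tick,
 *	};
 *
 *	// from arch init code, for the CPUs with usable counters:
 *	// topology_set_scale_freq_source(&amu_sfd, &amu_cpus);
 */
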
void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}

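/*
 * Worked example (illustrative numbers): with SCHED_CAPACITY_SHIFT = 10,
 * a CPU running at cur_freq = 1200 MHz out of max_freq = 2000 MHz gets
 * scale = (1200 << 10) / 2000 = 614, i.e. roughly 60% of
 * SCHED_CAPACITY_SCALE (1024).
 */
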
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPU frequency due to
 * thermal capping. It might also be a boost frequency value, which is
 * bigger than the internal 'freq_factor' max frequency. In such a case the
 * pressure value should simply be removed, since this is an indication that
 * there is no thermal throttling. The @capped_freq must be provided in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to MHz scale which is used in 'freq_factor' */
	capped_freq /= 1000;

	/*
	 * Handle properly the boost frequencies, which should simply clean
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	trace_thermal_pressure_update(cpu, th_pressure);

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);

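/*
 * Worked example (illustrative numbers): a CPU with
 * arch_scale_cpu_capacity() = 1024 and freq_factor = 2000 (MHz) that is
 * thermally capped to capped_freq = 1500000 kHz (1500 MHz) ends up with
 * capacity = mult_frac(1024, 1500, 2000) = 768, so a thermal pressure of
 * 1024 - 768 = 256 is written for every CPU in the mask. A capped_freq at
 * or above 2000000 kHz clears the pressure back to 0.
 */
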
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
			capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			cpu, topology_get_cpu_scale(cpu));
	}
}

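/*
 * Worked example (illustrative numbers): on a two-cluster system with
 * raw_capacity = {512, 1024} and freq_factor = {1800, 2400} (MHz), the
 * products are {921600, 2457600}, so capacity_scale = 2457600 and the
 * normalized capacities become (921600 << 10) / 2457600 = 384 for the
 * little CPUs and 1024 for the big ones.
 */
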
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU capacities.
		 * For non-clk CPU DVFS mechanisms there's no way to get the
		 * frequency value now, so assume those CPUs are running at the
		 * same frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
				cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

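/*
 * Illustrative DT fragment (not part of this file) consumed by
 * topology_parse_cpu_capacity() above; see the cpu-capacity DT binding
 * documentation for the full rules:
 *
 *	cpu0: cpu@0 {
 *		device_type = "cpu";
 *		capacity-dmips-mhz = <512>;
 *	};
 *	cpu4: cpu@100 {
 *		device_type = "cpu";
 *		capacity-dmips-mhz = <1024>;
 *	};
 */
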
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>

void topology_init_cpu_capacity_cppc(void)
{
	struct cppc_perf_caps perf_caps;
	int cpu;

	if (likely(!acpi_cpc_valid()))
		return;

	raw_capacity = kcalloc(num_possible_cpus(), sizeof(*raw_capacity),
			       GFP_KERNEL);
	if (!raw_capacity)
		return;

	for_each_possible_cpu(cpu) {
		if (!cppc_get_perf_caps(cpu, &perf_caps) &&
		    (perf_caps.highest_perf >= perf_caps.nominal_perf) &&
		    (perf_caps.highest_perf >= perf_caps.lowest_perf)) {
			raw_capacity[cpu] = perf_caps.highest_perf;
			pr_debug("cpu_capacity: CPU%d cpu_capacity=%u (raw).\n",
				 cpu, raw_capacity[cpu]);
			continue;
		}

		pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu);
		pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		goto exit;
	}

	topology_normalize_cpu_scale();
	schedule_work(&update_topology_flags_work);
	pr_debug("cpu_capacity: cpu_capacity initialization done\n");

exit:
	free_raw_capacity();
}
#endif

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering cpufreq notifier as cpufreq
	 * information is not needed for cpu capacity initialization.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical cpu number of the node.
 * There are basically three kinds of return values:
 * (1) logical cpu number, which is >= 0.
 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int cluster_id, int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].cluster_id = cluster_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].cluster_id = cluster_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int package_id,
				int cluster_id, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, package_id, i, depth + 1);
			if (depth > 0)
				pr_warn("Topology for clusters of clusters not yet supported\n");
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, cluster_id,
						 core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	return 0;
}

static int __init parse_socket(struct device_node *socket)
{
	char name[20];
	struct device_node *c;
	bool has_socket = false;
	int package_id = 0, ret;

	do {
		snprintf(name, sizeof(name), "socket%d", package_id);
		c = of_get_child_by_name(socket, name);
		if (c) {
			has_socket = true;
			ret = parse_cluster(c, package_id, -1, 0);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		package_id++;
	} while (c);

	if (!has_socket)
		ret = parse_cluster(socket, 0, -1, 0);

	return ret;
}

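/*
 * Illustrative cpu-map DT fragment (not part of this file) matching the
 * socket/cluster/core/thread walk above; see the kernel's cpu topology DT
 * binding documentation for the full rules:
 *
 *	cpus {
 *		cpu-map {
 *			socket0 {
 *				cluster0 {
 *					core0 { cpu = <&cpu0>; };
 *					core1 { cpu = <&cpu1>; };
 *				};
 *				cluster1 {
 *					core0 {
 *						thread0 { cpu = <&cpu2>; };
 *						thread1 { cpu = <&cpu3>; };
 *					};
 *				};
 *			};
 *		};
 *	};
 */
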
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_socket(map);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id < 0) {
			ret = -EINVAL;
			break;
		}

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not numa in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}

	if (last_level_cache_is_valid(cpu)) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	/*
	 * For systems with no shared cpu-side LLC but with clusters defined,
	 * extend core_mask to cluster_siblings. The sched domain builder will
	 * then remove MC as redundant with CLS if SCHED_CLUSTER is enabled.
	 */
	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
	    cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling))
		core_mask = &cpu_topology[cpu].cluster_sibling;

	return core_mask;
}

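/*
 * Worked example (illustrative): on a single-package machine where the NUMA
 * node and core_sibling both span CPUs 0-7 while llc_sibling spans 0-3,
 * core_mask narrows from the node span to the LLC siblings, so
 * cpu_coregroup_mask(1) returns CPUs 0-3.
 */
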
const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	/*
	 * Forbid cpu_clustergroup_mask() to span more CPUs than, or the same
	 * CPUs as, cpu_coregroup_mask().
	 */
	if (cpumask_subset(cpu_coregroup_mask(cpu),
			   &cpu_topology[cpu].cluster_sibling))
		return topology_sibling_cpumask(cpu);

	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu, ret;

	ret = detect_cache_attributes(cpuid);
	if (ret && ret != -ENOENT)
		pr_info("Early cacheinfo failed, ret = %d\n", ret);

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (last_level_cache_is_shared(cpu, cpuid)) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
			continue;

		if (cpuid_topo->cluster_id >= 0) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int ret;

	reset_cpu_topology();
	ret = parse_acpi_topology();
	if (!ret)
		ret = of_have_populated_dt() && parse_dt_topology();

	if (ret) {
		/*
		 * Discard anything that was parsed if we hit an error so we
		 * don't use partial information.
		 */
		reset_cpu_topology();
		return;
	}
}

void store_cpu_topology(unsigned int cpuid)
{
	struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

	if (cpuid_topo->package_id != -1)
		goto topology_populated;

	cpuid_topo->thread_id = -1;
	cpuid_topo->core_id = cpuid;
	cpuid_topo->package_id = cpu_to_node(cpuid);

	pr_debug("CPU%u: package %d core %d thread %d\n",
		 cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
		 cpuid_topo->thread_id);

topology_populated:
	update_siblings_masks(cpuid);
}
#endif