/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>

#include <asm/topology.h>

/*
 * cpu topology table
 */
struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
EXPORT_SYMBOL_GPL(cpu_topology);

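/*
 * core_sibling holds every CPU that shares a socket with @cpu, as built
 * by update_siblings_masks() below.
 */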
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        return &cpu_topology[cpu].core_sibling;
}

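/*
 * Rebuild the sibling masks of @cpuid and of its peers: core_sibling covers
 * all CPUs in the same socket, thread_sibling all CPUs in the same core.
 */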
static void update_siblings_masks(unsigned int cpuid)
{
        struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->socket_id != cpu_topo->socket_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                if (cpu != cpuid)
                        cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
        smp_wmb();
}

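/*
 * Set by store_cpu_topology() once two CPUs sharing a cpu_loc have been
 * seen; only then does init_cpu_topology() install the MC scheduler level.
 */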
static int dualcores_found __initdata;

/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init store_cpu_topology(unsigned int cpuid)
{
        struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
        struct cpuinfo_parisc *p;
        int max_socket = -1;
        unsigned long cpu;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

        /* create cpu topology mapping */
        cpuid_topo->thread_id = -1;
        cpuid_topo->core_id = 0;

        p = &per_cpu(cpu_data, cpuid);
        for_each_online_cpu(cpu) {
                const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

                if (cpu == cpuid) /* ignore current cpu */
                        continue;

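                /*
                 * CPUs that report the same non-zero cpu_loc (the CPU's
                 * physical location, as reported by firmware) are treated
                 * as the cores of one dual-core socket: reuse that socket
                 * id and take the next core id.
                 */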
                if (cpuinfo->cpu_loc == p->cpu_loc) {
                        cpuid_topo->core_id = cpu_topology[cpu].core_id;
                        if (p->cpu_loc) {
                                cpuid_topo->core_id++;
                                cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
                                dualcores_found = 1;
                                continue;
                        }
                }

                if (cpuid_topo->socket_id == -1)
                        max_socket = max(max_socket, cpu_topology[cpu].socket_id);
        }

        if (cpuid_topo->socket_id == -1)
                cpuid_topo->socket_id = max_socket + 1;

        update_siblings_masks(cpuid);

        pr_info("CPU%u: thread %d, cpu %d, socket %d\n",
                cpuid, cpu_topology[cpuid].thread_id,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].socket_id);
}

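/*
 * Scheduler topology used when dual-core sockets were found: an MC level
 * grouping the cores of a socket, plus the default DIE level.
 */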
static struct sched_domain_topology_level parisc_mc_topology[] = {
#ifdef CONFIG_SCHED_MC
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif

        { cpu_cpu_mask, SD_INIT_NAME(DIE) },
        { NULL, },
};

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
        unsigned int cpu;

        /* init core mask and capacity */
        for_each_possible_cpu(cpu) {
                struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
        }
        smp_wmb();

        /* Set scheduler topology descriptor */
        if (dualcores_found)
                set_sched_topology(parisc_mc_topology);
}

/*
 * arch/parisc/kernel/topology.c
 *
 * Copyright (C) 2017 Helge Deller <deller@gmx.de>
 *
 * based on arch/arm/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/cpu.h>

#include <asm/topology.h>
#include <asm/sections.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);

/*
 * store_cpu_topology is called at boot when only one cpu is running
 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
 * which prevents simultaneous write access to the cpu_topology array
 */
void store_cpu_topology(unsigned int cpuid)
{
        struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
        struct cpuinfo_parisc *p;
        int max_socket = -1;
        unsigned long cpu;

        /* If the cpu topology has already been set, just return */
        if (cpuid_topo->core_id != -1)
                return;

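        /*
         * Register the CPU with the driver core so it shows up under
         * /sys/devices/system/cpu/; mark it hotpluggable when CPU hotplug
         * support is built in.
         */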
#ifdef CONFIG_HOTPLUG_CPU
        per_cpu(cpu_devices, cpuid).hotpluggable = 1;
#endif
        if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
                pr_warn("Failed to register CPU%d device", cpuid);

        /* create cpu topology mapping */
        cpuid_topo->thread_id = -1;
        cpuid_topo->core_id = 0;

        p = &per_cpu(cpu_data, cpuid);
        for_each_online_cpu(cpu) {
                const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

                if (cpu == cpuid) /* ignore current cpu */
                        continue;

                if (cpuinfo->cpu_loc == p->cpu_loc) {
                        cpuid_topo->core_id = cpu_topology[cpu].core_id;
                        if (p->cpu_loc) {
                                cpuid_topo->core_id++;
                                cpuid_topo->package_id = cpu_topology[cpu].package_id;
                                continue;
                        }
                }

                if (cpuid_topo->package_id == -1)
                        max_socket = max(max_socket, cpu_topology[cpu].package_id);
        }

        if (cpuid_topo->package_id == -1)
                cpuid_topo->package_id = max_socket + 1;

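        /*
         * cpu_topology[] and update_siblings_masks() are now provided by the
         * generic arch topology code (drivers/base/arch_topology.c).
         */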
        update_siblings_masks(cpuid);

        pr_info("CPU%u: cpu core %d of socket %d\n",
                cpuid,
                cpu_topology[cpuid].core_id,
                cpu_topology[cpuid].package_id);
}

/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
 */
void __init init_cpu_topology(void)
{
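        /*
         * The generic reset_cpu_topology() clears the package/core/thread
         * ids and the sibling masks of every possible CPU.
         */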
        reset_cpu_topology();
}