#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

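/*
 * Per-CPU bookkeeping: each CPU's logical APIC ID (read back from
 * APIC_LDR), the set of online CPUs that share its cluster, and a
 * scratch mask used while composing IPI destinations.
 */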
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

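/*
 * In x2APIC mode the 32-bit logical APIC ID is split into a cluster ID
 * in bits 31:16 and a per-CPU bit within the cluster in bits 15:0, so a
 * CPU's cluster is simply the upper half of its logical ID.
 */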
static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

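/*
 * Because the low 16 bits of a logical ID select individual members of
 * one cluster, the IDs of several CPUs in the same cluster can be OR'ed
 * into a single destination.  For example, assuming three CPUs in
 * cluster 1 with logical IDs 0x00010001, 0x00010002 and 0x00010008, a
 * single ICR write to destination 0x0001000b reaches all three.
 */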
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We have to modify the mask, so we need our own copy and
	 * must be sure it is manipulated with irqs off.
	 */
	ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Cluster sibling cpus should be discarded now so
		 * we do not send an IPI to them a second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	/*
	 * If no online cpu is left in the intersection, the loop above
	 * falls through with cpu == nr_cpu_ids; guard the per_cpu()
	 * access the same way x2apic_cpu_mask_to_apicid() does.
	 */
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);

	return BAD_APICID;
}

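/*
 * Record this CPU's logical APIC ID and cross-link the cluster sibling
 * masks with every online CPU that lives in the same cluster.
 */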
static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	__cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		__cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
		__cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int __cpuinit
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int this_cpu = (unsigned long)hcpu;
	unsigned int cpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
					GFP_KERNEL)) {
			err = -ENOMEM;
		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
					       GFP_KERNEL)) {
			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
			err = -ENOMEM;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		for_each_online_cpu(cpu) {
			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
				continue;
			__cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
			__cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
		}
		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block __refdata x2apic_cpu_notifier = {
	.notifier_call = update_clusterinfo,
};

static int x2apic_init_cpu_notifier(void)
{
	int cpu = smp_processor_id();

	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

	__cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
	register_hotcpu_notifier(&x2apic_cpu_notifier);
	return 1;
}

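/*
 * Driver selection: return 1 (use this driver) only when the CPU is
 * actually running in x2APIC mode, in which case the hotplug notifier
 * and the boot CPU's masks are set up first.
 */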
static int x2apic_cluster_probe(void)
{
	if (x2apic_mode)
		return x2apic_init_cpu_notifier();
	else
		return 0;
}

static struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);
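
/*
 * The listing below is a later revision of the same driver (note the
 * SPDX header): the per-CPU cpus_in_cluster masks and the hotplug
 * notifier are replaced by a shared per-cluster struct cluster_mask and
 * the cpuhp state machine (CPUHP_X2APIC_PREPARE).
 */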
// SPDX-License-Identifier: GPL-2.0
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/irq.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include "x2apic.h"

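/*
 * One cluster_mask is shared by all CPUs of a cluster: it holds the
 * cluster ID, the NUMA node it was allocated on, and the mask of CPUs
 * that have registered as cluster members.
 */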
struct cluster_mask {
	unsigned int	clusterid;
	int		node;
	struct cpumask	mask;
};

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

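/* Single-target IPI: the destination is just that CPU's logical ID. */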
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	x2apic_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/* Remove cluster CPUs from tmpmask */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

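/*
 * With fixed interrupt delivery the effective destination is a single
 * CPU, so the destination APIC ID is that CPU's logical ID.
 */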
static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu);
}

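/*
 * Runs when the local APIC is set up: record the logical ID read from
 * APIC_LDR, then either attach this CPU to an already known
 * cluster_mask or consume the preallocated hotplug spare when the
 * cluster is seen for the first time.
 */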
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	this_cpu_write(x86_cpu_to_logical_apicid, apicid);

	if (cmsk)
		goto update;

	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}

static int alloc_clustermask(unsigned int cpu, int node)
{
	if (per_cpu(cluster_masks, cpu))
		return 0;
	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}

static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

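/* Undo x2apic_prepare_cpu(): detach the CPU and free its scratch mask. */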
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

	/* The cluster mask may never have been set up for this CPU. */
	if (cmsk)
		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

static int x2apic_cluster_probe(void)
{
	if (!x2apic_mode)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 1, /* logical */

	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);