// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

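/*
 * Each cluster_mask describes one x2APIC cluster: its cluster ID, the
 * NUMA node the structure was allocated on, and the mask of CPUs that
 * belong to the cluster.
 */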
struct cluster_mask {
	unsigned int	clusterid;
	int		node;
	struct cpumask	mask;
};

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online cpus in a sequential way.
 * Using a per-CPU variable would cost one cache line per CPU.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

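/*
 * In x2APIC logical destination mode the 32-bit logical ID is split into
 * a 16-bit cluster ID and a 16-bit bitmask of CPUs within that cluster,
 * so the logical IDs of CPUs in the same cluster can be ORed together
 * and reached with a single ICR write.  __x2apic_send_IPI_mask()
 * exploits this: it walks the target mask cluster by cluster and sends
 * one IPI per cluster instead of one per CPU.
 */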
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If IPI should not be sent to self, clear current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse cpus in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove cluster CPUs from tmpmask */
		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}

static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}

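/*
 * In x2APIC mode the LDR is read-only and derived by hardware from the
 * APIC ID, so init_x2apic_ldr() only caches its value and links the CPU
 * into the cluster_mask matching its cluster ID, consuming the spare
 * hotplug mask when the cluster is seen for the first time.
 */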
static void init_x2apic_ldr(void)
{
	struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
	u32 cluster, apicid = apic_read(APIC_LDR);
	unsigned int cpu;

	x86_cpu_to_logical_apicid[smp_processor_id()] = apicid;

	if (cmsk)
		goto update;

	cluster = apicid >> 16;
	for_each_online_cpu(cpu) {
		cmsk = per_cpu(cluster_masks, cpu);
		/* Matching cluster found. Link and update it. */
		if (cmsk && cmsk->clusterid == cluster)
			goto update;
	}
	cmsk = cluster_hotplug_mask;
	cmsk->clusterid = cluster;
	cluster_hotplug_mask = NULL;
update:
	this_cpu_write(cluster_masks, cmsk);
	cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}

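/*
 * The cluster mask is allocated from the hotplug PREPARE stage, where
 * sleeping allocations are still allowed.  At most one spare mask is
 * kept around; init_x2apic_ldr() consumes it when the incoming CPU is
 * the first member of a not-yet-seen cluster.
 */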
static int alloc_clustermask(unsigned int cpu, int node)
{
	if (per_cpu(cluster_masks, cpu))
		return 0;
	/*
	 * If a hotplug spare mask exists, check whether it's on the right
	 * node. If not, free it and allocate a new one.
	 */
	if (cluster_hotplug_mask) {
		if (cluster_hotplug_mask->node == node)
			return 0;
		kfree(cluster_hotplug_mask);
	}

	cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
					    GFP_KERNEL, node);
	if (!cluster_hotplug_mask)
		return -ENOMEM;
	cluster_hotplug_mask->node = node;
	return 0;
}

static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, &cmsk->mask);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

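/*
 * x2apic_cluster_probe() allocates the logical APIC ID lookup array,
 * rounded up to at least one cache line worth of u32 slots (presumably
 * so the array does not share its cache line with unrelated data), and
 * registers the hotplug callbacks.  Returning 1 selects this driver.
 */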
static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.delivery_mode			= APIC_DELIVERY_MODE_FIXED,
	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.check_apicid_used		= NULL,
	.init_apic_ldr			= init_x2apic_ldr,
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);
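
/*
 * What follows appears to be an earlier revision of the same driver,
 * predating the cpuhp state machine: cluster membership is tracked in
 * per-CPU cpus_in_cluster masks and kept up to date through a CPU
 * hotplug notifier instead of CPUHP_X2APIC_PREPARE callbacks.
 */
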
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	x2apic_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

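/*
 * Same one-IPI-per-cluster idea as above, but self-exclusion for
 * APIC_DEST_ALLBUT is handled inside the collect loop rather than by
 * clearing the current CPU from the temporary mask up front.
 */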
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are going to modify the mask, so we need our own copy, and
	 * we need to be sure it is manipulated with interrupts disabled.
	 */
	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Cluster sibling cpus should be discarded now so
		 * we do not send an IPI to them a second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

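/*
 * Compute a logical destination for the intersection of two cpumasks.
 * The first online CPU in the intersection fixes the cluster; the
 * logical IDs of all other online CPUs in that same cluster are then
 * ORed in, since a single logical destination can only address CPUs
 * within one cluster.
 */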
static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask,
			      unsigned int *apicid)
{
	u32 dest = 0;
	u16 cluster;
	int i;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		dest = per_cpu(x86_cpu_to_logical_apicid, i);
		cluster = x2apic_cluster(i);
		break;
	}

	if (!dest)
		return -EINVAL;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		if (cluster != x2apic_cluster(i))
			continue;
		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
	}

	*apicid = dest;

	return 0;
}

static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
		cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	unsigned int this_cpu = (unsigned long)hcpu;
	unsigned int cpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
					GFP_KERNEL)) {
			err = -ENOMEM;
		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
					       GFP_KERNEL)) {
			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
			err = -ENOMEM;
		}
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
		for_each_online_cpu(cpu) {
			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
				continue;
			cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
			cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
		}
		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
		break;
	}

	return notifier_from_errno(err);
}

static struct notifier_block x2apic_cpu_notifier = {
	.notifier_call = update_clusterinfo,
};

static int x2apic_init_cpu_notifier(void)
{
	int cpu = smp_processor_id();

	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
	register_hotcpu_notifier(&x2apic_cpu_notifier);
	return 1;
}

static int x2apic_cluster_probe(void)
{
	if (x2apic_mode)
		return x2apic_init_cpu_notifier();
	else
		return 0;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
	return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
					     const struct cpumask *mask)
{
	/*
	 * To minimize vector pressure, the default case of boot, device
	 * bringup, etc. will use a single cpu as the interrupt destination.
	 *
	 * On explicit migration requests coming from irqbalance etc.,
	 * interrupts will be routed to the x2apic cluster (cluster-id
	 * derived from the first cpu in the mask) members specified
	 * in the mask.
	 */
	if (mask == x2apic_cluster_target_cpus())
		cpumask_copy(retmask, cpumask_of(cpu));
	else
		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}

static struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_cluster_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.vector_allocation_domain	= cluster_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);