#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

static inline u32 x2apic_cluster(int cpu)
{
        return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}
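
/*
 * Per the Intel SDM, in x2APIC mode the 32-bit logical APIC ID read
 * from APIC_LDR is (cluster_id << 16) | (1 << (x2apic_id & 0xf)):
 * bits 31:16 name the cluster and bits 15:0 are a one-hot mask of up
 * to 16 CPUs within it. The shift by 16 above recovers the cluster ID.
 */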

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        struct cpumask *cpus_in_cluster_ptr;
        struct cpumask *ipi_mask_ptr;
        unsigned int cpu, this_cpu;
        unsigned long flags;
        u32 dest;

        x2apic_wrmsr_fence();

        local_irq_save(flags);

        this_cpu = smp_processor_id();

        /*
         * We are going to modify the mask, so we need our own copy and
         * must be sure it is manipulated with interrupts disabled.
         */
        ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
        cpumask_copy(ipi_mask_ptr, mask);

        /*
         * The idea is to send one IPI per cluster.
         */
        for_each_cpu(cpu, ipi_mask_ptr) {
                unsigned long i;

                cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
                dest = 0;

                /* Collect cpus in cluster. */
                for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
                        if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
                                dest |= per_cpu(x86_cpu_to_logical_apicid, i);
                }

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
                /*
                 * The cluster siblings are discarded now so that we
                 * do not send an IPI to them a second time.
                 */
                cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
        }

        local_irq_restore(flags);
}
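
/*
 * Worked example (hypothetical IDs): CPUs 4..7 sharing cluster 1 have
 * logical APIC IDs 0x00010001, 0x00010002, 0x00010004 and 0x00010008.
 * ORing them yields dest = 0x0001000f, so a single ICR write in
 * logical destination mode reaches all four CPUs at once.
 */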

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        /*
         * We're using fixed IRQ delivery, can only return one logical APIC ID.
         * May as well be the first.
         */
        int cpu = cpumask_first(cpumask);

        if ((unsigned)cpu < nr_cpu_ids)
                return per_cpu(x86_cpu_to_logical_apicid, cpu);
        else
                return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one logical APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        break;
        }

        /* No online CPU in the intersection: report failure. */
        if (cpu >= nr_cpu_ids)
                return BAD_APICID;

        return per_cpu(x86_cpu_to_logical_apicid, cpu);
}

static void init_x2apic_ldr(void)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

        __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
        for_each_online_cpu(cpu) {
                if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                        continue;
                __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
                __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
        }
}
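
/*
 * Note the symmetric update above: the booting CPU records every
 * online cluster sibling in its own cpus_in_cluster mask, and adds
 * itself to each sibling's mask, so both sides stay consistent.
 */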

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int __cpuinit
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int this_cpu = (unsigned long)hcpu;
        unsigned int cpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
                if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
                                        GFP_KERNEL)) {
                        err = -ENOMEM;
                } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
                                               GFP_KERNEL)) {
                        free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                        err = -ENOMEM;
                }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
                for_each_online_cpu(cpu) {
                        if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                                continue;
                        __cpu_clear(this_cpu, per_cpu(cpus_in_cluster, cpu));
                        __cpu_clear(cpu, per_cpu(cpus_in_cluster, this_cpu));
                }
                free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                free_cpumask_var(per_cpu(ipi_mask, this_cpu));
                break;
        }

        return notifier_from_errno(err);
}

static struct notifier_block __refdata x2apic_cpu_notifier = {
        .notifier_call = update_clusterinfo,
};

static int x2apic_init_cpu_notifier(void)
{
        int cpu = smp_processor_id();

        zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

        BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

        __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
        register_hotcpu_notifier(&x2apic_cpu_notifier);
        return 1;
}

static int x2apic_cluster_probe(void)
{
        if (x2apic_mode)
                return x2apic_init_cpu_notifier();
        else
                return 0;
}
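
/*
 * A non-zero return from ->probe() tells the generic APIC setup code
 * to select this driver; returning 0 lets probing fall through to the
 * next registered driver (such as the physical-mode x2apic driver).
 */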

static struct apic apic_x2apic_cluster = {

        .name = "cluster x2apic",
        .probe = x2apic_cluster_probe,
        .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
        .apic_id_registered = x2apic_apic_id_registered,

        .irq_delivery_mode = dest_LowestPrio,
        .irq_dest_mode = 1, /* logical */

        .target_cpus = x2apic_target_cpus,
        .disable_esr = 0,
        .dest_logical = APIC_DEST_LOGICAL,
        .check_apicid_used = NULL,
        .check_apicid_present = NULL,

        .vector_allocation_domain = x2apic_vector_allocation_domain,
        .init_apic_ldr = init_x2apic_ldr,

        .ioapic_phys_id_map = NULL,
        .setup_apic_routing = NULL,
        .multi_timer_check = NULL,
        .cpu_present_to_apicid = default_cpu_present_to_apicid,
        .apicid_to_cpu_present = NULL,
        .setup_portio_remap = NULL,
        .check_phys_apicid_present = default_check_phys_apicid_present,
        .enable_apic_mode = NULL,
        .phys_pkg_id = x2apic_phys_pkg_id,
        .mps_oem_check = NULL,

        .get_apic_id = x2apic_get_apic_id,
        .set_apic_id = x2apic_set_apic_id,
        .apic_id_mask = 0xFFFFFFFFu,

        .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,

        .send_IPI_mask = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself = x2apic_send_IPI_allbutself,
        .send_IPI_all = x2apic_send_IPI_all,
        .send_IPI_self = x2apic_send_IPI_self,

        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
        .wait_for_init_deassert = NULL,
        .smp_callin_clear_local_apic = NULL,
        .inquire_remote_apic = NULL,

        .read = native_apic_msr_read,
        .write = native_apic_msr_write,
        .icr_read = native_x2apic_icr_read,
        .icr_write = native_x2apic_icr_write,
        .wait_icr_idle = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);
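
/*
 * apic_driver() places a pointer to this driver in the .apicdrivers
 * section, which the generic APIC setup code scans at boot when
 * picking a driver.
 *
 * The listing below is a later revision of this same file: the CPU
 * hotplug and mask-IPI paths are unchanged in structure, but it adds
 * a single-CPU send_IPI() callback, a cluster-aware vector allocation
 * domain and an error-returning cpu_mask_to_apicid_and().
 */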
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

static inline u32 x2apic_cluster(int cpu)
{
        return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}

static void x2apic_send_IPI(int cpu, int vector)
{
        u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

        x2apic_wrmsr_fence();
        __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
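
/*
 * Single-destination path: the target's logical APIC ID already
 * encodes both the cluster and the member bit, so no mask walking or
 * interrupt masking is needed; one fence and one ICR write suffice.
 */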

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        struct cpumask *cpus_in_cluster_ptr;
        struct cpumask *ipi_mask_ptr;
        unsigned int cpu, this_cpu;
        unsigned long flags;
        u32 dest;

        x2apic_wrmsr_fence();

        local_irq_save(flags);

        this_cpu = smp_processor_id();

        /*
         * We are going to modify the mask, so we need our own copy and
         * must be sure it is manipulated with interrupts disabled.
         */
        ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
        cpumask_copy(ipi_mask_ptr, mask);

        /*
         * The idea is to send one IPI per cluster.
         */
        for_each_cpu(cpu, ipi_mask_ptr) {
                unsigned long i;

                cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
                dest = 0;

                /* Collect cpus in cluster. */
                for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
                        if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
                                dest |= per_cpu(x86_cpu_to_logical_apicid, i);
                }

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
                /*
                 * The cluster siblings are discarded now so that we
                 * do not send an IPI to them a second time.
                 */
                cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
        }

        local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                              const struct cpumask *andmask,
                              unsigned int *apicid)
{
        u32 dest = 0;
        u16 cluster;
        int i;

        for_each_cpu_and(i, cpumask, andmask) {
                if (!cpumask_test_cpu(i, cpu_online_mask))
                        continue;
                dest = per_cpu(x86_cpu_to_logical_apicid, i);
                cluster = x2apic_cluster(i);
                break;
        }

        if (!dest)
                return -EINVAL;

        for_each_cpu_and(i, cpumask, andmask) {
                if (!cpumask_test_cpu(i, cpu_online_mask))
                        continue;
                if (cluster != x2apic_cluster(i))
                        continue;
                dest |= per_cpu(x86_cpu_to_logical_apicid, i);
        }

        *apicid = dest;

        return 0;
}
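
/*
 * The first pass above picks the first online CPU in the intersection
 * and fixes the cluster; the second pass ORs in the logical IDs of all
 * other eligible CPUs in that same cluster. With lowest-priority
 * delivery, the hardware then picks one of those members per interrupt.
 */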

static void init_x2apic_ldr(void)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int cpu;

        per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

        cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
        for_each_online_cpu(cpu) {
                if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                        continue;
                cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
                cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
        }
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int
update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        unsigned int this_cpu = (unsigned long)hcpu;
        unsigned int cpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
                if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
                                        GFP_KERNEL)) {
                        err = -ENOMEM;
                } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
                                               GFP_KERNEL)) {
                        free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                        err = -ENOMEM;
                }
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
                for_each_online_cpu(cpu) {
                        if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                                continue;
                        cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
                        cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
                }
                free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
                free_cpumask_var(per_cpu(ipi_mask, this_cpu));
                break;
        }

        return notifier_from_errno(err);
}

static struct notifier_block x2apic_cpu_notifier = {
        .notifier_call = update_clusterinfo,
};

static int x2apic_init_cpu_notifier(void)
{
        int cpu = smp_processor_id();

        zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
        zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);

        BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
        register_hotcpu_notifier(&x2apic_cpu_notifier);
        return 1;
}

static int x2apic_cluster_probe(void)
{
        if (x2apic_mode)
                return x2apic_init_cpu_notifier();
        else
                return 0;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
        return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
                                             const struct cpumask *mask)
{
        /*
         * To minimize vector pressure, the default cases (boot, device
         * bringup, etc.) use a single CPU as the interrupt destination.
         *
         * On explicit migration requests coming from irqbalance etc.,
         * interrupts will be routed to the members, specified in the
         * mask, of the x2apic cluster whose cluster-id is derived from
         * the first CPU in the mask.
         */
        if (mask == x2apic_cluster_target_cpus())
                cpumask_copy(retmask, cpumask_of(cpu));
        else
                cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
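
/*
 * The trade-off in numbers: with up to 16 CPUs per cluster, routing an
 * interrupt to a whole cluster consumes the chosen vector on every
 * member, while the single-CPU default burns it on just one CPU.
 */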

static struct apic apic_x2apic_cluster = {

        .name = "cluster x2apic",
        .probe = x2apic_cluster_probe,
        .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
        .apic_id_valid = x2apic_apic_id_valid,
        .apic_id_registered = x2apic_apic_id_registered,

        .irq_delivery_mode = dest_LowestPrio,
        .irq_dest_mode = 1, /* logical */

        .target_cpus = x2apic_cluster_target_cpus,
        .disable_esr = 0,
        .dest_logical = APIC_DEST_LOGICAL,
        .check_apicid_used = NULL,

        .vector_allocation_domain = cluster_vector_allocation_domain,
        .init_apic_ldr = init_x2apic_ldr,

        .ioapic_phys_id_map = NULL,
        .setup_apic_routing = NULL,
        .cpu_present_to_apicid = default_cpu_present_to_apicid,
        .apicid_to_cpu_present = NULL,
        .check_phys_apicid_present = default_check_phys_apicid_present,
        .phys_pkg_id = x2apic_phys_pkg_id,

        .get_apic_id = x2apic_get_apic_id,
        .set_apic_id = x2apic_set_apic_id,
        .apic_id_mask = 0xFFFFFFFFu,

        .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,

        .send_IPI = x2apic_send_IPI,
        .send_IPI_mask = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself = x2apic_send_IPI_allbutself,
        .send_IPI_all = x2apic_send_IPI_all,
        .send_IPI_self = x2apic_send_IPI_self,

        .inquire_remote_apic = NULL,

        .read = native_apic_msr_read,
        .write = native_apic_msr_write,
        .eoi_write = native_apic_msr_eoi_write,
        .icr_read = native_x2apic_icr_read,
        .icr_write = native_x2apic_icr_write,
        .wait_icr_idle = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);