/* x2APIC physical destination mode driver (x86). */
1#include <linux/threads.h>
2#include <linux/cpumask.h>
3#include <linux/string.h>
4#include <linux/kernel.h>
5#include <linux/ctype.h>
6#include <linux/init.h>
7#include <linux/dmar.h>
8
9#include <asm/smp.h>
10#include <asm/x2apic.h>
11
/* Non-zero when physical destination mode was requested ("x2apic_phys"
 * command line option) or mandated by the ACPI FADT. */
int x2apic_phys;

/* Forward declaration so x2apic_phys_probe() can compare against it. */
static struct apic apic_x2apic_phys;
15
16static int set_x2apic_phys_mode(char *arg)
17{
18 x2apic_phys = 1;
19 return 0;
20}
21early_param("x2apic_phys", set_x2apic_phys_mode);
22
23static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
24{
25 if (x2apic_phys)
26 return x2apic_enabled();
27 else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
28 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
29 x2apic_enabled()) {
30 printk(KERN_DEBUG "System requires x2apic physical mode\n");
31 return 1;
32 }
33 else
34 return 0;
35}
36
/*
 * Send @vector as an IPI to every CPU in @mask using physical
 * destinations, one ICR write per target CPU.
 *
 * @apic_dest selects whether the sending CPU is skipped
 * (APIC_DEST_ALLBUT) or included (APIC_DEST_ALLINC).
 *
 * x2apic_wrmsr_fence() orders prior memory accesses before the ICR MSR
 * writes (ICR writes in x2apic mode are MSRs, which are presumably not
 * serializing — hence the explicit fence).  Interrupts are disabled for
 * the whole loop so this_cpu stays valid and the sequence of ICR writes
 * is not interleaved with IPIs sent from an interrupt handler.
 */
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		/* Skip ourselves when the caller asked for "all but self". */
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
57
/* Send @vector to every CPU in @mask, including the current CPU. */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}
62
/* Send @vector to every CPU in @mask except the current CPU. */
static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
68
/* Send @vector to every online CPU except the current CPU. */
static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}
73
/* Send @vector to every online CPU, including the current CPU. */
static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
78
79static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
80{
81 /*
82 * We're using fixed IRQ delivery, can only return one phys APIC ID.
83 * May as well be the first.
84 */
85 int cpu = cpumask_first(cpumask);
86
87 if ((unsigned)cpu < nr_cpu_ids)
88 return per_cpu(x86_cpu_to_apicid, cpu);
89 else
90 return BAD_APICID;
91}
92
93static unsigned int
94x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
95 const struct cpumask *andmask)
96{
97 int cpu;
98
99 /*
100 * We're using fixed IRQ delivery, can only return one phys APIC ID.
101 * May as well be the first.
102 */
103 for_each_cpu_and(cpu, cpumask, andmask) {
104 if (cpumask_test_cpu(cpu, cpu_online_mask))
105 break;
106 }
107
108 return per_cpu(x86_cpu_to_apicid, cpu);
109}
110
/*
 * Intentionally empty: physical destination mode does not use the
 * Logical Destination Register, so there is nothing to set up.
 */
static void init_x2apic_ldr(void)
{
}
114
115static int x2apic_phys_probe(void)
116{
117 if (x2apic_mode && x2apic_phys)
118 return 1;
119
120 return apic == &apic_x2apic_phys;
121}
122
/* The apic driver instance for x2apic physical destination mode. */
static struct apic apic_x2apic_phys = {

	.name				= "physical x2apic",
	.probe				= x2apic_phys_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	/* Fixed delivery, physical destination mode. */
	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	/* NULL callbacks are operations not needed in x2apic phys mode. */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* x2apic uses full 32-bit APIC IDs. */
	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	/* IPI transmission helpers defined above. */
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* Register access goes through MSRs in x2apic mode. */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_phys);
1#include <linux/threads.h>
2#include <linux/cpumask.h>
3#include <linux/string.h>
4#include <linux/kernel.h>
5#include <linux/ctype.h>
6#include <linux/init.h>
7#include <linux/dmar.h>
8
9#include <asm/smp.h>
10#include <asm/x2apic.h>
11
/* Non-zero when physical destination mode was requested on the
 * kernel command line ("x2apic_phys"). */
int x2apic_phys;

/* Forward declaration so x2apic_phys_probe() can compare against it. */
static struct apic apic_x2apic_phys;
15
16static int set_x2apic_phys_mode(char *arg)
17{
18 x2apic_phys = 1;
19 return 0;
20}
21early_param("x2apic_phys", set_x2apic_phys_mode);
22
23static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
24{
25 if (x2apic_phys)
26 return x2apic_enabled();
27 else
28 return 0;
29}
30
/*
 * Send @vector as an IPI to every CPU in @mask using physical
 * destinations, one ICR write per target CPU.
 *
 * @apic_dest selects whether the sending CPU is skipped
 * (APIC_DEST_ALLBUT) or included (APIC_DEST_ALLINC).
 *
 * x2apic_wrmsr_fence() orders prior memory accesses before the ICR MSR
 * writes (ICR writes in x2apic mode are MSRs, which are presumably not
 * serializing — hence the explicit fence).  Interrupts are disabled for
 * the whole loop so this_cpu stays valid and the sequence of ICR writes
 * is not interleaved with IPIs sent from an interrupt handler.
 */
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		/* Skip ourselves when the caller asked for "all but self". */
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
51
/* Send @vector to every CPU in @mask, including the current CPU. */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}
56
/* Send @vector to every CPU in @mask except the current CPU. */
static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
62
/* Send @vector to every online CPU except the current CPU. */
static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}
67
/* Send @vector to every online CPU, including the current CPU. */
static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
72
73static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
74{
75 /*
76 * We're using fixed IRQ delivery, can only return one phys APIC ID.
77 * May as well be the first.
78 */
79 int cpu = cpumask_first(cpumask);
80
81 if ((unsigned)cpu < nr_cpu_ids)
82 return per_cpu(x86_cpu_to_apicid, cpu);
83 else
84 return BAD_APICID;
85}
86
87static unsigned int
88x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
89 const struct cpumask *andmask)
90{
91 int cpu;
92
93 /*
94 * We're using fixed IRQ delivery, can only return one phys APIC ID.
95 * May as well be the first.
96 */
97 for_each_cpu_and(cpu, cpumask, andmask) {
98 if (cpumask_test_cpu(cpu, cpu_online_mask))
99 break;
100 }
101
102 return per_cpu(x86_cpu_to_apicid, cpu);
103}
104
/*
 * Intentionally empty: physical destination mode does not use the
 * Logical Destination Register, so there is nothing to set up.
 */
static void init_x2apic_ldr(void)
{
}
108
109static int x2apic_phys_probe(void)
110{
111 if (x2apic_mode && x2apic_phys)
112 return 1;
113
114 return apic == &apic_x2apic_phys;
115}
116
/* The apic driver instance for x2apic physical destination mode. */
static struct apic apic_x2apic_phys = {

	.name				= "physical x2apic",
	.probe				= x2apic_phys_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	/* Fixed delivery, physical destination mode. */
	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	/* NULL callbacks are operations not needed in x2apic phys mode. */
	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	/* x2apic uses full 32-bit APIC IDs. */
	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	/* IPI transmission helpers defined above. */
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	/* Register access goes through MSRs in x2apic mode. */
	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_phys);