#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

int x2apic_phys;

static struct apic apic_x2apic_phys;

static int set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);

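/*
 * Pick the physical x2apic driver when it was forced on the command line,
 * or when the FADT says physical destination mode is required, provided
 * x2apic is actually enabled.
 */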
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (x2apic_phys)
		return x2apic_enabled();
	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
		 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
		 x2apic_enabled()) {
		printk(KERN_DEBUG "System requires x2apic physical mode\n");
		return 1;
	}
	else
		return 0;
}

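/*
 * Deliver @vector to every CPU in @mask with one ICR write per destination,
 * interrupts disabled for the whole walk.  APIC_DEST_ALLBUT skips the
 * sending CPU itself.
 */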
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	return per_cpu(x86_cpu_to_apicid, cpu);
}

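/*
 * Nothing to set up: physical destination mode does not use the logical
 * destination register.
 */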
static void init_x2apic_ldr(void)
{
}

static int x2apic_phys_probe(void)
{
	if (x2apic_mode && x2apic_phys)
		return 1;

	return apic == &apic_x2apic_phys;
}

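/*
 * The driver's apic callbacks: APIC register access goes through the x2apic
 * MSR interface and the mask-based IPI helpers use physical destinations.
 */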
static struct apic apic_x2apic_phys = {

	.name = "physical x2apic",
	.probe = x2apic_phys_probe,
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.apic_id_valid = x2apic_apic_id_valid,
	.apic_id_registered = x2apic_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	.irq_dest_mode = 0, /* physical */

	.target_cpus = x2apic_target_cpus,
	.disable_esr = 0,
	.dest_logical = 0,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = x2apic_vector_allocation_domain,
	.init_apic_ldr = init_x2apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = x2apic_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = x2apic_get_apic_id,
	.set_apic_id = x2apic_set_apic_id,
	.apic_id_mask = 0xFFFFFFFFu,

	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_self = x2apic_send_IPI_self,

	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = NULL,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.eoi_write = native_apic_msr_eoi_write,
	.icr_read = native_x2apic_icr_read,
	.icr_write = native_x2apic_icr_write,
	.wait_icr_idle = native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_phys);
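
/*
 * A second, apparently later, revision of the same file follows: the FADT
 * check is factored out into x2apic_fadt_phys(), the probe honours it too,
 * and several callbacks now use the generic defaults.
 */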
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

int x2apic_phys;

static struct apic apic_x2apic_phys;

static int set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);

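/*
 * The FADT can flag that the platform requires physical destination mode
 * (ACPI_FADT_APIC_PHYSICAL); report that so physical mode can be chosen
 * even without the x2apic_phys command-line switch.
 */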
static bool x2apic_fadt_phys(void)
{
	if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
	    (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		printk(KERN_DEBUG "System requires x2apic physical mode\n");
		return true;
	}
	return false;
}

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static void init_x2apic_ldr(void)
{
}

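/*
 * Claim this driver when x2apic is in use and physical mode was either
 * forced on the command line or required by the FADT.
 */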
static int x2apic_phys_probe(void)
{
	if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
		return 1;

	return apic == &apic_x2apic_phys;
}

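/*
 * As in the earlier revision, but target_cpus, vector_allocation_domain and
 * cpu_mask_to_apicid_and now point at the generic default helpers.
 */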
static struct apic apic_x2apic_phys = {

	.name = "physical x2apic",
	.probe = x2apic_phys_probe,
	.acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
	.apic_id_valid = x2apic_apic_id_valid,
	.apic_id_registered = x2apic_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	.irq_dest_mode = 0, /* physical */

	.target_cpus = online_target_cpus,
	.disable_esr = 0,
	.dest_logical = 0,
	.check_apicid_used = NULL,
	.check_apicid_present = NULL,

	.vector_allocation_domain = default_vector_allocation_domain,
	.init_apic_ldr = init_x2apic_ldr,

	.ioapic_phys_id_map = NULL,
	.setup_apic_routing = NULL,
	.multi_timer_check = NULL,
	.cpu_present_to_apicid = default_cpu_present_to_apicid,
	.apicid_to_cpu_present = NULL,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = default_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = x2apic_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = x2apic_get_apic_id,
	.set_apic_id = x2apic_set_apic_id,
	.apic_id_mask = 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,

	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_self = x2apic_send_IPI_self,

	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert = false,
	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = NULL,

	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.eoi_write = native_apic_msr_eoi_write,
	.icr_read = native_x2apic_icr_read,
	.icr_write = native_x2apic_icr_write,
	.wait_icr_idle = native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_phys);