// SPDX-License-Identifier: GPL-2.0
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/cpumask.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apic.h>

#include "local.h"

static unsigned bigsmp_get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
        return 1;
}

static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
        return false;
}

static int bigsmp_early_logical_apicid(int cpu)
{
        /* on bigsmp, logical apicid is the same as physical */
        return early_per_cpu(x86_cpu_to_apicid, cpu);
}

/*
 * bigsmp enables physical destination mode
 * and doesn't use LDR and DFR
 */
static void bigsmp_init_apic_ldr(void)
{
}

static void bigsmp_setup_apic_routing(void)
{
        printk(KERN_INFO
                "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
                nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

        return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
        /* For clustered we don't have a good way to do this yet - hack */
        physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
        return 1;
}
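
/*
 * The physical package ID is the APIC ID with the SMT/core sibling bits
 * (the low index_msb bits) shifted out.
 */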
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

static void bigsmp_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
        default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
        dmi_bigsmp = 1;

        return 0;
}


static const struct dmi_system_id bigsmp_dmi_table[] = {
        { hp_ht_bigsmp, "HP ProLiant DL760 G2",
                {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                        DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
                }
        },

        { hp_ht_bigsmp, "HP ProLiant DL740",
                {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                        DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
                }
        },
        { } /* NULL entry stops DMI scanning */
};
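
/*
 * bigsmp is used either because it was forced (def_to_bigsmp, set when
 * the boot code finds more CPUs than the default driver can address) or
 * because the machine matches the DMI quirk table above.
 */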
static int probe_bigsmp(void)
{
        if (def_to_bigsmp)
                dmi_bigsmp = 1;
        else
                dmi_check_system(bigsmp_dmi_table);

        return dmi_bigsmp;
}
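
/*
 * The bigsmp APIC driver: fixed interrupt delivery in physical
 * destination mode; IPIs are sent by addressing each target CPU's
 * physical APIC ID in turn.
 */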
static struct apic apic_bigsmp __ro_after_init = {

        .name                           = "bigsmp",
        .probe                          = probe_bigsmp,
        .acpi_madt_oem_check            = NULL,
        .apic_id_valid                  = default_apic_id_valid,
        .apic_id_registered             = bigsmp_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        /* phys delivery to target CPU: */
        .irq_dest_mode                  = 0,

        .disable_esr                    = 1,
        .dest_logical                   = 0,
        .check_apicid_used              = bigsmp_check_apicid_used,

        .init_apic_ldr                  = bigsmp_init_apic_ldr,

        .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
        .setup_apic_routing             = bigsmp_setup_apic_routing,
        .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
        .check_phys_apicid_present      = bigsmp_check_phys_apicid_present,
        .phys_pkg_id                    = bigsmp_phys_pkg_id,

        .get_apic_id                    = bigsmp_get_apic_id,
        .set_apic_id                    = NULL,

        .calc_dest_apicid               = apic_default_calc_apicid,

        .send_IPI                       = default_send_IPI_single_phys,
        .send_IPI_mask                  = default_send_IPI_mask_sequence_phys,
        .send_IPI_mask_allbutself       = NULL,
        .send_IPI_allbutself            = bigsmp_send_IPI_allbutself,
        .send_IPI_all                   = bigsmp_send_IPI_all,
        .send_IPI_self                  = default_send_IPI_self,

        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .eoi_write                      = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,

        .x86_32_early_logical_apicid    = bigsmp_early_logical_apicid,
};
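
/*
 * Late probe: switch to the bigsmp driver if the forced/quirk checks
 * fire, and re-derive the per-CPU logical APIC IDs, which are identical
 * to the physical IDs in this mode.
 */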
void __init generic_bigsmp_probe(void)
{
        unsigned int cpu;

        if (!probe_bigsmp())
                return;

        apic = &apic_bigsmp;

        for_each_possible_cpu(cpu) {
                if (early_per_cpu(x86_cpu_to_logical_apicid,
                                  cpu) == BAD_APICID)
                        continue;
                early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
                        bigsmp_early_logical_apicid(cpu);
        }

        pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

apic_driver(apic_bigsmp);
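
/*
 * What follows appears to be an earlier revision of the same driver: it
 * predates the SPDX tag and still implements callbacks such as
 * target_cpus, cpu_mask_to_apicid and vector_allocation_domain that were
 * later removed from struct apic.
 */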
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

static unsigned bigsmp_get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
        return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
        return cpu_online_mask;
#else
        return cpumask_of(0);
#endif
}

static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
        return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
        return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
        /* on bigsmp, logical apicid is the same as physical */
        return early_per_cpu(x86_cpu_to_apicid, cpu);
}
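
/*
 * Build the LDR value for @cpu: keep the reserved low bits of the
 * current register and insert the CPU's BIOS APIC ID as the logical
 * destination ID.
 */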
static inline unsigned long calculate_ldr(int cpu)
{
        unsigned long val, id;

        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        id = per_cpu(x86_bios_cpu_apicid, cpu);
        val |= SET_APIC_LOGICAL_ID(id);

        return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
        unsigned long val;
        int cpu = smp_processor_id();

        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = calculate_ldr(cpu);
        apic_write(APIC_LDR, val);
}

static void bigsmp_setup_apic_routing(void)
{
        printk(KERN_INFO
                "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
                nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

        return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
        /* For clustered we don't have a good way to do this yet - hack */
        physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
        return 1;
}

/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu = cpumask_first(cpumask);

        if (cpu < nr_cpu_ids)
                return cpu_physical_id(cpu);
        return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                                  const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        return cpu_physical_id(cpu);
        }
        return BAD_APICID;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
        default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
        bigsmp_send_IPI_mask(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
        dmi_bigsmp = 1;

        return 0;
}


static const struct dmi_system_id bigsmp_dmi_table[] = {
        { hp_ht_bigsmp, "HP ProLiant DL760 G2",
                {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                        DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
                }
        },

        { hp_ht_bigsmp, "HP ProLiant DL740",
                {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                        DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
                }
        },
        { } /* NULL entry stops DMI scanning */
};
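
/*
 * Fixed delivery in physical mode targets exactly one CPU per vector,
 * so the vector allocation domain is just the CPU itself.
 */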
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

static int probe_bigsmp(void)
{
        if (def_to_bigsmp)
                dmi_bigsmp = 1;
        else
                dmi_check_system(bigsmp_dmi_table);

        return dmi_bigsmp;
}

static struct apic apic_bigsmp = {

        .name                           = "bigsmp",
        .probe                          = probe_bigsmp,
        .acpi_madt_oem_check            = NULL,
        .apic_id_valid                  = default_apic_id_valid,
        .apic_id_registered             = bigsmp_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        /* phys delivery to target CPU: */
        .irq_dest_mode                  = 0,

        .target_cpus                    = bigsmp_target_cpus,
        .disable_esr                    = 1,
        .dest_logical                   = 0,
        .check_apicid_used              = bigsmp_check_apicid_used,
        .check_apicid_present           = bigsmp_check_apicid_present,

        .vector_allocation_domain       = bigsmp_vector_allocation_domain,
        .init_apic_ldr                  = bigsmp_init_apic_ldr,

        .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
        .setup_apic_routing             = bigsmp_setup_apic_routing,
        .multi_timer_check              = NULL,
        .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
        .apicid_to_cpu_present          = physid_set_mask_of_physid,
        .setup_portio_remap             = NULL,
        .check_phys_apicid_present      = bigsmp_check_phys_apicid_present,
        .enable_apic_mode               = NULL,
        .phys_pkg_id                    = bigsmp_phys_pkg_id,
        .mps_oem_check                  = NULL,

        .get_apic_id                    = bigsmp_get_apic_id,
        .set_apic_id                    = NULL,
        .apic_id_mask                   = 0xFF << 24,

        .cpu_mask_to_apicid             = bigsmp_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and         = bigsmp_cpu_mask_to_apicid_and,

        .send_IPI_mask                  = bigsmp_send_IPI_mask,
        .send_IPI_mask_allbutself       = NULL,
        .send_IPI_allbutself            = bigsmp_send_IPI_allbutself,
        .send_IPI_all                   = bigsmp_send_IPI_all,
        .send_IPI_self                  = default_send_IPI_self,

        .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,

        .wait_for_init_deassert         = default_wait_for_init_deassert,

        .smp_callin_clear_local_apic    = NULL,
        .inquire_remote_apic            = default_inquire_remote_apic,

        .read                           = native_apic_mem_read,
        .write                          = native_apic_mem_write,
        .eoi_write                      = native_apic_mem_write,
        .icr_read                       = native_apic_icr_read,
        .icr_write                      = native_apic_icr_write,
        .wait_icr_idle                  = native_apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,

        .x86_32_early_logical_apicid    = bigsmp_early_logical_apicid,
};

void __init generic_bigsmp_probe(void)
{
        unsigned int cpu;

        if (!probe_bigsmp())
                return;

        apic = &apic_bigsmp;

        for_each_possible_cpu(cpu) {
                if (early_per_cpu(x86_cpu_to_logical_apicid,
                                  cpu) == BAD_APICID)
                        continue;
                early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
                        bigsmp_early_logical_apicid(cpu);
        }

        pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

apic_driver(apic_bigsmp);