// SPDX-License-Identifier: GPL-2.0
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/cpumask.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/io_apic.h>

#include "local.h"

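/*
 * Note: in xAPIC mode the 8-bit physical APIC ID lives in bits 31:24 of
 * the APIC ID register, hence the shift and mask below.
 */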
static u32 bigsmp_get_apic_id(u32 x)
{
        return (x >> 24) & 0xFF;
}

static void bigsmp_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
        default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
        dmi_bigsmp = 1;

        return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
        { hp_ht_bigsmp, "HP ProLiant DL760 G2",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
                }
        },

        { hp_ht_bigsmp, "HP ProLiant DL740",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
                }
        },
        { } /* NULL entry stops DMI scanning */
};

static int probe_bigsmp(void)
{
        return dmi_check_system(bigsmp_dmi_table);
}

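/*
 * Driver definition. The key point is physical destination mode
 * (.dest_mode_logical = false): flat logical mode can address at most
 * eight APICs, while physical fixed delivery targets CPUs by their
 * physical APIC ID, which is what lets bigsmp drive more than 8 CPUs.
 */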
static struct apic apic_bigsmp __ro_after_init = {

        .name = "bigsmp",
        .probe = probe_bigsmp,

        .dest_mode_logical = false,

        .disable_esr = 1,

        .cpu_present_to_apicid = default_cpu_present_to_apicid,

        .max_apic_id = 0xFE,
        .get_apic_id = bigsmp_get_apic_id,

        .calc_dest_apicid = apic_default_calc_apicid,

        .send_IPI = default_send_IPI_single_phys,
        .send_IPI_mask = default_send_IPI_mask_sequence_phys,
        .send_IPI_mask_allbutself = NULL,
        .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
        .send_IPI_all = bigsmp_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

        .read = native_apic_mem_read,
        .write = native_apic_mem_write,
        .eoi = native_apic_mem_eoi,
        .icr_read = native_apic_icr_read,
        .icr_write = native_apic_icr_write,
        .wait_icr_idle = apic_mem_wait_icr_idle,
        .safe_wait_icr_idle = apic_mem_wait_icr_idle_timeout,
};

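/*
 * Entry points for the 32-bit APIC setup code (a reading of this file,
 * not a list of call sites): apic_bigsmp_possible() says whether
 * switching to bigsmp is acceptable (either bigsmp is already installed
 * or no other driver was forced on the command line);
 * apic_bigsmp_force() installs the driver if it is not already active.
 */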
bool __init apic_bigsmp_possible(bool cmdline_override)
{
        return apic == &apic_bigsmp || !cmdline_override;
}

void __init apic_bigsmp_force(void)
{
        if (apic != &apic_bigsmp)
                apic_install_driver(&apic_bigsmp);
}

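/*
 * apic_driver() registers apic_bigsmp with the generic 32-bit APIC
 * probing code; as far as I can tell it does so by placing a pointer in
 * the .apicdrivers section that the probe loop walks.
 */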
apic_driver(apic_bigsmp);
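/*
 * What follows appears to be an earlier revision of the same driver,
 * predating the SPDX tag and still using the old struct apic callbacks
 * (target_cpus, init_apic_ldr, cpu_mask_to_apicid, ...).
 */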
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

static unsigned bigsmp_get_apic_id(unsigned long x)
{
        return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
        return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
        return cpu_online_mask;
#else
        return cpumask_of(0);
#endif
}

static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
        return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
        return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
        /* on bigsmp, logical apicid is the same as physical */
        return early_per_cpu(x86_cpu_to_apicid, cpu);
}

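/*
 * In the xAPIC flat model the logical ID is an 8-bit field in bits 31:24
 * of the LDR; SET_APIC_LOGICAL_ID() shifts the BIOS-reported APIC id into
 * that field, and calculate_ldr() below clears the old ID field before
 * ORing in the new one.
 */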
static inline unsigned long calculate_ldr(int cpu)
{
        unsigned long val, id;

        val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
        id = per_cpu(x86_bios_cpu_apicid, cpu);
        val |= SET_APIC_LOGICAL_ID(id);

        return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
        unsigned long val;
        int cpu = smp_processor_id();

        apic_write(APIC_DFR, APIC_DFR_FLAT);
        val = calculate_ldr(cpu);
        apic_write(APIC_LDR, val);
}

static void bigsmp_setup_apic_routing(void)
{
        printk(KERN_INFO
                "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
                nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids)
                return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

        return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
        /* For clustered we don't have a good way to do this yet - hack */
        physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
        return 1;
}

/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
        int cpu = cpumask_first(cpumask);

        if (cpu < nr_cpu_ids)
                return cpu_physical_id(cpu);
        return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                                                  const struct cpumask *andmask)
{
        int cpu;

        /*
         * We're using fixed IRQ delivery, can only return one phys APIC ID.
         * May as well be the first.
         */
        for_each_cpu_and(cpu, cpumask, andmask) {
                if (cpumask_test_cpu(cpu, cpu_online_mask))
                        return cpu_physical_id(cpu);
        }
        return BAD_APICID;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
        default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
        default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
        bigsmp_send_IPI_mask(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
        dmi_bigsmp = 1;

        return 0;
}

static const struct dmi_system_id bigsmp_dmi_table[] = {
        { hp_ht_bigsmp, "HP ProLiant DL760 G2",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
                }
        },

        { hp_ht_bigsmp, "HP ProLiant DL740",
                { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
                  DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
                }
        },
        { } /* NULL entry stops DMI scanning */
};

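/*
 * With fixed physical delivery a vector targets exactly one CPU, so the
 * allocation domain for a CPU is just that CPU (this is a reading of the
 * code rather than documented rationale).
 */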
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
        cpumask_clear(retmask);
        cpumask_set_cpu(cpu, retmask);
}

static int probe_bigsmp(void)
{
        if (def_to_bigsmp)
                dmi_bigsmp = 1;
        else
                dmi_check_system(bigsmp_dmi_table);

        return dmi_bigsmp;
}

static struct apic apic_bigsmp = {

        .name = "bigsmp",
        .probe = probe_bigsmp,
        .acpi_madt_oem_check = NULL,
        .apic_id_registered = bigsmp_apic_id_registered,

        .irq_delivery_mode = dest_Fixed,
        /* phys delivery to target CPU: */
        .irq_dest_mode = 0,

        .target_cpus = bigsmp_target_cpus,
        .disable_esr = 1,
        .dest_logical = 0,
        .check_apicid_used = bigsmp_check_apicid_used,
        .check_apicid_present = bigsmp_check_apicid_present,

        .vector_allocation_domain = bigsmp_vector_allocation_domain,
        .init_apic_ldr = bigsmp_init_apic_ldr,

        .ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
        .setup_apic_routing = bigsmp_setup_apic_routing,
        .multi_timer_check = NULL,
        .cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
        .apicid_to_cpu_present = physid_set_mask_of_physid,
        .setup_portio_remap = NULL,
        .check_phys_apicid_present = bigsmp_check_phys_apicid_present,
        .enable_apic_mode = NULL,
        .phys_pkg_id = bigsmp_phys_pkg_id,
        .mps_oem_check = NULL,

        .get_apic_id = bigsmp_get_apic_id,
        .set_apic_id = NULL,
        .apic_id_mask = 0xFF << 24,

        .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
        .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,

        .send_IPI_mask = bigsmp_send_IPI_mask,
        .send_IPI_mask_allbutself = NULL,
        .send_IPI_allbutself = bigsmp_send_IPI_allbutself,
        .send_IPI_all = bigsmp_send_IPI_all,
        .send_IPI_self = default_send_IPI_self,

        .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
        .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

        .wait_for_init_deassert = default_wait_for_init_deassert,

        .smp_callin_clear_local_apic = NULL,
        .inquire_remote_apic = default_inquire_remote_apic,

        .read = native_apic_mem_read,
        .write = native_apic_mem_write,
        .icr_read = native_apic_icr_read,
        .icr_write = native_apic_icr_write,
        .wait_icr_idle = native_apic_wait_icr_idle,
        .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

        .x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
};

struct apic * __init generic_bigsmp_probe(void)
{
        if (probe_bigsmp())
                return &apic_bigsmp;

        return NULL;
}

apic_driver(apic_bigsmp);