/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>

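/* The 8-bit physical APIC ID sits in bits 31:24 of the APIC ID register value. */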
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
	return 1;
}

static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

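/*
 * bigsmp does not keep track of which APIC IDs are in use: no ID is ever
 * reported as used, and every ID is treated as present.
 */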
static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return 0;
}

static unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}

static int bigsmp_early_logical_apicid(int cpu)
{
	/* on bigsmp, logical apicid is the same as physical */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}

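/*
 * Build the LDR value for this CPU: preserve the reserved low bits and put
 * the CPU's BIOS-reported APIC ID into the logical APIC ID field.
 */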
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}

static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
		nr_ioapics);
}

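/* Map an MP-table CPU number to the APIC ID the BIOS reported for it. */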
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}

/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask);

	if (cpu < nr_cpu_ids)
		return cpu_physical_id(cpu);
	return BAD_APICID;
}

static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu_physical_id(cpu);
	}
	return BAD_APICID;
}

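/* The package ID is the APIC ID with the core/thread ID bits shifted out. */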
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

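/*
 * Fixed physical delivery cannot target an arbitrary set of CPUs in a
 * single IPI, so the mask is walked and one IPI is sent per CPU.
 */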
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}

static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}


static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};

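/*
 * Interrupts are delivered in fixed physical mode to a single CPU, so each
 * CPU gets a vector allocation domain of its own.
 */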
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

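/*
 * bigsmp is used either because earlier boot code forced it via
 * def_to_bigsmp or because the machine matches one of the DMI quirks above.
 */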
static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}

static struct apic apic_bigsmp = {

	.name = "bigsmp",
	.probe = probe_bigsmp,
	.acpi_madt_oem_check = NULL,
	.apic_id_registered = bigsmp_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode = 0,

	.target_cpus = bigsmp_target_cpus,
	.disable_esr = 1,
	.dest_logical = 0,
	.check_apicid_used = bigsmp_check_apicid_used,
	.check_apicid_present = bigsmp_check_apicid_present,

	.vector_allocation_domain = bigsmp_vector_allocation_domain,
	.init_apic_ldr = bigsmp_init_apic_ldr,

	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
	.setup_apic_routing = bigsmp_setup_apic_routing,
	.multi_timer_check = NULL,
	.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present = physid_set_mask_of_physid,
	.setup_portio_remap = NULL,
	.check_phys_apicid_present = bigsmp_check_phys_apicid_present,
	.enable_apic_mode = NULL,
	.phys_pkg_id = bigsmp_phys_pkg_id,
	.mps_oem_check = NULL,

	.get_apic_id = bigsmp_get_apic_id,
	.set_apic_id = NULL,
	.apic_id_mask = 0xFF << 24,

	.cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask = bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself = NULL,
	.send_IPI_allbutself = bigsmp_send_IPI_allbutself,
	.send_IPI_all = bigsmp_send_IPI_all,
	.send_IPI_self = default_send_IPI_self,

	.trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert = default_wait_for_init_deassert,

	.smp_callin_clear_local_apic = NULL,
	.inquire_remote_apic = default_inquire_remote_apic,

	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = native_apic_icr_read,
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
};

struct apic * __init generic_bigsmp_probe(void)
{
	if (probe_bigsmp())
		return &apic_bigsmp;

	return NULL;
}

apic_driver(apic_bigsmp);
// SPDX-License-Identifier: GPL-2.0
/*
 * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
 *
 * Drives the local APIC in "clustered mode".
 */
#include <linux/cpumask.h>
#include <linux/dmi.h>
#include <linux/smp.h>

#include <asm/apic.h>
#include <asm/io_apic.h>

#include "local.h"

static unsigned bigsmp_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFF;
}

static int bigsmp_apic_id_registered(void)
{
	return 1;
}

static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return false;
}

static int bigsmp_early_logical_apicid(int cpu)
{
	/* on bigsmp, logical apicid is the same as physical */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}

/*
 * bigsmp enables physical destination mode
 * and doesn't use LDR and DFR
 */
static void bigsmp_init_apic_ldr(void)
{
}

static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
		nr_ioapics);
}

static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}

static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}

static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}

static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	return cpuid_apic >> index_msb;
}

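/* Broadcast IPIs go out to the online CPUs one destination at a time. */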
static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}

static void bigsmp_send_IPI_all(int vector)
{
	default_send_IPI_mask_sequence_phys(cpu_online_mask, vector);
}

static int dmi_bigsmp; /* can be set by dmi scanners */

static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}


static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{	DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};

static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}

static struct apic apic_bigsmp __ro_after_init = {

	.name = "bigsmp",
	.probe = probe_bigsmp,
	.acpi_madt_oem_check = NULL,
	.apic_id_valid = default_apic_id_valid,
	.apic_id_registered = bigsmp_apic_id_registered,

	.irq_delivery_mode = dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode = 0,

	.disable_esr = 1,
	.dest_logical = 0,
	.check_apicid_used = bigsmp_check_apicid_used,

	.init_apic_ldr = bigsmp_init_apic_ldr,

	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
	.setup_apic_routing = bigsmp_setup_apic_routing,
	.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present = physid_set_mask_of_physid,
	.check_phys_apicid_present = bigsmp_check_phys_apicid_present,
	.phys_pkg_id = bigsmp_phys_pkg_id,

	.get_apic_id = bigsmp_get_apic_id,
	.set_apic_id = NULL,

	.calc_dest_apicid = apic_default_calc_apicid,

	.send_IPI = default_send_IPI_single_phys,
	.send_IPI_mask = default_send_IPI_mask_sequence_phys,
	.send_IPI_mask_allbutself = NULL,
	.send_IPI_allbutself = bigsmp_send_IPI_allbutself,
	.send_IPI_all = bigsmp_send_IPI_all,
	.send_IPI_self = default_send_IPI_self,

	.inquire_remote_apic = default_inquire_remote_apic,

	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.eoi_write = native_apic_mem_write,
	.icr_read = native_apic_icr_read,
	.icr_write = native_apic_icr_write,
	.wait_icr_idle = native_apic_wait_icr_idle,
	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
};

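/*
 * Switch the running system over to bigsmp: install the driver and
 * recompute the cached early logical APIC IDs, which equal the physical
 * IDs under bigsmp.
 */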
void __init generic_bigsmp_probe(void)
{
	unsigned int cpu;

	if (!probe_bigsmp())
		return;

	apic = &apic_bigsmp;

	for_each_possible_cpu(cpu) {
		if (early_per_cpu(x86_cpu_to_logical_apicid,
				  cpu) == BAD_APICID)
			continue;
		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
			bigsmp_early_logical_apicid(cpu);
	}

	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}

apic_driver(apic_bigsmp);