Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
4 *
5 * Author: Jianmin Lv <lvjianmin@loongson.cn>
6 * Huacai Chen <chenhuacai@loongson.cn>
7 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
8 */
9
10#include <linux/init.h>
11#include <linux/acpi.h>
12#include <linux/irq.h>
13#include <linux/irqdomain.h>
14#include <linux/memblock.h>
15#include <linux/of_fdt.h>
16#include <linux/serial_core.h>
17#include <asm/io.h>
18#include <asm/numa.h>
19#include <asm/loongson.h>
20
int acpi_disabled;			/* non-zero when ACPI support is turned off */
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;				/* skip ACPI IRQ initialization */
int acpi_pci_disabled;			/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;			/* enabled processors found in the MADT */
int disabled_cpus;			/* disabled processor entries found in the MADT */

/* NOTE(review): presumably the stack pointer saved for suspend/resume — verify. */
u64 acpi_saved_sp;

#define PREFIX "ACPI: "

/* Raw MADT CORE_PIC entries, indexed by physical core id. */
struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
35
36void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
37{
38
39 if (!phys || !size)
40 return NULL;
41
42 return early_memremap(phys, size);
43}
44void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
45{
46 if (!map || !size)
47 return;
48
49 early_memunmap(map, size);
50}
51
52void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
53{
54 if (!memblock_is_memory(phys))
55 return ioremap(phys, size);
56 else
57 return ioremap_cache(phys, size);
58}
59
#ifdef CONFIG_SMP
/*
 * Register one MADT-enumerated core in the kernel's CPU maps.
 *
 * @id:    physical core id from the CORE_PIC entry
 * @flags: MADT flags; ACPI_MADT_ENABLED marks a usable processor
 *
 * Returns the logical CPU number chosen, or -ENODEV once nr_cpu_ids
 * processors have already been registered.
 */
static int set_processor_mask(u32 id, u32 flags)
{

	int cpu, cpuid = id;

	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
			" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);

		return -ENODEV;

	}
	/* The boot CPU is always logical CPU 0. */
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		/* Otherwise take the first unused slot in the present mask. */
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		/* Maintain both directions of the physical<->logical map. */
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		disabled_cpus++;

	return cpu;
}
#endif
90
/*
 * MADT callback: validate one CORE_PIC (processor) entry, keep a copy of
 * it, and register the core in the CPU maps.
 */
static int __init
acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	/* Keep the raw entry around, indexed by physical core id. */
	acpi_core_pic[processor->core_id] = *processor;
	set_processor_mask(processor->core_id, processor->flags);
#endif

	return 0;
}
108
109static int __init
110acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
111{
112 static int core = 0;
113 struct acpi_madt_eio_pic *eiointc = NULL;
114
115 eiointc = (struct acpi_madt_eio_pic *)header;
116 if (BAD_MADT_ENTRY(eiointc, end))
117 return -EINVAL;
118
119 core = eiointc->node * CORES_PER_EIO_NODE;
120 set_bit(core, loongson_sysconf.cores_io_master);
121
122 return 0;
123}
124
/*
 * Walk the MADT: reset the physical<->logical CPU maps, register every
 * CORE_PIC processor, record EIO interrupt masters, then publish the
 * final processor count in loongson_sysconf.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	/* -1 means "no mapping yet" in both directions. */
	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}
143
144int pptt_enabled;
145
146int __init parse_acpi_topology(void)
147{
148 int cpu, topology_id;
149
150 for_each_possible_cpu(cpu) {
151 topology_id = find_acpi_cpu_topology(cpu, 0);
152 if (topology_id < 0) {
153 pr_warn("Invalid BIOS PPTT\n");
154 return -ENOENT;
155 }
156
157 if (acpi_pptt_cpu_is_thread(cpu) <= 0)
158 cpu_data[cpu].core = topology_id;
159 else {
160 topology_id = find_acpi_cpu_topology(cpu, 1);
161 if (topology_id < 0)
162 return -ENOENT;
163
164 cpu_data[cpu].core = topology_id;
165 }
166 }
167
168 pptt_enabled = 1;
169
170 return 0;
171}
172
/* Low-level suspend entry point; NULL unless CONFIG_SUSPEND is enabled. */
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif
178
/*
 * Early ACPI bring-up: initialize the table parser, record the boot CPU
 * id, process the MADT and enable the SPCR earlycon.  If ACPI is
 * disabled or table init fails, fall back to the FDT "chosen" node for
 * an earlycon.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}
211
212#ifdef CONFIG_ACPI_NUMA
213
/* Map an ACPI proximity domain id (PXM) to a logical NUMA node id. */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
218
/*
 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them. I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

/* Number of localities described by the SLIT distance matrix. */
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}
230
231void __init numa_set_distance(int from, int to, int distance)
232{
233 if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
234 pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
235 from, to, distance);
236 return;
237 }
238
239 node_distances[from][to] = distance;
240}
241
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	/* A malformed entry invalidates the whole SRAT. */
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* Skip processors the firmware marked unusable. */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	/* SRAT rev 1 carries an 8-bit PXM; rev 2+ extends it to 32 bits. */
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* Ignore APIC ids beyond what this kernel was built for. */
	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
281
282#endif
283
/* Reserve an ACPI-designated region in memblock so it is not reused. */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}
288
289#ifdef CONFIG_ACPI_HOTPLUG_CPU
290
291#include <acpi/processor.h>
292
/* Bind a hotplugged CPU to the NUMA node ACPI reports for its handle. */
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}
308
/*
 * Hot-add path: allocate a logical CPU for @physid, mark it enabled and
 * attach it to its NUMA node.  On success *pcpu holds the logical id
 * and 0 is returned; otherwise the error from set_processor_mask().
 */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
	if (cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return cpu;
	}

	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
326
/* Hot-remove path: detach the CPU from its node and mark it not present. */
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
340
341#endif /* CONFIG_ACPI_HOTPLUG_CPU */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
4 *
5 * Author: Jianmin Lv <lvjianmin@loongson.cn>
6 * Huacai Chen <chenhuacai@loongson.cn>
7 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
8 */
9
10#include <linux/init.h>
11#include <linux/acpi.h>
12#include <linux/efi-bgrt.h>
13#include <linux/irq.h>
14#include <linux/irqdomain.h>
15#include <linux/memblock.h>
16#include <linux/of_fdt.h>
17#include <linux/serial_core.h>
18#include <asm/io.h>
19#include <asm/numa.h>
20#include <asm/loongson.h>
21
int acpi_disabled;			/* non-zero when ACPI support is turned off */
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;				/* skip ACPI IRQ initialization */
int acpi_pci_disabled;			/* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;			/* enabled processors found in the MADT */
int disabled_cpus;			/* disabled processor entries found in the MADT */

/* NOTE(review): presumably the stack pointer saved for suspend/resume — verify. */
u64 acpi_saved_sp;

#define PREFIX "ACPI: "

/* Raw MADT CORE_PIC entries, indexed by physical core id. */
struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
36
/*
 * Map an ACPI table for early (boot-time) access; NULL for a null
 * address or zero size.
 */
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{

	if (!phys || !size)
		return NULL;

	return early_memremap(phys, size);
}
/* Undo an early table mapping; NULL map or zero size is a no-op. */
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (!map || !size)
		return;

	early_memunmap(map, size);
}
52
/*
 * Map a physical region for the ACPI interpreter: RAM is mapped
 * cacheable, anything outside memblock memory is treated as MMIO.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (!memblock_is_memory(phys))
		return ioremap(phys, size);
	else
		return ioremap_cache(phys, size);
}
60
#ifdef CONFIG_SMP
/*
 * Assign a logical CPU number to a MADT-enumerated core.
 *
 * @id:   physical core id from the CORE_PIC entry
 * @pass: 1 = enabled processors (become present and possible),
 *        2 = disabled processors (become possible only, reserved for
 *            later hotplug)
 *
 * Returns the logical CPU number, or -ENODEV once NR_CPUS processors
 * have been registered.
 */
static int set_processor_mask(u32 id, u32 pass)
{
	int cpu = -1, cpuid = id;

	if (num_processors >= NR_CPUS) {
		pr_warn(PREFIX "nr_cpus limit of %i reached."
			" processor 0x%x ignored.\n", NR_CPUS, cpuid);

		return -ENODEV;

	}

	/* The boot CPU is always logical CPU 0. */
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;

	switch (pass) {
	case 1: /* Pass 1 handle enabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
		num_processors++;
		set_cpu_present(cpu, true);
		break;
	case 2: /* Pass 2 handle disabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
		disabled_cpus++;
		break;
	default:
		return cpu;
	}

	/* Both passes record the physical<->logical mapping. */
	set_cpu_possible(cpu, true);
	__cpu_number_map[cpuid] = cpu;
	__cpu_logical_map[cpu] = cpuid;

	return cpu;
}
#endif
100
/*
 * MADT callback, pass 1: record each CORE_PIC entry and register the
 * cores firmware marked as enabled.
 */
static int __init
acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	/* Keep the raw entry around, indexed by physical core id. */
	acpi_core_pic[processor->core_id] = *processor;
	if (processor->flags & ACPI_MADT_ENABLED)
		set_processor_mask(processor->core_id, 1);
#endif

	return 0;
}
119
/*
 * MADT callback, pass 2: reserve logical CPU slots for processors the
 * firmware lists as disabled (CPU hotplug candidates).
 */
static int __init
acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

#ifdef CONFIG_SMP
	if (!(processor->flags & ACPI_MADT_ENABLED))
		set_processor_mask(processor->core_id, 2);
#endif

	return 0;
}
136static int __init
137acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
138{
139 static int core = 0;
140 struct acpi_madt_eio_pic *eiointc = NULL;
141
142 eiointc = (struct acpi_madt_eio_pic *)header;
143 if (BAD_MADT_ENTRY(eiointc, end))
144 return -EINVAL;
145
146 core = eiointc->node * CORES_PER_EIO_NODE;
147 set_bit(core, loongson_sysconf.cores_io_master);
148
149 return 0;
150}
151
/*
 * Walk the MADT in two passes (enabled processors first, then disabled
 * ones so hotplug slots get stable logical ids), record EIO interrupt
 * masters, and publish the enabled-processor count.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	/* -1 means "no mapping yet" in both directions. */
	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p1_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p2_processor, MAX_CORE_PIC);

	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}
173
/* Non-zero once the ACPI PPTT has been parsed successfully. */
int pptt_enabled;

/*
 * Fill in cpu_data[].core for every possible CPU from the ACPI PPTT.
 * For SMT parts the core id is taken one topology level above the
 * thread.  Returns 0 on success, -ENOENT if the PPTT is missing or
 * invalid.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) <= 0)
			cpu_data[cpu].core = topology_id;
		else {
			/* SMT thread: query level 1 for the owning core. */
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;

			cpu_data[cpu].core = topology_id;
		}
	}

	pptt_enabled = 1;

	return 0;
}
202
/* Low-level suspend entry point; NULL unless CONFIG_SUSPEND is enabled. */
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif
208
/*
 * Early ACPI bring-up: initialize the table parser, record the boot CPU
 * id, process the MADT, enable the SPCR earlycon and pick up the BGRT
 * boot logo table.  If ACPI is disabled or table init fails, fall back
 * to the FDT "chosen" node for an earlycon.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	/* Record the firmware boot graphics (BGRT) table if configured. */
	if (IS_ENABLED(CONFIG_ACPI_BGRT))
		acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);

	return;

fdt_earlycon:
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}
244
245#ifdef CONFIG_ACPI_NUMA
246
/* Map an ACPI proximity domain id (PXM) to a logical NUMA node id. */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
251
/*
 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them. I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

/* Number of localities described by the SLIT distance matrix. */
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}
263
/*
 * Record one entry of the node distance matrix, rejecting values that
 * do not fit in a u8 or a self-distance other than LOCAL_DISTANCE.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}
274
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	/* A malformed entry invalidates the whole SRAT. */
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* Skip processors the firmware marked unusable. */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	/* SRAT rev 1 carries an 8-bit PXM; rev 2+ extends it to 32 bits. */
	pxm = pa->proximity_domain_lo;
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* Ignore APIC ids beyond what this kernel was built for. */
	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
314
315#endif
316
/* Reserve an ACPI-designated region in memblock so it is not reused. */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}
321
322#ifdef CONFIG_ACPI_HOTPLUG_CPU
323
324#include <acpi/processor.h>
325
/*
 * Bind a hotplugged CPU to a NUMA node.
 *
 * NOTE(review): when ACPI reports a valid node the code then re-derives
 * the node via early_cpu_to_node(cpu) — presumably the boot-time SRAT
 * mapping is treated as authoritative; confirm against the hotplug flow.
 */
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);

	if (nid != NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);

	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}
345
/*
 * Hot-add path: the physical id was already given a logical slot during
 * MADT pass 2, so look it up, mark the CPU present and attach it to its
 * node.  On success *pcpu holds the logical id; -ERANGE if the id was
 * never mapped.
 */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int cpu;

	cpu = cpu_number_map(physid);
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return -ERANGE;
	}

	num_processors++;
	set_cpu_present(cpu, true);
	acpi_map_cpu2node(handle, cpu, physid);

	*pcpu = cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
365
/* Hot-remove path: detach the CPU from its node and mark it not present. */
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
379
380#endif /* CONFIG_ACPI_HOTPLUG_CPU */