/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_ACPI_H
#define _ASM_X86_ACPI_H

/*
 * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <acpi/pdc_intel.h>

#include <asm/numa.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/x86_init.h>
#include <asm/cpufeature.h>

#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
#endif

#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;

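/*
 * SCI (ACPI System Control Interrupt) override data, filled in from the
 * MADT Interrupt Source Override entry for the SCI, if one is present.
 */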
extern u8 acpi_sci_flags;
extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);

struct device;

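/*
 * Hooks that allow platform code (Xen, for instance) to override how
 * ACPI GSIs are registered and unregistered.  An override is installed
 * by assigning the function pointer, roughly (placeholder name shown
 * for illustration only):
 *
 *	__acpi_register_gsi = my_platform_register_gsi;
 */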
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
                                  int trigger, int polarity);
extern void (*__acpi_unregister_gsi)(u32 gsi);

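/*
 * Turn ACPI off completely: no table-based interrupt routing and no
 * ACPI-based PCI configuration (e.g. for acpi=off).
 */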
static inline void disable_acpi(void)
{
        acpi_disabled = 1;
        acpi_pci_disabled = 1;
        acpi_noirq = 1;
}

extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);

static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
        acpi_pci_disabled = 1;
        acpi_noirq_set();
}

/* Low-level suspend routine. */
extern int (*acpi_suspend_lowlevel)(void);

/* Physical address to resume after wakeup */
unsigned long acpi_get_wakeup_address(void);

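/*
 * Xen PV guests are resumed by the hypervisor and do not go through the
 * real-mode wakeup vector, so setting the firmware wakeup address can be
 * skipped for them.
 */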
static inline bool acpi_skip_set_wakeup_address(void)
{
        return cpu_feature_enabled(X86_FEATURE_XENPV);
}

#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address

/*
 * Check if the CPU can handle C2 and deeper
 */
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
        /*
         * Early models (<=5) of AMD Opterons are not supposed to go into
         * C2 state.
         *
         * Steppings 0x0A and later are good
         */
        if (boot_cpu_data.x86 == 0x0F &&
            boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
            boot_cpu_data.x86_model <= 0x05 &&
            boot_cpu_data.x86_stepping < 0x0A)
                return 1;
        else if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
                return 1;
        else
                return max_cstate;
}

static inline bool arch_has_acpi_pdc(void)
{
        struct cpuinfo_x86 *c = &cpu_data(0);

        return (c->x86_vendor == X86_VENDOR_INTEL ||
                c->x86_vendor == X86_VENDOR_CENTAUR);
}

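/*
 * buf[2] is the capabilities DWORD of the _PDC buffer; only advertise
 * the capabilities that this CPU actually supports.
 */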
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
        struct cpuinfo_x86 *c = &cpu_data(0);

        buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;

        if (cpu_has(c, X86_FEATURE_EST))
                buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;

        if (cpu_has(c, X86_FEATURE_ACPI))
                buf[2] |= ACPI_PDC_T_FFH;

        /*
         * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
         */
        if (!cpu_has(c, X86_FEATURE_MWAIT))
                buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
}

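/* True once local APIC entries have been parsed from the MADT. */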
static inline bool acpi_has_cpu_in_madt(void)
{
        return !!acpi_lapic;
}

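/*
 * Route storage and lookup of the ACPI root pointer (RSDP) through the
 * x86_init hooks so that platform code can override the defaults.
 */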
#define ACPI_HAVE_ARCH_SET_ROOT_POINTER
static inline void acpi_arch_set_root_pointer(u64 addr)
{
        x86_init.acpi.set_root_pointer(addr);
}

#define ACPI_HAVE_ARCH_GET_ROOT_POINTER
static inline u64 acpi_arch_get_root_pointer(void)
{
        return x86_init.acpi.get_root_pointer();
}

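/*
 * Default x86_init implementations: acpi_generic_reduced_hw_init()
 * handles hardware-reduced ACPI platforms, and the
 * x86_default_*_root_pointer() helpers provide the stock RSDP handling.
 */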
void acpi_generic_reduced_hw_init(void);

void x86_default_set_root_pointer(u64 addr);
u64 x86_default_get_root_pointer(void);

#else /* !CONFIG_ACPI */

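/* Stubs so that callers do not have to be guarded by CONFIG_ACPI. */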
#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }

static inline void acpi_generic_reduced_hw_init(void) { }

static inline void x86_default_set_root_pointer(u64 addr) { }

static inline u64 x86_default_get_root_pointer(void)
{
        return 0;
}

#endif /* !CONFIG_ACPI */

#define ARCH_HAS_POWER_INIT 1

#ifdef CONFIG_ACPI_NUMA
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */

struct cper_ia_proc_ctx;

#ifdef CONFIG_ACPI_APEI
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
        /*
         * We currently have no way to look up the EFI memory map
         * attributes for a region in a consistent way, because the
         * memmap is discarded after efi_free_boot_services(). So if
         * you call efi_mem_attributes() during boot and at runtime,
         * you could theoretically see different attributes.
         *
         * We are yet to see any x86 platforms that require anything
         * other than PAGE_KERNEL (some ARM64 platforms require the
         * equivalent of PAGE_KERNEL_NOCACHE). Additionally, if SME
         * is active, the ACPI information will not be encrypted,
         * so return PAGE_KERNEL_NOENC until we know differently.
         */
        return PAGE_KERNEL_NOENC;
}

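/*
 * Report a firmware-supplied (CPER) processor context error record for
 * the CPU identified by @lapic_id.
 */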
int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
                               u64 lapic_id);
#else
static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
                                             u64 lapic_id)
{
        return -EINVAL;
}
#endif

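/*
 * Highest physical address that upgraded/overridden ACPI tables may be
 * copied to; it must lie within memory that is mapped early in boot.
 */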
#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)

#endif /* _ASM_X86_ACPI_H */