1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright (C) 1999 VA Linux Systems
4 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
5 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
6 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
7 */
8
9#ifndef _ASM_ACPI_H
10#define _ASM_ACPI_H
11
12#ifdef __KERNEL__
13
14#include <acpi/pdc_intel.h>
15
16#include <linux/init.h>
17#include <linux/numa.h>
18#include <asm/numa.h>
19
20
21extern int acpi_lapic;
22#define acpi_disabled 0 /* ACPI always enabled on IA64 */
23#define acpi_noirq 0 /* ACPI always enabled on IA64 */
24#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
25#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
26
27static inline bool acpi_has_cpu_in_madt(void)
28{
29 return !!acpi_lapic;
30}
31
32#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
/* ACPI cannot be turned off on IA64, so disabling is a no-op. */
static inline void disable_acpi(void)
{
}
34
35int acpi_request_vector (u32 int_type);
36int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
37
38/* Low-level suspend routine. */
39extern int acpi_suspend_lowlevel(void);
40
/*
 * IA64 has no real-mode wakeup vector; report 0 ("no wakeup address").
 */
static inline unsigned long acpi_get_wakeup_address(void)
{
	return 0UL;
}
45
46/*
47 * Record the cpei override flag and current logical cpu. This is
48 * useful for CPU removal.
49 */
50extern unsigned int can_cpei_retarget(void);
51extern unsigned int is_cpu_cpei_target(unsigned int cpu);
52extern void set_cpei_target_cpu(unsigned int cpu);
53extern unsigned int get_cpei_target_cpu(void);
54extern void prefill_possible_map(void);
55#ifdef CONFIG_ACPI_HOTPLUG_CPU
56extern int additional_cpus;
57#else
58#define additional_cpus 0
59#endif
60
61#ifdef CONFIG_ACPI_NUMA
62#if MAX_NUMNODES > 256
63#define MAX_PXM_DOMAINS MAX_NUMNODES
64#else
65#define MAX_PXM_DOMAINS (256)
66#endif
67extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
68extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
69#endif
70
/* IA64 always evaluates processor _PDC capabilities. */
static inline bool arch_has_acpi_pdc(void)
{
	return true;
}
72static inline void arch_acpi_set_pdc_bits(u32 *buf)
73{
74 buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
75}
76
77#ifdef CONFIG_ACPI_NUMA
78extern cpumask_t early_cpu_possible_map;
79#define for_each_possible_early_cpu(cpu) \
80 for_each_cpu((cpu), &early_cpu_possible_map)
81
82static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
83{
84 int low_cpu, high_cpu;
85 int cpu;
86 int next_nid = 0;
87
88 low_cpu = cpumask_weight(&early_cpu_possible_map);
89
90 high_cpu = max(low_cpu, min_cpus);
91 high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
92
93 for (cpu = low_cpu; cpu < high_cpu; cpu++) {
94 cpumask_set_cpu(cpu, &early_cpu_possible_map);
95 if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
96 node_cpuid[cpu].nid = next_nid;
97 next_nid++;
98 if (next_nid >= num_online_nodes())
99 next_nid = 0;
100 }
101 }
102}
103
104extern void acpi_numa_fixup(void);
105
106#endif /* CONFIG_ACPI_NUMA */
107
108#endif /*__KERNEL__*/
109
110#endif /*_ASM_ACPI_H*/
1/*
2 * Copyright (C) 1999 VA Linux Systems
3 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
4 * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
5 * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26#ifndef _ASM_ACPI_H
27#define _ASM_ACPI_H
28
29#ifdef __KERNEL__
30
31#include <acpi/pdc_intel.h>
32
33#include <linux/init.h>
34#include <linux/numa.h>
35#include <asm/numa.h>
36
37#define COMPILER_DEPENDENT_INT64 long
38#define COMPILER_DEPENDENT_UINT64 unsigned long
39
40/*
41 * Calling conventions:
42 *
43 * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
44 * ACPI_EXTERNAL_XFACE - External ACPI interfaces
45 * ACPI_INTERNAL_XFACE - Internal ACPI interfaces
46 * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
47 */
48#define ACPI_SYSTEM_XFACE
49#define ACPI_EXTERNAL_XFACE
50#define ACPI_INTERNAL_XFACE
51#define ACPI_INTERNAL_VAR_XFACE
52
53/* Asm macros */
54
55#define ACPI_ASM_MACROS
56#define BREAKPOINT3
57#define ACPI_DISABLE_IRQS() local_irq_disable()
58#define ACPI_ENABLE_IRQS() local_irq_enable()
59#define ACPI_FLUSH_CPU_CACHE()
60
/*
 * Acquire the ACPI global lock shared with firmware, using the
 * algorithm from the ACPI specification.  In the lock word, bit 0
 * is the "pending" flag and bit 1 the "owned" flag.  Atomically
 * set owned; if the lock is already owned, set pending instead so
 * the owner will signal us on release.
 *
 * Returns -1 (all ones, i.e. TRUE) when the lock was acquired,
 * 0 when it is held elsewhere and we are now marked pending.
 */
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		/* Set owned (bit 1); if it was already owned, set pending (bit 0). */
		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	/* new < 3 => owned without pending => we took the lock. */
	return (new < 3) ? -1 : 0;
}
72
/*
 * Release the ACPI global lock: atomically clear both the owned and
 * pending bits.  Returns the previous pending bit — nonzero means a
 * waiter exists and the caller must signal release to the firmware
 * (via the GBL_RLS mechanism).
 */
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
	unsigned int old, new, val;
	do {
		old = *lock;
		/* Drop both owned and pending flags. */
		new = old & ~0x3;
		val = ia64_cmpxchg4_acq(lock, new, old);
	} while (unlikely (val != old));
	return old & 0x1;
}
84
85#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
86 ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))
87
88#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
89 ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))
90
91#ifdef CONFIG_ACPI
92#define acpi_disabled 0 /* ACPI always enabled on IA64 */
93#define acpi_noirq 0 /* ACPI always enabled on IA64 */
94#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
95#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
96#endif
97#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
/* ACPI cannot be turned off on IA64, so disabling is a no-op. */
static inline void disable_acpi(void)
{
}
/* No PCI host-bridge _CRS quirks are applied on IA64. */
static inline void pci_acpi_crs_quirks(void)
{
}
100
101#ifdef CONFIG_IA64_GENERIC
102const char *acpi_get_sysname (void);
103#else
/*
 * Return the machine-vector/platform name for this kernel.  When the
 * kernel is built for a single specific platform (not IA64_GENERIC),
 * only one answer is possible, so it is resolved at compile time
 * from the platform config option.
 */
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# elif defined (CONFIG_IA64_XEN_GUEST)
	return "xen";
# elif defined(CONFIG_IA64_DIG_VTD)
	return "dig_vtd";
# else
	/* A platform kernel must match one of the cases above. */
#	error Unknown platform.  Fix acpi.c.
# endif
}
126#endif
127int acpi_request_vector (u32 int_type);
128int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
129
130/* Low-level suspend routine. */
131extern int acpi_suspend_lowlevel(void);
132
133extern unsigned long acpi_wakeup_address;
134
135/*
136 * Record the cpei override flag and current logical cpu. This is
137 * useful for CPU removal.
138 */
139extern unsigned int can_cpei_retarget(void);
140extern unsigned int is_cpu_cpei_target(unsigned int cpu);
141extern void set_cpei_target_cpu(unsigned int cpu);
142extern unsigned int get_cpei_target_cpu(void);
143extern void prefill_possible_map(void);
144#ifdef CONFIG_ACPI_HOTPLUG_CPU
145extern int additional_cpus;
146#else
147#define additional_cpus 0
148#endif
149
150#ifdef CONFIG_ACPI_NUMA
151#if MAX_NUMNODES > 256
152#define MAX_PXM_DOMAINS MAX_NUMNODES
153#else
154#define MAX_PXM_DOMAINS (256)
155#endif
156extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
157extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
158#endif
159
/* IA64 always evaluates processor _PDC capabilities. */
static inline bool arch_has_acpi_pdc(void)
{
	return true;
}
161static inline void arch_acpi_set_pdc_bits(u32 *buf)
162{
163 buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
164}
165
166#define acpi_unlazy_tlb(x)
167
168#ifdef CONFIG_ACPI_NUMA
169extern cpumask_t early_cpu_possible_map;
170#define for_each_possible_early_cpu(cpu) \
171 for_each_cpu_mask((cpu), early_cpu_possible_map)
172
173static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
174{
175 int low_cpu, high_cpu;
176 int cpu;
177 int next_nid = 0;
178
179 low_cpu = cpus_weight(early_cpu_possible_map);
180
181 high_cpu = max(low_cpu, min_cpus);
182 high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
183
184 for (cpu = low_cpu; cpu < high_cpu; cpu++) {
185 cpu_set(cpu, early_cpu_possible_map);
186 if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
187 node_cpuid[cpu].nid = next_nid;
188 next_nid++;
189 if (next_nid >= num_online_nodes())
190 next_nid = 0;
191 }
192 }
193}
194#endif /* CONFIG_ACPI_NUMA */
195
196#endif /*__KERNEL__*/
197
198#endif /*_ASM_ACPI_H*/