v6.2
// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

static __init void init_cea_offsets(void)
{
	unsigned int max_cea;
	unsigned int i, j;

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	/* O(sodding terrible) */
	for_each_possible_cpu(i) {
		unsigned int cea;

again:
		cea = get_random_u32_below(max_cea);

		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;

			if (i == j)
				break;
		}

		per_cpu(_cea_offset, i) = cea;
	}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 */
	pgprot_t gdt_prot = PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
					early_cpu_to_node(cpu));

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	init_cea_offsets();

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}
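
As a rough orientation, here is a minimal sketch (not part of the file above) of how a consumer could resolve the per-CPU structures that setup_cpu_entry_area() maps. It relies only on get_cpu_entry_area() and the entry_stack_page member used in the mappings above; the helper name is hypothetical.

/*
 * Illustration only: find this CPU's entry area and the end of the
 * page-sized entry_stack_page mapping established above via
 * cea_map_percpu_pages(). The helper name is made up for this sketch;
 * the members come from the mappings in this file.
 */
static inline unsigned long example_entry_stack_end(int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	return (unsigned long)&cea->entry_stack_page + PAGE_SIZE;
}
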
v5.4
// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB1);
	cea_map_stack(DB);
	cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
#endif

/* Set up the fixmap mappings only once per processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}