arch/x86/mm/pti.c, v6.2:
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0)  {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
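The ret == N length checks above are what make the match exact: cmdline_find_option() returns the length of the value it copied into arg, so comparing that length before strncmp() rejects near-misses such as "pti=o" or "pti=onn". A standalone sketch of the same three-way parse, with parse_pti_arg() as a hypothetical helper (not part of pti.c) standing in for the command-line handling:

#include <string.h>

enum pti_mode { PTI_AUTO = 0, PTI_FORCE_OFF, PTI_FORCE_ON };

/* 'len' plays the role of cmdline_find_option()'s return value; without
 * the length check, strncmp(arg, "on", 2) would also accept "onn". */
static enum pti_mode parse_pti_arg(const char *arg, int len)
{
	if (len == 3 && !strncmp(arg, "off", 3))
		return PTI_FORCE_OFF;
	if (len == 2 && !strncmp(arg, "on", 2))
		return PTI_FORCE_ON;
	return PTI_AUTO;
}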
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
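kernel_to_user_pgdp() above relies on PTI's PGD layout: each mm's top-level table is one 8kB, two-page allocation, with the kernelmode PGD in the lower 4kB page and the usermode PGD in the upper one, so converting a pointer between the two copies is a single bit operation. A simplified, self-contained sketch of that trick (the real helper lives in arch/x86/include/asm/pgtable.h; the type and constant here are pared down):

typedef struct { unsigned long pgd; } pgd_t;

#define PTI_PGTABLE_SWITCH_BIT	12	/* PAGE_SHIFT: the copies sit 4kB apart */

/* Setting the bit hops from the kernel PGD page to the user PGD page of
 * the same two-page allocation. */
static pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return (pgd_t *)((unsigned long)pgdp | (1UL << PTI_PGTABLE_SWITCH_BIT));
}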
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only set this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}
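Note how the loop advances when it finds a hole: it does not step by a fixed stride, but rounds up to the next boundary, so a single non-present PUD or PMD entry skips the entire region that entry would cover. A standalone illustration of that arithmetic, assuming the usual x86-64 sizes (2MB PMD, 1GB PUD) and a macro equivalent to the kernel's round_up() for power-of-two alignments:

#include <stdio.h>

#define PMD_SIZE	(1UL << 21)	/* 2MB */
#define PUD_SIZE	(1UL << 30)	/* 1GB */
/* Same result as the kernel's round_up() for power-of-two 'y': */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long addr = 0xffffffff81234567UL;

	/* A hole at the PMD level skips to the next 2MB boundary ... */
	printf("next PMD: 0x%lx\n", round_up(addr + 1, PMD_SIZE));
	/* ... and a hole at the PUD level skips a whole 1GB region. */
	printf("next PUD: 0x%lx\n", round_up(addr + 1, PUD_SIZE));
	return 0;
}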
#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register.  It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}
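The final assignment builds the user-visible PTE straight from the physical address: pfn_pte() packs a page frame number together with protection bits. A much-simplified sketch of that encoding (the real pfn_pte() also masks the pfn against the supported physical address bits):

#define PAGE_SHIFT	12

typedef unsigned long pteval_t;
typedef struct { pteval_t pte; } pte_t;

/* Frame number in the high bits, protection flags in the low bits. */
static pte_t pfn_pte_sketch(unsigned long pfn, pteval_t prot)
{
	return (pte_t){ (pfn << PAGE_SHIFT) | prot };
}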
#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}
static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}
arch/x86/mm/pti.c, v6.13.7:
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt

/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif

/*
 * Define the page-table levels we clone for user-space on 32
 * and 64 bit.
 */
#ifdef CONFIG_X86_64
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

/* Assume mode is auto unless overridden via cmdline below. */
static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	if (cpu_mitigations_off())
		pti_mode = PTI_FORCE_OFF;
	if (pti_mode == PTI_FORCE_OFF) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

	if (pti_mode == PTI_FORCE_ON)
		pti_print_if_secure("force enabled on command line.");

	if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;

	setup_force_cpu_cap(X86_FEATURE_PTI);
}

static int __init pti_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		pti_mode = PTI_FORCE_OFF;
	else if (!strcmp(arg, "on"))
		pti_mode = PTI_FORCE_ON;
	else if (!strcmp(arg, "auto"))
		pti_mode = PTI_AUTO;
	else
		return -EINVAL;
	return 0;
}
early_param("pti", pti_parse_cmdline);

static int __init pti_parse_cmdline_nopti(char *arg)
{
	pti_mode = PTI_FORCE_OFF;
	return 0;
}
early_param("nopti", pti_parse_cmdline_nopti);
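Relative to v6.2, command-line parsing has moved out of pti_check_boottime_disable() and into early_param() callbacks, and exact matching is now a plain strcmp() on the value after the '=' (so "pti=off" arrives as arg == "off"); an unrecognized value returns -EINVAL, which the early-boot parser reports as a malformed option. A minimal sketch of the same pattern, with a hypothetical option name and variable that are not part of pti.c:

/* Hypothetical example, same shape as pti_parse_cmdline() above. Note
 * that 'arg' may be NULL when the option is given without a value. */
static bool example_force __initdata;

static int __init parse_example(char *arg)
{
	if (!arg || strcmp(arg, "force"))
		return -EINVAL;
	example_force = true;
	return 0;
}
early_param("example", parse_example);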
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp) || (pgd.pgd & _PAGE_NOPTISHADOW))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set.  This could be an executable
	 *     EFI runtime mapping or something similar, and the kernel
	 *     may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_leaf(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_leaf(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_leaf(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* Large PMD mapping found */
	if (pmd_leaf(*pmd)) {
		/* Clear the PMD if we hit a large mapping from the first round */
		if (late_text) {
			set_pmd(pmd, __pmd(0));
		} else {
			WARN_ON_ONCE(1);
			return NULL;
		}
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level, bool late_text)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_leaf(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs.  This ensures only setting
			 * _PAGE_GLOBAL on present PMDs.  This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables.  It is effectively
			 * global, so set it as global in both copies.  Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported.  The check keeps consistency with
			 * code that only set this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD.  That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range
			 */
			*target_pmd = *pmd;

			addr = round_up(addr + 1, PMD_SIZE);

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr = round_up(addr + 1, PAGE_SIZE);
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr = round_up(addr + 1, PAGE_SIZE);

		} else {
			BUG();
		}
	}
}
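One behavioral change from the v6.2 version of this loop: after cloning, the address now advances with round_up(addr + 1, PMD_SIZE) (and likewise PAGE_SIZE) instead of a fixed addr += stride, so a start address that is not PMD-aligned snaps to the next boundary rather than staying permanently offset. The difference, in a standalone sketch (2MB PMDs assumed):

#include <stdio.h>

#define PMD_SIZE	(1UL << 21)
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long addr = 0xffffffff81001000UL;	/* not 2MB-aligned */

	/* v6.2-style fixed stride keeps the misalignment: */
	printf("addr += PMD_SIZE: 0x%lx\n", addr + PMD_SIZE);
	/* v6.13-style advance realigns to the next PMD boundary: */
	printf("round_up:         0x%lx\n", round_up(addr + 1, PMD_SIZE));
	return 0;
}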
#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs one word of scratch space
		 * in which to spill a register.  It lives in the sp2 slot
		 * of the CPU's TSS.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va, false);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32 bit PAE systems with 1GB of Kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(bool late)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_LEVEL_KERNEL_IMAGE, late);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto.  Do the most
	 * secure thing (not global) if pti=on specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages.  Do the safe thing (disable
	 * global kernel image).  This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_RANDSTRUCT))
		return false;

	return true;
}
/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But, do not
	 * clone the areas past rodata, they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We check for X86_FEATURE_PCID here. But the init-code will
	 * clear the feature flag on 32 bit because the feature is not
	 * supported on 32 bit anyway. To print the warning we need to
	 * check with cpuid directly again.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();

	/* Replace some of the global bits just for shared entry text: */
	/*
	 * This is very early in boot. Device and Late initcalls can do
	 * modprobe before free_initmem() and mark_readonly(). This
	 * pti_clone_entry_text() allows those user-mode-helpers to function,
	 * but notably the text is still RW.
	 */
	pti_clone_entry_text(false);
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * This is after free_initmem() (all initcalls are done) and we've done
	 * mark_readonly(). Text is now NX which might've split some PMDs
	 * relative to the early clone.
	 */
	pti_clone_entry_text(true);
	pti_clone_kernel_text();

	debug_checkwx_user();
}