v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright(c) 2017 Intel Corporation. All rights reserved.
  4 *
  5 * This code is based in part on work published here:
  6 *
  7 *	https://github.com/IAIK/KAISER
  8 *
  9 * The original work was written and signed off for the Linux
 10 * kernel by:
 11 *
 12 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 13 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 14 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 15 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 16 *
 17 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 18 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 19 *		       Andy Lutomirsky <luto@amacapital.net>
 20 */
 21#include <linux/kernel.h>
 22#include <linux/errno.h>
 23#include <linux/string.h>
 24#include <linux/types.h>
 25#include <linux/bug.h>
 26#include <linux/init.h>
 27#include <linux/spinlock.h>
 28#include <linux/mm.h>
 29#include <linux/uaccess.h>
 30#include <linux/cpu.h>
 31
 32#include <asm/cpufeature.h>
 33#include <asm/hypervisor.h>
 34#include <asm/vsyscall.h>
 35#include <asm/cmdline.h>
 36#include <asm/pti.h>
 37#include <asm/tlbflush.h>
 38#include <asm/desc.h>
 39#include <asm/sections.h>
 40#include <asm/set_memory.h>
 41
 42#undef pr_fmt
 43#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
 44
 45/* Backporting helper */
 46#ifndef __GFP_NOTRACK
 47#define __GFP_NOTRACK	0
 48#endif
 49
 50/*
 51 * Define the page-table levels we clone for user-space on 32
 52 * and 64 bit.
 53 */
 54#ifdef CONFIG_X86_64
 55#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
 56#else
 57#define	PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
 58#endif
 59
 60static void __init pti_print_if_insecure(const char *reason)
 61{
 62	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 63		pr_info("%s\n", reason);
 64}
 65
 66static void __init pti_print_if_secure(const char *reason)
 67{
 68	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 69		pr_info("%s\n", reason);
 70}
 71
 72static enum pti_mode {
 73	PTI_AUTO = 0,
 74	PTI_FORCE_OFF,
 75	PTI_FORCE_ON
 76} pti_mode;
 77
 78void __init pti_check_boottime_disable(void)
 79{
 80	char arg[5];
 81	int ret;
 82
 83	/* Assume mode is auto unless overridden. */
 84	pti_mode = PTI_AUTO;
 85
 86	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
 87		pti_mode = PTI_FORCE_OFF;
 88		pti_print_if_insecure("disabled on XEN PV.");
 89		return;
 90	}
 91
 92	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
 93	if (ret > 0)  {
 94		if (ret == 3 && !strncmp(arg, "off", 3)) {
 95			pti_mode = PTI_FORCE_OFF;
 96			pti_print_if_insecure("disabled on command line.");
 97			return;
 98		}
 99		if (ret == 2 && !strncmp(arg, "on", 2)) {
100			pti_mode = PTI_FORCE_ON;
101			pti_print_if_secure("force enabled on command line.");
102			goto enable;
103		}
104		if (ret == 4 && !strncmp(arg, "auto", 4)) {
105			pti_mode = PTI_AUTO;
106			goto autosel;
107		}
108	}
109
110	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
111	    cpu_mitigations_off()) {
112		pti_mode = PTI_FORCE_OFF;
113		pti_print_if_insecure("disabled on command line.");
114		return;
115	}
116
117autosel:
118	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
119		return;
120enable:
121	setup_force_cpu_cap(X86_FEATURE_PTI);
122}
123
124pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
125{
126	/*
127	 * Changes to the high (kernel) portion of the kernelmode page
128	 * tables are not automatically propagated to the usermode tables.
129	 *
130	 * Users should keep in mind that, unlike the kernelmode tables,
131	 * there is no vmalloc_fault equivalent for the usermode tables.
132	 * Top-level entries added to init_mm's usermode pgd after boot
133	 * will not be automatically propagated to other mms.
134	 */
135	if (!pgdp_maps_userspace(pgdp))
136		return pgd;
137
138	/*
139	 * The user page tables get the full PGD, accessible from
140	 * userspace:
141	 */
142	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
143
144	/*
145	 * If this is normal user memory, make it NX in the kernel
146	 * pagetables so that, if we somehow screw up and return to
147	 * usermode with the kernel CR3 loaded, we'll get a page fault
148	 * instead of allowing user code to execute with the wrong CR3.
149	 *
150	 * As exceptions, we don't set NX if:
151	 *  - _PAGE_USER is not set.  This could be an executable
152	 *     EFI runtime mapping or something similar, and the kernel
153	 *     may execute from it
154	 *  - we don't have NX support
155	 *  - we're clearing the PGD (i.e. the new pgd is not present).
156	 */
157	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
158	    (__supported_pte_mask & _PAGE_NX))
159		pgd.pgd |= _PAGE_NX;
160
161	/* return the copy of the PGD we want the kernel to use: */
162	return pgd;
163}
164
165/*
166 * Walk the user copy of the page tables (optionally) trying to allocate
167 * page table pages on the way down.
168 *
169 * Returns a pointer to a P4D on success, or NULL on failure.
170 */
171static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
172{
173	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
174	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
175
176	if (address < PAGE_OFFSET) {
177		WARN_ONCE(1, "attempt to walk user address\n");
178		return NULL;
179	}
180
181	if (pgd_none(*pgd)) {
182		unsigned long new_p4d_page = __get_free_page(gfp);
183		if (WARN_ON_ONCE(!new_p4d_page))
184			return NULL;
185
186		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
187	}
188	BUILD_BUG_ON(pgd_large(*pgd) != 0);
189
190	return p4d_offset(pgd, address);
191}
192
193/*
194 * Walk the user copy of the page tables (optionally) trying to allocate
195 * page table pages on the way down.
196 *
197 * Returns a pointer to a PMD on success, or NULL on failure.
198 */
199static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
200{
201	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
202	p4d_t *p4d;
203	pud_t *pud;
204
205	p4d = pti_user_pagetable_walk_p4d(address);
206	if (!p4d)
207		return NULL;
208
209	BUILD_BUG_ON(p4d_large(*p4d) != 0);
210	if (p4d_none(*p4d)) {
211		unsigned long new_pud_page = __get_free_page(gfp);
212		if (WARN_ON_ONCE(!new_pud_page))
213			return NULL;
214
215		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
216	}
217
218	pud = pud_offset(p4d, address);
219	/* The user page tables do not use large mappings: */
220	if (pud_large(*pud)) {
221		WARN_ON(1);
222		return NULL;
223	}
224	if (pud_none(*pud)) {
225		unsigned long new_pmd_page = __get_free_page(gfp);
226		if (WARN_ON_ONCE(!new_pmd_page))
227			return NULL;
228
229		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
230	}
231
232	return pmd_offset(pud, address);
233}
234
235/*
236 * Walk the shadow copy of the page tables (optionally) trying to allocate
237 * page table pages on the way down.  Does not support large pages.
238 *
239 * Note: this is only used when mapping *new* kernel data into the
240 * user/shadow page tables.  It is never used for userspace data.
241 *
242 * Returns a pointer to a PTE on success, or NULL on failure.
243 */
244static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
245{
246	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
247	pmd_t *pmd;
248	pte_t *pte;
249
250	pmd = pti_user_pagetable_walk_pmd(address);
251	if (!pmd)
252		return NULL;
253
254	/* We can't do anything sensible if we hit a large mapping. */
255	if (pmd_large(*pmd)) {
256		WARN_ON(1);
257		return NULL;
258	}
259
260	if (pmd_none(*pmd)) {
261		unsigned long new_pte_page = __get_free_page(gfp);
262		if (!new_pte_page)
263			return NULL;
264
265		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
266	}
267
268	pte = pte_offset_kernel(pmd, address);
269	if (pte_flags(*pte) & _PAGE_USER) {
270		WARN_ONCE(1, "attempt to walk to user pte\n");
271		return NULL;
272	}
273	return pte;
274}
275
276#ifdef CONFIG_X86_VSYSCALL_EMULATION
277static void __init pti_setup_vsyscall(void)
278{
279	pte_t *pte, *target_pte;
280	unsigned int level;
281
282	pte = lookup_address(VSYSCALL_ADDR, &level);
283	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
284		return;
285
286	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
287	if (WARN_ON(!target_pte))
288		return;
289
290	*target_pte = *pte;
291	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
292}
293#else
294static void __init pti_setup_vsyscall(void) { }
295#endif
296
297enum pti_clone_level {
298	PTI_CLONE_PMD,
299	PTI_CLONE_PTE,
300};
301
302static void
303pti_clone_pgtable(unsigned long start, unsigned long end,
304		  enum pti_clone_level level)
305{
306	unsigned long addr;
307
308	/*
309	 * Clone the populated PMDs which cover start to end. These PMD areas
310	 * can have holes.
311	 */
312	for (addr = start; addr < end;) {
313		pte_t *pte, *target_pte;
314		pmd_t *pmd, *target_pmd;
315		pgd_t *pgd;
316		p4d_t *p4d;
317		pud_t *pud;
318
319		/* Overflow check */
320		if (addr < start)
321			break;
322
323		pgd = pgd_offset_k(addr);
324		if (WARN_ON(pgd_none(*pgd)))
325			return;
326		p4d = p4d_offset(pgd, addr);
327		if (WARN_ON(p4d_none(*p4d)))
328			return;
329
330		pud = pud_offset(p4d, addr);
331		if (pud_none(*pud)) {
332			WARN_ON_ONCE(addr & ~PUD_MASK);
333			addr = round_up(addr + 1, PUD_SIZE);
334			continue;
335		}
336
337		pmd = pmd_offset(pud, addr);
338		if (pmd_none(*pmd)) {
339			WARN_ON_ONCE(addr & ~PMD_MASK);
340			addr = round_up(addr + 1, PMD_SIZE);
341			continue;
342		}
343
344		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
345			target_pmd = pti_user_pagetable_walk_pmd(addr);
346			if (WARN_ON(!target_pmd))
347				return;
348
349			/*
350			 * Only clone present PMDs.  This ensures only setting
351			 * _PAGE_GLOBAL on present PMDs.  This should only be
352			 * called on well-known addresses anyway, so a non-
353			 * present PMD would be a surprise.
354			 */
355			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
356				return;
357
358			/*
359			 * Setting 'target_pmd' below creates a mapping in both
360			 * the user and kernel page tables.  It is effectively
361			 * global, so set it as global in both copies.  Note:
362			 * the X86_FEATURE_PGE check is not _required_ because
363			 * the CPU ignores _PAGE_GLOBAL when PGE is not
364			 * supported.  The check keeps consistency with
365			 * code that only set this bit when supported.
366			 */
367			if (boot_cpu_has(X86_FEATURE_PGE))
368				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
369
370			/*
371			 * Copy the PMD.  That is, the kernelmode and usermode
372			 * tables will share the last-level page tables of this
373			 * address range
374			 */
375			*target_pmd = *pmd;
376
377			addr += PMD_SIZE;
378
379		} else if (level == PTI_CLONE_PTE) {
380
381			/* Walk the page-table down to the pte level */
382			pte = pte_offset_kernel(pmd, addr);
383			if (pte_none(*pte)) {
384				addr += PAGE_SIZE;
385				continue;
386			}
387
388			/* Only clone present PTEs */
389			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
390				return;
391
392			/* Allocate PTE in the user page-table */
393			target_pte = pti_user_pagetable_walk_pte(addr);
394			if (WARN_ON(!target_pte))
395				return;
396
397			/* Set GLOBAL bit in both PTEs */
398			if (boot_cpu_has(X86_FEATURE_PGE))
399				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);
400
401			/* Clone the PTE */
402			*target_pte = *pte;
403
404			addr += PAGE_SIZE;
405
406		} else {
407			BUG();
408		}
409	}
410}
411
412#ifdef CONFIG_X86_64
413/*
414 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 415 * next-level entry on 5-level systems).
416 */
417static void __init pti_clone_p4d(unsigned long addr)
418{
419	p4d_t *kernel_p4d, *user_p4d;
420	pgd_t *kernel_pgd;
421
422	user_p4d = pti_user_pagetable_walk_p4d(addr);
423	if (!user_p4d)
424		return;
425
426	kernel_pgd = pgd_offset_k(addr);
427	kernel_p4d = p4d_offset(kernel_pgd, addr);
428	*user_p4d = *kernel_p4d;
429}
430
431/*
432 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
433 * page table.
434 */
435static void __init pti_clone_user_shared(void)
436{
437	unsigned int cpu;
438
439	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
440
441	for_each_possible_cpu(cpu) {
442		/*
443		 * The SYSCALL64 entry code needs one word of scratch space
444		 * in which to spill a register.  It lives in the sp2 slot
445		 * of the CPU's TSS.
446		 *
447		 * This is done for all possible CPUs during boot to ensure
448		 * that it's propagated to all mms.
449		 */
450
451		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
452		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
453		pte_t *target_pte;
454
455		target_pte = pti_user_pagetable_walk_pte(va);
456		if (WARN_ON(!target_pte))
457			return;
458
459		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
460	}
461}
462
463#else /* CONFIG_X86_64 */
464
465/*
466 * On 32 bit PAE systems with 1GB of Kernel address space there is only
467 * one pgd/p4d for the whole kernel. Cloning that would map the whole
468 * address space into the user page-tables, making PTI useless. So clone
469 * the page-table on the PMD level to prevent that.
470 */
471static void __init pti_clone_user_shared(void)
472{
473	unsigned long start, end;
474
475	start = CPU_ENTRY_AREA_BASE;
476	end   = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
477
478	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
479}
480#endif /* CONFIG_X86_64 */
481
482/*
483 * Clone the ESPFIX P4D into the user space visible page table
484 */
485static void __init pti_setup_espfix64(void)
486{
487#ifdef CONFIG_X86_ESPFIX64
488	pti_clone_p4d(ESPFIX_BASE_ADDR);
489#endif
490}
491
492/*
493 * Clone the populated PMDs of the entry text and force it RO.
494 */
495static void pti_clone_entry_text(void)
496{
497	pti_clone_pgtable((unsigned long) __entry_text_start,
498			  (unsigned long) __entry_text_end,
499			  PTI_CLONE_PMD);
500}
501
502/*
503 * Global pages and PCIDs are both ways to make kernel TLB entries
504 * live longer, reduce TLB misses and improve kernel performance.
505 * But, leaving all kernel text Global makes it potentially accessible
506 * to Meltdown-style attacks which make it trivial to find gadgets or
507 * defeat KASLR.
508 *
509 * Only use global pages when it is really worth it.
510 */
511static inline bool pti_kernel_image_global_ok(void)
512{
513	/*
514	 * Systems with PCIDs get little benefit from global
515	 * kernel text and are not worth the downsides.
516	 */
517	if (cpu_feature_enabled(X86_FEATURE_PCID))
518		return false;
519
520	/*
521	 * Only do global kernel image for pti=auto.  Do the most
522	 * secure thing (not global) if pti=on specified.
523	 */
524	if (pti_mode != PTI_AUTO)
525		return false;
526
527	/*
528	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
529	 * global kernel image pages.  Do the safe thing (disable
530	 * global kernel image).  This is unlikely to ever be
531	 * noticed because PTI is disabled by default on AMD CPUs.
532	 */
533	if (boot_cpu_has(X86_FEATURE_K8))
534		return false;
535
536	/*
537	 * RANDSTRUCT derives its hardening benefits from the
538	 * attacker's lack of knowledge about the layout of kernel
539	 * data structures.  Keep the kernel image non-global in
540	 * cases where RANDSTRUCT is in use to help keep the layout a
541	 * secret.
542	 */
543	if (IS_ENABLED(CONFIG_RANDSTRUCT))
544		return false;
545
546	return true;
547}
548
549/*
550 * For some configurations, map all of kernel text into the user page
551 * tables.  This reduces TLB misses, especially on non-PCID systems.
552 */
553static void pti_clone_kernel_text(void)
554{
555	/*
556	 * rodata is part of the kernel image and is normally
557	 * readable on the filesystem or on the web.  But, do not
558	 * clone the areas past rodata, they might contain secrets.
559	 */
560	unsigned long start = PFN_ALIGN(_text);
561	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
562	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);
563
564	if (!pti_kernel_image_global_ok())
565		return;
566
567	pr_debug("mapping partial kernel image into user address space\n");
568
569	/*
570	 * Note that this will undo _some_ of the work that
571	 * pti_set_kernel_image_nonglobal() did to clear the
572	 * global bit.
573	 */
574	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
575
576	/*
577	 * pti_clone_pgtable() will set the global bit in any PMDs
578	 * that it clones, but we also need to get any PTEs in
579	 * the last level for areas that are not huge-page-aligned.
580	 */
581
582	/* Set the global bit for normal non-__init kernel text: */
583	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
584}
585
586static void pti_set_kernel_image_nonglobal(void)
587{
588	/*
589	 * The identity map is created with PMDs, regardless of the
590	 * actual length of the kernel.  We need to clear
591	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
592	 * of the image.
593	 */
594	unsigned long start = PFN_ALIGN(_text);
595	unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
596
597	/*
598	 * This clears _PAGE_GLOBAL from the entire kernel image.
 599 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
600	 * areas that are mapped to userspace.
601	 */
602	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
603}
604
605/*
606 * Initialize kernel page table isolation
607 */
608void __init pti_init(void)
609{
610	if (!boot_cpu_has(X86_FEATURE_PTI))
611		return;
612
613	pr_info("enabled\n");
614
615#ifdef CONFIG_X86_32
616	/*
617	 * We check for X86_FEATURE_PCID here. But the init-code will
618	 * clear the feature flag on 32 bit because the feature is not
619	 * supported on 32 bit anyway. To print the warning we need to
620	 * check with cpuid directly again.
621	 */
622	if (cpuid_ecx(0x1) & BIT(17)) {
623		/* Use printk to work around pr_fmt() */
624		printk(KERN_WARNING "\n");
625		printk(KERN_WARNING "************************************************************\n");
626		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
627		printk(KERN_WARNING "**                                                        **\n");
628		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
629		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
630		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
631		printk(KERN_WARNING "**                                                        **\n");
632		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
633		printk(KERN_WARNING "************************************************************\n");
634	}
635#endif
636
637	pti_clone_user_shared();
638
639	/* Undo all global bits from the init pagetables in head_64.S: */
640	pti_set_kernel_image_nonglobal();
641	/* Replace some of the global bits just for shared entry text: */
642	pti_clone_entry_text();
643	pti_setup_espfix64();
644	pti_setup_vsyscall();
645}
646
647/*
648 * Finalize the kernel mappings in the userspace page-table. Some of the
649 * mappings for the kernel image might have changed since pti_init()
650 * cloned them. This is because parts of the kernel image have been
651 * mapped RO and/or NX.  These changes need to be cloned again to the
652 * userspace page-table.
653 */
654void pti_finalize(void)
655{
656	if (!boot_cpu_has(X86_FEATURE_PTI))
657		return;
658	/*
659	 * We need to clone everything (again) that maps parts of the
660	 * kernel image.
661	 */
662	pti_clone_entry_text();
663	pti_clone_kernel_text();
664
665	debug_checkwx_user();
666}
v4.17
  1/*
  2 * Copyright(c) 2017 Intel Corporation. All rights reserved.
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of version 2 of the GNU General Public License as
  6 * published by the Free Software Foundation.
  7 *
  8 * This program is distributed in the hope that it will be useful, but
  9 * WITHOUT ANY WARRANTY; without even the implied warranty of
 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 11 * General Public License for more details.
 12 *
 13 * This code is based in part on work published here:
 14 *
 15 *	https://github.com/IAIK/KAISER
 16 *
 17 * The original work was written and signed off for the Linux
 18 * kernel by:
 19 *
 20 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 21 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 22 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 23 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 24 *
 25 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 26 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 27 *		       Andy Lutomirsky <luto@amacapital.net>
 28 */
 29#include <linux/kernel.h>
 30#include <linux/errno.h>
 31#include <linux/string.h>
 32#include <linux/types.h>
 33#include <linux/bug.h>
 34#include <linux/init.h>
 35#include <linux/spinlock.h>
 36#include <linux/mm.h>
 37#include <linux/uaccess.h>
 38
 39#include <asm/cpufeature.h>
 40#include <asm/hypervisor.h>
 41#include <asm/vsyscall.h>
 42#include <asm/cmdline.h>
 43#include <asm/pti.h>
 44#include <asm/pgtable.h>
 45#include <asm/pgalloc.h>
 46#include <asm/tlbflush.h>
 47#include <asm/desc.h>
 48
 49#undef pr_fmt
 50#define pr_fmt(fmt)     "Kernel/User page tables isolation: " fmt
 51
 52/* Backporting helper */
 53#ifndef __GFP_NOTRACK
 54#define __GFP_NOTRACK	0
 55#endif
 56
 57static void __init pti_print_if_insecure(const char *reason)
 58{
 59	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 60		pr_info("%s\n", reason);
 61}
 62
 63static void __init pti_print_if_secure(const char *reason)
 64{
 65	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
 66		pr_info("%s\n", reason);
 67}
 68
 69enum pti_mode {
 70	PTI_AUTO = 0,
 71	PTI_FORCE_OFF,
 72	PTI_FORCE_ON
 73} pti_mode;
 74
 75void __init pti_check_boottime_disable(void)
 76{
 77	char arg[5];
 78	int ret;
 79
 80	/* Assume mode is auto unless overridden. */
 81	pti_mode = PTI_AUTO;
 82
 83	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
 84		pti_mode = PTI_FORCE_OFF;
 85		pti_print_if_insecure("disabled on XEN PV.");
 86		return;
 87	}
 88
 89	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
 90	if (ret > 0)  {
 91		if (ret == 3 && !strncmp(arg, "off", 3)) {
 92			pti_mode = PTI_FORCE_OFF;
 93			pti_print_if_insecure("disabled on command line.");
 94			return;
 95		}
 96		if (ret == 2 && !strncmp(arg, "on", 2)) {
 97			pti_mode = PTI_FORCE_ON;
 98			pti_print_if_secure("force enabled on command line.");
 99			goto enable;
100		}
101		if (ret == 4 && !strncmp(arg, "auto", 4)) {
102			pti_mode = PTI_AUTO;
103			goto autosel;
104		}
105	}
106
107	if (cmdline_find_option_bool(boot_command_line, "nopti")) {
108		pti_mode = PTI_FORCE_OFF;
109		pti_print_if_insecure("disabled on command line.");
110		return;
111	}
112
113autosel:
114	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
115		return;
116enable:
117	setup_force_cpu_cap(X86_FEATURE_PTI);
118}
119
120pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
121{
122	/*
123	 * Changes to the high (kernel) portion of the kernelmode page
124	 * tables are not automatically propagated to the usermode tables.
125	 *
126	 * Users should keep in mind that, unlike the kernelmode tables,
127	 * there is no vmalloc_fault equivalent for the usermode tables.
128	 * Top-level entries added to init_mm's usermode pgd after boot
129	 * will not be automatically propagated to other mms.
130	 */
131	if (!pgdp_maps_userspace(pgdp))
132		return pgd;
133
134	/*
135	 * The user page tables get the full PGD, accessible from
136	 * userspace:
137	 */
138	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
139
140	/*
141	 * If this is normal user memory, make it NX in the kernel
142	 * pagetables so that, if we somehow screw up and return to
143	 * usermode with the kernel CR3 loaded, we'll get a page fault
144	 * instead of allowing user code to execute with the wrong CR3.
145	 *
146	 * As exceptions, we don't set NX if:
147	 *  - _PAGE_USER is not set.  This could be an executable
148	 *     EFI runtime mapping or something similar, and the kernel
149	 *     may execute from it
150	 *  - we don't have NX support
151	 *  - we're clearing the PGD (i.e. the new pgd is not present).
152	 */
153	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
154	    (__supported_pte_mask & _PAGE_NX))
155		pgd.pgd |= _PAGE_NX;
156
157	/* return the copy of the PGD we want the kernel to use: */
158	return pgd;
159}
160
161/*
162 * Walk the user copy of the page tables (optionally) trying to allocate
163 * page table pages on the way down.
164 *
165 * Returns a pointer to a P4D on success, or NULL on failure.
166 */
167static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
168{
169	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
170	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
171
172	if (address < PAGE_OFFSET) {
173		WARN_ONCE(1, "attempt to walk user address\n");
174		return NULL;
175	}
176
177	if (pgd_none(*pgd)) {
178		unsigned long new_p4d_page = __get_free_page(gfp);
179		if (!new_p4d_page)
180			return NULL;
181
182		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
183	}
184	BUILD_BUG_ON(pgd_large(*pgd) != 0);
185
186	return p4d_offset(pgd, address);
187}
188
189/*
190 * Walk the user copy of the page tables (optionally) trying to allocate
191 * page table pages on the way down.
192 *
193 * Returns a pointer to a PMD on success, or NULL on failure.
194 */
195static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
196{
197	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
198	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
199	pud_t *pud;
200
201	BUILD_BUG_ON(p4d_large(*p4d) != 0);
202	if (p4d_none(*p4d)) {
203		unsigned long new_pud_page = __get_free_page(gfp);
204		if (!new_pud_page)
205			return NULL;
206
207		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
208	}
209
210	pud = pud_offset(p4d, address);
211	/* The user page tables do not use large mappings: */
212	if (pud_large(*pud)) {
213		WARN_ON(1);
214		return NULL;
215	}
216	if (pud_none(*pud)) {
217		unsigned long new_pmd_page = __get_free_page(gfp);
218		if (!new_pmd_page)
219			return NULL;
220
221		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
222	}
223
224	return pmd_offset(pud, address);
225}
226
227#ifdef CONFIG_X86_VSYSCALL_EMULATION
228/*
229 * Walk the shadow copy of the page tables (optionally) trying to allocate
230 * page table pages on the way down.  Does not support large pages.
231 *
232 * Note: this is only used when mapping *new* kernel data into the
233 * user/shadow page tables.  It is never used for userspace data.
234 *
235 * Returns a pointer to a PTE on success, or NULL on failure.
236 */
237static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
238{
239	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
240	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
241	pte_t *pte;
242
243	/* We can't do anything sensible if we hit a large mapping. */
244	if (pmd_large(*pmd)) {
245		WARN_ON(1);
246		return NULL;
247	}
248
249	if (pmd_none(*pmd)) {
250		unsigned long new_pte_page = __get_free_page(gfp);
251		if (!new_pte_page)
252			return NULL;
253
254		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
255	}
256
257	pte = pte_offset_kernel(pmd, address);
258	if (pte_flags(*pte) & _PAGE_USER) {
259		WARN_ONCE(1, "attempt to walk to user pte\n");
260		return NULL;
261	}
262	return pte;
263}
264
265static void __init pti_setup_vsyscall(void)
266{
267	pte_t *pte, *target_pte;
268	unsigned int level;
269
270	pte = lookup_address(VSYSCALL_ADDR, &level);
271	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
272		return;
273
274	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
275	if (WARN_ON(!target_pte))
276		return;
277
278	*target_pte = *pte;
279	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
280}
281#else
282static void __init pti_setup_vsyscall(void) { }
283#endif
284
285static void
286pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
287{
288	unsigned long addr;
289
290	/*
291	 * Clone the populated PMDs which cover start to end. These PMD areas
292	 * can have holes.
293	 */
294	for (addr = start; addr < end; addr += PMD_SIZE) {
295		pmd_t *pmd, *target_pmd;
296		pgd_t *pgd;
297		p4d_t *p4d;
298		pud_t *pud;
299
300		pgd = pgd_offset_k(addr);
301		if (WARN_ON(pgd_none(*pgd)))
302			return;
303		p4d = p4d_offset(pgd, addr);
304		if (WARN_ON(p4d_none(*p4d)))
305			return;
306		pud = pud_offset(p4d, addr);
307		if (pud_none(*pud))
308			continue;
309		pmd = pmd_offset(pud, addr);
310		if (pmd_none(*pmd))
311			continue;
312
313		target_pmd = pti_user_pagetable_walk_pmd(addr);
314		if (WARN_ON(!target_pmd))
315			return;
316
317		/*
318		 * Only clone present PMDs.  This ensures only setting
319		 * _PAGE_GLOBAL on present PMDs.  This should only be
320		 * called on well-known addresses anyway, so a non-
321		 * present PMD would be a surprise.
322		 */
323		if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
324			return;
325
326		/*
327		 * Setting 'target_pmd' below creates a mapping in both
328		 * the user and kernel page tables.  It is effectively
329		 * global, so set it as global in both copies.  Note:
330		 * the X86_FEATURE_PGE check is not _required_ because
331		 * the CPU ignores _PAGE_GLOBAL when PGE is not
 332		 * supported.  The check keeps consistency with
333		 * code that only set this bit when supported.
334		 */
335		if (boot_cpu_has(X86_FEATURE_PGE))
336			*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);
337
338		/*
339		 * Copy the PMD.  That is, the kernelmode and usermode
340		 * tables will share the last-level page tables of this
341		 * address range
342		 */
343		*target_pmd = pmd_clear_flags(*pmd, clear);
344	}
345}
346
347/*
348 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 349 * next-level entry on 5-level systems).
350 */
351static void __init pti_clone_p4d(unsigned long addr)
352{
353	p4d_t *kernel_p4d, *user_p4d;
354	pgd_t *kernel_pgd;
355
356	user_p4d = pti_user_pagetable_walk_p4d(addr);
357	kernel_pgd = pgd_offset_k(addr);
358	kernel_p4d = p4d_offset(kernel_pgd, addr);
359	*user_p4d = *kernel_p4d;
360}
361
362/*
363 * Clone the CPU_ENTRY_AREA into the user space visible page table.
364 */
365static void __init pti_clone_user_shared(void)
366{
367	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
368}
369
370/*
371 * Clone the ESPFIX P4D into the user space visible page table
372 */
373static void __init pti_setup_espfix64(void)
374{
375#ifdef CONFIG_X86_ESPFIX64
376	pti_clone_p4d(ESPFIX_BASE_ADDR);
377#endif
378}
379
380/*
381 * Clone the populated PMDs of the entry and irqentry text and force it RO.
382 */
383static void __init pti_clone_entry_text(void)
384{
385	pti_clone_pmds((unsigned long) __entry_text_start,
386			(unsigned long) __irqentry_text_end,
387		       _PAGE_RW);
388}
389
390/*
391 * Global pages and PCIDs are both ways to make kernel TLB entries
392 * live longer, reduce TLB misses and improve kernel performance.
393 * But, leaving all kernel text Global makes it potentially accessible
394 * to Meltdown-style attacks which make it trivial to find gadgets or
395 * defeat KASLR.
396 *
397 * Only use global pages when it is really worth it.
398 */
399static inline bool pti_kernel_image_global_ok(void)
400{
401	/*
 402	 * Systems with PCIDs get little benefit from global
403	 * kernel text and are not worth the downsides.
404	 */
405	if (cpu_feature_enabled(X86_FEATURE_PCID))
406		return false;
407
408	/*
409	 * Only do global kernel image for pti=auto.  Do the most
410	 * secure thing (not global) if pti=on specified.
411	 */
412	if (pti_mode != PTI_AUTO)
413		return false;
414
415	/*
416	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
417	 * global kernel image pages.  Do the safe thing (disable
418	 * global kernel image).  This is unlikely to ever be
419	 * noticed because PTI is disabled by default on AMD CPUs.
420	 */
421	if (boot_cpu_has(X86_FEATURE_K8))
422		return false;
423
424	/*
425	 * RANDSTRUCT derives its hardening benefits from the
426	 * attacker's lack of knowledge about the layout of kernel
427	 * data structures.  Keep the kernel image non-global in
428	 * cases where RANDSTRUCT is in use to help keep the layout a
429	 * secret.
430	 */
431	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
432		return false;
433
434	return true;
435}
436
437/*
438 * For some configurations, map all of kernel text into the user page
439 * tables.  This reduces TLB misses, especially on non-PCID systems.
440 */
441void pti_clone_kernel_text(void)
442{
443	/*
444	 * rodata is part of the kernel image and is normally
445	 * readable on the filesystem or on the web.  But, do not
446	 * clone the areas past rodata, they might contain secrets.
447	 */
448	unsigned long start = PFN_ALIGN(_text);
449	unsigned long end = (unsigned long)__end_rodata_hpage_align;
450
451	if (!pti_kernel_image_global_ok())
452		return;
453
454	pr_debug("mapping partial kernel image into user address space\n");
455
456	/*
457	 * Note that this will undo _some_ of the work that
458	 * pti_set_kernel_image_nonglobal() did to clear the
459	 * global bit.
460	 */
461	pti_clone_pmds(start, end, _PAGE_RW);
462}
463
464/*
465 * This is the only user for it and it is not arch-generic like
466 * the other set_memory.h functions.  Just extern it.
467 */
468extern int set_memory_nonglobal(unsigned long addr, int numpages);
469void pti_set_kernel_image_nonglobal(void)
470{
471	/*
472	 * The identity map is created with PMDs, regardless of the
473	 * actual length of the kernel.  We need to clear
474	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
475	 * of the image.
476	 */
477	unsigned long start = PFN_ALIGN(_text);
478	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
479
480	if (pti_kernel_image_global_ok())
481		return;
482
483	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
484}
485
486/*
487 * Initialize kernel page table isolation
488 */
489void __init pti_init(void)
490{
491	if (!static_cpu_has(X86_FEATURE_PTI))
492		return;
493
494	pr_info("enabled\n");
495
496	pti_clone_user_shared();
497
498	/* Undo all global bits from the init pagetables in head_64.S: */
499	pti_set_kernel_image_nonglobal();
500	/* Replace some of the global bits just for shared entry text: */
501	pti_clone_entry_text();
502	pti_setup_espfix64();
503	pti_setup_vsyscall();
504}