/* arch/x86/include/asm/mmu.h — as of Linux v4.6 */
 1#ifndef _ASM_X86_MMU_H
 2#define _ASM_X86_MMU_H
 3
 4#include <linux/spinlock.h>
 
 5#include <linux/mutex.h>
 
 
 
 
 
 
 
 
 
 
 
 6
 7/*
 8 * The x86 doesn't have a mmu context, but
 9 * we put the segment information here.
10 */
/*
 * Arch-specific MMU context for x86, embedded in mm_struct as
 * mm->context.  The x86 doesn't have a mmu context proper, so this
 * holds segment (LDT), vdso, and perf-permission state instead.
 */
typedef struct {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Per-mm LDT state.  NOTE(review): presumably NULL until
	 * userspace installs an LDT via modify_ldt(2), and guarded by
	 * the 'lock' mutex below — confirm against the LDT helpers.
	 */
	struct ldt_struct *ldt;
#endif

#ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
	unsigned short ia32_compat;
#endif

	/* Serializes updates to this context (vdso/LDT setup). */
	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
} mm_context_t;
27
#ifdef CONFIG_SMP
/*
 * Detach CPU 'cpu' from the mm it is currently using.
 * NOTE(review): the implementation lives in the arch TLB code, not in
 * this header — exact semantics (lazy-TLB teardown) to be confirmed there.
 */
void leave_mm(int cpu);
#else
/* Uniprocessor build: nothing to detach, so this is a no-op stub. */
static inline void leave_mm(int cpu)
{
}
#endif
35
36#endif /* _ASM_X86_MMU_H */
/* arch/x86/include/asm/mmu.h — as of Linux v6.8 */
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef _ASM_X86_MMU_H
 3#define _ASM_X86_MMU_H
 4
 5#include <linux/spinlock.h>
 6#include <linux/rwsem.h>
 7#include <linux/mutex.h>
 8#include <linux/atomic.h>
 9#include <linux/bits.h>
10
11/* Uprobes on this MM assume 32-bit code */
12#define MM_CONTEXT_UPROBE_IA32		0
13/* vsyscall page is accessible on this MM */
14#define MM_CONTEXT_HAS_VSYSCALL		1
15/* Do not allow changing LAM mode */
16#define MM_CONTEXT_LOCK_LAM		2
17/* Allow LAM and SVA coexisting */
18#define MM_CONTEXT_FORCE_TAGGED_SVA	3
19
/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * NOTE(review): presumably ldt_usr_sem serializes userspace LDT
	 * updates and guards 'ldt' — confirm against the LDT code.
	 */
	struct rw_semaphore	ldt_usr_sem;
	struct ldt_struct	*ldt;
#endif

#ifdef CONFIG_X86_64
	/* Bit field of the MM_CONTEXT_* flags defined at the top of this file. */
	unsigned long flags;
#endif

#ifdef CONFIG_ADDRESS_MASKING
	/* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
	unsigned long lam_cr3_mask;

	/* Significant bits of the virtual address. Excludes tag bits. */
	u64 untag_mask;
#endif

	/* Serializes updates to this context (vdso/LDT setup). */
	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	/* NOTE(review): presumably the pkey reserved for execute-only
	 * mappings, with a negative sentinel when none — confirm. */
	s16 execute_only_pkey;
#endif
} mm_context_t;
71
/*
 * Static initializer for an mm's .context: ctx_id 1 (zero is never a
 * valid ctx_id, per the comment on mm_context_t) and a usable lock.
 * NOTE(review): presumably consumed by the init_mm definition — confirm.
 */
#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
77
/*
 * Detach CPU 'cpu' from its current mm.  The self-#define below is the
 * usual kernel idiom signalling that the arch provides its own
 * implementation — NOTE(review): confirm which generic header checks it.
 */
void leave_mm(int cpu);
#define leave_mm leave_mm
 
 
 
 
80
81#endif /* _ASM_X86_MMU_H */