/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 *
 * MAP_EXECUTABLE is completely ignored throughout the kernel.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
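/*
 * Illustrative sketch of how this mask is consumed, simplified from the
 * MAP_SHARED_VALIDATE handling in mm/mmap.c (quoted from memory, so
 * treat the exact field and constant names as assumptions): flags
 * outside the mask, plus whatever the backing file explicitly
 * advertises, are rejected rather than silently ignored.
 *
 *	if ((flags & MAP_TYPE) == MAP_SHARED_VALIDATE) {
 *		unsigned long mask = LEGACY_MAP_MASK |
 *				     file->f_op->mmap_supported_flags;
 *		if (flags & ~mask)
 *			return -EOPNOTSUPP;
 *	}
 */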

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif
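/*
 * Note: the batch value bounds how far each CPU's private delta may
 * drift before percpu_counter_add_batch() folds it into the shared
 * count, trading read accuracy for update scalability. On !SMP builds
 * the per-CPU machinery is absent and every update is applied to the
 * counter directly, hence the batch of 0 above.
 */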

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
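/*
 * Sketch of the usual charge/uncharge pairing, modelled on
 * __vm_enough_memory() in mm/util.c (simplified; the real check also
 * honours the overcommit policy and per-process reserves): charge
 * optimistically, then back the charge out if the commit limit would
 * be exceeded.
 *
 *	vm_acct_memory(pages);
 *	if (percpu_counter_read_positive(&vm_committed_as) >
 *						vm_commit_limit()) {
 *		vm_unacct_memory(pages);
 *		return -ENOMEM;
 *	}
 *	return 0;
 */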

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif

#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif
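/*
 * Example of an architecture override (paraphrased from arm64's
 * asm/mman.h, where the hook enforces Memory Tagging Extension rules;
 * details are reproduced from memory and may drift from the current
 * tree):
 *
 *	static inline bool arch_validate_flags(unsigned long vm_flags)
 *	{
 *		if (!system_supports_mte())
 *			return true;
 *		// VM_MTE is only valid where VM_MTE_ALLOWED was set
 *		return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
 *	}
 *	#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
 */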

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((!(bit1) || !(bit2)) ? 0 : \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	: ((x) & (bit1)) / ((bit1) / (bit2))))
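/*
 * Worked example with hypothetical single-bit values: for bit1 == 0x4
 * and bit2 == 0x10, bit1 <= bit2, so the macro expands to
 *	((x) & 0x4) * (0x10 / 0x4)
 * which is 0x10 when bit 0x4 is set in x and 0 otherwise - a multiply
 * by a power-of-two constant (a shift) instead of a conditional branch.
 * When bit1 > bit2 the division arm shifts the bit down by the same
 * reasoning. The leading (!(bit1) || !(bit2)) term makes the whole
 * expression collapse to 0 at compile time when either flag is one of
 * the legacy 0 definitions above, avoiding a division by zero.
 */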

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
	       arch_calc_vm_flag_bits(flags);
}
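/*
 * Sketch of how the two translators are combined, following the pattern
 * in do_mmap() in mm/mmap.c (the exact extra bits are quoted from
 * memory):
 *
 *	vm_flags = calc_vm_prot_bits(prot, pkey) |
 *		   calc_vm_flag_bits(flags) |
 *		   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 */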

unsigned long vm_commit_limit(void);
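/*
 * Sketch of the limit computation (as implemented in mm/util.c; quoted
 * from memory, so treat the exact helper names as assumptions): either
 * a fixed kbytes budget, or a ratio of non-hugetlb RAM, plus swap.
 *
 *	if (sysctl_overcommit_kbytes)
 *		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 *	else
 *		allowed = ((totalram_pages() - hugetlb_total_pages())
 *			   * sysctl_overcommit_ratio / 100);
 *	allowed += total_swap_pages;
 */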
#endif /* _LINUX_MMAN_H */