/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 *
 * MAP_EXECUTABLE is completely ignored throughout the kernel.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
                | MAP_PRIVATE \
                | MAP_FIXED \
                | MAP_ANONYMOUS \
                | MAP_DENYWRITE \
                | MAP_EXECUTABLE \
                | MAP_UNINITIALIZED \
                | MAP_GROWSDOWN \
                | MAP_LOCKED \
                | MAP_NORESERVE \
                | MAP_POPULATE \
                | MAP_NONBLOCK \
                | MAP_STACK \
                | MAP_HUGETLB \
                | MAP_32BIT \
                | MAP_HUGE_2MB \
                | MAP_HUGE_1GB)
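
/*
 * Illustrative sketch (not part of this header): how generic mmap code
 * could use LEGACY_MAP_MASK to reject new-style flags when a file's
 * file_operations lacks ->mmap_validate(). The function name and the
 * error code are hypothetical.
 */
#if 0
static int example_validate_legacy_mmap_flags(unsigned long flags)
{
        if (flags & ~LEGACY_MAP_MASK)
                return -EOPNOTSUPP;     /* would need ->mmap_validate() */
        return 0;
}
#endif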

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif
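
/*
 * vm_committed_as_batch bounds the per-CPU drift of vm_committed_as;
 * mm_compute_batch() re-tunes it when the overcommit policy changes,
 * since a strict policy (OVERCOMMIT_NEVER) wants a smaller batch to
 * keep the summed counter close to the true committed total.
 */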

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
        percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
        vm_acct_memory(-pages);
}
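
/*
 * Illustrative call pattern (a sketch, not from this header): charge
 * pages up front via security_vm_enough_memory_mm(), which accounts
 * them against vm_committed_as on success, and undo the charge with
 * vm_unacct_memory() on the error path:
 *
 *      if (security_vm_enough_memory_mm(mm, npages))
 *              return -ENOMEM;
 *      ...
 *      vm_unacct_memory(npages);       (error unwind)
 */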

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid.
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
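
/*
 * Example override (a sketch modelled on powerpc, which additionally
 * accepts PROT_SAO when the CPU supports strong access ordering; the
 * details here are illustrative, not authoritative):
 */
#if 0
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
        if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
                return false;
        if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
                return false;
        return true;
}
#define arch_validate_prot arch_validate_prot
#endif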

#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
        return true;
}
#define arch_validate_flags arch_validate_flags
#endif

/*
 * Optimisation macro. It is equivalent to:
 *      (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((!(bit1) || !(bit2)) ? 0 : \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2))))
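
/*
 * Worked example: _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED). Both
 * bits are compile-time constants, so the multiply/divide by their
 * power-of-two ratio becomes a shift, or disappears entirely: with the
 * asm-generic values MAP_LOCKED == 0x2000 and VM_LOCKED == 0x2000 the
 * whole expression reduces to "flags & 0x2000". Either way the
 * conditional is computed branch-free. The "!(bit1) || !(bit2)" guard
 * yields 0 whenever a flag was #defined to 0 above, which also avoids
 * a division by zero.
 */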

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
        return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
               _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
               arch_calc_vm_prot_bits(prot, pkey);
}
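
/*
 * For example, calc_vm_prot_bits(PROT_READ | PROT_WRITE, 0) yields
 * VM_READ | VM_WRITE, plus whatever arch_calc_vm_prot_bits() adds
 * (e.g. on x86, a nonzero pkey is encoded into the VM_PKEY_BIT* bits).
 */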

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
               _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
               arch_calc_vm_flag_bits(flags);
}
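
/*
 * On an architecture without MAP_SYNC, the "#define MAP_SYNC 0"
 * fallback at the top of this header makes the _calc_vm_trans() guard
 * return 0 for that term, so the MAP_SYNC line above compiles away and
 * no VM_SYNC bit is ever set.
 */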
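
/*
 * vm_commit_limit() returns the global commit limit used for strict
 * overcommit accounting: sysctl_overcommit_kbytes if set, otherwise
 * (RAM minus hugetlb pages) * sysctl_overcommit_ratio / 100, plus
 * total swap.
 */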
unsigned long vm_commit_limit(void);
#endif /* _LINUX_MMAN_H */