// SPDX-License-Identifier: GPL-2.0+
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

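/*
 * Maximum number of bytes the stack base may be shifted down by
 * randomization: zero when the task opted out via PF_RANDOMIZE,
 * otherwise STACK_RND_MASK pages.
 */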
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

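/*
 * Use the legacy bottom-up layout if the personality bit asks for it,
 * if the stack may grow without limit (an unlimited stack could grow
 * into a top-down mmap area), or if the legacy_va_layout sysctl is set.
 */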
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

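/* Random offset applied to the mmap base at exec time, in bytes. */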
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

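/* Bottom-up layout: the mmap area starts just above TASK_UNMAPPED_BASE. */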
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

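/*
 * Compute the top-down mmap base: just below the stack, leaving room
 * for the stack rlimit, the guard gap and stack randomization, with the
 * reserved gap clamped to [SZ_128M, 5/6 * STACK_TOP].  Illustration
 * (example numbers, not s390 constants): with the common 8 MiB stack
 * rlimit the gap is clamped up to the 128 MB minimum, so the base lands
 * at about STACK_TOP - SZ_128M - rnd; with a 1 GiB rlimit it lands at
 * about STACK_TOP - (1 GiB + stack_maxrandom_size() + stack_guard_gap) - rnd.
 */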
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

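/*
 * Bottom-up search for a free area, starting at mm->mmap_base.  A hint
 * address is honoured if it lies in the usable range and does not
 * collide with an existing mapping; file-backed and shared mappings
 * additionally get MMAP_ALIGN_MASK page alignment.
 */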
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

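/*
 * Top-down variant: search downward from mm->mmap_base first and fall
 * back to a bottom-up search above TASK_UNMAPPED_BASE if that fails.
 */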
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

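/*
 * Translate VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations into page
 * protections.  Private writable mappings start out read-only so that
 * the first write faults and triggers copy-on-write; only shared
 * writable mappings are mapped writable right away.
 */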
static const pgprot_t protection_map[16] = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_RO,
	[VM_WRITE] = PAGE_RO,
	[VM_WRITE | VM_READ] = PAGE_RO,
	[VM_EXEC] = PAGE_RX,
	[VM_EXEC | VM_READ] = PAGE_RX,
	[VM_EXEC | VM_WRITE] = PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_RX,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_RO,
	[VM_SHARED | VM_WRITE] = PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_RW,
	[VM_SHARED | VM_EXEC] = PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT