// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/elf.h>

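/*
 * Maximum number of bytes the stack base may be shifted by ASLR:
 * zero if randomization is disabled for this task, otherwise
 * STACK_RND_MASK pages worth of entropy, expressed in bytes.
 */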
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

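/*
 * Use the legacy bottom-up layout if the task requested it via the
 * ADDR_COMPAT_LAYOUT personality, if the stack may grow without bound
 * (no sane gap can be reserved below an unlimited stack), or if the
 * legacy layout was selected system-wide via sysctl.
 */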
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

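/* Random byte offset applied to the mmap base when ASLR is enabled. */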
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

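/* Bottom-up layout: the mmap area grows upwards from TASK_UNMAPPED_BASE. */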
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

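/*
 * Top-down layout: place the mmap base below the stack, separated by
 * the stack rlimit plus room for stack randomization and the guard
 * gap, clamped to [128 MB, 5/6 of STACK_TOP]. E.g. with a typical
 * 8 MB stack rlimit the gap is raised to the 128 MB minimum, putting
 * the mmap base roughly 128 MB below STACK_TOP.
 */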
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

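/*
 * Alignment for the unmapped-area search: hugetlb mappings must be
 * aligned to the huge page size; apart from that, file-backed and
 * shared mappings of randomized tasks are aligned to MMAP_ALIGN_MASK
 * pages.
 */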
static int get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

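/*
 * Bottom-up search for a free range between mm->mmap_base and
 * TASK_SIZE. MAP_FIXED requests and usable address hints skip the
 * search; in all cases the result is checked against the ASCE
 * (address space control element) limit, which may trigger an
 * upgrade of the page table hierarchy.
 */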
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

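/*
 * Top-down variant: search downwards from mm->mmap_base and fall
 * back to a bottom-up search if that fails (see below).
 */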
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}

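/*
 * Mapping of VM_* flag combinations to page protections. Private
 * writable mappings are installed read-only so that the first write
 * faults and can be handled as copy-on-write; write-only is not
 * expressible, and executable mappings are always readable as well.
 */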
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT