// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * being zeroed. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();

	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
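
/*
 * Usage note: the parser above is registered for the "align_va_addr"
 * kernel command line option, so e.g. "align_va_addr=64" restricts the
 * extra alignment to 64-bit mmap()s, "align_va_addr=32" to 32-bit ones,
 * "align_va_addr=on" enables it for both, and "align_va_addr=off"
 * disables it entirely.
 */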

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}
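
/*
 * Note: this syscall wrapper takes the mmap offset in bytes; offsets that
 * are not page aligned are rejected with -EINVAL, and valid ones are
 * converted to a page offset before being handed to ksys_mmap_pgoff().
 */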

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/*
		 * This is usually needed to map code in the small code
		 * model, so it needs to be in the first 31 bits.  Limit
		 * it to that.  This means we need to move the unmapped
		 * base down for this case.  This can conflict with the
		 * heap, but we assume that glibc malloc knows how to
		 * fall back to mmap.  Give it 1GB of playground for
		 * now. -AK
		 */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin = get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
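
/*
 * With PF_RANDOMIZE set, the MAP_32BIT search window above starts at a
 * page-aligned address chosen randomly in [0x40000000, 0x42000000): the
 * base is shifted up by at most 32MB while the 0x80000000 ceiling stays
 * fixed.
 */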

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
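
/*
 * Note on the alignment plumbing above: vm_unmapped_area() returns an
 * address whose bits within align_mask match info.align_offset, so
 * file-backed mappings stay congruent to their file offset
 * (pgoff << PAGE_SHIFT) and, when the AMD F15h workaround is active, the
 * chosen address also carries the per-boot random bits from
 * get_align_bits().
 */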

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If the hint address is above DEFAULT_MAP_WINDOW, look for an
	 * unmapped area in the full address space.
	 *
	 * The !in_32bit_syscall() check avoids high addresses for x32
	 * (and makes it a no-op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
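
/*
 * Note: the DEFAULT_MAP_WINDOW handling in the top-down path above makes
 * addresses beyond the default user window opt-in: the search ceiling is
 * only raised towards TASK_SIZE_MAX when userspace passes an address hint
 * above DEFAULT_MAP_WINDOW, otherwise new mappings stay within the
 * default window.
 */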