// SPDX-License-Identifier: GPL-2.0+
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <asm/elf.h>

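/* Maximum byte offset that stack randomization may add below STACK_TOP. */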
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

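/*
 * Use the legacy bottom-up layout if the personality requests it, or if
 * the stack rlimit is unlimited, in which case no sane gap below the
 * stack can be computed for the top-down layout.
 */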
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

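/* Page-aligned random offset used to randomize the mmap base. */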
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

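/* Legacy layout: the mmap area starts at TASK_UNMAPPED_BASE and grows up. */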
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

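/*
 * Top-down layout: the mmap base sits below the stack, separated by the
 * stack rlimit plus the worst-case stack randomization and the stack
 * guard gap, clamped between 128 MB and 5/6 of the address space.
 */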
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

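/*
 * Alignment mask for the unmapped-area search: hugetlb mappings must be
 * aligned to the huge page size; with randomization enabled, file-backed
 * and shared mappings are additionally aligned using MMAP_ALIGN_MASK.
 */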
static int get_align_mask(struct file *filp, unsigned long flags)
{
	if (filp && is_file_hugepages(filp))
		return huge_page_mask_align(filp);
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

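/*
 * Bottom-up search between mm->mmap_base and TASK_SIZE. A page-unaligned
 * value returned by vm_unmapped_area() is a negative error code. On
 * success, check_asce_limit() upgrades the page tables if the mapping
 * lies above the range covered by the current ASCE.
 */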
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags, vm_flags_t vm_flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

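/*
 * Top-down search below mm->mmap_base, with a bottom-up fallback when
 * the area between PAGE_SIZE and the mmap base is exhausted.
 */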
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags, vm_flags_t vm_flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	if (!(filp && is_file_hugepages(filp)))
		info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}

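/*
 * Base page protections indexed by vm_flags. Private writable mappings
 * get read-only protections; write access is established later via
 * copy-on-write fault handling.
 */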
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

/*
 * linux/arch/s390/mm/mmap.c
 *
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/random.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>

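/*
 * Maximum byte offset that stack randomization may add below STACK_TOP;
 * zero when randomization is disabled via PF_RANDOMIZE or the
 * ADDR_NO_RANDOMIZE personality.
 */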
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

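/* Random base offset of up to 0x7ff pages, i.e. ~8 MB with 4 KB pages. */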
static unsigned long mmap_rnd(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	/* 8MB randomization for mmap_base */
	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
}

static inline unsigned long mmap_base(void)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}

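/*
 * On 31-bit kernels the generic unmapped-area helpers are used as-is;
 * the 64-bit wrappers below additionally upgrade the page tables when a
 * request does not fit into the current address space.
 */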
#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else

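/*
 * A request that cannot fit into the current address space triggers an
 * upfront upgrade to a four-level page table, which extends TASK_SIZE
 * to 2^53 bytes.
 */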
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}

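/*
 * s390_get_unmapped_area() and its top-down counterpart wrap the generic
 * helpers: on -ENOMEM they upgrade to a four-level page table and retry,
 * since the enlarged address space may satisfy the request.
 */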
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif