v6.2

// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
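
/*
 * Worked example (illustrative, not part of the original file): with
 * 4 KiB pages and a hypothetical STACK_RND_MASK of 0x7ff, the maximum
 * stack randomization is 0x7ff << 12 = 0x7ff000, i.e. just under 8 MiB.
 */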

static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}
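
/*
 * Note: an unlimited stack rlimit means the stack may grow arbitrarily
 * far down from STACK_TOP, so no safe top-down mmap base below it can be
 * chosen; the bottom-up legacy layout is used instead.
 */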

unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}
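
/*
 * Worked example (illustrative): with an 8 MiB stack rlimit, adding the
 * default stack_guard_gap (256 pages, 1 MiB) and the maximum stack
 * randomization keeps gap well below RLIM_INFINITY, so the overflow
 * check passes; the total is still under the 128 MiB minimum, so gap is
 * clamped up to SZ_128M and the base becomes
 * PAGE_ALIGN(STACK_TOP - SZ_128M - rnd).
 */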

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}
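
/*
 * Note: vm_unmapped_area() returns either a page-aligned address or a
 * negative errno cast to unsigned long (e.g. -ENOMEM ends in ...ff4),
 * which is why offset_in_page() doubles as the error check above.
 */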

unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}
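
/*
 * check_asce_limit() (defined in arch headers, not shown here) extends
 * the address space when addr + len exceeds the reach of the current
 * page-table setup; the v4.10.11 listing below still open-codes this as
 * crst_table_upgrade() calls in its s390_get_unmapped_area* wrappers.
 */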

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
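
/*
 * Illustrative sketch of the two layouts (not to scale):
 *
 *   legacy (bottom-up):  mmap area starts at TASK_UNMAPPED_BASE + rnd
 *                        and grows upward toward the stack
 *   default (top-down):  mmap area starts at
 *                        mmap_base = STACK_TOP - gap - rnd, just below
 *                        the stack gap, and grows toward low addresses
 */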

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
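
/*
 * Note how every private (non-VM_SHARED) writable combination maps to a
 * read-only protection: writes to private mappings are resolved through
 * copy-on-write faults, so the hardware protection stays PAGE_RO until
 * the fault handler installs a private, writable copy of the page.
 */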
v4.10.11
 
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>

static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}
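
/*
 * Unlike the v6.2 version above, this one also checks the
 * ADDR_NO_RANDOMIZE personality flag directly; in newer kernels the
 * PF_RANDOMIZE check subsumes it, since PF_RANDOMIZE is only set at
 * exec time when ADDR_NO_RANDOMIZE is clear.
 */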

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~32 MB hole.
 */
#define MIN_GAP (32*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	gap &= PAGE_MASK;
	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
}
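
/*
 * Compared with the v6.2 mmap_base() above: the minimum hole here is
 * ~32 MB rather than ~128 MB, and the stack randomization and guard
 * space are subtracted after clamping instead of being folded into the
 * gap with an explicit overflow check.
 */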

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
{
	if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
		return 0;
	if (!(flags & MAP_FIXED))
		addr = 0;
	if ((addr + len) >= TASK_SIZE)
		return crst_table_upgrade(current->mm);
	return 0;
}
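
/*
 * A 64-bit task starts out with fewer page-table levels than the
 * hardware supports, which caps TASK_SIZE below TASK_MAX_SIZE;
 * crst_table_upgrade() switches the mm to four levels so mappings
 * beyond the current limit become possible (see the "Upgrade the page
 * table to 4 levels and retry" comments in the wrappers below).
 */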

static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}

static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
	}
}
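
A minimal userspace sketch (not part of either kernel file above) to observe the effect of these layout functions: run it twice and the printed addresses differ while address-space randomization is enabled; running it under "setarch $(uname -m) -R ./a.out", which sets the ADDR_NO_RANDOMIZE personality, makes them repeat.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Pass NULL so the kernel picks the address via its
	 * get_unmapped_area hook (one of the functions above). */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("mmap returned %p\n", p);
	munmap(p, 4096);
	return 0;
}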