v6.2
// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>
/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}
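
In the conditional above, 2 - mmap_is_ia32() evaluates to 2 for a 64-bit task and 1 for a 32-bit one; assuming the usual encoding of ALIGN_VA_32 as bit 0 and ALIGN_VA_64 as bit 1, the AND therefore tests exactly the flag bit that applies to the current task, covering both cases with one test.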

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
	return va_align.bits & get_align_mask();
}

unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	addr = (addr + align_mask) & ~align_mask;
	return addr | get_align_bits();
}
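
Illustrative sketch (userspace, not part of this file): the round-up-and-OR arithmetic above, with hypothetical stand-ins for va_align.mask and va_align.bits.

	#include <stdio.h>

	#define EXAMPLE_MASK 0xffffUL	/* hypothetical: align to 64 KiB */
	#define EXAMPLE_BITS 0x3000UL	/* hypothetical: per-boot random bits */

	static unsigned long example_align(unsigned long addr)
	{
		/* Round up to the next aligned boundary... */
		addr = (addr + EXAMPLE_MASK) & ~EXAMPLE_MASK;
		/* ...then OR in the random bits instead of leaving them zero. */
		return addr | EXAMPLE_BITS;
	}

	int main(void)
	{
		/* 0x7f1234567000 rounds up to 0x7f1234570000, then to 0x7f1234573000 */
		printf("%#lx\n", example_align(0x7f1234567000UL));
		return 0;
	}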

static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

	return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
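
With the __setup() hook registered, this can be driven from the kernel command line: align_va_addr=32, align_va_addr=64, align_va_addr=on (both), or align_va_addr=off.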

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
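
Usage sketch (hypothetical userspace program): an offset that is not page-aligned is rejected with -EINVAL before ksys_mmap_pgoff() is ever called, while an aligned offset is converted to a page number.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
		if (fd < 0)
			return 1;

		/* Offset 0x123 is not page-aligned: mmap() fails with EINVAL. */
		void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0x123);
		if (p == MAP_FAILED)
			printf("unaligned offset: %s\n", strerror(errno));

		/* Offset 0 is page-aligned and is passed on as page number 0. */
		p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
		if (p != MAP_FAILED)
			munmap(p, 4096);
		return 0;
	}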

static void find_start_end(unsigned long addr, unsigned long flags,
		unsigned long *begin, unsigned long *end)
{
	if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
		/* This is usually needed to map code in the small
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			*begin = randomize_page(*begin, 0x02000000);
		}
		return;
	}

	*begin	= get_mmap_base(1);
	if (in_32bit_syscall())
		*end = task_size_32bit();
	else
		*end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
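
For the MAP_32BIT path, a minimal sketch (hypothetical userspace program): the kernel places the mapping inside the [0x40000000, 0x80000000) window computed above.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Anonymous mapping constrained to the low 2 GiB by MAP_32BIT. */
		void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
		if (p != MAP_FAILED) {
			printf("%p\n", p);	/* expect an address below 0x80000000 */
			munmap(p, 1 << 20);
		}
		return 0;
	}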

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(addr, flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	return vm_unmapped_area(&info);
}
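
A userspace model of the alignment rule these info fields request (my reading of vm_unmapped_area(); the helper name is made up): the chosen gap start is the lowest address at or above the gap base that agrees with align_offset on the bits selected by align_mask.

	#include <assert.h>
	#include <stdio.h>

	/* Model: lowest addr >= base with (addr & mask) == (offset & mask). */
	static unsigned long align_gap_start(unsigned long base,
					     unsigned long mask,
					     unsigned long offset)
	{
		return base + ((offset - base) & mask);
	}

	int main(void)
	{
		/* Hypothetical: 64 KiB coloring, file-offset coloring 0x3000. */
		unsigned long addr = align_gap_start(0x40000000UL, 0xffffUL, 0x3000UL);
		assert((addr & 0xffffUL) == 0x3000UL);
		printf("%#lx\n", addr);	/* 0x40003000 */
		return 0;
	}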

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!in_32bit_syscall() && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr &= PAGE_MASK;
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}
get_unmapped_area:

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 *
	 * !in_32bit_syscall() check to avoid high addresses for x32
	 * (and make it no op on native i386).
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	if (filp) {
		info.align_mask = get_align_mask();
		info.align_offset += get_align_bits();
	}
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
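
One observable consequence of the DEFAULT_MAP_WINDOW logic above (hedged sketch; the high address is only granted on a kernel built with 5-level paging): without an explicit hint above the default 47-bit window, mmap() never returns a high address, while a high hint opts the mapping into the full address space.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Hint above the 47-bit window; only honored on LA57 kernels. */
		void *hint = (void *)(1UL << 47);
		void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p != MAP_FAILED) {
			printf("%p\n", p);
			munmap(p, 4096);
		}
		return 0;
	}
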
v3.1

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>

#include <asm/ia32.h>
#include <asm/syscalls.h>

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}

static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in the small
		   model, so it needs to be in the first 31 bits. Limit
		   it to that.  This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = TASK_UNMAPPED_BASE;
		*end = TASK_SIZE;
	}
}
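
The new_begin check above implies randomize_range() returns 0 when it cannot produce an address. A userspace model of that v3.1-era helper (the exact formula is an assumption, hedged accordingly):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* Assumed model: page-aligned address in [start, end - len), 0 on failure. */
	static unsigned long model_randomize_range(unsigned long start,
						   unsigned long end,
						   unsigned long len)
	{
		if (end <= start + len)
			return 0;
		return PAGE_ALIGN((unsigned long)rand() % (end - len - start) + start);
	}

	int main(void)
	{
		srand((unsigned)time(NULL));
		/* Same window find_start_end() uses: 32 MiB above 1 GiB. */
		printf("%#lx\n", model_randomize_range(0x40000000UL,
						       0x40000000UL + 0x02000000UL, 0));
		return 0;
	}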

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
	    && len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = begin;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
	}
}
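
The full_search loop is a first-fit walk over the sorted VMA list, with free_area_cache remembering where the last search ended and cached_hole_size the largest hole seen. A standalone model of the first-fit part (simplified: an array instead of a VMA list):

	#include <stdio.h>

	struct region { unsigned long start, end; };	/* occupied, sorted */

	/* First-fit: find `len` bytes in [begin, end) avoiding the regions;
	 * returns 0 on failure. Mirrors the full_search loop above. */
	static unsigned long first_fit(const struct region *r, int n,
				       unsigned long begin, unsigned long end,
				       unsigned long len)
	{
		unsigned long addr = begin;
		for (int i = 0; i <= n; i++) {
			unsigned long next = (i < n) ? r[i].start : end;
			if (next >= addr + len && addr + len <= end)
				return addr;
			if (i < n && r[i].end > addr)
				addr = r[i].end;
		}
		return 0;
	}

	int main(void)
	{
		struct region vmas[] = { { 0x40000000, 0x40100000 },
					 { 0x40200000, 0x40300000 } };
		/* Expect 0x40100000: the 1 MiB hole between the two regions. */
		printf("%#lx\n", first_fit(vmas, 2, 0x40000000UL, 0x80000000UL,
					   0x100000UL));
		return 0;
	}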

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr-len;
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return mm->free_area_cache = addr;

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
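
A matching model of the top-down walk (same simplified region array as before): start just below mmap_base and step down past each region until the request fits, mirroring the do/while loop above, whose len < vma->vm_start condition guards against the next addr underflowing below zero.

	#include <stdio.h>

	struct region { unsigned long start, end; };	/* occupied, sorted */

	/* Top-down first-fit: highest `len`-byte gap below `base`; 0 on failure. */
	static unsigned long top_down_fit(const struct region *r, int n,
					  unsigned long base, unsigned long len)
	{
		if (base < len)
			return 0;
		unsigned long addr = base - len;
		for (int i = n - 1; i >= 0; i--) {
			if (r[i].end <= addr)		/* region lies below the gap */
				return addr;
			if (r[i].start < len)		/* next step would underflow */
				return 0;
			addr = r[i].start - len;	/* try just below this region */
		}
		return addr;
	}

	int main(void)
	{
		struct region vmas[] = { { 0x40000000, 0x40100000 },
					 { 0x7ff00000, 0x80000000 } };
		/* Expect 0x7fe00000: 1 MiB just below the topmost region. */
		printf("%#lx\n", top_down_fit(vmas, 2, 0x80000000UL, 0x100000UL));
		return 0;
	}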