v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
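
/*
 * Worked example of the macro above, assuming 4 KiB pages and the usual
 * ARM SHMLBA of 4 * PAGE_SIZE = 0x4000: COLOUR_ALIGN(0x12345, 3) rounds
 * 0x12345 up to 0x14000 and then adds (3 << 12) & 0x3fff = 0x3000,
 * giving 0x17000, i.e. an address with the same cache colour as file page 3.
 */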

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
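
/*
 * Example of the clamping above: with the common 8 MiB RLIMIT_STACK the
 * gap is raised to MIN_GAP (128 MiB), so the top-down base becomes
 * roughly PAGE_ALIGN(TASK_SIZE - 128 MiB - rnd); an unlimited stack
 * never gets here because mmap_is_legacy() selects the bottom-up layout.
 */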

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
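
/*
 * Note on the search above: align_mask/align_offset make
 * vm_unmapped_area() return only addresses whose colour bits match
 * pgoff << PAGE_SHIFT, so the result satisfies the same
 * (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1) == 0 condition that the
 * MAP_FIXED path checks explicitly.
 */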

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
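
/*
 * Example: with the ARM default of mmap_rnd_bits = 8, rnd is 0..255
 * pages, i.e. up to roughly 1 MiB of base randomisation with 4 KiB
 * pages; the width can be raised via CONFIG_ARCH_MMAP_RND_BITS or the
 * vm.mmap_rnd_bits sysctl.
 */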

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
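
/*
 * PHYS_MASK bounds the physical addresses the page tables can express:
 * typically 32 bits (a 4 GiB ceiling) with classic ARM page tables and
 * 40 bits (1 TiB) with LPAE, so /dev/mem mappings beyond that range are
 * refused here.
 */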

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif
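
The colour guarantee above can be observed from user space. The sketch below is a rough illustration, not part of mmap.c: it maps the same file offset twice with MAP_SHARED and prints each address modulo SHMLBA; on an ARM kernel where cache_is_vipt_aliasing() is true the two values should match. The scratch file name colour.bin and the 4-pages-per-SHMLBA assumption are made up for the example.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	/* ARM's SHMLBA is assumed to be 4 pages, as on aliasing VIPT caches. */
	unsigned long shmlba = 4 * (unsigned long)sysconf(_SC_PAGESIZE);
	int fd = open("colour.bin", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 2 * shmlba) < 0) {
		perror("setup");
		return 1;
	}

	/* Two independent MAP_SHARED mappings of the same page of the file. */
	char *a = mmap(NULL, shmlba, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *b = mmap(NULL, shmlba, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* On an aliasing VIPT ARM kernel both colour offsets should match. */
	printf("a mod SHMLBA = %#lx\n", (unsigned long)a & (shmlba - 1));
	printf("b mod SHMLBA = %#lx\n", (unsigned long)b & (shmlba - 1));
	return 0;
}
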
v3.5.6
 
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;

	return base - off;
}

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
	        start_addr = addr = mm->free_area_cache;
	} else {
	        start_addr = addr = mm->mmap_base;
	        mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
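
/*
 * The bottom-up search above is the old linear walk over the VMA list,
 * seeded from mm->free_area_cache and using mm->cached_hole_size to skip
 * holes already known to be too small; later kernels (see the v4.17 copy
 * of this file above) replace the walk with vm_unmapped_area().
 */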

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base - len;
	if (do_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start - len;
		if (do_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
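
/*
 * Here the randomisation is hard-coded to 8 bits: random_factor is
 * 0..255 pages (at most about 1 MiB with 4 KiB pages), added to
 * TASK_UNMAPPED_BASE in the legacy layout or folded into the top-down
 * mmap_base().
 */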

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
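
/*
 * 0x00100000 page frames * 4 KiB per page = 4 GiB: without supersection
 * mappings, /dev/mem cannot be mmap()ed above the 4 GiB boundary.
 */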

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif
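
Both versions pick between the two allocators in arch_pick_mmap_layout(). A quick way to see the difference from user space is to compare the addresses of consecutive anonymous mappings: under the default top-down layout the second one lands below the first, while under the legacy layout (selected by ADDR_COMPAT_LAYOUT, e.g. by running the program under setarch --addr-compat-layout, or forced by an unlimited stack limit) they grow upwards from TASK_UNMAPPED_BASE. A minimal sketch follows; the 1 MiB size and the direction heuristic are illustrative only:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* Two anonymous mappings; compare the direction they were placed in. */
	void *a = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *b = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("first  mapping at %p\n", a);
	printf("second mapping at %p\n", b);
	printf("layout looks %s\n", b > a ? "bottom-up (legacy)" : "top-down");
	return 0;
}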