v3.15
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
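/*
 * A worked example of the macro, assuming 4 KiB pages and an SHMLBA of
 * 16 KiB (typical for an aliasing VIPT cache): COLOUR_ALIGN(0x40005000, 3)
 * first rounds the address up to 0x40008000, then adds the colour of the
 * file offset, (3 << 12) & 0x3fff = 0x3000, giving 0x4000b000, an address
 * whose low 14 bits match those of the file offset.
 */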

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
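/*
 * A worked example of the sizing above, assuming TASK_SIZE is 3 GiB
 * (0xc0000000, the common ARM user/kernel split): an 8 MiB RLIMIT_STACK
 * is below MIN_GAP, so gap becomes 128 MiB and, with no randomisation,
 * mmap_base() returns PAGE_ALIGN(0xc0000000 - 0x08000000) = 0xb8000000.
 * MAX_GAP caps the reservation at five sixths of the address space for
 * very large stack limits.
 */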

/*
 * Shared mappings must be correctly aligned to avoid aliasing issues
 * with VIPT caches: a specific page of an object must always be mapped
 * at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case, however,
 * the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
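	/*
	 * For illustration, assuming an SHMLBA of 16 KiB: a
	 * MAP_SHARED|MAP_FIXED request at addr 0x40002000 with pgoff 0
	 * asks for colour 0x2000, while page 0 of the object keeps
	 * colour 0x0000 in every other shared mapping, so the check
	 * below rejects it with -EINVAL.
	 */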
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
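/*
 * A worked example of the align_mask arithmetic above, assuming 4 KiB
 * pages and an SHMLBA of 16 KiB: PAGE_MASK & (SHMLBA - 1) is
 * 0xfffff000 & 0x3fff = 0x3000, so vm_unmapped_area() keeps bits
 * [13:12] of the returned address equal to the same bits of
 * align_offset (pgoff << PAGE_SHIFT); every mapping of a given file
 * page therefore lands on the same cache colour.
 */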

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
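/*
 * A quick check of the figures in the comment above, assuming 4 KiB
 * pages: random_factor is one of 2^8 page-aligned values, so the base
 * moves within a 2^8 * 2^12 = 2^20 byte (1 MiB) window: eight bits of
 * randomness spread over twenty address bits.
 */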

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif
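
As a usage illustration of the checks above, here is a minimal user-space sketch (not from the kernel tree) that maps one page of a device through /dev/mem. The physical address is a placeholder; the kernel validates the request with valid_mmap_phys_addr_range(), and on a kernel built with CONFIG_STRICT_DEVMEM it additionally requires devmem_is_allowed() to permit every page in the range.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder physical address of a device register page. */
	const off_t phys = 0x10000000;
	const size_t page = (size_t)sysconf(_SC_PAGESIZE);

	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	/*
	 * The kernel checks this request against
	 * valid_mmap_phys_addr_range() (and devmem_is_allowed() per page
	 * under CONFIG_STRICT_DEVMEM) before setting up the mapping.
	 */
	volatile uint32_t *regs = mmap(NULL, page, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, phys);
	if (regs == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("reg[0] = 0x%08x\n", (unsigned)regs[0]);
	munmap((void *)regs, page);
	close(fd);
	return 0;
}
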
v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * Shared mappings must be correctly aligned to avoid aliasing issues
 * with VIPT caches: a specific page of an object must always be mapped
 * at a multiple of SHMLBA bytes.
 *
 * We provide this function unconditionally; in the VIVT case, however,
 * the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
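		/*
		 * vm_start_gap() (used in the check below) extends a
		 * VM_GROWSDOWN neighbour's start downwards by the stack
		 * guard gap, so a hinted address is honoured only if it
		 * also stays clear of that gap.
		 */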
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif