v3.5.6
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}
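
Editor's note: to make the colouring arithmetic above concrete, here is a
minimal standalone sketch of the same computation. The 16 KiB alias window
(mask 0x3fff) and 4 KiB pages are assumed illustrative values, not queried
from the kernel; colour_align() simply mirrors COLOUR_ALIGN().

/* colour_demo.c - hedged sketch of the COLOUR_ALIGN() arithmetic.
 * PAGE_SHIFT and the alias mask below are assumptions for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define SHM_ALIGN_MASK	0x3fffUL	/* assumed 16 KiB alias window - 1 */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Round the hint up to an alias boundary, then add the colour
	 * (the low bits of the file offset within the alias window).
	 */
	unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
	unsigned long off  = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return base + off;
}

int main(void)
{
	/* 0x10000400 rounds up to 0x10004000; page offset 3 contributes
	 * colour 0x3000, so the aligned hint becomes 0x10007000.
	 */
	printf("%#lx\n", colour_align(0x10000400UL, 3));
	return 0;
}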

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
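
Editor's note: as a usage illustration of the allocator above, two MAP_SHARED
mappings of the same file offset should come back on the same cache colour.
A minimal, hypothetical test program (the 16 KiB window and the temp-file
path are assumptions; real hardware and kernels vary):

/* shared_colour_test.c - hedged sketch: check that two MAP_SHARED
 * mappings of the same file offset share a cache colour. The mask is
 * an assumed value for illustration.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

#define ASSUMED_ALIAS_MASK	0x3fffUL

int main(void)
{
	int fd = open("/tmp/colour_test", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, 65536) < 0) {
		perror("setup");
		return 1;
	}

	/* NULL hint: the kernel picks both addresses via
	 * arch_get_unmapped_area() (or its topdown variant).
	 */
	void *a = mmap(NULL, 16384, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	void *b = mmap(NULL, 16384, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* With the colouring above, both masked values should match. */
	printf("colour(a)=%#lx colour(b)=%#lx\n",
	       (unsigned long)a & ASSUMED_ALIAS_MASK,
	       (unsigned long)b & ASSUMED_ALIAS_MASK);
	return 0;
}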

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
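
Editor's note: the do/while loop above is a gap search over the sorted VMA
list, walking downward from mmap_base. A hedged, self-contained analogue of
the same idea over a plain array (region layout and sizes are illustrative;
no colouring, caching, or error codes):

/* topdown_gap.c - hedged userspace analogue of the top-down walk:
 * find the highest gap of size len below limit, given occupied
 * [start, end) regions sorted by address.
 */
#include <stdio.h>

struct region { unsigned long start, end; };

static const struct region *find_region(const struct region *r, int n,
					unsigned long addr)
{
	int i;

	/* Analogue of find_vma(): lowest region whose end is above addr. */
	for (i = 0; i < n; i++)
		if (r[i].end > addr)
			return &r[i];
	return NULL;
}

static unsigned long topdown_gap(const struct region *r, int n,
				 unsigned long limit, unsigned long len)
{
	unsigned long addr = limit - len;
	const struct region *v;

	do {
		v = find_region(r, n, addr);
		if (!v || addr + len <= v->start)
			return addr;		/* gap found */
		if (v->start < len)
			return 0;		/* no room below: fail */
		addr = v->start - len;		/* retry just below v */
	} while (1);
}

int main(void)
{
	const struct region used[] = {
		{ 0x10000000UL, 0x10004000UL },
		{ 0x20000000UL, 0x28000000UL },
	};

	/* The highest 16 KiB gap below 0x28000000 sits just under the
	 * second region, so this prints 0x1fffc000.
	 */
	printf("%#lx\n", topdown_gap(used, 2, 0x28000000UL, 0x4000UL));
	return 0;
}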
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
v4.6
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
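
Editor's note: compared with v3.5.6, the hand-rolled search loop is gone and
the colouring constraint is expressed declaratively. For the request built
above, vm_unmapped_area() returns an address whose bits under align_mask
match those of align_offset. A hedged sketch of that acceptance predicate
(illustrative values; the kernel's actual gap search is more involved):

/* align_check.c - hedged sketch of the alignment contract behind the
 * vm_unmapped_area_info request above. Constants are assumptions:
 * PAGE_MASK & shm_align_mask == 0x3000 for 4 KiB pages and a 16 KiB
 * alias window.
 */
#include <stdio.h>

static int colour_ok(unsigned long addr, unsigned long align_mask,
		     unsigned long align_offset)
{
	/* Same cache colour: masked bits of the candidate address
	 * equal the masked bits of the requested file offset.
	 */
	return (addr & align_mask) == (align_offset & align_mask);
}

int main(void)
{
	unsigned long mask = 0x3000UL;	/* assumed PAGE_MASK & shm_align_mask */
	unsigned long off  = 3UL << 12;	/* pgoff << PAGE_SHIFT for pgoff = 3 */

	printf("%d %d\n",
	       colour_ok(0x10007000UL, mask, off),	/* 1: colour matches */
	       colour_ok(0x10004000UL, mask, off));	/* 0: wrong colour */
	return 0;
}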

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
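
Editor's note: the "addr & ~PAGE_MASK" test above works because
vm_unmapped_area() returns either a page-aligned address or a negative errno,
and a negative errno cast to unsigned long always has nonzero low bits. A
small sketch of the arithmetic (the 4 KiB page size is an assumption):

/* err_check.c - hedged sketch of why "addr & ~PAGE_MASK" detects an
 * errno return from an address-returning function.
 */
#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE	4096UL		/* assumed page size */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok  = 0x10004000UL;		/* page aligned */
	unsigned long err = (unsigned long)-ENOMEM;	/* 0xff...ff4 */

	printf("ok:  %#lx\n", ok & ~PAGE_MASK);		/* 0     -> address */
	printf("err: %#lx\n", err & ~PAGE_MASK);	/* 0xff4 -> errno   */
	return 0;
}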
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}