/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

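/*
 * Mask used to force shared and file-backed mappings onto the same cache
 * colour.  PAGE_SIZE - 1 assumes a cache with no aliasing problems; CPUs
 * whose D-cache ways span more than a page need a larger mask so that
 * every mapping of a given page offset shares a colour.
 */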
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
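/*
 * Example (hypothetical values, assuming 4KiB pages and a 16KiB colour
 * size, i.e. shm_align_mask == 0x3fff):
 *
 *	COLOUR_ALIGN(0x10001000, 3)
 *		base = (0x10001000 + 0x3fff) & ~0x3fff = 0x10004000
 *		off  = (3 << 12) & 0x3fff              = 0x3000
 *		=> 0x10007000
 *
 * The result is an address at or above the hint whose colour matches
 * that of file offset 3, so every mapping of that page offset lands on
 * the same cache colour and cannot alias in a virtually indexed cache.
 */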

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	if (base + off <= addr)
		return base + off;

	return base - off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

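	/*
	 * Anything file-backed, and MAP_SHARED anonymous memory, can be
	 * mapped at more than one virtual address, so it must be
	 * colour-aligned to keep all aliases on the same cache colour.
	 * Private anonymous mappings only need page alignment.
	 */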
	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

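	/*
	 * cached_hole_size records the largest hole seen below
	 * free_area_cache on previous scans.  If the request is larger
	 * than that, nothing below the cached hint can fit and the scan
	 * may start there; otherwise restart from TASK_UNMAPPED_BASE so
	 * smaller holes lower down are not missed.
	 */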
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_colour_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_colour_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_colour_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
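/*
 * Page protections for each combination of VM_READ, VM_WRITE, VM_EXEC
 * and VM_SHARED.  Private writable mappings get PAGE_COPY, which is not
 * hardware-writable, so the first write faults and copy-on-write can
 * run.  DECLARE_VM_GET_PAGE_PROT below provides the generic
 * vm_get_page_prot() that indexes this table.
 */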
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

/*
 * To avoid cache aliases, we map the shared page with same color.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

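	/*
	 * Let the generic allocator find the lowest fitting gap between
	 * TASK_UNMAPPED_BASE and TASK_SIZE.  When colouring is required,
	 * align_mask and align_offset make vm_unmapped_area() return an
	 * address congruent to the file offset modulo the colour size,
	 * so the mapping lands on the right cache colour.
	 */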
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

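	/*
	 * Same colour constraints as the bottom-up case, but search the
	 * gaps top-down from mmap_base.  The low_limit of PAGE_SIZE keeps
	 * page zero out of the search range.
	 */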
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
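/*
 * Only accesses that fall entirely inside system RAM, i.e. physical
 * addresses from __MEMORY_START up to __pa(high_memory) (the top of the
 * directly mapped region), are considered valid.
 */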
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}