// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/*
 * We construct an artificial offset for a mapping based on the physical
 * address of the kernel mapping variable (filp->f_mapping), so all
 * mappings of the same file share one cache colour.  SET_LAST_MMAP is
 * deliberately a no-op: the value is recomputed from f_mapping each time.
 */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	{ /* nothing */ }
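
/*
 * Illustrative example (made-up pointer value): a struct address_space
 * allocated at 0x4089a3c00 gives GET_LAST_MMAP() == 0x4089a3c.  With the
 * usual 4 kB pages and the 4 MB SHM_COLOUR, get_offset() below extracts
 * (0x4089a3c & 0x3fffff) >> 12 == 0x89, i.e. colour slot 0x89.
 */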

/* Page index of the colour slot selected by GET_LAST_MMAP(). */
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

/* Byte offset (colour slot plus file offset) for vm_unmapped_area(). */
static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	/*
	 * shared_align_offset() already returns a byte offset, so mask
	 * it directly; shifting it left by PAGE_SHIFT a second time
	 * always masks to zero and loses the colour offset.
	 */
	unsigned long off  = (SHM_COLOUR-1) &
		shared_align_offset(last_mmap, pgoff);

	return base + off;
}
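
/*
 * Sketch of COLOR_ALIGN() above, under the same assumptions (4 kB pages,
 * 4 MB SHM_COLOUR): for addr = 0x6fff3000 and a colour offset of
 * 0x89000, base rounds up to 0x70000000 and the result is 0x70089000,
 * the next address congruent to the file's offset modulo SHM_COLOUR.
 */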

/*
 * Top of mmap area (just below the process stack).
 */

/*
 * When called from arch_get_unmapped_area(), rlim_stack will be NULL,
 * indicating that "current" should be used instead of a passed-in
 * value from the exec bprm as done with arch_pick_mmap_layout().
 */
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlim_stack ? rlim_stack->rlim_max
				: rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	if (current->flags & PF_RANDOMIZE)
		stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}
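
/*
 * Example for mmap_upper_limit() above, with hypothetical numbers: a
 * hard RLIMIT_STACK of 8 MB and PF_RANDOMIZE set gives roughly
 * PAGE_ALIGN(STACK_TOP - (8 MB + (STACK_RND_MASK << PAGE_SHIFT))),
 * i.e. the mmap area ends a bit more than 8 MB below STACK_TOP.
 */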

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		/* Shared fixed mappings must land on a compatible colour. */
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit(NULL);
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* The parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}

/* Unconditional variant used by the generic randomization code. */
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit(rlim_stack);

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
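
/*
 * Resulting layouts, in rough terms:
 *  - legacy:  mmap_base = TASK_UNMAPPED_BASE + random offset;
 *             arch_get_unmapped_area() hands out addresses bottom-up.
 *  - default: mmap_base = mmap_upper_limit(), just below the stack
 *             reservation; the topdown variant searches downward and
 *             falls back to bottom-up only if that fails.
 */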

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       pgoff >> (PAGE_SHIFT - 12));
}
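
/*
 * Example: with 4 kB pages the shift above is zero and pgoff passes
 * through unchanged; on a hypothetical 64 kB PAGE_SIZE kernel, a pgoff
 * of 0x10 (16 * 4096 bytes = 64 kB) becomes 0x1, i.e. one 64 kB page.
 * The mmap2 offset unit stays 4096 bytes regardless of PAGE_SIZE.
 */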

asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return ksys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/* Fucking broken ABI */
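
/*
 * The wrappers below exist because the 32-bit parisc ABI passes 64-bit
 * syscall arguments as two 32-bit halves, high word first.  Each stub
 * reassembles them with (loff_t)high << 32 | low before calling the
 * generic ksys_*() helper; e.g. an offset of 0x123456000 arrives as
 * high = 0x1, low = 0x23456000.
 */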

#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return ksys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return ksys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return ksys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return ksys_ftruncate(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return ksys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return ksys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return ksys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return ksys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return ksys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			      ((u64)lenhi << 32) | lenlo);
}

long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}
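
/*
 * Round-trip example for parisc_personality() above: a PER_LINUX32
 * process calling personality(PER_LINUX) keeps its flag bits but stays
 * PER_LINUX32 internally, while the old value is reported back to
 * userspace as PER_LINUX, so 32-bit emulation stays transparent.
 */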