v3.5.6
 
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}
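
Nothing sparc-specific is needed to observe this from userspace; a minimal portable check via sysconf(3), which ultimately reports the kernel's PAGE_SIZE (8 KB on sparc64):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* on sparc64 this prints 8192 rather than the common 4096 */
	printf("page size: %ld\n", sysconf(_SC_PAGESIZE));
	return 0;
}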

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
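
The exclusion window is the hardware VA hole widened by 4 GB on each side. A standalone re-implementation of the same arithmetic, runnable in userspace for illustration (the macros are copied from above; the test addresses are arbitrary):

#include <stdio.h>

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32))

static int invalid_range(unsigned long addr, unsigned long len)
{
	if (len >= VA_EXCLUDE_START || addr + len < addr)
		return 1;
	return (addr >= VA_EXCLUDE_START && addr < VA_EXCLUDE_END) ||
	       (addr + len >= VA_EXCLUDE_START && addr + len < VA_EXCLUDE_END);
}

int main(void)
{
	/* ends well below the widened hole: accepted */
	printf("%d\n", invalid_range(0x0000070000000000UL, 1UL << 20)); /* 0 */
	/* ends inside the widened hole: rejected */
	printf("%d\n", invalid_range(0x0000070000000000UL, 1UL << 41)); /* 1 */
	return 0;
}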

/* Does start,end straddle the VA-space hole?  */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;
	return base - off;
}
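
COLOUR_ALIGN rounds the address up to an SHMLBA boundary and then adds the colour implied by the file offset, so that any two mappings of the same page land on the same D-cache colour. A toy rendition with made-up constants (the SHMLBA value of 16 KB is an assumption for illustration; only the arithmetic matters):

#include <stdio.h>

#define PAGE_SHIFT 13UL		/* sparc64 uses 8 KB pages */
#define SHMLBA (1UL << 14)	/* illustrative value, not the real one */

static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}

int main(void)
{
	/* an odd page offset selects the other colour inside SHMLBA */
	printf("%#lx\n", colour_align(0x100001, 0));	/* 0x104000 */
	printf("%#lx\n", colour_align(0x100001, 1));	/* 0x106000 */
	return 0;
}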

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
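
The trick here is to over-allocate by align_goal - PAGE_SIZE so the returned window is guaranteed to contain an align_goal boundary, then round up to it. The rounding step in isolation (hypothetical numbers):

#include <stdio.h>

int main(void)
{
	unsigned long goal = 64UL * 1024;
	unsigned long addr = 0x7f0000123000UL;	/* hypothetical search result */

	/* any window of len + goal - PAGE_SIZE bytes contains a goal boundary */
	unsigned long aligned = (addr + goal - 1) & ~(goal - 1);
	printf("%#lx -> %#lx\n", addr, aligned);	/* -> 0x7f0000130000 */
	return 0;
}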

/* Essentially the same as PowerPC.  */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}
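
Since rnd is drawn in pages and then shifted back up by PAGE_SHIFT, the randomization windows are fixed byte sizes independent of the page size: 2^23 bytes (8 MB) for 32-bit tasks and 2^30 bytes (1 GB) for 64-bit tasks. A one-liner confirming the arithmetic:

#include <stdio.h>

int main(void)
{
	printf("32-bit window: %lu MB\n", (1UL << 23) >> 20);	/* 8    */
	printf("64-bit window: %lu MB\n", (1UL << 30) >> 20);	/* 1024 */
	return 0;
}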

void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}
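
sparc returns the second descriptor in a second return register (%o1, stored here via UREG_I1) instead of filling in a user array; the C library reassembles the pair, so portable code just calls pipe(2):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd[2];

	/* libc's wrapper stores the two return registers into fd[0]/fd[1] */
	if (pipe(fd) < 0) {
		perror("pipe");
		return 1;
	}
	printf("read end %d, write end %d\n", fd[0], fd[1]);
	return 0;
}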

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */

SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}
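
Userspace never invokes this multiplexer directly; the libc SysV IPC wrappers pack their arguments into the call/first/second/... slots. A minimal shared-memory round trip through the ordinary API, which is routed through the SHMGET/SHMAT/SHMDT/SHMCTL cases above:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
	if (id < 0) {
		perror("shmget");
		return 1;
	}

	char *p = shmat(id, NULL, 0);	/* kernel picks an SHMLBA-aligned address */
	if (p == (void *)-1) {
		perror("shmat");
		return 1;
	}
	strcpy(p, "hello");
	printf("%s\n", p);

	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}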

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}
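
The byte offset must already be page aligned before it is converted into a page offset; an unaligned offset fails with EINVAL before the file is even considered:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
	if (fd < 0)
		return 1;

	/* offset 1 is not a multiple of the page size */
	void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 1);
	if (p == MAP_FAILED)
		perror("mmap");		/* expected: Invalid argument */

	close(fd);
	return 0;
}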

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable, if someone gets stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
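
The three accepted values are the SPARC V9 memory models, encoded into the TSTATE.MM field (the shift by 14 places them at bits 15:14). For reference, as named by the V9 specification:

/* SPARC V9 memory models selectable through sparc_memory_ordering() */
enum sparc_v9_memory_model {
	MM_TSO = 0,	/* Total Store Order   */
	MM_PSO = 1,	/* Partial Store Order */
	MM_RMO = 2,	/* Relaxed Memory Order */
};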

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename,
		  const char *const argv[],
		  const char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
v5.4
// SPDX-License-Identifier: GPL-2.0
/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/timex.h>
#include <linux/uaccess.h>

#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "kernel.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

SYSCALL_DEFINE0(getpagesize)
{
	return PAGE_SIZE;
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
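
The hand-rolled VMA walk of the v3.5.6 code is gone: vm_unmapped_area() performs the gap search, and the cache-colour constraint is expressed declaratively through align_mask/align_offset, i.e. the chosen address must agree with align_offset in the masked bits. A userspace model of that snapping rule (illustrative arithmetic only; the SHMLBA value is assumed):

#include <stdio.h>

int main(void)
{
	unsigned long shmlba = 1UL << 14;		/* assumed for illustration */
	unsigned long align_mask = shmlba - 1;
	unsigned long align_offset = 3UL << 13;		/* pgoff << PAGE_SHIFT */
	unsigned long gap = 0x100001000UL;		/* hypothetical gap start */

	/* smallest addr >= gap congruent to align_offset modulo shmlba */
	unsigned long addr = gap + ((align_offset - gap) & align_mask);
	printf("%#lx -> %#lx\n", gap, addr);		/* -> 0x100002000 */
	return 0;
}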

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

/* Essentially the same as PowerPC.  */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_long();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlim_stack->rlim_cur;
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE0(sparc_pipe)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	current_pt_regs()->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */

SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	if (!IS_ENABLED(CONFIG_SYSVIPC))
		return -ENOSYS;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = ksys_semtimedop(first, ptr,
					      (unsigned int)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = ksys_semtimedop(first, ptr, (unsigned int)second,
				(const struct __kernel_timespec __user *)
					      (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = ksys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = ksys_old_semctl(first, second,
					      (int)third | IPC_64,
					      (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = ksys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = ksys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = ksys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = ksys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	long ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

SYSCALL_DEFINE0(nis_syscall)
{
	static int count;
	struct pt_regs *regs = current_pt_regs();

	/* Don't make the system unusable, if someone gets stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->tpc, 0);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	exception_exit(prev_state);
}

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out_unlock;
	memcpy(tmp, utsname()->domainname, nlen);

	up_read(&uts_sem);

	if (copy_to_user(name, tmp, nlen))
		return -EFAULT;
	return 0;

out_unlock:
	up_read(&uts_sem);
	return err;
}
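
Compared with v3.5.6, the domain name is snapshotted into a stack buffer so that copy_to_user() runs after uts_sem is dropped. The userspace view is unchanged:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];

	if (getdomainname(buf, sizeof(buf)) < 0) {
		perror("getdomainname");
		return 1;
	}
	printf("domain: %s\n", buf);
	return 0;
}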

SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p)
{
	struct timex txc;		/* Local copy of parameter */
	struct __kernel_timex *kt = (void *)&txc;
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;

	/*
	 * override for sparc64 specific timeval type: tv_usec
	 * is 32 bit wide instead of 64-bit in __kernel_timex
	 */
	kt->time.tv_usec = txc.time.tv_usec;
	ret = do_adjtimex(kt);
	txc.time.tv_usec = kt->time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock, struct timex __user *, txc_p)
{
	struct timex txc;		/* Local copy of parameter */
	struct __kernel_timex *kt = (void *)&txc;
	int ret;

	if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		pr_err_once("process %d (%s) attempted a POSIX timer syscall "
		    "while CONFIG_POSIX_TIMERS is not set\n",
		    current->pid, current->comm);

		return -ENOSYS;
	}

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;

	/*
	 * override for sparc64 specific timeval type: tv_usec
	 * is 32 bit wide instead of 64-bit in __kernel_timex
	 */
	kt->time.tv_usec = txc.time.tv_usec;
	ret = do_clock_adjtime(which_clock, kt);
	txc.time.tv_usec = kt->time.tv_usec;

	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}
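
Both wrappers exist because sparc64's userspace struct timex carries a 32-bit tv_usec while the generic struct __kernel_timex uses a 64-bit field, so that one member is patched up on the way in and out. A read-only query through the normal glibc wrapper:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* modes == 0: read, change nothing */
	int state = adjtimex(&tx);

	if (state < 0) {
		perror("adjtimex");
		return 1;
	}
	printf("clock state %d, offset %ld\n", state, tx.offset);
	return 0;
}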

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kcalloc(UT_TRAP_INSTRUCTION_31 + 1, sizeof(long),
				GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc_array(UT_TRAP_INSTRUCTION_31 + 1,
					      sizeof(long),
					      GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
{
	struct pt_regs *regs = current_pt_regs();
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

SYSCALL_DEFINE0(kern_features)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}
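
This is a sparc64-only capability probe reached through the raw syscall interface; there is no libc wrapper. A sketch, assuming the installed asm/unistd.h exposes __NR_kern_features and that KERN_FEATURE_MIXED_MODE_STACK is the low bit:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
#ifdef __NR_kern_features
	long features = syscall(__NR_kern_features);

	/* KERN_FEATURE_MIXED_MODE_STACK assumed to be bit 0 */
	printf("kern_features: %#lx (mixed-mode stack: %s)\n",
	       features, (features & 1) ? "yes" : "no");
#else
	puts("__NR_kern_features not available on this architecture");
#endif
	return 0;
}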