v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/* linux/arch/sparc64/kernel/sys_sparc.c
  3 *
  4 * This file contains various random system calls that
  5 * have a non-standard calling sequence on the Linux/sparc
  6 * platform.
  7 */
  8
  9#include <linux/errno.h>
 10#include <linux/types.h>
 11#include <linux/sched/signal.h>
 12#include <linux/sched/mm.h>
 13#include <linux/sched/debug.h>
 14#include <linux/fs.h>
 15#include <linux/file.h>
 16#include <linux/mm.h>
 17#include <linux/sem.h>
 18#include <linux/msg.h>
 19#include <linux/shm.h>
 20#include <linux/stat.h>
 21#include <linux/mman.h>
 22#include <linux/utsname.h>
 23#include <linux/smp.h>
 24#include <linux/slab.h>
 25#include <linux/syscalls.h>
 26#include <linux/ipc.h>
 27#include <linux/personality.h>
 28#include <linux/random.h>
 29#include <linux/export.h>
 30#include <linux/context_tracking.h>
 31
 32#include <linux/uaccess.h>
 33#include <asm/utrap.h>
 34#include <asm/unistd.h>
 35
 36#include "entry.h"
 37#include "kernel.h"
 38#include "systbls.h"
 39
 40/* #define DEBUG_UNIMP_SYSCALL */
 41
 42SYSCALL_DEFINE0(getpagesize)
 43{
 44	return PAGE_SIZE;
 45}
 46
 47/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 48 * overflow past the end of the 64-bit address space?
 49 */
 50static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
 51{
 52	unsigned long va_exclude_start, va_exclude_end;
 53
 54	va_exclude_start = VA_EXCLUDE_START;
 55	va_exclude_end   = VA_EXCLUDE_END;
 56
 57	if (unlikely(len >= va_exclude_start))
 58		return 1;
 59
 60	if (unlikely((addr + len) < addr))
 61		return 1;
 62
 63	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
 64		     ((addr + len) >= va_exclude_start &&
 65		      (addr + len) < va_exclude_end)))
 66		return 1;
 67
 68	return 0;
 69}
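
The check above rejects any byte range [addr, addr+len) that wraps the 64-bit address space or touches the guarded region around the sparc64 VA hole. A minimal user-space sketch of the same predicate, written out with the VA_EXCLUDE_START/END values that appear in the older v3.1 listing further down (the kernel gets them from its page-table headers):

#include <stdio.h>

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

static int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        if (len >= VA_EXCLUDE_START)
                return 1;               /* longer than the whole low region */
        if (addr + len < addr)
                return 1;               /* wraps past the top of the address space */
        if ((addr >= VA_EXCLUDE_START && addr < VA_EXCLUDE_END) ||
            (addr + len >= VA_EXCLUDE_START && addr + len < VA_EXCLUDE_END))
                return 1;               /* starts or ends inside the guarded hole */
        return 0;
}

int main(void)
{
        printf("%d\n", invalid_64bit_range(0x10000UL, 0x100000UL));       /* 0: low and small */
        printf("%d\n", invalid_64bit_range(VA_EXCLUDE_START, 0x2000UL));  /* 1: inside the hole */
        return 0;
}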
 70
 71/* These functions differ from the default implementations in
 72 * mm/mmap.c in two ways:
 73 *
 74 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 75 *    for fixed such mappings we just validate what the user gave us.
 76 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 77 *    the spitfire/niagara VA-hole.
 78 */
 79
 80static inline unsigned long COLOR_ALIGN(unsigned long addr,
 81					 unsigned long pgoff)
 82{
 83	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
 84	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
 85
 86	return base + off;
 87}
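
COLOR_ALIGN() rounds the hint up to the next SHMLBA boundary and then adds the D-cache colour of the file offset, so the virtual address and the file offset alias to the same cache lines. A user-space sketch of the arithmetic, with the page size and SHMLBA values assumed purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT 13UL                 /* assumed 8 KB pages */
#define SHMLBA     (1UL << 14)          /* assumed 16 KB aliasing boundary */

static unsigned long color_align(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);   /* round hint up */
        unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);  /* colour of the file offset */

        return base + off;
}

int main(void)
{
        /* Page 1 of a file carries colour 0x2000, so the hint 0x20000 becomes 0x22000. */
        printf("%#lx\n", color_align(0x20000UL, 1));
        return 0;
}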
 88
 89unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 90{
 91	struct mm_struct *mm = current->mm;
 92	struct vm_area_struct * vma;
 93	unsigned long task_size = TASK_SIZE;
 94	int do_color_align;
 95	struct vm_unmapped_area_info info;
 96
 97	if (flags & MAP_FIXED) {
 98		/* We do not accept a shared mapping if it would violate
 99		 * cache aliasing constraints.
100		 */
101		if ((flags & MAP_SHARED) &&
102		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
103			return -EINVAL;
104		return addr;
105	}
106
107	if (test_thread_flag(TIF_32BIT))
108		task_size = STACK_TOP32;
109	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
110		return -ENOMEM;
111
112	do_color_align = 0;
113	if (filp || (flags & MAP_SHARED))
114		do_color_align = 1;
115
116	if (addr) {
117		if (do_color_align)
118			addr = COLOR_ALIGN(addr, pgoff);
119		else
120			addr = PAGE_ALIGN(addr);
121
122		vma = find_vma(mm, addr);
123		if (task_size - len >= addr &&
124		    (!vma || addr + len <= vm_start_gap(vma)))
125			return addr;
126	}
127
128	info.flags = 0;
129	info.length = len;
130	info.low_limit = TASK_UNMAPPED_BASE;
131	info.high_limit = min(task_size, VA_EXCLUDE_START);
132	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
133	info.align_offset = pgoff << PAGE_SHIFT;
134	addr = vm_unmapped_area(&info);
135
136	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
137		VM_BUG_ON(addr != -ENOMEM);
138		info.low_limit = VA_EXCLUDE_END;
139		info.high_limit = task_size;
140		addr = vm_unmapped_area(&info);
141	}
142
143	return addr;
144}
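
vm_unmapped_area() hands back either a page-aligned address or a negative errno cast to unsigned long, which is what the `addr & ~PAGE_MASK` test above distinguishes; on -ENOMEM the search is simply repeated in the window above the VA hole. A small sketch of that idiom, with an assumed page size:

#include <errno.h>
#include <stdio.h>

#define PAGE_SIZE 8192UL                /* assumed */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long ok  = 0x70000000UL;               /* page aligned: a real address */
        unsigned long err = (unsigned long)-ENOMEM;     /* 0xffff...fff4: never aligned */

        printf("%d %d\n", (ok & ~PAGE_MASK) != 0, (err & ~PAGE_MASK) != 0);
        /* prints "0 1": only the error encoding has low bits set */
        return 0;
}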
145
146unsigned long
147arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
148			  const unsigned long len, const unsigned long pgoff,
149			  const unsigned long flags)
150{
151	struct vm_area_struct *vma;
152	struct mm_struct *mm = current->mm;
153	unsigned long task_size = STACK_TOP32;
154	unsigned long addr = addr0;
155	int do_color_align;
156	struct vm_unmapped_area_info info;
157
158	/* This should only ever run for 32-bit processes.  */
159	BUG_ON(!test_thread_flag(TIF_32BIT));
160
161	if (flags & MAP_FIXED) {
162		/* We do not accept a shared mapping if it would violate
163		 * cache aliasing constraints.
164		 */
165		if ((flags & MAP_SHARED) &&
166		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
167			return -EINVAL;
168		return addr;
169	}
170
171	if (unlikely(len > task_size))
172		return -ENOMEM;
173
174	do_color_align = 0;
175	if (filp || (flags & MAP_SHARED))
176		do_color_align = 1;
177
178	/* requesting a specific address */
179	if (addr) {
180		if (do_color_align)
181			addr = COLOR_ALIGN(addr, pgoff);
182		else
183			addr = PAGE_ALIGN(addr);
184
185		vma = find_vma(mm, addr);
186		if (task_size - len >= addr &&
187		    (!vma || addr + len <= vm_start_gap(vma)))
188			return addr;
189	}
190
191	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
192	info.length = len;
193	info.low_limit = PAGE_SIZE;
194	info.high_limit = mm->mmap_base;
195	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
196	info.align_offset = pgoff << PAGE_SHIFT;
197	addr = vm_unmapped_area(&info);
198
199	/*
200	 * A failed mmap() very likely causes application failure,
201	 * so fall back to the bottom-up function here. This scenario
202	 * can happen with large stack limits and large mmap()
203	 * allocations.
204	 */
205	if (addr & ~PAGE_MASK) {
206		VM_BUG_ON(addr != -ENOMEM);
207		info.flags = 0;
208		info.low_limit = TASK_UNMAPPED_BASE;
209		info.high_limit = STACK_TOP32;
210		addr = vm_unmapped_area(&info);
211	}
212
213	return addr;
214}
215
216/* Try to align the mapping to as large a boundary as possible. */
217unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
218{
219	unsigned long align_goal, addr = -ENOMEM;
220	unsigned long (*get_area)(struct file *, unsigned long,
221				  unsigned long, unsigned long, unsigned long);
222
223	get_area = current->mm->get_unmapped_area;
224
225	if (flags & MAP_FIXED) {
226		/* Ok, don't mess with it. */
227		return get_area(NULL, orig_addr, len, pgoff, flags);
228	}
229	flags &= ~MAP_SHARED;
230
231	align_goal = PAGE_SIZE;
232	if (len >= (4UL * 1024 * 1024))
233		align_goal = (4UL * 1024 * 1024);
234	else if (len >= (512UL * 1024))
235		align_goal = (512UL * 1024);
236	else if (len >= (64UL * 1024))
237		align_goal = (64UL * 1024);
238
239	do {
240		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
241		if (!(addr & ~PAGE_MASK)) {
242			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
243			break;
244		}
245
246		if (align_goal == (4UL * 1024 * 1024))
247			align_goal = (512UL * 1024);
248		else if (align_goal == (512UL * 1024))
249			align_goal = (64UL * 1024);
250		else
251			align_goal = PAGE_SIZE;
252	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
253
254	/* Mapping is smaller than 64K or larger areas could not
255	 * be obtained.
256	 */
257	if (addr & ~PAGE_MASK)
258		addr = get_area(NULL, orig_addr, len, pgoff, flags);
259
260	return addr;
261}
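
The loop above over-allocates by align_goal - PAGE_SIZE bytes so that, whatever page-aligned start comes back, rounding it up to align_goal still lands inside the hole that was found; nothing is actually mapped at this point, so the unused slack costs nothing. A sketch of the arithmetic (the sizes are assumptions):

#include <stdio.h>

#define PAGE_SIZE 8192UL                /* assumed */

int main(void)
{
        unsigned long align_goal = 4UL * 1024 * 1024;
        unsigned long found      = 0x7f3456000UL;       /* arbitrary page-aligned result */
        unsigned long aligned    = (found + align_goal - 1UL) & ~(align_goal - 1UL);

        /* aligned always falls within [found, found + align_goal - PAGE_SIZE] */
        printf("%#lx -> %#lx, slack %lu bytes\n", found, aligned, aligned - found);
        return 0;
}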
262EXPORT_SYMBOL(get_fb_unmapped_area);
263
264/* Essentially the same as PowerPC.  */
265static unsigned long mmap_rnd(void)
266{
267	unsigned long rnd = 0UL;
268
269	if (current->flags & PF_RANDOMIZE) {
270		unsigned long val = get_random_long();
271		if (test_thread_flag(TIF_32BIT))
272			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
273		else
274			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
275	}
276	return rnd << PAGE_SHIFT;
277}
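
Note that PAGE_SHIFT cancels out once the page count is shifted back up: with PF_RANDOMIZE set, the mmap base is perturbed within an 8 MB window (2^23 bytes) for 32-bit tasks and a 1 GB window (2^30 bytes) for 64-bit tasks. A tiny sketch, with an assumed PAGE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT 13UL                 /* assumed 8 KB pages */

int main(void)
{
        unsigned long span32 = (1UL << (23UL - PAGE_SHIFT)) << PAGE_SHIFT;
        unsigned long span64 = (1UL << (30UL - PAGE_SHIFT)) << PAGE_SHIFT;

        printf("32-bit span: %lu MB, 64-bit span: %lu MB\n",
               span32 >> 20, span64 >> 20);             /* 8 MB and 1024 MB */
        return 0;
}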
278
279void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
280{
281	unsigned long random_factor = mmap_rnd();
282	unsigned long gap;
283
284	/*
285	 * Fall back to the standard layout if the personality
286	 * bit is set, or if the expected stack growth is unlimited:
287	 */
288	gap = rlim_stack->rlim_cur;
289	if (!test_thread_flag(TIF_32BIT) ||
290	    (current->personality & ADDR_COMPAT_LAYOUT) ||
291	    gap == RLIM_INFINITY ||
292	    sysctl_legacy_va_layout) {
293		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
294		mm->get_unmapped_area = arch_get_unmapped_area;
295	} else {
296		/* We know it's 32-bit */
297		unsigned long task_size = STACK_TOP32;
298
299		if (gap < 128 * 1024 * 1024)
300			gap = 128 * 1024 * 1024;
301		if (gap > (task_size / 6 * 5))
302			gap = (task_size / 6 * 5);
303
304		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
305		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
306	}
307}
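
For the top-down case the gap reserved for stack growth is clamped to at least 128 MB and at most five sixths of the 32-bit task size, and mmap_base ends up just below that reservation. A sketch of the calculation with an assumed STACK_TOP32 value and no randomization:

#include <stdio.h>

#define PAGE_SIZE     8192UL                            /* assumed */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long task_size = 0xf0000000UL;         /* assumed STACK_TOP32 */
        unsigned long gap = 8UL * 1024 * 1024;          /* e.g. ulimit -s of 8 MB */

        if (gap < 128 * 1024 * 1024)
                gap = 128 * 1024 * 1024;                /* always leave room for stack growth */
        if (gap > task_size / 6 * 5)
                gap = task_size / 6 * 5;

        printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap));      /* 0xe8000000 */
        return 0;
}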
308
309/*
310 * sys_pipe() is the normal C calling standard for creating
311 * a pipe. It's not the way unix traditionally does this, though.
312 */
313SYSCALL_DEFINE0(sparc_pipe)
314{
315	int fd[2];
316	int error;
317
318	error = do_pipe_flags(fd, 0);
319	if (error)
320		goto out;
321	current_pt_regs()->u_regs[UREG_I1] = fd[1];
322	error = fd[0];
323out:
324	return error;
325}
326
327/*
328 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
329 *
330 * This is really horribly ugly.
331 */
332
333SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
334		unsigned long, third, void __user *, ptr, long, fifth)
335{
336	long err;
337
338	/* No need for backward compatibility. We can start fresh... */
339	if (call <= SEMTIMEDOP) {
340		switch (call) {
341		case SEMOP:
342			err = sys_semtimedop(first, ptr,
343					     (unsigned int)second, NULL);
344			goto out;
345		case SEMTIMEDOP:
346			err = sys_semtimedop(first, ptr, (unsigned int)second,
347				(const struct timespec __user *)
348					     (unsigned long) fifth);
349			goto out;
350		case SEMGET:
351			err = sys_semget(first, (int)second, (int)third);
352			goto out;
353		case SEMCTL: {
354			err = sys_semctl(first, second,
355					 (int)third | IPC_64,
356					 (unsigned long) ptr);
357			goto out;
358		}
359		default:
360			err = -ENOSYS;
361			goto out;
362		}
363	}
364	if (call <= MSGCTL) {
365		switch (call) {
366		case MSGSND:
367			err = sys_msgsnd(first, ptr, (size_t)second,
368					 (int)third);
369			goto out;
370		case MSGRCV:
371			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
372					 (int)third);
373			goto out;
374		case MSGGET:
375			err = sys_msgget((key_t)first, (int)second);
376			goto out;
377		case MSGCTL:
378			err = sys_msgctl(first, (int)second | IPC_64, ptr);
379			goto out;
380		default:
381			err = -ENOSYS;
382			goto out;
383		}
384	}
385	if (call <= SHMCTL) {
386		switch (call) {
387		case SHMAT: {
388			ulong raddr;
389			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
390			if (!err) {
391				if (put_user(raddr,
392					     (ulong __user *) third))
393					err = -EFAULT;
394			}
395			goto out;
396		}
397		case SHMDT:
398			err = sys_shmdt(ptr);
399			goto out;
400		case SHMGET:
401			err = sys_shmget(first, (size_t)second, (int)third);
402			goto out;
403		case SHMCTL:
404			err = sys_shmctl(first, (int)second | IPC_64, ptr);
405			goto out;
406		default:
407			err = -ENOSYS;
408			goto out;
409		}
410	} else {
411		err = -ENOSYS;
412	}
413out:
414	return err;
415}
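
The demultiplexer above carves the call number into three ranges and lets unknown values inside a range fall through to -ENOSYS in the inner switch. A sketch of just the range logic; the numeric values mirror those in the kernel's uapi <linux/ipc.h> (semaphore ops 1-4, message ops 11-14, shared-memory ops 21-24):

#include <stdio.h>

#define SEMTIMEDOP  4
#define MSGCTL     14
#define SHMCTL     24

static const char *ipc_class(unsigned int call)
{
        if (call <= SEMTIMEDOP)
                return "semaphore";
        if (call <= MSGCTL)
                return "message queue";
        if (call <= SHMCTL)
                return "shared memory";
        return "ENOSYS";
}

int main(void)
{
        printf("%s / %s / %s\n", ipc_class(1), ipc_class(13), ipc_class(30));
        /* prints: semaphore / message queue / ENOSYS */
        return 0;
}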
416
417SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
418{
419	long ret;
420
421	if (personality(current->personality) == PER_LINUX32 &&
422	    personality(personality) == PER_LINUX)
423		personality |= PER_LINUX32;
424	ret = sys_personality(personality);
425	if (personality(ret) == PER_LINUX32)
426		ret &= ~PER_LINUX32;
427
428	return ret;
429}
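
From user space this is reached through the ordinary personality(2) wrapper; passing the all-ones value is the usual way to query the current personality word without changing it:

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
        int cur = personality(0xffffffff);      /* query only, nothing is changed */

        if (cur == -1)
                perror("personality");
        else
                printf("personality word: %#x\n", cur);
        return 0;
}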
430
431int sparc_mmap_check(unsigned long addr, unsigned long len)
432{
433	if (test_thread_flag(TIF_32BIT)) {
434		if (len >= STACK_TOP32)
435			return -EINVAL;
436
437		if (addr > STACK_TOP32 - len)
438			return -EINVAL;
439	} else {
440		if (len >= VA_EXCLUDE_START)
441			return -EINVAL;
442
443		if (invalid_64bit_range(addr, len))
444			return -EINVAL;
445	}
446
447	return 0;
448}
449
450/* Linux version of mmap */
451SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
452		unsigned long, prot, unsigned long, flags, unsigned long, fd,
453		unsigned long, off)
454{
455	unsigned long retval = -EINVAL;
456
457	if ((off + PAGE_ALIGN(len)) < off)
458		goto out;
459	if (off & ~PAGE_MASK)
460		goto out;
461	retval = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
462out:
463	return retval;
464}
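
sparc still takes a byte offset from user space, so the wrapper checks that the offset is page aligned and that adding the page-rounded length cannot wrap before converting it to the page number ksys_mmap_pgoff() expects. A user-space sketch of those two checks (page size assumed):

#include <stdio.h>

#define PAGE_SHIFT    13UL                      /* assumed */
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static int offset_ok(unsigned long off, unsigned long len)
{
        if (off + PAGE_ALIGN(len) < off)
                return 0;                       /* offset + length wraps around */
        if (off & ~PAGE_MASK)
                return 0;                       /* offset is not page aligned */
        return 1;
}

int main(void)
{
        printf("%d\n", offset_ok(0x4000UL, 0x1000UL));  /* 1: pgoff would be 2 */
        printf("%d\n", offset_ok(0x4100UL, 0x1000UL));  /* 0: misaligned offset */
        return 0;
}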
465
466SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
467{
468	if (invalid_64bit_range(addr, len))
469		return -EINVAL;
470
471	return vm_munmap(addr, len);
472}
473                
474SYSCALL_DEFINE5(64_mremap, unsigned long, addr,	unsigned long, old_len,
475		unsigned long, new_len, unsigned long, flags,
476		unsigned long, new_addr)
477{
478	if (test_thread_flag(TIF_32BIT))
479		return -EINVAL;
480	return sys_mremap(addr, old_len, new_len, flags, new_addr);
481}
482
483SYSCALL_DEFINE0(nis_syscall)
484{
485	static int count;
486	struct pt_regs *regs = current_pt_regs();
487	
488	/* Don't make the system unusable if someone gets stuck */
489	if (count++ > 5)
490		return -ENOSYS;
491
492	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
493#ifdef DEBUG_UNIMP_SYSCALL	
494	show_regs (regs);
495#endif
496
497	return -ENOSYS;
498}
499
500/* #define DEBUG_SPARC_BREAKPOINT */
501
502asmlinkage void sparc_breakpoint(struct pt_regs *regs)
503{
504	enum ctx_state prev_state = exception_enter();
505	siginfo_t info;
506
507	if (test_thread_flag(TIF_32BIT)) {
508		regs->tpc &= 0xffffffff;
509		regs->tnpc &= 0xffffffff;
510	}
511#ifdef DEBUG_SPARC_BREAKPOINT
512        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
513#endif
514	info.si_signo = SIGTRAP;
515	info.si_errno = 0;
516	info.si_code = TRAP_BRKPT;
517	info.si_addr = (void __user *)regs->tpc;
518	info.si_trapno = 0;
519	force_sig_info(SIGTRAP, &info, current);
520#ifdef DEBUG_SPARC_BREAKPOINT
521	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
522#endif
523	exception_exit(prev_state);
524}
525
526SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
527{
528        int nlen, err;
529
530	if (len < 0)
531		return -EINVAL;
532
533 	down_read(&uts_sem);
534 	
535	nlen = strlen(utsname()->domainname) + 1;
536	err = -EINVAL;
537	if (nlen > len)
538		goto out;
539
540	err = -EFAULT;
541	if (!copy_to_user(name, utsname()->domainname, nlen))
542		err = 0;
543
544out:
545	up_read(&uts_sem);
546	return err;
547}
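
User space normally reaches this through the glibc getdomainname() wrapper; a minimal caller looks like this:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[256];

        if (getdomainname(buf, sizeof(buf)) == 0)
                printf("NIS domain: %s\n", buf);
        else
                perror("getdomainname");
        return 0;
}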
548
549SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
550		utrap_handler_t, new_p, utrap_handler_t, new_d,
551		utrap_handler_t __user *, old_p,
552		utrap_handler_t __user *, old_d)
553{
554	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
555		return -EINVAL;
556	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
557		if (old_p) {
558			if (!current_thread_info()->utraps) {
559				if (put_user(NULL, old_p))
560					return -EFAULT;
561			} else {
562				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
563					return -EFAULT;
564			}
565		}
566		if (old_d) {
567			if (put_user(NULL, old_d))
568				return -EFAULT;
569		}
570		return 0;
571	}
572	if (!current_thread_info()->utraps) {
573		current_thread_info()->utraps =
574			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
575		if (!current_thread_info()->utraps)
576			return -ENOMEM;
577		current_thread_info()->utraps[0] = 1;
578	} else {
579		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
580		    current_thread_info()->utraps[0] > 1) {
581			unsigned long *p = current_thread_info()->utraps;
582
583			current_thread_info()->utraps =
584				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
585					GFP_KERNEL);
586			if (!current_thread_info()->utraps) {
587				current_thread_info()->utraps = p;
588				return -ENOMEM;
589			}
590			p[0]--;
591			current_thread_info()->utraps[0] = 1;
592			memcpy(current_thread_info()->utraps+1, p+1,
593			       UT_TRAP_INSTRUCTION_31*sizeof(long));
594		}
595	}
596	if (old_p) {
597		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
598			return -EFAULT;
599	}
600	if (old_d) {
601		if (put_user(NULL, old_d))
602			return -EFAULT;
603	}
604	current_thread_info()->utraps[type] = (long)new_p;
605
606	return 0;
607}
608
609SYSCALL_DEFINE1(memory_ordering, unsigned long, model)
610{
611	struct pt_regs *regs = current_pt_regs();
612	if (model >= 3)
613		return -EINVAL;
614	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
615	return 0;
616}
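
The model lands in the two-bit TSTATE.MM field; on SPARC V9 the defined values are 0 (TSO), 1 (PSO) and 2 (RMO), which is why anything >= 3 is rejected. A sketch of the field update, with TSTATE_MM assumed to be the mask at bits 15:14:

#include <stdio.h>

#define TSTATE_MM (0x3UL << 14)         /* assumed mask for the MM field */

static unsigned long set_memory_model(unsigned long tstate, unsigned long model)
{
        if (model >= 3)
                return tstate;                          /* only TSO/PSO/RMO exist */
        return (tstate & ~TSTATE_MM) | (model << 14);
}

int main(void)
{
        printf("%#lx\n", set_memory_model(0x0UL, 2));   /* 0x8000: RMO selected */
        return 0;
}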
617
618SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
619		struct sigaction __user *, oact, void __user *, restorer,
620		size_t, sigsetsize)
621{
622	struct k_sigaction new_ka, old_ka;
623	int ret;
624
625	/* XXX: Don't preclude handling different sized sigset_t's.  */
626	if (sigsetsize != sizeof(sigset_t))
627		return -EINVAL;
628
629	if (act) {
630		new_ka.ka_restorer = restorer;
631		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
632			return -EFAULT;
633	}
634
635	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
636
637	if (!ret && oact) {
638		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
639			return -EFAULT;
640	}
641
642	return ret;
643}
644
645SYSCALL_DEFINE0(kern_features)
646{
647	return KERN_FEATURE_MIXED_MODE_STACK;
648}
v3.1
 
  1/* linux/arch/sparc64/kernel/sys_sparc.c
  2 *
  3 * This file contains various random system calls that
  4 * have a non-standard calling sequence on the Linux/sparc
  5 * platform.
  6 */
  7
  8#include <linux/errno.h>
  9#include <linux/types.h>
 10#include <linux/sched.h>
 11#include <linux/fs.h>
 12#include <linux/file.h>
 13#include <linux/mm.h>
 14#include <linux/sem.h>
 15#include <linux/msg.h>
 16#include <linux/shm.h>
 17#include <linux/stat.h>
 18#include <linux/mman.h>
 19#include <linux/utsname.h>
 20#include <linux/smp.h>
 21#include <linux/slab.h>
 22#include <linux/syscalls.h>
 23#include <linux/ipc.h>
 24#include <linux/personality.h>
 25#include <linux/random.h>
 26#include <linux/module.h>
 27
 28#include <asm/uaccess.h>
 29#include <asm/utrap.h>
 30#include <asm/unistd.h>
 31
 32#include "entry.h"
 33#include "systbls.h"
 34
 35/* #define DEBUG_UNIMP_SYSCALL */
 36
 37asmlinkage unsigned long sys_getpagesize(void)
 38{
 39	return PAGE_SIZE;
 40}
 41
 42#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
 43#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
 44
 45/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 46 * overflow past the end of the 64-bit address space?
 47 */
 48static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
 49{
 50	unsigned long va_exclude_start, va_exclude_end;
 51
 52	va_exclude_start = VA_EXCLUDE_START;
 53	va_exclude_end   = VA_EXCLUDE_END;
 54
 55	if (unlikely(len >= va_exclude_start))
 56		return 1;
 57
 58	if (unlikely((addr + len) < addr))
 59		return 1;
 60
 61	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
 62		     ((addr + len) >= va_exclude_start &&
 63		      (addr + len) < va_exclude_end)))
 64		return 1;
 65
 66	return 0;
 67}
 68
 69/* Does start,end straddle the VA-space hole?  */
 70static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
 71{
 72	unsigned long va_exclude_start, va_exclude_end;
 73
 74	va_exclude_start = VA_EXCLUDE_START;
 75	va_exclude_end   = VA_EXCLUDE_END;
 76
 77	if (likely(start < va_exclude_start && end < va_exclude_start))
 78		return 0;
 79
 80	if (likely(start >= va_exclude_end && end >= va_exclude_end))
 81		return 0;
 82
 83	return 1;
 84}
 85
 86/* These functions differ from the default implementations in
 87 * mm/mmap.c in two ways:
 88 *
 89 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 90 *    for fixed such mappings we just validate what the user gave us.
 91 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 92 *    the spitfire/niagara VA-hole.
 93 */
 94
 95static inline unsigned long COLOUR_ALIGN(unsigned long addr,
 96					 unsigned long pgoff)
 97{
 98	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
 99	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
100
101	return base + off;
102}
103
104static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
105					      unsigned long pgoff)
106{
107	unsigned long base = addr & ~(SHMLBA-1);
108	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);
109
110	if (base + off <= addr)
111		return base + off;
112	return base - off;
113}
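
COLOUR_ALIGN_DOWN is the mirror image used by this older top-down allocator: round down to the aliasing boundary, add the file colour, and if that overshoots the original address step back below the boundary instead. A sketch with assumed 8 KB pages and a 16 KB SHMLBA:

#include <stdio.h>

#define PAGE_SHIFT 13UL                 /* assumed 8 KB pages */
#define SHMLBA     (1UL << 14)          /* assumed 16 KB aliasing boundary */

static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA - 1);
        unsigned long off  = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}

int main(void)
{
        printf("%#lx\n", colour_align_down(0x23000UL, 1));      /* 0x22000: colour fits below addr */
        printf("%#lx\n", colour_align_down(0x21000UL, 1));      /* 0x1e000: stepped back below the boundary */
        return 0;
}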
114
115unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
116{
117	struct mm_struct *mm = current->mm;
118	struct vm_area_struct * vma;
119	unsigned long task_size = TASK_SIZE;
120	unsigned long start_addr;
121	int do_color_align;
122
123	if (flags & MAP_FIXED) {
124		/* We do not accept a shared mapping if it would violate
125		 * cache aliasing constraints.
126		 */
127		if ((flags & MAP_SHARED) &&
128		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
129			return -EINVAL;
130		return addr;
131	}
132
133	if (test_thread_flag(TIF_32BIT))
134		task_size = STACK_TOP32;
135	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
136		return -ENOMEM;
137
138	do_color_align = 0;
139	if (filp || (flags & MAP_SHARED))
140		do_color_align = 1;
141
142	if (addr) {
143		if (do_color_align)
144			addr = COLOUR_ALIGN(addr, pgoff);
145		else
146			addr = PAGE_ALIGN(addr);
147
148		vma = find_vma(mm, addr);
149		if (task_size - len >= addr &&
150		    (!vma || addr + len <= vma->vm_start))
151			return addr;
152	}
153
154	if (len > mm->cached_hole_size) {
155	        start_addr = addr = mm->free_area_cache;
156	} else {
157	        start_addr = addr = TASK_UNMAPPED_BASE;
158	        mm->cached_hole_size = 0;
159	}
160
161	task_size -= len;
162
163full_search:
164	if (do_color_align)
165		addr = COLOUR_ALIGN(addr, pgoff);
166	else
167		addr = PAGE_ALIGN(addr);
168
169	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
170		/* At this point:  (!vma || addr < vma->vm_end). */
171		if (addr < VA_EXCLUDE_START &&
172		    (addr + len) >= VA_EXCLUDE_START) {
173			addr = VA_EXCLUDE_END;
174			vma = find_vma(mm, VA_EXCLUDE_END);
175		}
176		if (unlikely(task_size < addr)) {
177			if (start_addr != TASK_UNMAPPED_BASE) {
178				start_addr = addr = TASK_UNMAPPED_BASE;
179				mm->cached_hole_size = 0;
180				goto full_search;
181			}
182			return -ENOMEM;
183		}
184		if (likely(!vma || addr + len <= vma->vm_start)) {
185			/*
186			 * Remember the place where we stopped the search:
187			 */
188			mm->free_area_cache = addr + len;
189			return addr;
190		}
191		if (addr + mm->cached_hole_size < vma->vm_start)
192		        mm->cached_hole_size = vma->vm_start - addr;
193
194		addr = vma->vm_end;
195		if (do_color_align)
196			addr = COLOUR_ALIGN(addr, pgoff);
197	}
198}
199
200unsigned long
201arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
202			  const unsigned long len, const unsigned long pgoff,
203			  const unsigned long flags)
204{
205	struct vm_area_struct *vma;
206	struct mm_struct *mm = current->mm;
207	unsigned long task_size = STACK_TOP32;
208	unsigned long addr = addr0;
209	int do_color_align;
210
211	/* This should only ever run for 32-bit processes.  */
212	BUG_ON(!test_thread_flag(TIF_32BIT));
213
214	if (flags & MAP_FIXED) {
215		/* We do not accept a shared mapping if it would violate
216		 * cache aliasing constraints.
217		 */
218		if ((flags & MAP_SHARED) &&
219		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
220			return -EINVAL;
221		return addr;
222	}
223
224	if (unlikely(len > task_size))
225		return -ENOMEM;
226
227	do_color_align = 0;
228	if (filp || (flags & MAP_SHARED))
229		do_color_align = 1;
230
231	/* requesting a specific address */
232	if (addr) {
233		if (do_color_align)
234			addr = COLOUR_ALIGN(addr, pgoff);
235		else
236			addr = PAGE_ALIGN(addr);
237
238		vma = find_vma(mm, addr);
239		if (task_size - len >= addr &&
240		    (!vma || addr + len <= vma->vm_start))
241			return addr;
242	}
243
244	/* check if free_area_cache is useful for us */
245	if (len <= mm->cached_hole_size) {
246 	        mm->cached_hole_size = 0;
247 		mm->free_area_cache = mm->mmap_base;
248 	}
249
250	/* either no address requested or can't fit in requested address hole */
251	addr = mm->free_area_cache;
252	if (do_color_align) {
253		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
254
255		addr = base + len;
256	}
257
258	/* make sure it can fit in the remaining address space */
259	if (likely(addr > len)) {
260		vma = find_vma(mm, addr-len);
261		if (!vma || addr <= vma->vm_start) {
262			/* remember the address as a hint for next time */
263			return (mm->free_area_cache = addr-len);
264		}
265	}
266
267	if (unlikely(mm->mmap_base < len))
268		goto bottomup;
269
270	addr = mm->mmap_base-len;
271	if (do_color_align)
272		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
273
274	do {
275		/*
276		 * Lookup failure means no vma is above this address,
277		 * else if new region fits below vma->vm_start,
278		 * return with success:
279		 */
280		vma = find_vma(mm, addr);
281		if (likely(!vma || addr+len <= vma->vm_start)) {
282			/* remember the address as a hint for next time */
283			return (mm->free_area_cache = addr);
284		}
285
286 		/* remember the largest hole we saw so far */
287 		if (addr + mm->cached_hole_size < vma->vm_start)
288 		        mm->cached_hole_size = vma->vm_start - addr;
289
290		/* try just below the current vma->vm_start */
291		addr = vma->vm_start-len;
292		if (do_color_align)
293			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
294	} while (likely(len < vma->vm_start));
295
296bottomup:
297	/*
298	 * A failed mmap() very likely causes application failure,
299	 * so fall back to the bottom-up function here. This scenario
300	 * can happen with large stack limits and large mmap()
301	 * allocations.
302	 */
303	mm->cached_hole_size = ~0UL;
304  	mm->free_area_cache = TASK_UNMAPPED_BASE;
305	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
306	/*
307	 * Restore the topdown base:
308	 */
309	mm->free_area_cache = mm->mmap_base;
310	mm->cached_hole_size = ~0UL;
311
312	return addr;
313}
314
316/* Try to align the mapping to as large a boundary as possible. */
316unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
317{
318	unsigned long align_goal, addr = -ENOMEM;
319	unsigned long (*get_area)(struct file *, unsigned long,
320				  unsigned long, unsigned long, unsigned long);
321
322	get_area = current->mm->get_unmapped_area;
323
324	if (flags & MAP_FIXED) {
325		/* Ok, don't mess with it. */
326		return get_area(NULL, orig_addr, len, pgoff, flags);
327	}
328	flags &= ~MAP_SHARED;
329
330	align_goal = PAGE_SIZE;
331	if (len >= (4UL * 1024 * 1024))
332		align_goal = (4UL * 1024 * 1024);
333	else if (len >= (512UL * 1024))
334		align_goal = (512UL * 1024);
335	else if (len >= (64UL * 1024))
336		align_goal = (64UL * 1024);
337
338	do {
339		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
340		if (!(addr & ~PAGE_MASK)) {
341			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
342			break;
343		}
344
345		if (align_goal == (4UL * 1024 * 1024))
346			align_goal = (512UL * 1024);
347		else if (align_goal == (512UL * 1024))
348			align_goal = (64UL * 1024);
349		else
350			align_goal = PAGE_SIZE;
351	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
352
353	/* Mapping is smaller than 64K or larger areas could not
354	 * be obtained.
355	 */
356	if (addr & ~PAGE_MASK)
357		addr = get_area(NULL, orig_addr, len, pgoff, flags);
358
359	return addr;
360}
361EXPORT_SYMBOL(get_fb_unmapped_area);
362
363/* Essentially the same as PowerPC.  */
364static unsigned long mmap_rnd(void)
365{
366	unsigned long rnd = 0UL;
367
368	if (current->flags & PF_RANDOMIZE) {
369		unsigned long val = get_random_int();
370		if (test_thread_flag(TIF_32BIT))
371			rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
372		else
373			rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
374	}
375	return (rnd << PAGE_SHIFT) * 2;
376}
377
378void arch_pick_mmap_layout(struct mm_struct *mm)
379{
380	unsigned long random_factor = mmap_rnd();
381	unsigned long gap;
382
383	/*
384	 * Fall back to the standard layout if the personality
385	 * bit is set, or if the expected stack growth is unlimited:
386	 */
387	gap = rlimit(RLIMIT_STACK);
388	if (!test_thread_flag(TIF_32BIT) ||
389	    (current->personality & ADDR_COMPAT_LAYOUT) ||
390	    gap == RLIM_INFINITY ||
391	    sysctl_legacy_va_layout) {
392		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
393		mm->get_unmapped_area = arch_get_unmapped_area;
394		mm->unmap_area = arch_unmap_area;
395	} else {
396		/* We know it's 32-bit */
397		unsigned long task_size = STACK_TOP32;
398
399		if (gap < 128 * 1024 * 1024)
400			gap = 128 * 1024 * 1024;
401		if (gap > (task_size / 6 * 5))
402			gap = (task_size / 6 * 5);
403
404		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
405		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
406		mm->unmap_area = arch_unmap_area_topdown;
407	}
408}
409
410/*
411 * sys_pipe() is the normal C calling standard for creating
412 * a pipe. It's not the way unix traditionally does this, though.
413 */
414SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
415{
416	int fd[2];
417	int error;
418
419	error = do_pipe_flags(fd, 0);
420	if (error)
421		goto out;
422	regs->u_regs[UREG_I1] = fd[1];
423	error = fd[0];
424out:
425	return error;
426}
427
428/*
429 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
430 *
431 * This is really horribly ugly.
432 */
433
434SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
435		unsigned long, third, void __user *, ptr, long, fifth)
436{
437	long err;
438
439	/* No need for backward compatibility. We can start fresh... */
440	if (call <= SEMCTL) {
441		switch (call) {
442		case SEMOP:
443			err = sys_semtimedop(first, ptr,
444					     (unsigned)second, NULL);
445			goto out;
446		case SEMTIMEDOP:
447			err = sys_semtimedop(first, ptr, (unsigned)second,
448				(const struct timespec __user *)
449					     (unsigned long) fifth);
450			goto out;
451		case SEMGET:
452			err = sys_semget(first, (int)second, (int)third);
453			goto out;
454		case SEMCTL: {
455			err = sys_semctl(first, second,
456					 (int)third | IPC_64,
457					 (union semun) ptr);
458			goto out;
459		}
460		default:
461			err = -ENOSYS;
462			goto out;
463		}
464	}
465	if (call <= MSGCTL) {
466		switch (call) {
467		case MSGSND:
468			err = sys_msgsnd(first, ptr, (size_t)second,
469					 (int)third);
470			goto out;
471		case MSGRCV:
472			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
473					 (int)third);
474			goto out;
475		case MSGGET:
476			err = sys_msgget((key_t)first, (int)second);
477			goto out;
478		case MSGCTL:
479			err = sys_msgctl(first, (int)second | IPC_64, ptr);
480			goto out;
481		default:
482			err = -ENOSYS;
483			goto out;
484		}
485	}
486	if (call <= SHMCTL) {
487		switch (call) {
488		case SHMAT: {
489			ulong raddr;
490			err = do_shmat(first, ptr, (int)second, &raddr);
491			if (!err) {
492				if (put_user(raddr,
493					     (ulong __user *) third))
494					err = -EFAULT;
495			}
496			goto out;
497		}
498		case SHMDT:
499			err = sys_shmdt(ptr);
500			goto out;
501		case SHMGET:
502			err = sys_shmget(first, (size_t)second, (int)third);
503			goto out;
504		case SHMCTL:
505			err = sys_shmctl(first, (int)second | IPC_64, ptr);
506			goto out;
507		default:
508			err = -ENOSYS;
509			goto out;
510		}
511	} else {
512		err = -ENOSYS;
513	}
514out:
515	return err;
516}
517
518SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
519{
520	int ret;
521
522	if (current->personality == PER_LINUX32 &&
523	    personality == PER_LINUX)
524		personality = PER_LINUX32;
525	ret = sys_personality(personality);
526	if (ret == PER_LINUX32)
527		ret = PER_LINUX;
528
529	return ret;
530}
531
532int sparc_mmap_check(unsigned long addr, unsigned long len)
533{
534	if (test_thread_flag(TIF_32BIT)) {
535		if (len >= STACK_TOP32)
536			return -EINVAL;
537
538		if (addr > STACK_TOP32 - len)
539			return -EINVAL;
540	} else {
541		if (len >= VA_EXCLUDE_START)
542			return -EINVAL;
543
544		if (invalid_64bit_range(addr, len))
545			return -EINVAL;
546	}
547
548	return 0;
549}
550
551/* Linux version of mmap */
552SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
553		unsigned long, prot, unsigned long, flags, unsigned long, fd,
554		unsigned long, off)
555{
556	unsigned long retval = -EINVAL;
557
558	if ((off + PAGE_ALIGN(len)) < off)
559		goto out;
560	if (off & ~PAGE_MASK)
561		goto out;
562	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
563out:
564	return retval;
565}
566
567SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
568{
569	long ret;
570
571	if (invalid_64bit_range(addr, len))
572		return -EINVAL;
573
574	down_write(&current->mm->mmap_sem);
575	ret = do_munmap(current->mm, addr, len);
576	up_write(&current->mm->mmap_sem);
577	return ret;
578}
579
580extern unsigned long do_mremap(unsigned long addr,
581	unsigned long old_len, unsigned long new_len,
582	unsigned long flags, unsigned long new_addr);
583                
584SYSCALL_DEFINE5(64_mremap, unsigned long, addr,	unsigned long, old_len,
585		unsigned long, new_len, unsigned long, flags,
586		unsigned long, new_addr)
587{
588	unsigned long ret = -EINVAL;
589
590	if (test_thread_flag(TIF_32BIT))
591		goto out;
592
593	down_write(&current->mm->mmap_sem);
594	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
595	up_write(&current->mm->mmap_sem);
596out:
597	return ret;       
598}
599
600/* We get here via sys_nis_syscall so it can set up the regs argument */
601asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
602{
603	static int count;
604	
605	/* Don't make the system unusable if someone gets stuck */
606	if (count++ > 5)
607		return -ENOSYS;
608
609	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
610#ifdef DEBUG_UNIMP_SYSCALL	
611	show_regs (regs);
612#endif
613
614	return -ENOSYS;
615}
616
617/* #define DEBUG_SPARC_BREAKPOINT */
618
619asmlinkage void sparc_breakpoint(struct pt_regs *regs)
620{
621	siginfo_t info;
622
623	if (test_thread_flag(TIF_32BIT)) {
624		regs->tpc &= 0xffffffff;
625		regs->tnpc &= 0xffffffff;
626	}
627#ifdef DEBUG_SPARC_BREAKPOINT
628        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
629#endif
630	info.si_signo = SIGTRAP;
631	info.si_errno = 0;
632	info.si_code = TRAP_BRKPT;
633	info.si_addr = (void __user *)regs->tpc;
634	info.si_trapno = 0;
635	force_sig_info(SIGTRAP, &info, current);
636#ifdef DEBUG_SPARC_BREAKPOINT
637	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
638#endif
639}
640
641extern void check_pending(int signum);
642
643SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
644{
645        int nlen, err;
646
647	if (len < 0)
648		return -EINVAL;
649
650 	down_read(&uts_sem);
651 	
652	nlen = strlen(utsname()->domainname) + 1;
653	err = -EINVAL;
654	if (nlen > len)
655		goto out;
656
657	err = -EFAULT;
658	if (!copy_to_user(name, utsname()->domainname, nlen))
659		err = 0;
660
661out:
662	up_read(&uts_sem);
663	return err;
664}
665
666SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
667		utrap_handler_t, new_p, utrap_handler_t, new_d,
668		utrap_handler_t __user *, old_p,
669		utrap_handler_t __user *, old_d)
670{
671	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
672		return -EINVAL;
673	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
674		if (old_p) {
675			if (!current_thread_info()->utraps) {
676				if (put_user(NULL, old_p))
677					return -EFAULT;
678			} else {
679				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
680					return -EFAULT;
681			}
682		}
683		if (old_d) {
684			if (put_user(NULL, old_d))
685				return -EFAULT;
686		}
687		return 0;
688	}
689	if (!current_thread_info()->utraps) {
690		current_thread_info()->utraps =
691			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
692		if (!current_thread_info()->utraps)
693			return -ENOMEM;
694		current_thread_info()->utraps[0] = 1;
695	} else {
696		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
697		    current_thread_info()->utraps[0] > 1) {
698			unsigned long *p = current_thread_info()->utraps;
699
700			current_thread_info()->utraps =
701				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
702					GFP_KERNEL);
703			if (!current_thread_info()->utraps) {
704				current_thread_info()->utraps = p;
705				return -ENOMEM;
706			}
707			p[0]--;
708			current_thread_info()->utraps[0] = 1;
709			memcpy(current_thread_info()->utraps+1, p+1,
710			       UT_TRAP_INSTRUCTION_31*sizeof(long));
711		}
712	}
713	if (old_p) {
714		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
715			return -EFAULT;
716	}
717	if (old_d) {
718		if (put_user(NULL, old_d))
719			return -EFAULT;
720	}
721	current_thread_info()->utraps[type] = (long)new_p;
722
723	return 0;
724}
725
726asmlinkage long sparc_memory_ordering(unsigned long model,
727				      struct pt_regs *regs)
728{
729	if (model >= 3)
730		return -EINVAL;
731	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
732	return 0;
733}
734
735SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
736		struct sigaction __user *, oact, void __user *, restorer,
737		size_t, sigsetsize)
738{
739	struct k_sigaction new_ka, old_ka;
740	int ret;
741
742	/* XXX: Don't preclude handling different sized sigset_t's.  */
743	if (sigsetsize != sizeof(sigset_t))
744		return -EINVAL;
745
746	if (act) {
747		new_ka.ka_restorer = restorer;
748		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
749			return -EFAULT;
750	}
751
752	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
753
754	if (!ret && oact) {
755		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
756			return -EFAULT;
757	}
758
759	return ret;
760}
761
762/*
763 * Do a system call from kernel instead of calling sys_execve so we
764 * end up with proper pt_regs.
765 */
766int kernel_execve(const char *filename,
767		  const char *const argv[],
768		  const char *const envp[])
769{
770	long __res;
771	register long __g1 __asm__ ("g1") = __NR_execve;
772	register long __o0 __asm__ ("o0") = (long)(filename);
773	register long __o1 __asm__ ("o1") = (long)(argv);
774	register long __o2 __asm__ ("o2") = (long)(envp);
775	asm volatile ("t 0x6d\n\t"
776		      "sub %%g0, %%o0, %0\n\t"
777		      "movcc %%xcc, %%o0, %0\n\t"
778		      : "=r" (__res), "=&r" (__o0)
779		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
780		      : "cc");
781	return __res;
782}