Linux v4.10.11: arch/m68k/kernel/signal.c
   1/*
   2 *  linux/arch/m68k/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 * This file is subject to the terms and conditions of the GNU General Public
   7 * License.  See the file COPYING in the main directory of this archive
   8 * for more details.
   9 */
  10
  11/*
  12 * Linux/m68k support by Hamish Macdonald
  13 *
  14 * 68060 fixes by Jesper Skov
  15 *
  16 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
  17 *
  18 * mathemu support by Roman Zippel
  19 *  (Note: fpstate in the signal context is completely ignored for the emulator
  20 *         and the internal floating point format is put on stack)
  21 */
  22
  23/*
  24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
  25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
  26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
  27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
  28 * signal handlers!
  29 */
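/*
 * A minimal userspace sketch of opting in to the alternate-stack
 * delivery described above (illustrative only; it assumes nothing
 * beyond the standard sigaltstack()/sigaction() interfaces):
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static void handler(int sig) { }
 *
 *	int main(void)
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *		};
 *		struct sigaction sa = {
 *			.sa_handler = handler,
 *			.sa_flags = SA_ONSTACK,
 *		};
 *
 *		sigaltstack(&ss, NULL);
 *		sigaction(SIGUSR1, &sa, NULL);
 *		raise(SIGUSR1);
 *		return 0;
 *	}
 *
 * With SA_ONSTACK set, get_sigframe() below builds the signal frame on
 * the registered stack (via sigsp()) instead of on the interrupted
 * user stack.
 */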
  30
  31#include <linux/sched.h>
  32#include <linux/mm.h>
  33#include <linux/kernel.h>
  34#include <linux/signal.h>
  35#include <linux/syscalls.h>
  36#include <linux/errno.h>
  37#include <linux/wait.h>
  38#include <linux/ptrace.h>
  39#include <linux/unistd.h>
  40#include <linux/stddef.h>
  41#include <linux/highuid.h>
  42#include <linux/personality.h>
  43#include <linux/tty.h>
  44#include <linux/binfmts.h>
  45#include <linux/extable.h>
  46#include <linux/tracehook.h>
  47
  48#include <asm/setup.h>
  49#include <linux/uaccess.h>
  50#include <asm/pgtable.h>
  51#include <asm/traps.h>
  52#include <asm/ucontext.h>
  53#include <asm/cacheflush.h>
  54
  55#ifdef CONFIG_MMU
  56
  57/*
  58 * Handle the slight differences in classic 68k and ColdFire trap frames.
  59 */
  60#ifdef CONFIG_COLDFIRE
  61#define	FORMAT		4
  62#define	FMT4SIZE	0
  63#else
  64#define	FORMAT		0
  65#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
  66#endif
  67
  68static const int frame_size_change[16] = {
  69  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
  70  [2]	= sizeof(((struct frame *)0)->un.fmt2),
  71  [3]	= sizeof(((struct frame *)0)->un.fmt3),
  72  [4]	= FMT4SIZE,
  73  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
  74  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
  75  [7]	= sizeof(((struct frame *)0)->un.fmt7),
  76  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
  77  [9]	= sizeof(((struct frame *)0)->un.fmt9),
  78  [10]	= sizeof(((struct frame *)0)->un.fmta),
  79  [11]	= sizeof(((struct frame *)0)->un.fmtb),
  80  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
  81  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
  82  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
  83  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
  84};
  85
  86static inline int frame_extra_sizes(int f)
  87{
  88	return frame_size_change[f];
  89}
  90
  91int handle_kernel_fault(struct pt_regs *regs)
  92{
  93	const struct exception_table_entry *fixup;
  94	struct pt_regs *tregs;
  95
  96	/* Are we prepared to handle this kernel fault? */
  97	fixup = search_exception_tables(regs->pc);
  98	if (!fixup)
  99		return 0;
 100
 101	/* Create a new four word stack frame, discarding the old one. */
 102	regs->stkadj = frame_extra_sizes(regs->format);
 103	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
 104	tregs->vector = regs->vector;
 105	tregs->format = FORMAT;
 106	tregs->pc = fixup->fixup;
 107	tregs->sr = regs->sr;
 108
 109	return 1;
 110}
 111
 112void ptrace_signal_deliver(void)
 113{
 114	struct pt_regs *regs = signal_pt_regs();
 115	if (regs->orig_d0 < 0)
 116		return;
 117	switch (regs->d0) {
 118	case -ERESTARTNOHAND:
 119	case -ERESTARTSYS:
 120	case -ERESTARTNOINTR:
 121		regs->d0 = regs->orig_d0;
 122		regs->orig_d0 = -1;
 123		regs->pc -= 2;
 124		break;
 125	}
 126}
 127
 128static inline void push_cache (unsigned long vaddr)
 129{
 130	/*
 131	 * Using the old cache_push_v() was really a big waste.
 132	 *
 133	 * What we are trying to do is to flush 8 bytes to ram.
 134	 * Flushing 2 cache lines of 16 bytes is much cheaper than
 135	 * flushing 1 or 2 pages, as previously done in
 136	 * cache_push_v().
 137	 *                                                     Jes
 138	 */
 139	if (CPU_IS_040) {
 140		unsigned long temp;
 141
 142		__asm__ __volatile__ (".chip 68040\n\t"
 143				      "nop\n\t"
 144				      "ptestr (%1)\n\t"
 145				      "movec %%mmusr,%0\n\t"
 146				      ".chip 68k"
 147				      : "=r" (temp)
 148				      : "a" (vaddr));
 149
 150		temp &= PAGE_MASK;
 151		temp |= vaddr & ~PAGE_MASK;
 152
 153		__asm__ __volatile__ (".chip 68040\n\t"
 154				      "nop\n\t"
 155				      "cpushl %%bc,(%0)\n\t"
 156				      ".chip 68k"
 157				      : : "a" (temp));
 158	}
 159	else if (CPU_IS_060) {
 160		unsigned long temp;
 161		__asm__ __volatile__ (".chip 68060\n\t"
 162				      "plpar (%0)\n\t"
 163				      ".chip 68k"
 164				      : "=a" (temp)
 165				      : "0" (vaddr));
 166		__asm__ __volatile__ (".chip 68060\n\t"
 167				      "cpushl %%bc,(%0)\n\t"
 168				      ".chip 68k"
 169				      : : "a" (temp));
 170	} else if (!CPU_IS_COLDFIRE) {
 171		/*
 172		 * 68030/68020 have no writeback cache;
 173		 * still need to clear icache.
 174		 * Note that vaddr is guaranteed to be long word aligned.
 175		 */
 176		unsigned long temp;
 177		asm volatile ("movec %%cacr,%0" : "=r" (temp));
 178		temp += 4;
 179		asm volatile ("movec %0,%%caar\n\t"
 180			      "movec %1,%%cacr"
 181			      : : "r" (vaddr), "r" (temp));
 182		asm volatile ("movec %0,%%caar\n\t"
 183			      "movec %1,%%cacr"
 184			      : : "r" (vaddr + 4), "r" (temp));
 185	} else {
 186		/* CPU_IS_COLDFIRE */
 187#if defined(CONFIG_CACHE_COPYBACK)
 188		flush_cf_dcache(0, DCACHE_MAX_ADDR);
 189#endif
 190		/* Invalidate instruction cache for the pushed bytes */
 191		clear_cf_icache(vaddr, vaddr + 8);
 192	}
 193}
 194
 195static inline void adjustformat(struct pt_regs *regs)
 196{
 197}
 198
 199static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 200{
 201}
 202
 203#else /* CONFIG_MMU */
 204
 205void ret_from_user_signal(void);
 206void ret_from_user_rt_signal(void);
 207
 208static inline int frame_extra_sizes(int f)
 209{
 210	/* No frame size adjustments required on non-MMU CPUs */
 211	return 0;
 212}
 213
 214static inline void adjustformat(struct pt_regs *regs)
 215{
 216	/*
 217	 * set format byte to make stack appear modulo 4, which it will
 218	 * be when doing the rte
 219	 */
 220	regs->format = 0x4;
 221}
 222
 223static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 224{
 225	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
 226}
 227
 228static inline void push_cache(unsigned long vaddr)
 229{
 230}
 231
 232#endif /* CONFIG_MMU */
 233
 234/*
 235 * Do a signal return; undo the signal stack.
 236 *
 237 * Keep the return code on the stack quadword aligned!
 238 * That makes the cache flush below easier.
 239 */
 240
 241struct sigframe
 242{
 243	char __user *pretcode;
 244	int sig;
 245	int code;
 246	struct sigcontext __user *psc;
 247	char retcode[8];
 248	unsigned long extramask[_NSIG_WORDS-1];
 249	struct sigcontext sc;
 250};
 251
 252struct rt_sigframe
 253{
 254	char __user *pretcode;
 255	int sig;
 256	struct siginfo __user *pinfo;
 257	void __user *puc;
 258	char retcode[8];
 259	struct siginfo info;
 260	struct ucontext uc;
 261};
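/*
 * Layout note: pretcode is the first word of both frames and the frame
 * start becomes the handler's stack pointer (see the wrusp() calls in
 * setup_frame()/setup_rt_frame()).  When the handler returns with rts it
 * pops pretcode, which points at the retcode trampoline (or at
 * ret_from_user_signal/ret_from_user_rt_signal on non-MMU builds), so by
 * the time the sigreturn trap is taken the user stack pointer is 4 bytes
 * into the frame.  That is why do_sigreturn() and do_rt_sigreturn()
 * recover the frame address as rdusp() - 4.
 */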
 262
 263#define FPCONTEXT_SIZE	216
 264#define uc_fpstate	uc_filler[0]
 265#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
 266#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
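/*
 * The three names above are overlays on ucontext.uc_filler: the first
 * FPCONTEXT_SIZE (216) bytes hold the raw fsave/frestore image, the next
 * longword holds the saved format/vector word, and what follows is the
 * extra exception-frame data copied out by setup_rt_frame() and fed back
 * to mangle_kernel_stack() by rt_restore_ucontext().
 */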
 267
 268#ifdef CONFIG_FPU
 269
 270static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
 271
 272static inline int restore_fpu_state(struct sigcontext *sc)
 273{
 274	int err = 1;
 275
 276	if (FPU_IS_EMU) {
 277	    /* restore registers */
 278	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
 279	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
 280	    return 0;
 281	}
 282
 283	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 284	    /* Verify the frame format.  */
 285	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 286		 (sc->sc_fpstate[0] != fpu_version))
 287		goto out;
 288	    if (CPU_IS_020_OR_030) {
 289		if (m68k_fputype & FPU_68881 &&
 290		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
 291		    goto out;
 292		if (m68k_fputype & FPU_68882 &&
 293		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
 294		    goto out;
 295	    } else if (CPU_IS_040) {
 296		if (!(sc->sc_fpstate[1] == 0x00 ||
 297                      sc->sc_fpstate[1] == 0x28 ||
 298                      sc->sc_fpstate[1] == 0x60))
 299		    goto out;
 300	    } else if (CPU_IS_060) {
 301		if (!(sc->sc_fpstate[3] == 0x00 ||
 302                      sc->sc_fpstate[3] == 0x60 ||
 303		      sc->sc_fpstate[3] == 0xe0))
 304		    goto out;
 305	    } else if (CPU_IS_COLDFIRE) {
 306		if (!(sc->sc_fpstate[0] == 0x00 ||
 307		      sc->sc_fpstate[0] == 0x05 ||
 308		      sc->sc_fpstate[0] == 0xe5))
 309		    goto out;
 310	    } else
 311		goto out;
 312
 313	    if (CPU_IS_COLDFIRE) {
 314		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
 315				  "fmovel %1,%%fpcr\n\t"
 316				  "fmovel %2,%%fpsr\n\t"
 317				  "fmovel %3,%%fpiar"
 318				  : /* no outputs */
 319				  : "m" (sc->sc_fpregs[0]),
 320				    "m" (sc->sc_fpcntl[0]),
 321				    "m" (sc->sc_fpcntl[1]),
 322				    "m" (sc->sc_fpcntl[2]));
 323	    } else {
 324		__asm__ volatile (".chip 68k/68881\n\t"
 325				  "fmovemx %0,%%fp0-%%fp1\n\t"
 326				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 327				  ".chip 68k"
 328				  : /* no outputs */
 329				  : "m" (*sc->sc_fpregs),
 330				    "m" (*sc->sc_fpcntl));
 331	    }
 332	}
 333
 334	if (CPU_IS_COLDFIRE) {
 335		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
 336	} else {
 337		__asm__ volatile (".chip 68k/68881\n\t"
 338				  "frestore %0\n\t"
 339				  ".chip 68k"
 340				  : : "m" (*sc->sc_fpstate));
 341	}
 342	err = 0;
 343
 344out:
 345	return err;
 346}
 347
 348static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 349{
 350	unsigned char fpstate[FPCONTEXT_SIZE];
 351	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 352	fpregset_t fpregs;
 353	int err = 1;
 354
 355	if (FPU_IS_EMU) {
 356		/* restore fpu control register */
 357		if (__copy_from_user(current->thread.fpcntl,
 358				uc->uc_mcontext.fpregs.f_fpcntl, 12))
 359			goto out;
  360		/* restore all other fpu registers */
 361		if (__copy_from_user(current->thread.fp,
 362				uc->uc_mcontext.fpregs.f_fpregs, 96))
 363			goto out;
 364		return 0;
 365	}
 366
 367	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
 368		goto out;
 369	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 370		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 371			context_size = fpstate[1];
 372		/* Verify the frame format.  */
 373		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 374		     (fpstate[0] != fpu_version))
 375			goto out;
 376		if (CPU_IS_020_OR_030) {
 377			if (m68k_fputype & FPU_68881 &&
 378			    !(context_size == 0x18 || context_size == 0xb4))
 379				goto out;
 380			if (m68k_fputype & FPU_68882 &&
 381			    !(context_size == 0x38 || context_size == 0xd4))
 382				goto out;
 383		} else if (CPU_IS_040) {
 384			if (!(context_size == 0x00 ||
 385			      context_size == 0x28 ||
 386			      context_size == 0x60))
 387				goto out;
 388		} else if (CPU_IS_060) {
 389			if (!(fpstate[3] == 0x00 ||
 390			      fpstate[3] == 0x60 ||
 391			      fpstate[3] == 0xe0))
 392				goto out;
 393		} else if (CPU_IS_COLDFIRE) {
 394			if (!(fpstate[3] == 0x00 ||
 395			      fpstate[3] == 0x05 ||
 396			      fpstate[3] == 0xe5))
 397				goto out;
 398		} else
 399			goto out;
 400		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
 401				     sizeof(fpregs)))
 402			goto out;
 403
 404		if (CPU_IS_COLDFIRE) {
 405			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
 406					  "fmovel %1,%%fpcr\n\t"
 407					  "fmovel %2,%%fpsr\n\t"
 408					  "fmovel %3,%%fpiar"
 409					  : /* no outputs */
 410					  : "m" (fpregs.f_fpregs[0]),
 411					    "m" (fpregs.f_fpcntl[0]),
 412					    "m" (fpregs.f_fpcntl[1]),
 413					    "m" (fpregs.f_fpcntl[2]));
 414		} else {
 415			__asm__ volatile (".chip 68k/68881\n\t"
 416					  "fmovemx %0,%%fp0-%%fp7\n\t"
 417					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 418					  ".chip 68k"
 419					  : /* no outputs */
 420					  : "m" (*fpregs.f_fpregs),
 421					    "m" (*fpregs.f_fpcntl));
 422		}
 423	}
 424	if (context_size &&
 425	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
 426			     context_size))
 427		goto out;
 428
 429	if (CPU_IS_COLDFIRE) {
 430		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
 431	} else {
 432		__asm__ volatile (".chip 68k/68881\n\t"
 433				  "frestore %0\n\t"
 434				  ".chip 68k"
 435				  : : "m" (*fpstate));
 436	}
 437	err = 0;
 438
 439out:
 440	return err;
 441}
 442
 443/*
 444 * Set up a signal frame.
 445 */
 446static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 447{
 448	if (FPU_IS_EMU) {
 449		/* save registers */
 450		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
 451		memcpy(sc->sc_fpregs, current->thread.fp, 24);
 452		return;
 453	}
 454
 455	if (CPU_IS_COLDFIRE) {
 456		__asm__ volatile ("fsave %0"
 457				  : : "m" (*sc->sc_fpstate) : "memory");
 458	} else {
 459		__asm__ volatile (".chip 68k/68881\n\t"
 460				  "fsave %0\n\t"
 461				  ".chip 68k"
 462				  : : "m" (*sc->sc_fpstate) : "memory");
 463	}
 464
 465	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 466		fpu_version = sc->sc_fpstate[0];
 467		if (CPU_IS_020_OR_030 &&
 468		    regs->vector >= (VEC_FPBRUC * 4) &&
 469		    regs->vector <= (VEC_FPNAN * 4)) {
 470			/* Clear pending exception in 68882 idle frame */
 471			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
 472				sc->sc_fpstate[0x38] |= 1 << 3;
 473		}
 474
 475		if (CPU_IS_COLDFIRE) {
 476			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
 477					  "fmovel %%fpcr,%1\n\t"
 478					  "fmovel %%fpsr,%2\n\t"
 479					  "fmovel %%fpiar,%3"
 480					  : "=m" (sc->sc_fpregs[0]),
 481					    "=m" (sc->sc_fpcntl[0]),
 482					    "=m" (sc->sc_fpcntl[1]),
 483					    "=m" (sc->sc_fpcntl[2])
 484					  : /* no inputs */
 485					  : "memory");
 486		} else {
 487			__asm__ volatile (".chip 68k/68881\n\t"
 488					  "fmovemx %%fp0-%%fp1,%0\n\t"
 489					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 490					  ".chip 68k"
 491					  : "=m" (*sc->sc_fpregs),
 492					    "=m" (*sc->sc_fpcntl)
 493					  : /* no inputs */
 494					  : "memory");
 495		}
 496	}
 497}
 498
 499static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 500{
 501	unsigned char fpstate[FPCONTEXT_SIZE];
 502	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 503	int err = 0;
 504
 505	if (FPU_IS_EMU) {
 506		/* save fpu control register */
 507		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
 508				current->thread.fpcntl, 12);
  509		/* save all other fpu registers */
 510		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
 511				current->thread.fp, 96);
 512		return err;
 513	}
 514
 515	if (CPU_IS_COLDFIRE) {
 516		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
 517	} else {
 518		__asm__ volatile (".chip 68k/68881\n\t"
 519				  "fsave %0\n\t"
 520				  ".chip 68k"
 521				  : : "m" (*fpstate) : "memory");
 522	}
 523
 524	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
 525	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 526		fpregset_t fpregs;
 527		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 528			context_size = fpstate[1];
 529		fpu_version = fpstate[0];
 530		if (CPU_IS_020_OR_030 &&
 531		    regs->vector >= (VEC_FPBRUC * 4) &&
 532		    regs->vector <= (VEC_FPNAN * 4)) {
 533			/* Clear pending exception in 68882 idle frame */
 534			if (*(unsigned short *) fpstate == 0x1f38)
 535				fpstate[0x38] |= 1 << 3;
 536		}
 537		if (CPU_IS_COLDFIRE) {
 538			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
 539					  "fmovel %%fpcr,%1\n\t"
 540					  "fmovel %%fpsr,%2\n\t"
 541					  "fmovel %%fpiar,%3"
 542					  : "=m" (fpregs.f_fpregs[0]),
 543					    "=m" (fpregs.f_fpcntl[0]),
 544					    "=m" (fpregs.f_fpcntl[1]),
 545					    "=m" (fpregs.f_fpcntl[2])
 546					  : /* no inputs */
 547					  : "memory");
 548		} else {
 549			__asm__ volatile (".chip 68k/68881\n\t"
 550					  "fmovemx %%fp0-%%fp7,%0\n\t"
 551					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 552					  ".chip 68k"
 553					  : "=m" (*fpregs.f_fpregs),
 554					    "=m" (*fpregs.f_fpcntl)
 555					  : /* no inputs */
 556					  : "memory");
 557		}
 558		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
 559				    sizeof(fpregs));
 560	}
 561	if (context_size)
 562		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
 563				    context_size);
 564	return err;
 565}
 566
 567#else /* CONFIG_FPU */
 568
 569/*
 570 * For the case with no FPU configured these all do nothing.
 571 */
 572static inline int restore_fpu_state(struct sigcontext *sc)
 573{
 574	return 0;
 575}
 576
 577static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 578{
 579	return 0;
 580}
 581
 582static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 583{
 584}
 585
 586static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 587{
 588	return 0;
 589}
 590
 591#endif /* CONFIG_FPU */
 592
 593static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
 594			       void __user *fp)
 595{
 596	int fsize = frame_extra_sizes(formatvec >> 12);
 597	if (fsize < 0) {
 598		/*
 599		 * user process trying to return with weird frame format
 600		 */
 601#ifdef DEBUG
 602		printk("user process returning with weird frame format\n");
 603#endif
 604		return 1;
 605	}
 606	if (!fsize) {
 607		regs->format = formatvec >> 12;
 608		regs->vector = formatvec & 0xfff;
 609	} else {
 610		struct switch_stack *sw = (struct switch_stack *)regs - 1;
 611		unsigned long buf[fsize / 2]; /* yes, twice as much */
 612
 613		/* that'll make sure that expansion won't crap over data */
 614		if (copy_from_user(buf + fsize / 4, fp, fsize))
 615			return 1;
 616
 617		/* point of no return */
 618		regs->format = formatvec >> 12;
 619		regs->vector = formatvec & 0xfff;
 620#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
 621		__asm__ __volatile__ (
 622#ifdef CONFIG_COLDFIRE
 623			 "   movel %0,%/sp\n\t"
 624			 "   bra ret_from_signal\n"
 625#else
 626			 "   movel %0,%/a0\n\t"
 627			 "   subl %1,%/a0\n\t"     /* make room on stack */
 628			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
 629			 /* move switch_stack and pt_regs */
 630			 "1: movel %0@+,%/a0@+\n\t"
 631			 "   dbra %2,1b\n\t"
 632			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
 633			 "   lsrl  #2,%1\n\t"
 634			 "   subql #1,%1\n\t"
 635			 /* copy to the gap we'd made */
 636			 "2: movel %4@+,%/a0@+\n\t"
 637			 "   dbra %1,2b\n\t"
 638			 "   bral ret_from_signal\n"
 639#endif
 640			 : /* no outputs, it doesn't ever return */
 641			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
 642			   "n" (frame_offset), "a" (buf + fsize/4)
 643			 : "a0");
 644#undef frame_offset
 645	}
 646	return 0;
 647}
 648
 649static inline int
 650restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
 651{
 652	int formatvec;
 653	struct sigcontext context;
 654	int err = 0;
 655
 656	/* Always make any pending restarted system calls return -EINTR */
 657	current->restart_block.fn = do_no_restart_syscall;
 658
 659	/* get previous context */
 660	if (copy_from_user(&context, usc, sizeof(context)))
 661		goto badframe;
 662
 663	/* restore passed registers */
 664	regs->d0 = context.sc_d0;
 665	regs->d1 = context.sc_d1;
 666	regs->a0 = context.sc_a0;
 667	regs->a1 = context.sc_a1;
 668	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
 669	regs->pc = context.sc_pc;
 670	regs->orig_d0 = -1;		/* disable syscall checks */
 671	wrusp(context.sc_usp);
 672	formatvec = context.sc_formatvec;
 673
 674	err = restore_fpu_state(&context);
 675
 676	if (err || mangle_kernel_stack(regs, formatvec, fp))
 677		goto badframe;
 678
 679	return 0;
 680
 681badframe:
 682	return 1;
 683}
 684
 685static inline int
 686rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
 687		    struct ucontext __user *uc)
 688{
 689	int temp;
 690	greg_t __user *gregs = uc->uc_mcontext.gregs;
 691	unsigned long usp;
 692	int err;
 693
 694	/* Always make any pending restarted system calls return -EINTR */
 695	current->restart_block.fn = do_no_restart_syscall;
 696
 697	err = __get_user(temp, &uc->uc_mcontext.version);
 698	if (temp != MCONTEXT_VERSION)
 699		goto badframe;
 700	/* restore passed registers */
 701	err |= __get_user(regs->d0, &gregs[0]);
 702	err |= __get_user(regs->d1, &gregs[1]);
 703	err |= __get_user(regs->d2, &gregs[2]);
 704	err |= __get_user(regs->d3, &gregs[3]);
 705	err |= __get_user(regs->d4, &gregs[4]);
 706	err |= __get_user(regs->d5, &gregs[5]);
 707	err |= __get_user(sw->d6, &gregs[6]);
 708	err |= __get_user(sw->d7, &gregs[7]);
 709	err |= __get_user(regs->a0, &gregs[8]);
 710	err |= __get_user(regs->a1, &gregs[9]);
 711	err |= __get_user(regs->a2, &gregs[10]);
 712	err |= __get_user(sw->a3, &gregs[11]);
 713	err |= __get_user(sw->a4, &gregs[12]);
 714	err |= __get_user(sw->a5, &gregs[13]);
 715	err |= __get_user(sw->a6, &gregs[14]);
 716	err |= __get_user(usp, &gregs[15]);
 717	wrusp(usp);
 718	err |= __get_user(regs->pc, &gregs[16]);
 719	err |= __get_user(temp, &gregs[17]);
 720	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
 721	regs->orig_d0 = -1;		/* disable syscall checks */
 722	err |= __get_user(temp, &uc->uc_formatvec);
 723
 724	err |= rt_restore_fpu_state(uc);
 725	err |= restore_altstack(&uc->uc_stack);
 726
 727	if (err)
 728		goto badframe;
 729
 730	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
 731		goto badframe;
 732
 733	return 0;
 734
 735badframe:
 736	return 1;
 737}
 738
 739asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 740{
 741	unsigned long usp = rdusp();
 742	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 743	sigset_t set;
 744
 745	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 746		goto badframe;
 747	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
 748	    (_NSIG_WORDS > 1 &&
 749	     __copy_from_user(&set.sig[1], &frame->extramask,
 750			      sizeof(frame->extramask))))
 751		goto badframe;
 752
 753	set_current_blocked(&set);
 754
 755	if (restore_sigcontext(regs, &frame->sc, frame + 1))
 756		goto badframe;
 757	return regs->d0;
 758
 759badframe:
 760	force_sig(SIGSEGV, current);
 761	return 0;
 762}
 763
 764asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 765{
 766	unsigned long usp = rdusp();
 767	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 768	sigset_t set;
 769
 770	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 771		goto badframe;
 772	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 773		goto badframe;
 774
 775	set_current_blocked(&set);
 776
 777	if (rt_restore_ucontext(regs, sw, &frame->uc))
 778		goto badframe;
 779	return regs->d0;
 780
 781badframe:
 782	force_sig(SIGSEGV, current);
 783	return 0;
 784}
 785
 786static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 787			     unsigned long mask)
 788{
 789	sc->sc_mask = mask;
 790	sc->sc_usp = rdusp();
 791	sc->sc_d0 = regs->d0;
 792	sc->sc_d1 = regs->d1;
 793	sc->sc_a0 = regs->a0;
 794	sc->sc_a1 = regs->a1;
 795	sc->sc_sr = regs->sr;
 796	sc->sc_pc = regs->pc;
 797	sc->sc_formatvec = regs->format << 12 | regs->vector;
 798	save_a5_state(sc, regs);
 799	save_fpu_state(sc, regs);
 800}
 801
 802static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 803{
 804	struct switch_stack *sw = (struct switch_stack *)regs - 1;
 805	greg_t __user *gregs = uc->uc_mcontext.gregs;
 806	int err = 0;
 807
 808	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
 809	err |= __put_user(regs->d0, &gregs[0]);
 810	err |= __put_user(regs->d1, &gregs[1]);
 811	err |= __put_user(regs->d2, &gregs[2]);
 812	err |= __put_user(regs->d3, &gregs[3]);
 813	err |= __put_user(regs->d4, &gregs[4]);
 814	err |= __put_user(regs->d5, &gregs[5]);
 815	err |= __put_user(sw->d6, &gregs[6]);
 816	err |= __put_user(sw->d7, &gregs[7]);
 817	err |= __put_user(regs->a0, &gregs[8]);
 818	err |= __put_user(regs->a1, &gregs[9]);
 819	err |= __put_user(regs->a2, &gregs[10]);
 820	err |= __put_user(sw->a3, &gregs[11]);
 821	err |= __put_user(sw->a4, &gregs[12]);
 822	err |= __put_user(sw->a5, &gregs[13]);
 823	err |= __put_user(sw->a6, &gregs[14]);
 824	err |= __put_user(rdusp(), &gregs[15]);
 825	err |= __put_user(regs->pc, &gregs[16]);
 826	err |= __put_user(regs->sr, &gregs[17]);
 827	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
 828	err |= rt_save_fpu_state(uc, regs);
 829	return err;
 830}
 831
 832static inline void __user *
 833get_sigframe(struct ksignal *ksig, size_t frame_size)
 834{
 835	unsigned long usp = sigsp(rdusp(), ksig);
 836
 837	return (void __user *)((usp - frame_size) & -8UL);
 838}
 839
 840static int setup_frame(struct ksignal *ksig, sigset_t *set,
 841			struct pt_regs *regs)
 842{
 843	struct sigframe __user *frame;
 844	int fsize = frame_extra_sizes(regs->format);
 845	struct sigcontext context;
 846	int err = 0, sig = ksig->sig;
 847
 848	if (fsize < 0) {
 849#ifdef DEBUG
 850		printk ("setup_frame: Unknown frame format %#x\n",
 851			regs->format);
 852#endif
 853		return -EFAULT;
 854	}
 855
 856	frame = get_sigframe(ksig, sizeof(*frame) + fsize);
 857
 858	if (fsize)
 859		err |= copy_to_user (frame + 1, regs + 1, fsize);
 860
 861	err |= __put_user(sig, &frame->sig);
 862
 863	err |= __put_user(regs->vector, &frame->code);
 864	err |= __put_user(&frame->sc, &frame->psc);
 865
 866	if (_NSIG_WORDS > 1)
 867		err |= copy_to_user(frame->extramask, &set->sig[1],
 868				    sizeof(frame->extramask));
 869
 870	setup_sigcontext(&context, regs, set->sig[0]);
 871	err |= copy_to_user (&frame->sc, &context, sizeof(context));
 872
 873	/* Set up to return from userspace.  */
 874#ifdef CONFIG_MMU
 875	err |= __put_user(frame->retcode, &frame->pretcode);
 876	/* moveq #,d0; trap #0 */
 877	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
 878			  (long __user *)(frame->retcode));
 879#else
 880	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
 881#endif
 882
 883	if (err)
 884		return -EFAULT;
 885
 886	push_cache ((unsigned long) &frame->retcode);
 887
 888	/*
 889	 * Set up registers for signal handler.  All the state we are about
 890	 * to destroy is successfully copied to sigframe.
 891	 */
 892	wrusp ((unsigned long) frame);
 893	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 894	adjustformat(regs);
 895
 896	/*
 897	 * This is subtle; if we build more than one sigframe, all but the
 898	 * first one will see frame format 0 and have fsize == 0, so we won't
 899	 * screw stkadj.
 900	 */
 901	if (fsize)
 902		regs->stkadj = fsize;
 903
 904	/* Prepare to skip over the extra stuff in the exception frame.  */
 905	if (regs->stkadj) {
 906		struct pt_regs *tregs =
 907			(struct pt_regs *)((ulong)regs + regs->stkadj);
 908#ifdef DEBUG
 909		printk("Performing stackadjust=%04x\n", regs->stkadj);
 910#endif
 911		/* This must be copied with decreasing addresses to
 912                   handle overlaps.  */
 913		tregs->vector = 0;
 914		tregs->format = 0;
 915		tregs->pc = regs->pc;
 916		tregs->sr = regs->sr;
 917	}
 918	return 0;
 919}
 920
 921static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 922			   struct pt_regs *regs)
 923{
 924	struct rt_sigframe __user *frame;
 925	int fsize = frame_extra_sizes(regs->format);
 926	int err = 0, sig = ksig->sig;
 927
 928	if (fsize < 0) {
 929#ifdef DEBUG
 930		printk ("setup_frame: Unknown frame format %#x\n",
 931			regs->format);
 932#endif
 933		return -EFAULT;
 934	}
 935
 936	frame = get_sigframe(ksig, sizeof(*frame));
 937
 938	if (fsize)
 939		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
 940
 941	err |= __put_user(sig, &frame->sig);
 942	err |= __put_user(&frame->info, &frame->pinfo);
 943	err |= __put_user(&frame->uc, &frame->puc);
 944	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 945
 946	/* Create the ucontext.  */
 947	err |= __put_user(0, &frame->uc.uc_flags);
 948	err |= __put_user(NULL, &frame->uc.uc_link);
 949	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
 950	err |= rt_setup_ucontext(&frame->uc, regs);
 951	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
 952
 953	/* Set up to return from userspace.  */
 954#ifdef CONFIG_MMU
 955	err |= __put_user(frame->retcode, &frame->pretcode);
 956#ifdef __mcoldfire__
 957	/* movel #__NR_rt_sigreturn,d0; trap #0 */
 958	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
 959	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
 960			  (long __user *)(frame->retcode + 4));
 961#else
 962	/* moveq #,d0; notb d0; trap #0 */
 963	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
 964			  (long __user *)(frame->retcode + 0));
 965	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
 966#endif
 967#else
 968	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
 969#endif /* CONFIG_MMU */
 970
 971	if (err)
 972		return -EFAULT;
 973
 974	push_cache ((unsigned long) &frame->retcode);
 975
 976	/*
 977	 * Set up registers for signal handler.  All the state we are about
 978	 * to destroy is successfully copied to sigframe.
 979	 */
 980	wrusp ((unsigned long) frame);
 981	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 982	adjustformat(regs);
 983
 984	/*
 985	 * This is subtle; if we build more than one sigframe, all but the
 986	 * first one will see frame format 0 and have fsize == 0, so we won't
 987	 * screw stkadj.
 988	 */
 989	if (fsize)
 990		regs->stkadj = fsize;
 991
 992	/* Prepare to skip over the extra stuff in the exception frame.  */
 993	if (regs->stkadj) {
 994		struct pt_regs *tregs =
 995			(struct pt_regs *)((ulong)regs + regs->stkadj);
 996#ifdef DEBUG
 997		printk("Performing stackadjust=%04x\n", regs->stkadj);
 998#endif
 999		/* This must be copied with decreasing addresses to
1000                   handle overlaps.  */
1001		tregs->vector = 0;
1002		tregs->format = 0;
1003		tregs->pc = regs->pc;
1004		tregs->sr = regs->sr;
1005	}
1006	return 0;
1007}
1008
1009static inline void
1010handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
1011{
1012	switch (regs->d0) {
1013	case -ERESTARTNOHAND:
1014		if (!has_handler)
1015			goto do_restart;
1016		regs->d0 = -EINTR;
1017		break;
1018
1019	case -ERESTART_RESTARTBLOCK:
1020		if (!has_handler) {
1021			regs->d0 = __NR_restart_syscall;
1022			regs->pc -= 2;
1023			break;
1024		}
1025		regs->d0 = -EINTR;
1026		break;
1027
1028	case -ERESTARTSYS:
1029		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
1030			regs->d0 = -EINTR;
1031			break;
1032		}
1033	/* fallthrough */
1034	case -ERESTARTNOINTR:
1035	do_restart:
1036		regs->d0 = regs->orig_d0;
1037		regs->pc -= 2;
1038		break;
1039	}
1040}
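/*
 * In the restart cases above, "regs->pc -= 2" backs the saved user PC up
 * over the 2-byte trap instruction that entered the kernel, so returning
 * to userspace re-executes the system call, with d0 holding either the
 * original syscall number (orig_d0) or __NR_restart_syscall.
 */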
1041
1042/*
1043 * OK, we're invoking a handler
1044 */
1045static void
1046handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1047{
1048	sigset_t *oldset = sigmask_to_save();
1049	int err;
1050	/* are we from a system call? */
1051	if (regs->orig_d0 >= 0)
1052		/* If so, check system call restarting.. */
1053		handle_restart(regs, &ksig->ka, 1);
1054
1055	/* set up the stack frame */
1056	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1057		err = setup_rt_frame(ksig, oldset, regs);
1058	else
1059		err = setup_frame(ksig, oldset, regs);
1060
1061	signal_setup_done(err, ksig, 0);
1062
1063	if (test_thread_flag(TIF_DELAYED_TRACE)) {
1064		regs->sr &= ~0x8000;
1065		send_sig(SIGTRAP, current, 1);
1066	}
1067}
1068
1069/*
1070 * Note that 'init' is a special process: it doesn't get signals it doesn't
1071 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1072 * mistake.
1073 */
1074static void do_signal(struct pt_regs *regs)
1075{
1076	struct ksignal ksig;
1077
1078	current->thread.esp0 = (unsigned long) regs;
1079
1080	if (get_signal(&ksig)) {
1081		/* Whee!  Actually deliver the signal.  */
1082		handle_signal(&ksig, regs);
1083		return;
1084	}
1085
1086	/* Did we come from a system call? */
1087	if (regs->orig_d0 >= 0)
1088		/* Restart the system call - no handlers present */
1089		handle_restart(regs, NULL, 0);
1090
1091	/* If there's no signal to deliver, we just restore the saved mask.  */
1092	restore_saved_sigmask();
1093}
1094
1095void do_notify_resume(struct pt_regs *regs)
1096{
1097	if (test_thread_flag(TIF_SIGPENDING))
1098		do_signal(regs);
1099
1100	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
1101		tracehook_notify_resume(regs);
1102}
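
Taken together, do_notify_resume() -> do_signal() -> handle_signal() -> setup_rt_frame()
build the stack frame a userspace handler runs on, and returning from that handler
re-enters the kernel through do_rt_sigreturn(). The following is a minimal userspace
sketch that exercises exactly that path; it is not part of the kernel file above,
uses nothing m68k-specific, and assumes only the standard sigaction()/SA_SIGINFO API.

#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t seen_signo;

/* Entered on the rt frame built by setup_rt_frame(); the second and
 * third arguments are the frame's pinfo and puc pointers. */
static void usr1_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;
	seen_signo = info->si_signo;
	/* Returning from here runs the rt_sigreturn trampoline, which
	 * traps back into do_rt_sigreturn() to restore the old context. */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = usr1_handler;
	sa.sa_flags = SA_SIGINFO;	/* ask for setup_rt_frame(), not setup_frame() */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);
	printf("handler saw signal %d\n", (int)seen_signo);
	return 0;
}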