linux/arch/m68k/kernel/signal.c (v6.8)
   1/*
   2 *  linux/arch/m68k/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 * This file is subject to the terms and conditions of the GNU General Public
   7 * License.  See the file COPYING in the main directory of this archive
   8 * for more details.
   9 */
  10
  11/*
  12 * Linux/m68k support by Hamish Macdonald
  13 *
  14 * 68060 fixes by Jesper Skov
  15 *
  16 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
  17 *
  18 * mathemu support by Roman Zippel
  19 *  (Note: fpstate in the signal context is completely ignored for the emulator
  20 *         and the internal floating point format is put on stack)
  21 */
  22
  23/*
  24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
  25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
  26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
  27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
  28 * signal handlers!
  29 */
  30
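For orientation, the signal-stack behaviour described in the comment above is what user space requests with sigaltstack() and SA_ONSTACK; a minimal illustrative sketch (the names altstack and handler are invented for the example; the kernel side of this is sigsp() in get_sigframe() further down), not part of the kernel file:

#include <signal.h>

static char altstack[64 * 1024];

static void handler(int sig)
{
	/* delivered on altstack[], which sigsp() selects in get_sigframe() */
}

int main(void)
{
	stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
	struct sigaction sa = { .sa_handler = handler, .sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}
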
  31#include <linux/sched.h>
  32#include <linux/mm.h>
  33#include <linux/kernel.h>
  34#include <linux/signal.h>
  35#include <linux/syscalls.h>
  36#include <linux/errno.h>
  37#include <linux/wait.h>
  38#include <linux/ptrace.h>
  39#include <linux/unistd.h>
  40#include <linux/stddef.h>
  41#include <linux/highuid.h>
  42#include <linux/personality.h>
  43#include <linux/tty.h>
  44#include <linux/binfmts.h>
  45#include <linux/extable.h>
  46#include <linux/resume_user_mode.h>
  47
  48#include <asm/setup.h>
  49#include <linux/uaccess.h>
  50#include <asm/traps.h>
  51#include <asm/ucontext.h>
  52#include <asm/cacheflush.h>
  53
  54#include "signal.h"
  55
  56#ifdef CONFIG_MMU
  57
  58/*
  59 * Handle the slight differences in classic 68k and ColdFire trap frames.
  60 */
  61#ifdef CONFIG_COLDFIRE
  62#define	FORMAT		4
  63#define	FMT4SIZE	0
  64#else
  65#define	FORMAT		0
  66#define	FMT4SIZE	sizeof_field(struct frame, un.fmt4)
  67#endif
  68
  69static const int frame_size_change[16] = {
  70  [1]	= -1, /* sizeof_field(struct frame, un.fmt1), */
  71  [2]	= sizeof_field(struct frame, un.fmt2),
  72  [3]	= sizeof_field(struct frame, un.fmt3),
  73  [4]	= FMT4SIZE,
  74  [5]	= -1, /* sizeof_field(struct frame, un.fmt5), */
  75  [6]	= -1, /* sizeof_field(struct frame, un.fmt6), */
  76  [7]	= sizeof_field(struct frame, un.fmt7),
  77  [8]	= -1, /* sizeof_field(struct frame, un.fmt8), */
  78  [9]	= sizeof_field(struct frame, un.fmt9),
  79  [10]	= sizeof_field(struct frame, un.fmta),
  80  [11]	= sizeof_field(struct frame, un.fmtb),
  81  [12]	= -1, /* sizeof_field(struct frame, un.fmtc), */
  82  [13]	= -1, /* sizeof_field(struct frame, un.fmtd), */
  83  [14]	= -1, /* sizeof_field(struct frame, un.fmte), */
  84  [15]	= -1, /* sizeof_field(struct frame, un.fmtf), */
  85};
  86
  87static inline int frame_extra_sizes(int f)
  88{
  89	return frame_size_change[f];
  90}
  91
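The index into frame_size_change[] is the top nibble of the hardware format/vector word, so a 68020/030 format-0xB bus-error frame, for example, contributes sizeof(un.fmtb) extra bytes on top of the basic four-word frame, while the -1 entries mark formats that mangle_kernel_stack() below refuses to accept back from user space.
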
  92int fixup_exception(struct pt_regs *regs)
  93{
  94	const struct exception_table_entry *fixup;
  95	struct pt_regs *tregs;
  96
  97	/* Are we prepared to handle this kernel fault? */
  98	fixup = search_exception_tables(regs->pc);
  99	if (!fixup)
 100		return 0;
 101
 102	/* Create a new four word stack frame, discarding the old one. */
 103	regs->stkadj = frame_extra_sizes(regs->format);
 104	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
 105	tregs->vector = regs->vector;
 106	tregs->format = FORMAT;
 107	tregs->pc = fixup->fixup;
 108	tregs->sr = regs->sr;
 109
 110	return 1;
 111}
 112
 113static inline void push_cache (unsigned long vaddr)
 114{
 115	/*
 116	 * Using the old cache_push_v() was really a big waste.
 117	 *
 118	 * What we are trying to do is to flush 8 bytes to ram.
 119	 * Flushing 2 cache lines of 16 bytes is much cheaper than
 120	 * flushing 1 or 2 pages, as previously done in
 121	 * cache_push_v().
 122	 *                                                     Jes
 123	 */
 124	if (CPU_IS_040) {
 125		unsigned long temp;
 126
 127		__asm__ __volatile__ (".chip 68040\n\t"
 128				      "nop\n\t"
 129				      "ptestr (%1)\n\t"
 130				      "movec %%mmusr,%0\n\t"
 131				      ".chip 68k"
 132				      : "=r" (temp)
 133				      : "a" (vaddr));
 134
 135		temp &= PAGE_MASK;
 136		temp |= vaddr & ~PAGE_MASK;
 137
 138		__asm__ __volatile__ (".chip 68040\n\t"
 139				      "nop\n\t"
 140				      "cpushl %%bc,(%0)\n\t"
 141				      ".chip 68k"
 142				      : : "a" (temp));
 143	}
 144	else if (CPU_IS_060) {
 145		unsigned long temp;
 146		__asm__ __volatile__ (".chip 68060\n\t"
 147				      "plpar (%0)\n\t"
 148				      ".chip 68k"
 149				      : "=a" (temp)
 150				      : "0" (vaddr));
 151		__asm__ __volatile__ (".chip 68060\n\t"
 152				      "cpushl %%bc,(%0)\n\t"
 153				      ".chip 68k"
 154				      : : "a" (temp));
 155	} else if (!CPU_IS_COLDFIRE) {
 156		/*
 157		 * 68030/68020 have no writeback cache;
 158		 * still need to clear icache.
 159		 * Note that vaddr is guaranteed to be long word aligned.
 160		 */
 161		unsigned long temp;
 162		asm volatile ("movec %%cacr,%0" : "=r" (temp));
 163		temp += 4;
 164		asm volatile ("movec %0,%%caar\n\t"
 165			      "movec %1,%%cacr"
 166			      : : "r" (vaddr), "r" (temp));
 167		asm volatile ("movec %0,%%caar\n\t"
 168			      "movec %1,%%cacr"
 169			      : : "r" (vaddr + 4), "r" (temp));
 170	} else {
 171		/* CPU_IS_COLDFIRE */
 172#if defined(CONFIG_CACHE_COPYBACK)
 173		flush_cf_dcache(0, DCACHE_MAX_ADDR);
 174#endif
 175		/* Invalidate instruction cache for the pushed bytes */
 176		clear_cf_icache(vaddr, vaddr + 8);
 177	}
 178}
 179
 180static inline void adjustformat(struct pt_regs *regs)
 181{
 182}
 183
 184static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 185{
 186}
 187
 188#else /* CONFIG_MMU */
 189
 190void ret_from_user_signal(void);
 191void ret_from_user_rt_signal(void);
 192
 193static inline int frame_extra_sizes(int f)
 194{
 195	/* No frame size adjustments required on non-MMU CPUs */
 196	return 0;
 197}
 198
 199static inline void adjustformat(struct pt_regs *regs)
 200{
 201	/*
 202	 * set format byte to make stack appear modulo 4, which it will
 203	 * be when doing the rte
 204	 */
 205	regs->format = 0x4;
 206}
 207
 208static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 209{
 210	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
 211}
 212
 213static inline void push_cache(unsigned long vaddr)
 214{
 215}
 216
 217#endif /* CONFIG_MMU */
 218
 219/*
 220 * Do a signal return; undo the signal stack.
 221 *
 222 * Keep the return code on the stack quadword aligned!
 223 * That makes the cache flush below easier.
 224 */
 225
 226struct sigframe
 227{
 228	char __user *pretcode;
 229	int sig;
 230	int code;
 231	struct sigcontext __user *psc;
 232	char retcode[8];
 233	unsigned long extramask[_NSIG_WORDS-1];
 234	struct sigcontext sc;
 235};
 236
 237struct rt_sigframe
 238{
 239	char __user *pretcode;
 240	int sig;
 241	struct siginfo __user *pinfo;
 242	void __user *puc;
 243	char retcode[8];
 244	struct siginfo info;
 245	struct ucontext uc;
 246};
 247
 248#define FPCONTEXT_SIZE	216
 249#define uc_fpstate	uc_filler[0]
 250#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
 251#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
 252
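Put differently, the three aliases above carve ucontext's uc_filler area into the following regions (offsets in 32-bit longs; an illustrative summary, not text from the file):

uc_filler[0] .. uc_filler[53]    FPU frame image (FPCONTEXT_SIZE = 216 bytes)
uc_filler[54]                    saved exception format/vector word
uc_filler[55] ..                 extra exception-frame words, if any
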
 253#ifdef CONFIG_FPU
 254
 255static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
 256
 257static inline int restore_fpu_state(struct sigcontext *sc)
 258{
 259	int err = 1;
 260
 261	if (FPU_IS_EMU) {
 262	    /* restore registers */
 263	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
 264	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
 265	    return 0;
 266	}
 267
 268	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 269	    /* Verify the frame format.  */
 270	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 271		 (sc->sc_fpstate[0] != fpu_version))
 272		goto out;
 273	    if (CPU_IS_020_OR_030) {
 274		if (m68k_fputype & FPU_68881 &&
 275		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
 276		    goto out;
 277		if (m68k_fputype & FPU_68882 &&
 278		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
 279		    goto out;
 280	    } else if (CPU_IS_040) {
 281		if (!(sc->sc_fpstate[1] == 0x00 ||
 282                      sc->sc_fpstate[1] == 0x28 ||
 283                      sc->sc_fpstate[1] == 0x60))
 284		    goto out;
 285	    } else if (CPU_IS_060) {
 286		if (!(sc->sc_fpstate[3] == 0x00 ||
 287                      sc->sc_fpstate[3] == 0x60 ||
 288		      sc->sc_fpstate[3] == 0xe0))
 289		    goto out;
 290	    } else if (CPU_IS_COLDFIRE) {
 291		if (!(sc->sc_fpstate[0] == 0x00 ||
 292		      sc->sc_fpstate[0] == 0x05 ||
 293		      sc->sc_fpstate[0] == 0xe5))
 294		    goto out;
 295	    } else
 296		goto out;
 297
 298	    if (CPU_IS_COLDFIRE) {
 299		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
 300				  "fmovel %1,%%fpcr\n\t"
 301				  "fmovel %2,%%fpsr\n\t"
 302				  "fmovel %3,%%fpiar"
 303				  : /* no outputs */
 304				  : "m" (sc->sc_fpregs[0]),
 305				    "m" (sc->sc_fpcntl[0]),
 306				    "m" (sc->sc_fpcntl[1]),
 307				    "m" (sc->sc_fpcntl[2]));
 308	    } else {
 309		__asm__ volatile (".chip 68k/68881\n\t"
 310				  "fmovemx %0,%%fp0-%%fp1\n\t"
 311				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 312				  ".chip 68k"
 313				  : /* no outputs */
 314				  : "m" (*sc->sc_fpregs),
 315				    "m" (*sc->sc_fpcntl));
 316	    }
 317	}
 318
 319	if (CPU_IS_COLDFIRE) {
 320		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
 321	} else {
 322		__asm__ volatile (".chip 68k/68881\n\t"
 323				  "frestore %0\n\t"
 324				  ".chip 68k"
 325				  : : "m" (*sc->sc_fpstate));
 326	}
 327	err = 0;
 328
 329out:
 330	return err;
 331}
 332
 333static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 334{
 335	unsigned char fpstate[FPCONTEXT_SIZE];
 336	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 337	fpregset_t fpregs;
 338	int err = 1;
 339
 340	if (FPU_IS_EMU) {
 341		/* restore fpu control register */
 342		if (__copy_from_user(current->thread.fpcntl,
 343				uc->uc_mcontext.fpregs.f_fpcntl, 12))
 344			goto out;
 345		/* restore all other fpu register */
 346		if (__copy_from_user(current->thread.fp,
 347				uc->uc_mcontext.fpregs.f_fpregs, 96))
 348			goto out;
 349		return 0;
 350	}
 351
 352	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
 353		goto out;
 354	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 355		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 356			context_size = fpstate[1];
 357		/* Verify the frame format.  */
 358		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 359		     (fpstate[0] != fpu_version))
 360			goto out;
 361		if (CPU_IS_020_OR_030) {
 362			if (m68k_fputype & FPU_68881 &&
 363			    !(context_size == 0x18 || context_size == 0xb4))
 364				goto out;
 365			if (m68k_fputype & FPU_68882 &&
 366			    !(context_size == 0x38 || context_size == 0xd4))
 367				goto out;
 368		} else if (CPU_IS_040) {
 369			if (!(context_size == 0x00 ||
 370			      context_size == 0x28 ||
 371			      context_size == 0x60))
 372				goto out;
 373		} else if (CPU_IS_060) {
 374			if (!(fpstate[3] == 0x00 ||
 375			      fpstate[3] == 0x60 ||
 376			      fpstate[3] == 0xe0))
 377				goto out;
 378		} else if (CPU_IS_COLDFIRE) {
 379			if (!(fpstate[3] == 0x00 ||
 380			      fpstate[3] == 0x05 ||
 381			      fpstate[3] == 0xe5))
 382				goto out;
 383		} else
 384			goto out;
 385		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
 386				     sizeof(fpregs)))
 387			goto out;
 388
 389		if (CPU_IS_COLDFIRE) {
 390			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
 391					  "fmovel %1,%%fpcr\n\t"
 392					  "fmovel %2,%%fpsr\n\t"
 393					  "fmovel %3,%%fpiar"
 394					  : /* no outputs */
 395					  : "m" (fpregs.f_fpregs[0]),
 396					    "m" (fpregs.f_fpcntl[0]),
 397					    "m" (fpregs.f_fpcntl[1]),
 398					    "m" (fpregs.f_fpcntl[2]));
 399		} else {
 400			__asm__ volatile (".chip 68k/68881\n\t"
 401					  "fmovemx %0,%%fp0-%%fp7\n\t"
 402					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 403					  ".chip 68k"
 404					  : /* no outputs */
 405					  : "m" (*fpregs.f_fpregs),
 406					    "m" (*fpregs.f_fpcntl));
 407		}
 408	}
 409	if (context_size &&
 410	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
 411			     context_size))
 412		goto out;
 413
 414	if (CPU_IS_COLDFIRE) {
 415		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
 416	} else {
 417		__asm__ volatile (".chip 68k/68881\n\t"
 418				  "frestore %0\n\t"
 419				  ".chip 68k"
 420				  : : "m" (*fpstate));
 421	}
 422	err = 0;
 423
 424out:
 425	return err;
 426}
 427
 428/*
 429 * Set up a signal frame.
 430 */
 431static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 432{
 433	if (FPU_IS_EMU) {
 434		/* save registers */
 435		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
 436		memcpy(sc->sc_fpregs, current->thread.fp, 24);
 437		return;
 438	}
 439
 440	if (CPU_IS_COLDFIRE) {
 441		__asm__ volatile ("fsave %0"
 442				  : : "m" (*sc->sc_fpstate) : "memory");
 443	} else {
 444		__asm__ volatile (".chip 68k/68881\n\t"
 445				  "fsave %0\n\t"
 446				  ".chip 68k"
 447				  : : "m" (*sc->sc_fpstate) : "memory");
 448	}
 449
 450	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 451		fpu_version = sc->sc_fpstate[0];
 452		if (CPU_IS_020_OR_030 && !regs->stkadj &&
 453		    regs->vector >= (VEC_FPBRUC * 4) &&
 454		    regs->vector <= (VEC_FPNAN * 4)) {
 455			/* Clear pending exception in 68882 idle frame */
 456			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
 457				sc->sc_fpstate[0x38] |= 1 << 3;
 458		}
 459
 460		if (CPU_IS_COLDFIRE) {
 461			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
 462					  "fmovel %%fpcr,%1\n\t"
 463					  "fmovel %%fpsr,%2\n\t"
 464					  "fmovel %%fpiar,%3"
 465					  : "=m" (sc->sc_fpregs[0]),
 466					    "=m" (sc->sc_fpcntl[0]),
 467					    "=m" (sc->sc_fpcntl[1]),
 468					    "=m" (sc->sc_fpcntl[2])
 469					  : /* no inputs */
 470					  : "memory");
 471		} else {
 472			__asm__ volatile (".chip 68k/68881\n\t"
 473					  "fmovemx %%fp0-%%fp1,%0\n\t"
 474					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 475					  ".chip 68k"
 476					  : "=m" (*sc->sc_fpregs),
 477					    "=m" (*sc->sc_fpcntl)
 478					  : /* no inputs */
 479					  : "memory");
 480		}
 481	}
 482}
 483
 484static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 485{
 486	unsigned char fpstate[FPCONTEXT_SIZE];
 487	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 488	int err = 0;
 489
 490	if (FPU_IS_EMU) {
 491		/* save fpu control register */
 492		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
 493				current->thread.fpcntl, 12);
 494		/* save all other fpu register */
 495		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
 496				current->thread.fp, 96);
 497		return err;
 498	}
 499
 500	if (CPU_IS_COLDFIRE) {
 501		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
 502	} else {
 503		__asm__ volatile (".chip 68k/68881\n\t"
 504				  "fsave %0\n\t"
 505				  ".chip 68k"
 506				  : : "m" (*fpstate) : "memory");
 507	}
 508
 509	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
 510	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 511		fpregset_t fpregs;
 512		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 513			context_size = fpstate[1];
 514		fpu_version = fpstate[0];
 515		if (CPU_IS_020_OR_030 && !regs->stkadj &&
 516		    regs->vector >= (VEC_FPBRUC * 4) &&
 517		    regs->vector <= (VEC_FPNAN * 4)) {
 518			/* Clear pending exception in 68882 idle frame */
 519			if (*(unsigned short *) fpstate == 0x1f38)
 520				fpstate[0x38] |= 1 << 3;
 521		}
 522		if (CPU_IS_COLDFIRE) {
 523			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
 524					  "fmovel %%fpcr,%1\n\t"
 525					  "fmovel %%fpsr,%2\n\t"
 526					  "fmovel %%fpiar,%3"
 527					  : "=m" (fpregs.f_fpregs[0]),
 528					    "=m" (fpregs.f_fpcntl[0]),
 529					    "=m" (fpregs.f_fpcntl[1]),
 530					    "=m" (fpregs.f_fpcntl[2])
 531					  : /* no inputs */
 532					  : "memory");
 533		} else {
 534			__asm__ volatile (".chip 68k/68881\n\t"
 535					  "fmovemx %%fp0-%%fp7,%0\n\t"
 536					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 537					  ".chip 68k"
 538					  : "=m" (*fpregs.f_fpregs),
 539					    "=m" (*fpregs.f_fpcntl)
 540					  : /* no inputs */
 541					  : "memory");
 542		}
 543		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
 544				    sizeof(fpregs));
 545	}
 546	if (context_size)
 547		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
 548				    context_size);
 549	return err;
 550}
 551
 552#else /* CONFIG_FPU */
 553
 554/*
 555 * For the case with no FPU configured these all do nothing.
 556 */
 557static inline int restore_fpu_state(struct sigcontext *sc)
 558{
 559	return 0;
 560}
 561
 562static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 563{
 564	return 0;
 565}
 566
 567static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 568{
 569}
 570
 571static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 572{
 573	return 0;
 574}
 575
 576#endif /* CONFIG_FPU */
 577
 578static inline void siginfo_build_tests(void)
 579{
 580	/*
 581	 * This needs to be tested on m68k as it has a lesser
 582	 * alignment requirement than x86 and that can cause surprises.
 583	 */
 584
 585	/* This is part of the ABI and can never change in size: */
 586	BUILD_BUG_ON(sizeof(siginfo_t) != 128);
 587
 588	/* Ensure the known fields never change in location */
 589	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
 590	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
 591	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);
 592
 593	/* _kill */
 594	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
 595	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);
 596
 597	/* _timer */
 598	BUILD_BUG_ON(offsetof(siginfo_t, si_tid)     != 0x0c);
 599	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x10);
 600	BUILD_BUG_ON(offsetof(siginfo_t, si_value)   != 0x14);
 601
 602	/* _rt */
 603	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)   != 0x0c);
 604	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)   != 0x10);
 605	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14);
 606
 607	/* _sigchld */
 608	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)    != 0x0c);
 609	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)    != 0x10);
 610	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x14);
 611	BUILD_BUG_ON(offsetof(siginfo_t, si_utime)  != 0x18);
 612	BUILD_BUG_ON(offsetof(siginfo_t, si_stime)  != 0x1c);
 613
 614	/* _sigfault */
 615	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0c);
 616
 617	/* _sigfault._mcerr */
 618	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x10);
 619
 620	/* _sigfault._addr_bnd */
 621	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x12);
 622	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x16);
 623
 624	/* _sigfault._addr_pkey */
 625	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
 626
 627	/* _sigfault._perf */
 628	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
 629	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
 630	BUILD_BUG_ON(offsetof(siginfo_t, si_perf_flags) != 0x18);
 631
 632	/* _sigpoll */
 633	BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x0c);
 634	BUILD_BUG_ON(offsetof(siginfo_t, si_fd)     != 0x10);
 635
 636	/* _sigsys */
 637	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0c);
 638	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall)   != 0x10);
 639	BUILD_BUG_ON(offsetof(siginfo_t, si_arch)      != 0x14);
 640
 641	/* any new si_fields should be added here */
 642}
 643
 644static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
 645			       void __user *fp)
 646{
 647	int extra = frame_extra_sizes(formatvec >> 12);
 648	char buf[sizeof_field(struct frame, un)];
 649
 650	if (extra < 0) {
 651		/*
 652		 * user process trying to return with weird frame format
 653		 */
 654		pr_debug("user process returning with weird frame format\n");
 655		return -1;
 656	}
 657	if (extra && copy_from_user(buf, fp, extra))
 658		return -1;
 659	regs->format = formatvec >> 12;
 660	regs->vector = formatvec & 0xfff;
 661	if (extra) {
 662		void *p = (struct switch_stack *)regs - 1;
 663		struct frame *new = (void *)regs - extra;
 664		int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
 665
 666		memmove(p - extra, p, size);
 667		memcpy(p - extra + size, buf, extra);
 668		current->thread.esp0 = (unsigned long)&new->ptregs;
 669#ifdef CONFIG_M68040
 670		/* on 68040 complete pending writebacks if any */
 671		if (new->ptregs.format == 7) // bus error frame
 672			berr_040cleanup(new);
 673#endif
 674	}
 675	return extra;
 676}
 677
 678static inline int
 679restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
 680{
 681	int formatvec;
 682	struct sigcontext context;
 683
 684	siginfo_build_tests();
 685
 686	/* Always make any pending restarted system calls return -EINTR */
 687	current->restart_block.fn = do_no_restart_syscall;
 688
 689	/* get previous context */
 690	if (copy_from_user(&context, usc, sizeof(context)))
 691		return -1;
 692
 693	/* restore passed registers */
 694	regs->d0 = context.sc_d0;
 695	regs->d1 = context.sc_d1;
 696	regs->a0 = context.sc_a0;
 697	regs->a1 = context.sc_a1;
 698	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
 699	regs->pc = context.sc_pc;
 700	regs->orig_d0 = -1;		/* disable syscall checks */
 701	wrusp(context.sc_usp);
 702	formatvec = context.sc_formatvec;
 703
 704	if (restore_fpu_state(&context))
 705		return -1;
 706
 707	return mangle_kernel_stack(regs, formatvec, fp);
 708}
 709
 710static inline int
 711rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
 712		    struct ucontext __user *uc)
 713{
 714	int temp;
 715	greg_t __user *gregs = uc->uc_mcontext.gregs;
 716	unsigned long usp;
 717	int err;
 718
 719	/* Always make any pending restarted system calls return -EINTR */
 720	current->restart_block.fn = do_no_restart_syscall;
 721
 722	err = __get_user(temp, &uc->uc_mcontext.version);
 723	if (temp != MCONTEXT_VERSION)
 724		return -1;
 725	/* restore passed registers */
 726	err |= __get_user(regs->d0, &gregs[0]);
 727	err |= __get_user(regs->d1, &gregs[1]);
 728	err |= __get_user(regs->d2, &gregs[2]);
 729	err |= __get_user(regs->d3, &gregs[3]);
 730	err |= __get_user(regs->d4, &gregs[4]);
 731	err |= __get_user(regs->d5, &gregs[5]);
 732	err |= __get_user(sw->d6, &gregs[6]);
 733	err |= __get_user(sw->d7, &gregs[7]);
 734	err |= __get_user(regs->a0, &gregs[8]);
 735	err |= __get_user(regs->a1, &gregs[9]);
 736	err |= __get_user(regs->a2, &gregs[10]);
 737	err |= __get_user(sw->a3, &gregs[11]);
 738	err |= __get_user(sw->a4, &gregs[12]);
 739	err |= __get_user(sw->a5, &gregs[13]);
 740	err |= __get_user(sw->a6, &gregs[14]);
 741	err |= __get_user(usp, &gregs[15]);
 742	wrusp(usp);
 743	err |= __get_user(regs->pc, &gregs[16]);
 744	err |= __get_user(temp, &gregs[17]);
 745	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
 746	regs->orig_d0 = -1;		/* disable syscall checks */
 747	err |= __get_user(temp, &uc->uc_formatvec);
 748
 749	err |= rt_restore_fpu_state(uc);
 750	err |= restore_altstack(&uc->uc_stack);
 751
 752	if (err)
 753		return -1;
 754
 755	return mangle_kernel_stack(regs, temp, &uc->uc_extra);
 756}
 757
 758asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 759{
 760	unsigned long usp = rdusp();
 761	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 762	sigset_t set;
 763	int size;
 764
 765	if (!access_ok(frame, sizeof(*frame)))
 766		goto badframe;
 767	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
 768	    (_NSIG_WORDS > 1 &&
 769	     __copy_from_user(&set.sig[1], &frame->extramask,
 770			      sizeof(frame->extramask))))
 771		goto badframe;
 772
 773	set_current_blocked(&set);
 774
 775	size = restore_sigcontext(regs, &frame->sc, frame + 1);
 776	if (size < 0)
 777		goto badframe;
 778	return (void *)sw - size;
 779
 780badframe:
 781	force_sig(SIGSEGV);
 782	return sw;
 783}
 784
 785asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 786{
 787	unsigned long usp = rdusp();
 788	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 789	sigset_t set;
 790	int size;
 791
 792	if (!access_ok(frame, sizeof(*frame)))
 793		goto badframe;
 794	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 795		goto badframe;
 796
 797	set_current_blocked(&set);
 798
 799	size = rt_restore_ucontext(regs, sw, &frame->uc);
 800	if (size < 0)
 801		goto badframe;
 802	return (void *)sw - size;
 803
 804badframe:
 805	force_sig(SIGSEGV);
 806	return sw;
 807}
 808
 809static inline struct pt_regs *rte_regs(struct pt_regs *regs)
 810{
 811	return (void *)regs + regs->stkadj;
 812}
 813
 814static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 815			     unsigned long mask)
 816{
 817	struct pt_regs *tregs = rte_regs(regs);
 818	sc->sc_mask = mask;
 819	sc->sc_usp = rdusp();
 820	sc->sc_d0 = regs->d0;
 821	sc->sc_d1 = regs->d1;
 822	sc->sc_a0 = regs->a0;
 823	sc->sc_a1 = regs->a1;
 824	sc->sc_sr = tregs->sr;
 825	sc->sc_pc = tregs->pc;
 826	sc->sc_formatvec = tregs->format << 12 | tregs->vector;
 827	save_a5_state(sc, regs);
 828	save_fpu_state(sc, regs);
 829}
 830
 831static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 832{
 833	struct switch_stack *sw = (struct switch_stack *)regs - 1;
 834	struct pt_regs *tregs = rte_regs(regs);
 835	greg_t __user *gregs = uc->uc_mcontext.gregs;
 836	int err = 0;
 837
 838	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
 839	err |= __put_user(regs->d0, &gregs[0]);
 840	err |= __put_user(regs->d1, &gregs[1]);
 841	err |= __put_user(regs->d2, &gregs[2]);
 842	err |= __put_user(regs->d3, &gregs[3]);
 843	err |= __put_user(regs->d4, &gregs[4]);
 844	err |= __put_user(regs->d5, &gregs[5]);
 845	err |= __put_user(sw->d6, &gregs[6]);
 846	err |= __put_user(sw->d7, &gregs[7]);
 847	err |= __put_user(regs->a0, &gregs[8]);
 848	err |= __put_user(regs->a1, &gregs[9]);
 849	err |= __put_user(regs->a2, &gregs[10]);
 850	err |= __put_user(sw->a3, &gregs[11]);
 851	err |= __put_user(sw->a4, &gregs[12]);
 852	err |= __put_user(sw->a5, &gregs[13]);
 853	err |= __put_user(sw->a6, &gregs[14]);
 854	err |= __put_user(rdusp(), &gregs[15]);
 855	err |= __put_user(tregs->pc, &gregs[16]);
 856	err |= __put_user(tregs->sr, &gregs[17]);
 857	err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
 858	err |= rt_save_fpu_state(uc, regs);
 859	return err;
 860}
 861
 862static inline void __user *
 863get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
 864{
 865	unsigned long usp = sigsp(rdusp(), ksig);
 866	unsigned long gap = 0;
 867
 868	if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
 869		/* USP is unreliable so use worst-case value */
 870		gap = 256;
 871	}
 872
 873	return (void __user *)((usp - gap - frame_size) & -8UL);
 874}
 875
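A worked example of the rounding above, with illustrative numbers: for usp = 0xeffff7f2, no format-0xB gap and a 0x5c-byte frame, (0xeffff7f2 - 0x5c) & -8UL = 0xeffff790, so the frame always lands on the next 8-byte boundary below the (possibly sigaltstack-switched) user stack pointer.
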
 876static int setup_frame(struct ksignal *ksig, sigset_t *set,
 877			struct pt_regs *regs)
 878{
 879	struct sigframe __user *frame;
 880	struct pt_regs *tregs = rte_regs(regs);
 881	int fsize = frame_extra_sizes(tregs->format);
 882	struct sigcontext context;
 883	int err = 0, sig = ksig->sig;
 884
 885	if (fsize < 0) {
 886		pr_debug("setup_frame: Unknown frame format %#x\n",
 887			 tregs->format);
 888		return -EFAULT;
 889	}
 890
 891	frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
 892
 893	if (fsize)
 894		err |= copy_to_user (frame + 1, regs + 1, fsize);
 895
 896	err |= __put_user(sig, &frame->sig);
 897
 898	err |= __put_user(tregs->vector, &frame->code);
 899	err |= __put_user(&frame->sc, &frame->psc);
 900
 901	if (_NSIG_WORDS > 1)
 902		err |= copy_to_user(frame->extramask, &set->sig[1],
 903				    sizeof(frame->extramask));
 904
 905	setup_sigcontext(&context, regs, set->sig[0]);
 906	err |= copy_to_user (&frame->sc, &context, sizeof(context));
 907
 908	/* Set up to return from userspace.  */
 909#ifdef CONFIG_MMU
 910	err |= __put_user(frame->retcode, &frame->pretcode);
 911	/* moveq #,d0; trap #0 */
 912	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
 913			  (long __user *)(frame->retcode));
 914#else
 915	err |= __put_user((long) ret_from_user_signal,
 916			  (long __user *) &frame->pretcode);
 917#endif
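	/*
	 * Illustrative decode of the MMU-case constant above: the high
	 * word, 0x7000 + __NR_sigreturn, assembles to
	 * "moveq #__NR_sigreturn,%d0" and the low word 0x4e40 to "trap #0",
	 * so a handler that simply returns re-enters the kernel through
	 * do_sigreturn().
	 */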
 918
 919	if (err)
 920		return -EFAULT;
 921
 922	push_cache ((unsigned long) &frame->retcode);
 923
 924	/*
 925	 * This is subtle; if we build more than one sigframe, all but the
 926	 * first one will see frame format 0 and have fsize == 0, so we won't
 927	 * screw stkadj.
 928	 */
 929	if (fsize) {
 930		regs->stkadj = fsize;
 931		tregs = rte_regs(regs);
 932		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
 933		tregs->vector = 0;
 934		tregs->format = 0;
 935		tregs->sr = regs->sr;
 936	}
 937
 938	/*
 939	 * Set up registers for signal handler.  All the state we are about
 940	 * to destroy is successfully copied to sigframe.
 941	 */
 942	wrusp ((unsigned long) frame);
 943	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 944	adjustformat(regs);
 945
 946	return 0;
 947}
 948
 949static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 950			   struct pt_regs *regs)
 951{
 952	struct rt_sigframe __user *frame;
 953	struct pt_regs *tregs = rte_regs(regs);
 954	int fsize = frame_extra_sizes(tregs->format);
 955	int err = 0, sig = ksig->sig;
 956
 957	if (fsize < 0) {
 958		pr_debug("setup_frame: Unknown frame format %#x\n",
 959			 regs->format);
 960		return -EFAULT;
 961	}
 962
 963	frame = get_sigframe(ksig, tregs, sizeof(*frame));
 964
 965	if (fsize)
 966		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
 967
 968	err |= __put_user(sig, &frame->sig);
 969	err |= __put_user(&frame->info, &frame->pinfo);
 970	err |= __put_user(&frame->uc, &frame->puc);
 971	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 972
 973	/* Create the ucontext.  */
 974	err |= __put_user(0, &frame->uc.uc_flags);
 975	err |= __put_user(NULL, &frame->uc.uc_link);
 976	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
 977	err |= rt_setup_ucontext(&frame->uc, regs);
 978	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
 979
 980	/* Set up to return from userspace.  */
 981#ifdef CONFIG_MMU
 982	err |= __put_user(frame->retcode, &frame->pretcode);
 983#ifdef __mcoldfire__
 984	/* movel #__NR_rt_sigreturn,d0; trap #0 */
 985	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
 986	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
 987			  (long __user *)(frame->retcode + 4));
 988#else
 989	/* moveq #,d0; notb d0; trap #0 */
 990	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
 991			  (long __user *)(frame->retcode + 0));
 992	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
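	/*
	 * Illustrative note: __NR_rt_sigreturn is too large for moveq's
	 * signed 8-bit immediate, so the value is stored XORed with 0xff
	 * and the "notb %d0" above undoes that at run time.
	 */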
 993#endif
 994#else
 995	err |= __put_user((long) ret_from_user_rt_signal,
 996			  (long __user *) &frame->pretcode);
 997#endif /* CONFIG_MMU */
 998
 999	if (err)
1000		return -EFAULT;
1001
1002	push_cache ((unsigned long) &frame->retcode);
1003
1004	/*
1005	 * This is subtle; if we build more than one sigframe, all but the
1006	 * first one will see frame format 0 and have fsize == 0, so we won't
1007	 * screw stkadj.
1008	 */
1009	if (fsize) {
1010		regs->stkadj = fsize;
1011		tregs = rte_regs(regs);
1012		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
1013		tregs->vector = 0;
1014		tregs->format = 0;
1015		tregs->sr = regs->sr;
1016	}
1017
1018	/*
1019	 * Set up registers for signal handler.  All the state we are about
1020	 * to destroy is successfully copied to sigframe.
1021	 */
1022	wrusp ((unsigned long) frame);
1023	tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
1024	adjustformat(regs);
1025	return 0;
1026}
1027
1028static inline void
1029handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
1030{
1031	switch (regs->d0) {
1032	case -ERESTARTNOHAND:
1033		if (!has_handler)
1034			goto do_restart;
1035		regs->d0 = -EINTR;
1036		break;
1037
1038	case -ERESTART_RESTARTBLOCK:
1039		if (!has_handler) {
1040			regs->d0 = __NR_restart_syscall;
1041			regs->pc -= 2;
1042			break;
1043		}
1044		regs->d0 = -EINTR;
1045		break;
1046
1047	case -ERESTARTSYS:
1048		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
1049			regs->d0 = -EINTR;
1050			break;
1051		}
1052		fallthrough;
1053	case -ERESTARTNOINTR:
1054	do_restart:
1055		regs->d0 = regs->orig_d0;
1056		regs->pc -= 2;
1057		break;
1058	}
1059}
1060
1061/*
1062 * OK, we're invoking a handler
1063 */
1064static void
1065handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1066{
1067	sigset_t *oldset = sigmask_to_save();
1068	int err;
1069	/* are we from a system call? */
1070	if (regs->orig_d0 >= 0)
1071		/* If so, check system call restarting.. */
1072		handle_restart(regs, &ksig->ka, 1);
1073
1074	/* set up the stack frame */
1075	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1076		err = setup_rt_frame(ksig, oldset, regs);
1077	else
1078		err = setup_frame(ksig, oldset, regs);
1079
1080	signal_setup_done(err, ksig, 0);
1081
1082	if (test_thread_flag(TIF_DELAYED_TRACE)) {
1083		regs->sr &= ~0x8000;
1084		send_sig(SIGTRAP, current, 1);
1085	}
1086}
1087
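From user space, the choice between setup_rt_frame() and setup_frame() above is made purely by SA_SIGINFO; a minimal illustrative program (the handler body and the choice of SIGUSR1 are arbitrary, not taken from the kernel sources):

#include <signal.h>
#include <stdio.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	/* ucontext points at the ucontext embedded in the rt_sigframe */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	puts("back from the handler via rt_sigreturn");
	return 0;
}

Without SA_SIGINFO the same delivery would instead go through setup_frame() and return via the legacy sigreturn trampoline.
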
1088/*
1089 * Note that 'init' is a special process: it doesn't get signals it doesn't
1090 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1091 * mistake.
1092 */
1093static void do_signal(struct pt_regs *regs)
1094{
1095	struct ksignal ksig;
1096
1097	current->thread.esp0 = (unsigned long) regs;
1098
1099	if (get_signal(&ksig)) {
1100		/* Whee!  Actually deliver the signal.  */
1101		handle_signal(&ksig, regs);
1102		return;
1103	}
1104
1105	/* Did we come from a system call? */
1106	if (regs->orig_d0 >= 0)
1107		/* Restart the system call - no handlers present */
1108		handle_restart(regs, NULL, 0);
1109
1110	/* If there's no signal to deliver, we just restore the saved mask.  */
1111	restore_saved_sigmask();
1112}
1113
1114asmlinkage void do_notify_resume(struct pt_regs *regs)
1115{
1116	if (test_thread_flag(TIF_NOTIFY_SIGNAL) ||
1117	    test_thread_flag(TIF_SIGPENDING))
1118		do_signal(regs);
1119
1120	if (test_thread_flag(TIF_NOTIFY_RESUME))
1121		resume_user_mode_work(regs);
1122}
linux/arch/m68k/kernel/signal.c (v4.6)
   1/*
   2 *  linux/arch/m68k/kernel/signal.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *
   6 * This file is subject to the terms and conditions of the GNU General Public
   7 * License.  See the file COPYING in the main directory of this archive
   8 * for more details.
   9 */
  10
  11/*
  12 * Linux/m68k support by Hamish Macdonald
  13 *
  14 * 68060 fixes by Jesper Skov
  15 *
  16 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
  17 *
  18 * mathemu support by Roman Zippel
  19 *  (Note: fpstate in the signal context is completely ignored for the emulator
  20 *         and the internal floating point format is put on stack)
  21 */
  22
  23/*
  24 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
  25 * Atari :-) Current limitation: Only one sigstack can be active at one time.
  26 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
  27 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
  28 * signal handlers!
  29 */
  30
  31#include <linux/sched.h>
  32#include <linux/mm.h>
  33#include <linux/kernel.h>
  34#include <linux/signal.h>
  35#include <linux/syscalls.h>
  36#include <linux/errno.h>
  37#include <linux/wait.h>
  38#include <linux/ptrace.h>
  39#include <linux/unistd.h>
  40#include <linux/stddef.h>
  41#include <linux/highuid.h>
  42#include <linux/personality.h>
  43#include <linux/tty.h>
  44#include <linux/binfmts.h>
  45#include <linux/module.h>
  46#include <linux/tracehook.h>
  47
  48#include <asm/setup.h>
  49#include <asm/uaccess.h>
  50#include <asm/pgtable.h>
  51#include <asm/traps.h>
  52#include <asm/ucontext.h>
  53#include <asm/cacheflush.h>
  54
  55#ifdef CONFIG_MMU
  56
  57/*
  58 * Handle the slight differences in classic 68k and ColdFire trap frames.
  59 */
  60#ifdef CONFIG_COLDFIRE
  61#define	FORMAT		4
  62#define	FMT4SIZE	0
  63#else
  64#define	FORMAT		0
  65#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
  66#endif
  67
  68static const int frame_size_change[16] = {
  69  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
  70  [2]	= sizeof(((struct frame *)0)->un.fmt2),
  71  [3]	= sizeof(((struct frame *)0)->un.fmt3),
  72  [4]	= FMT4SIZE,
  73  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
  74  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
  75  [7]	= sizeof(((struct frame *)0)->un.fmt7),
  76  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
  77  [9]	= sizeof(((struct frame *)0)->un.fmt9),
  78  [10]	= sizeof(((struct frame *)0)->un.fmta),
  79  [11]	= sizeof(((struct frame *)0)->un.fmtb),
  80  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
  81  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
  82  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
  83  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
  84};
  85
  86static inline int frame_extra_sizes(int f)
  87{
  88	return frame_size_change[f];
  89}
  90
  91int handle_kernel_fault(struct pt_regs *regs)
  92{
  93	const struct exception_table_entry *fixup;
  94	struct pt_regs *tregs;
  95
  96	/* Are we prepared to handle this kernel fault? */
  97	fixup = search_exception_tables(regs->pc);
  98	if (!fixup)
  99		return 0;
 100
 101	/* Create a new four word stack frame, discarding the old one. */
 102	regs->stkadj = frame_extra_sizes(regs->format);
 103	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
 104	tregs->vector = regs->vector;
 105	tregs->format = FORMAT;
 106	tregs->pc = fixup->fixup;
 107	tregs->sr = regs->sr;
 108
 109	return 1;
 110}
 111
 112void ptrace_signal_deliver(void)
 113{
 114	struct pt_regs *regs = signal_pt_regs();
 115	if (regs->orig_d0 < 0)
 116		return;
 117	switch (regs->d0) {
 118	case -ERESTARTNOHAND:
 119	case -ERESTARTSYS:
 120	case -ERESTARTNOINTR:
 121		regs->d0 = regs->orig_d0;
 122		regs->orig_d0 = -1;
 123		regs->pc -= 2;
 124		break;
 125	}
 126}
 127
 128static inline void push_cache (unsigned long vaddr)
 129{
 130	/*
 131	 * Using the old cache_push_v() was really a big waste.
 132	 *
 133	 * What we are trying to do is to flush 8 bytes to ram.
 134	 * Flushing 2 cache lines of 16 bytes is much cheaper than
 135	 * flushing 1 or 2 pages, as previously done in
 136	 * cache_push_v().
 137	 *                                                     Jes
 138	 */
 139	if (CPU_IS_040) {
 140		unsigned long temp;
 141
 142		__asm__ __volatile__ (".chip 68040\n\t"
 143				      "nop\n\t"
 144				      "ptestr (%1)\n\t"
 145				      "movec %%mmusr,%0\n\t"
 146				      ".chip 68k"
 147				      : "=r" (temp)
 148				      : "a" (vaddr));
 149
 150		temp &= PAGE_MASK;
 151		temp |= vaddr & ~PAGE_MASK;
 152
 153		__asm__ __volatile__ (".chip 68040\n\t"
 154				      "nop\n\t"
 155				      "cpushl %%bc,(%0)\n\t"
 156				      ".chip 68k"
 157				      : : "a" (temp));
 158	}
 159	else if (CPU_IS_060) {
 160		unsigned long temp;
 161		__asm__ __volatile__ (".chip 68060\n\t"
 162				      "plpar (%0)\n\t"
 163				      ".chip 68k"
 164				      : "=a" (temp)
 165				      : "0" (vaddr));
 166		__asm__ __volatile__ (".chip 68060\n\t"
 167				      "cpushl %%bc,(%0)\n\t"
 168				      ".chip 68k"
 169				      : : "a" (temp));
 170	} else if (!CPU_IS_COLDFIRE) {
 171		/*
 172		 * 68030/68020 have no writeback cache;
 173		 * still need to clear icache.
 174		 * Note that vaddr is guaranteed to be long word aligned.
 175		 */
 176		unsigned long temp;
 177		asm volatile ("movec %%cacr,%0" : "=r" (temp));
 178		temp += 4;
 179		asm volatile ("movec %0,%%caar\n\t"
 180			      "movec %1,%%cacr"
 181			      : : "r" (vaddr), "r" (temp));
 182		asm volatile ("movec %0,%%caar\n\t"
 183			      "movec %1,%%cacr"
 184			      : : "r" (vaddr + 4), "r" (temp));
 185	} else {
 186		/* CPU_IS_COLDFIRE */
 187#if defined(CONFIG_CACHE_COPYBACK)
 188		flush_cf_dcache(0, DCACHE_MAX_ADDR);
 189#endif
 190		/* Invalidate instruction cache for the pushed bytes */
 191		clear_cf_icache(vaddr, vaddr + 8);
 192	}
 193}
 194
 195static inline void adjustformat(struct pt_regs *regs)
 196{
 197}
 198
 199static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 200{
 201}
 202
 203#else /* CONFIG_MMU */
 204
 205void ret_from_user_signal(void);
 206void ret_from_user_rt_signal(void);
 207
 208static inline int frame_extra_sizes(int f)
 209{
 210	/* No frame size adjustments required on non-MMU CPUs */
 211	return 0;
 212}
 213
 214static inline void adjustformat(struct pt_regs *regs)
 215{
 216	((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
 217	/*
 218	 * set format byte to make stack appear modulo 4, which it will
 219	 * be when doing the rte
 220	 */
 221	regs->format = 0x4;
 222}
 223
 224static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
 225{
 226	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
 227}
 228
 229static inline void push_cache(unsigned long vaddr)
 230{
 231}
 232
 233#endif /* CONFIG_MMU */
 234
 235/*
 236 * Do a signal return; undo the signal stack.
 237 *
 238 * Keep the return code on the stack quadword aligned!
 239 * That makes the cache flush below easier.
 240 */
 241
 242struct sigframe
 243{
 244	char __user *pretcode;
 245	int sig;
 246	int code;
 247	struct sigcontext __user *psc;
 248	char retcode[8];
 249	unsigned long extramask[_NSIG_WORDS-1];
 250	struct sigcontext sc;
 251};
 252
 253struct rt_sigframe
 254{
 255	char __user *pretcode;
 256	int sig;
 257	struct siginfo __user *pinfo;
 258	void __user *puc;
 259	char retcode[8];
 260	struct siginfo info;
 261	struct ucontext uc;
 262};
 263
 264#define FPCONTEXT_SIZE	216
 265#define uc_fpstate	uc_filler[0]
 266#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
 267#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
 268
 269#ifdef CONFIG_FPU
 270
 271static unsigned char fpu_version;	/* version number of fpu, set by setup_frame */
 272
 273static inline int restore_fpu_state(struct sigcontext *sc)
 274{
 275	int err = 1;
 276
 277	if (FPU_IS_EMU) {
 278	    /* restore registers */
 279	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
 280	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
 281	    return 0;
 282	}
 283
 284	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 285	    /* Verify the frame format.  */
 286	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 287		 (sc->sc_fpstate[0] != fpu_version))
 288		goto out;
 289	    if (CPU_IS_020_OR_030) {
 290		if (m68k_fputype & FPU_68881 &&
 291		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
 292		    goto out;
 293		if (m68k_fputype & FPU_68882 &&
 294		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
 295		    goto out;
 296	    } else if (CPU_IS_040) {
 297		if (!(sc->sc_fpstate[1] == 0x00 ||
 298                      sc->sc_fpstate[1] == 0x28 ||
 299                      sc->sc_fpstate[1] == 0x60))
 300		    goto out;
 301	    } else if (CPU_IS_060) {
 302		if (!(sc->sc_fpstate[3] == 0x00 ||
 303                      sc->sc_fpstate[3] == 0x60 ||
 304		      sc->sc_fpstate[3] == 0xe0))
 305		    goto out;
 306	    } else if (CPU_IS_COLDFIRE) {
 307		if (!(sc->sc_fpstate[0] == 0x00 ||
 308		      sc->sc_fpstate[0] == 0x05 ||
 309		      sc->sc_fpstate[0] == 0xe5))
 310		    goto out;
 311	    } else
 312		goto out;
 313
 314	    if (CPU_IS_COLDFIRE) {
 315		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
 316				  "fmovel %1,%%fpcr\n\t"
 317				  "fmovel %2,%%fpsr\n\t"
 318				  "fmovel %3,%%fpiar"
 319				  : /* no outputs */
 320				  : "m" (sc->sc_fpregs[0]),
 321				    "m" (sc->sc_fpcntl[0]),
 322				    "m" (sc->sc_fpcntl[1]),
 323				    "m" (sc->sc_fpcntl[2]));
 324	    } else {
 325		__asm__ volatile (".chip 68k/68881\n\t"
 326				  "fmovemx %0,%%fp0-%%fp1\n\t"
 327				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 328				  ".chip 68k"
 329				  : /* no outputs */
 330				  : "m" (*sc->sc_fpregs),
 331				    "m" (*sc->sc_fpcntl));
 332	    }
 333	}
 334
 335	if (CPU_IS_COLDFIRE) {
 336		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
 337	} else {
 338		__asm__ volatile (".chip 68k/68881\n\t"
 339				  "frestore %0\n\t"
 340				  ".chip 68k"
 341				  : : "m" (*sc->sc_fpstate));
 342	}
 343	err = 0;
 344
 345out:
 346	return err;
 347}
 348
 349static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 350{
 351	unsigned char fpstate[FPCONTEXT_SIZE];
 352	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 353	fpregset_t fpregs;
 354	int err = 1;
 355
 356	if (FPU_IS_EMU) {
 357		/* restore fpu control register */
 358		if (__copy_from_user(current->thread.fpcntl,
 359				uc->uc_mcontext.fpregs.f_fpcntl, 12))
 360			goto out;
 361		/* restore all other fpu register */
 362		if (__copy_from_user(current->thread.fp,
 363				uc->uc_mcontext.fpregs.f_fpregs, 96))
 364			goto out;
 365		return 0;
 366	}
 367
 368	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
 369		goto out;
 370	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 371		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 372			context_size = fpstate[1];
 373		/* Verify the frame format.  */
 374		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
 375		     (fpstate[0] != fpu_version))
 376			goto out;
 377		if (CPU_IS_020_OR_030) {
 378			if (m68k_fputype & FPU_68881 &&
 379			    !(context_size == 0x18 || context_size == 0xb4))
 380				goto out;
 381			if (m68k_fputype & FPU_68882 &&
 382			    !(context_size == 0x38 || context_size == 0xd4))
 383				goto out;
 384		} else if (CPU_IS_040) {
 385			if (!(context_size == 0x00 ||
 386			      context_size == 0x28 ||
 387			      context_size == 0x60))
 388				goto out;
 389		} else if (CPU_IS_060) {
 390			if (!(fpstate[3] == 0x00 ||
 391			      fpstate[3] == 0x60 ||
 392			      fpstate[3] == 0xe0))
 393				goto out;
 394		} else if (CPU_IS_COLDFIRE) {
 395			if (!(fpstate[3] == 0x00 ||
 396			      fpstate[3] == 0x05 ||
 397			      fpstate[3] == 0xe5))
 398				goto out;
 399		} else
 400			goto out;
 401		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
 402				     sizeof(fpregs)))
 403			goto out;
 404
 405		if (CPU_IS_COLDFIRE) {
 406			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
 407					  "fmovel %1,%%fpcr\n\t"
 408					  "fmovel %2,%%fpsr\n\t"
 409					  "fmovel %3,%%fpiar"
 410					  : /* no outputs */
 411					  : "m" (fpregs.f_fpregs[0]),
 412					    "m" (fpregs.f_fpcntl[0]),
 413					    "m" (fpregs.f_fpcntl[1]),
 414					    "m" (fpregs.f_fpcntl[2]));
 415		} else {
 416			__asm__ volatile (".chip 68k/68881\n\t"
 417					  "fmovemx %0,%%fp0-%%fp7\n\t"
 418					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
 419					  ".chip 68k"
 420					  : /* no outputs */
 421					  : "m" (*fpregs.f_fpregs),
 422					    "m" (*fpregs.f_fpcntl));
 423		}
 424	}
 425	if (context_size &&
 426	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
 427			     context_size))
 428		goto out;
 429
 430	if (CPU_IS_COLDFIRE) {
 431		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
 432	} else {
 433		__asm__ volatile (".chip 68k/68881\n\t"
 434				  "frestore %0\n\t"
 435				  ".chip 68k"
 436				  : : "m" (*fpstate));
 437	}
 438	err = 0;
 439
 440out:
 441	return err;
 442}
 443
 444/*
 445 * Set up a signal frame.
 446 */
 447static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 448{
 449	if (FPU_IS_EMU) {
 450		/* save registers */
 451		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
 452		memcpy(sc->sc_fpregs, current->thread.fp, 24);
 453		return;
 454	}
 455
 456	if (CPU_IS_COLDFIRE) {
 457		__asm__ volatile ("fsave %0"
 458				  : : "m" (*sc->sc_fpstate) : "memory");
 459	} else {
 460		__asm__ volatile (".chip 68k/68881\n\t"
 461				  "fsave %0\n\t"
 462				  ".chip 68k"
 463				  : : "m" (*sc->sc_fpstate) : "memory");
 464	}
 465
 466	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
 467		fpu_version = sc->sc_fpstate[0];
 468		if (CPU_IS_020_OR_030 &&
 469		    regs->vector >= (VEC_FPBRUC * 4) &&
 470		    regs->vector <= (VEC_FPNAN * 4)) {
 471			/* Clear pending exception in 68882 idle frame */
 472			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
 473				sc->sc_fpstate[0x38] |= 1 << 3;
 474		}
 475
 476		if (CPU_IS_COLDFIRE) {
 477			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
 478					  "fmovel %%fpcr,%1\n\t"
 479					  "fmovel %%fpsr,%2\n\t"
 480					  "fmovel %%fpiar,%3"
 481					  : "=m" (sc->sc_fpregs[0]),
 482					    "=m" (sc->sc_fpcntl[0]),
 483					    "=m" (sc->sc_fpcntl[1]),
 484					    "=m" (sc->sc_fpcntl[2])
 485					  : /* no inputs */
 486					  : "memory");
 487		} else {
 488			__asm__ volatile (".chip 68k/68881\n\t"
 489					  "fmovemx %%fp0-%%fp1,%0\n\t"
 490					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 491					  ".chip 68k"
 492					  : "=m" (*sc->sc_fpregs),
 493					    "=m" (*sc->sc_fpcntl)
 494					  : /* no inputs */
 495					  : "memory");
 496		}
 497	}
 498}
 499
 500static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 501{
 502	unsigned char fpstate[FPCONTEXT_SIZE];
 503	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
 504	int err = 0;
 505
 506	if (FPU_IS_EMU) {
 507		/* save fpu control register */
 508		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
 509				current->thread.fpcntl, 12);
 510		/* save all other fpu register */
 511		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
 512				current->thread.fp, 96);
 513		return err;
 514	}
 515
 516	if (CPU_IS_COLDFIRE) {
 517		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
 518	} else {
 519		__asm__ volatile (".chip 68k/68881\n\t"
 520				  "fsave %0\n\t"
 521				  ".chip 68k"
 522				  : : "m" (*fpstate) : "memory");
 523	}
 524
 525	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
 526	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
 527		fpregset_t fpregs;
 528		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
 529			context_size = fpstate[1];
 530		fpu_version = fpstate[0];
 531		if (CPU_IS_020_OR_030 &&
 532		    regs->vector >= (VEC_FPBRUC * 4) &&
 533		    regs->vector <= (VEC_FPNAN * 4)) {
 534			/* Clear pending exception in 68882 idle frame */
 535			if (*(unsigned short *) fpstate == 0x1f38)
 536				fpstate[0x38] |= 1 << 3;
 537		}
 538		if (CPU_IS_COLDFIRE) {
 539			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
 540					  "fmovel %%fpcr,%1\n\t"
 541					  "fmovel %%fpsr,%2\n\t"
 542					  "fmovel %%fpiar,%3"
 543					  : "=m" (fpregs.f_fpregs[0]),
 544					    "=m" (fpregs.f_fpcntl[0]),
 545					    "=m" (fpregs.f_fpcntl[1]),
 546					    "=m" (fpregs.f_fpcntl[2])
 547					  : /* no inputs */
 548					  : "memory");
 549		} else {
 550			__asm__ volatile (".chip 68k/68881\n\t"
 551					  "fmovemx %%fp0-%%fp7,%0\n\t"
 552					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
 553					  ".chip 68k"
 554					  : "=m" (*fpregs.f_fpregs),
 555					    "=m" (*fpregs.f_fpcntl)
 556					  : /* no inputs */
 557					  : "memory");
 558		}
 559		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
 560				    sizeof(fpregs));
 561	}
 562	if (context_size)
 563		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
 564				    context_size);
 565	return err;
 566}
 567
 568#else /* CONFIG_FPU */
 569
 570/*
 571 * For the case with no FPU configured these all do nothing.
 572 */
 573static inline int restore_fpu_state(struct sigcontext *sc)
 574{
 575	return 0;
 576}
 577
 578static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 579{
 580	return 0;
 581}
 582
 583static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 584{
 585}
 586
 587static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 588{
 589	return 0;
 590}
 591
 592#endif /* CONFIG_FPU */
 593
 594static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
 595			       void __user *fp)
 596{
 597	int fsize = frame_extra_sizes(formatvec >> 12);
 598	if (fsize < 0) {
 599		/*
 600		 * user process trying to return with weird frame format
 601		 */
 602#ifdef DEBUG
 603		printk("user process returning with weird frame format\n");
 604#endif
 605		return 1;
 606	}
 607	if (!fsize) {
 608		regs->format = formatvec >> 12;
 609		regs->vector = formatvec & 0xfff;
 610	} else {
 611		struct switch_stack *sw = (struct switch_stack *)regs - 1;
 612		unsigned long buf[fsize / 2]; /* yes, twice as much */
 613
 614		/* that'll make sure that expansion won't crap over data */
 615		if (copy_from_user(buf + fsize / 4, fp, fsize))
 616			return 1;
 617
 618		/* point of no return */
 619		regs->format = formatvec >> 12;
 620		regs->vector = formatvec & 0xfff;
 621#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
 622		__asm__ __volatile__ (
 623#ifdef CONFIG_COLDFIRE
 624			 "   movel %0,%/sp\n\t"
 625			 "   bra ret_from_signal\n"
 626#else
 627			 "   movel %0,%/a0\n\t"
 628			 "   subl %1,%/a0\n\t"     /* make room on stack */
 629			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
 630			 /* move switch_stack and pt_regs */
 631			 "1: movel %0@+,%/a0@+\n\t"
 632			 "   dbra %2,1b\n\t"
 633			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
 634			 "   lsrl  #2,%1\n\t"
 635			 "   subql #1,%1\n\t"
 636			 /* copy to the gap we'd made */
 637			 "2: movel %4@+,%/a0@+\n\t"
 638			 "   dbra %1,2b\n\t"
 639			 "   bral ret_from_signal\n"
 640#endif
 641			 : /* no outputs, it doesn't ever return */
 642			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
 643			   "n" (frame_offset), "a" (buf + fsize/4)
 644			 : "a0");
 645#undef frame_offset
 646	}
 647	return 0;
 648}
 649
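Compared with the v6.8 mangle_kernel_stack() above, this older version does not return the extra frame size to do_sigreturn(); when the frame carries extra words it widens the kernel stack in place and branches straight to ret_from_signal from inline assembly, never returning to its caller in that case.
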
 650static inline int
 651restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
 652{
 653	int formatvec;
 654	struct sigcontext context;
 655	int err = 0;
 656
 657	/* Always make any pending restarted system calls return -EINTR */
 658	current->restart_block.fn = do_no_restart_syscall;
 659
 660	/* get previous context */
 661	if (copy_from_user(&context, usc, sizeof(context)))
 662		goto badframe;
 663
 664	/* restore passed registers */
 665	regs->d0 = context.sc_d0;
 666	regs->d1 = context.sc_d1;
 667	regs->a0 = context.sc_a0;
 668	regs->a1 = context.sc_a1;
 669	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
 670	regs->pc = context.sc_pc;
 671	regs->orig_d0 = -1;		/* disable syscall checks */
 672	wrusp(context.sc_usp);
 673	formatvec = context.sc_formatvec;
 674
 675	err = restore_fpu_state(&context);
 676
 677	if (err || mangle_kernel_stack(regs, formatvec, fp))
 678		goto badframe;
 679
 680	return 0;
 681
 682badframe:
 683	return 1;
 684}
 685
 686static inline int
 687rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
 688		    struct ucontext __user *uc)
 689{
 690	int temp;
 691	greg_t __user *gregs = uc->uc_mcontext.gregs;
 692	unsigned long usp;
 693	int err;
 694
 695	/* Always make any pending restarted system calls return -EINTR */
 696	current->restart_block.fn = do_no_restart_syscall;
 697
 698	err = __get_user(temp, &uc->uc_mcontext.version);
 699	if (temp != MCONTEXT_VERSION)
 700		goto badframe;
 701	/* restore passed registers */
 702	err |= __get_user(regs->d0, &gregs[0]);
 703	err |= __get_user(regs->d1, &gregs[1]);
 704	err |= __get_user(regs->d2, &gregs[2]);
 705	err |= __get_user(regs->d3, &gregs[3]);
 706	err |= __get_user(regs->d4, &gregs[4]);
 707	err |= __get_user(regs->d5, &gregs[5]);
 708	err |= __get_user(sw->d6, &gregs[6]);
 709	err |= __get_user(sw->d7, &gregs[7]);
 710	err |= __get_user(regs->a0, &gregs[8]);
 711	err |= __get_user(regs->a1, &gregs[9]);
 712	err |= __get_user(regs->a2, &gregs[10]);
 713	err |= __get_user(sw->a3, &gregs[11]);
 714	err |= __get_user(sw->a4, &gregs[12]);
 715	err |= __get_user(sw->a5, &gregs[13]);
 716	err |= __get_user(sw->a6, &gregs[14]);
 717	err |= __get_user(usp, &gregs[15]);
 718	wrusp(usp);
 719	err |= __get_user(regs->pc, &gregs[16]);
 720	err |= __get_user(temp, &gregs[17]);
 721	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
 722	regs->orig_d0 = -1;		/* disable syscall checks */
 723	err |= __get_user(temp, &uc->uc_formatvec);
 724
 725	err |= rt_restore_fpu_state(uc);
 726	err |= restore_altstack(&uc->uc_stack);
 727
 728	if (err)
 729		goto badframe;
 730
 731	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
 732		goto badframe;
 733
 734	return 0;
 735
 736badframe:
 737	return 1;
 738}
 739
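/*
 * The signal trampoline enters the kernel with "trap #0" after the
 * handler has returned via rts, which already popped the pretcode
 * pointer off the user stack; the sigframe therefore starts 4 bytes
 * below the current usp.
 */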
 740asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 741{
 742	unsigned long usp = rdusp();
 743	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
 744	sigset_t set;
 745
 746	if (!access_ok(frame, sizeof(*frame)))
 747		goto badframe;
 748	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
 749	    (_NSIG_WORDS > 1 &&
 750	     __copy_from_user(&set.sig[1], &frame->extramask,
 751			      sizeof(frame->extramask))))
 752		goto badframe;
 753
 754	set_current_blocked(&set);
 755
 756	if (restore_sigcontext(regs, &frame->sc, frame + 1))
 757		goto badframe;
 758	return regs->d0;
 759
 760badframe:
 761	force_sig(SIGSEGV);
 762	return 0;
 763}
 764
 765asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
 766{
 767	unsigned long usp = rdusp();
 768	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
 769	sigset_t set;
 770
 771	if (!access_ok(frame, sizeof(*frame)))
 772		goto badframe;
 773	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
 774		goto badframe;
 775
 776	set_current_blocked(&set);
 777
 778	if (rt_restore_ucontext(regs, sw, &frame->uc))
 779		goto badframe;
 780	return regs->d0;
 781
 782badframe:
 783	force_sig(SIGSEGV);
 784	return 0;
 785}
 786
 787static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 788			     unsigned long mask)
 789{
 790	sc->sc_mask = mask;
 791	sc->sc_usp = rdusp();
 792	sc->sc_d0 = regs->d0;
 793	sc->sc_d1 = regs->d1;
 794	sc->sc_a0 = regs->a0;
 795	sc->sc_a1 = regs->a1;
 796	sc->sc_sr = regs->sr;
 797	sc->sc_pc = regs->pc;
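	/* exception frame format in the top four bits, vector offset in the low twelve */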
 798	sc->sc_formatvec = regs->format << 12 | regs->vector;
 799	save_a5_state(sc, regs);
 800	save_fpu_state(sc, regs);
 801}
 802
 803static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 804{
 805	struct switch_stack *sw = (struct switch_stack *)regs - 1;
 806	greg_t __user *gregs = uc->uc_mcontext.gregs;
 807	int err = 0;
 808
 809	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
 810	err |= __put_user(regs->d0, &gregs[0]);
 811	err |= __put_user(regs->d1, &gregs[1]);
 812	err |= __put_user(regs->d2, &gregs[2]);
 813	err |= __put_user(regs->d3, &gregs[3]);
 814	err |= __put_user(regs->d4, &gregs[4]);
 815	err |= __put_user(regs->d5, &gregs[5]);
 816	err |= __put_user(sw->d6, &gregs[6]);
 817	err |= __put_user(sw->d7, &gregs[7]);
 818	err |= __put_user(regs->a0, &gregs[8]);
 819	err |= __put_user(regs->a1, &gregs[9]);
 820	err |= __put_user(regs->a2, &gregs[10]);
 821	err |= __put_user(sw->a3, &gregs[11]);
 822	err |= __put_user(sw->a4, &gregs[12]);
 823	err |= __put_user(sw->a5, &gregs[13]);
 824	err |= __put_user(sw->a6, &gregs[14]);
 825	err |= __put_user(rdusp(), &gregs[15]);
 826	err |= __put_user(regs->pc, &gregs[16]);
 827	err |= __put_user(regs->sr, &gregs[17]);
 828	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
 829	err |= rt_save_fpu_state(uc, regs);
 830	return err;
 831}
 832
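/*
 * Find space for the signal frame on the user stack: sigsp() picks the
 * alternate signal stack when SA_ONSTACK applies, otherwise the current
 * usp; the frame is pushed below that and aligned down to 8 bytes.
 */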
 833static inline void __user *
 834get_sigframe(struct ksignal *ksig, size_t frame_size)
 835{
 836	unsigned long usp = sigsp(rdusp(), ksig);
 837
 838	return (void __user *)((usp - frame_size) & -8UL);
 839}
 840
 841static int setup_frame(struct ksignal *ksig, sigset_t *set,
 842			struct pt_regs *regs)
 843{
 844	struct sigframe __user *frame;
 845	int fsize = frame_extra_sizes(regs->format);
 846	struct sigcontext context;
 847	int err = 0, sig = ksig->sig;
 848
 849	if (fsize < 0) {
 850#ifdef DEBUG
 851		printk ("setup_frame: Unknown frame format %#x\n",
 852			regs->format);
 853#endif
 854		return -EFAULT;
 855	}
 856
 857	frame = get_sigframe(ksig, sizeof(*frame) + fsize);
 858
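	/*
	 * regs + 1 points at the extra hardware exception-frame words that
	 * follow pt_regs on the kernel stack; save them just past the
	 * sigframe so sigreturn can rebuild the frame (see
	 * mangle_kernel_stack()).
	 */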
 859	if (fsize)
 860		err |= copy_to_user (frame + 1, regs + 1, fsize);
 861
 862	err |= __put_user(sig, &frame->sig);
 863
 864	err |= __put_user(regs->vector, &frame->code);
 865	err |= __put_user(&frame->sc, &frame->psc);
 866
 867	if (_NSIG_WORDS > 1)
 868		err |= copy_to_user(frame->extramask, &set->sig[1],
 869				    sizeof(frame->extramask));
 870
 871	setup_sigcontext(&context, regs, set->sig[0]);
 872	err |= copy_to_user (&frame->sc, &context, sizeof(context));
 873
 874	/* Set up to return from userspace.  */
 875#ifdef CONFIG_MMU
 876	err |= __put_user(frame->retcode, &frame->pretcode);
 877	/* moveq #,d0; trap #0 */
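	/* high word 0x70xx = moveq #xx,d0; low word 0x4e40 = trap #0 */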
 878	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
 879			  (long __user *)(frame->retcode));
 880#else
 881	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
 882#endif
 883
 884	if (err)
 885		return -EFAULT;
 886
 887	push_cache ((unsigned long) &frame->retcode);
 888
 889	/*
 890	 * Set up registers for signal handler.  All the state we are about
 891	 * to destroy is successfully copied to sigframe.
 892	 */
 893	wrusp ((unsigned long) frame);
 894	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 895	adjustformat(regs);
 896
 897	/*
 898	 * This is subtle; if we build more than one sigframe, all but the
 899	 * first one will see frame format 0 and have fsize == 0, so we won't
 900	 * screw stkadj.
 901	 */
 902	if (fsize)
 903		regs->stkadj = fsize;
 904
 905	/* Prepare to skip over the extra stuff in the exception frame.  */
 906	if (regs->stkadj) {
 907		struct pt_regs *tregs =
 908			(struct pt_regs *)((ulong)regs + regs->stkadj);
 909#ifdef DEBUG
 910		printk("Performing stackadjust=%04x\n", regs->stkadj);
 911#endif
 912		/* This must be copied with decreasing addresses to
 913                   handle overlaps.  */
 914		tregs->vector = 0;
 915		tregs->format = 0;
 916		tregs->pc = regs->pc;
 917		tregs->sr = regs->sr;
 918	}
 919	return 0;
 920}
 921
 922static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 923			   struct pt_regs *regs)
 924{
 925	struct rt_sigframe __user *frame;
 926	int fsize = frame_extra_sizes(regs->format);
 927	int err = 0, sig = ksig->sig;
 928
 929	if (fsize < 0) {
 930#ifdef DEBUG
 931		printk ("setup_rt_frame: Unknown frame format %#x\n",
 932			regs->format);
 933#endif
 934		return -EFAULT;
 935	}
 936
 937	frame = get_sigframe(ksig, sizeof(*frame));
 938
 939	if (fsize)
 940		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
 941
 942	err |= __put_user(sig, &frame->sig);
 943	err |= __put_user(&frame->info, &frame->pinfo);
 944	err |= __put_user(&frame->uc, &frame->puc);
 945	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
 946
 947	/* Create the ucontext.  */
 948	err |= __put_user(0, &frame->uc.uc_flags);
 949	err |= __put_user(NULL, &frame->uc.uc_link);
 950	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
 951	err |= rt_setup_ucontext(&frame->uc, regs);
 952	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
 953
 954	/* Set up to return from userspace.  */
 955#ifdef CONFIG_MMU
 956	err |= __put_user(frame->retcode, &frame->pretcode);
 957#ifdef __mcoldfire__
 958	/* movel #__NR_rt_sigreturn,d0; trap #0 */
 959	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
 960	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
 961			  (long __user *)(frame->retcode + 4));
 962#else
 963	/* moveq #,d0; notb d0; trap #0 */
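	/*
	 * moveq only takes a sign-extended 8-bit immediate, so load the
	 * complement of the syscall number and flip it back with not.b.
	 */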
 964	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
 965			  (long __user *)(frame->retcode + 0));
 966	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
 967#endif
 968#else
 969	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
 970#endif /* CONFIG_MMU */
 971
 972	if (err)
 973		return -EFAULT;
 974
 975	push_cache ((unsigned long) &frame->retcode);
 976
 977	/*
 978	 * Set up registers for signal handler.  All the state we are about
 979	 * to destroy is successfully copied to sigframe.
 980	 */
 981	wrusp ((unsigned long) frame);
 982	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
 983	adjustformat(regs);
 984
 985	/*
 986	 * This is subtle; if we build more than one sigframe, all but the
 987	 * first one will see frame format 0 and have fsize == 0, so we won't
 988	 * screw stkadj.
 989	 */
 990	if (fsize)
 991		regs->stkadj = fsize;
 992
 993	/* Prepare to skip over the extra stuff in the exception frame.  */
 994	if (regs->stkadj) {
 995		struct pt_regs *tregs =
 996			(struct pt_regs *)((ulong)regs + regs->stkadj);
 997#ifdef DEBUG
 998		printk("Performing stackadjust=%04x\n", regs->stkadj);
 999#endif
1000		/* This must be copied with decreasing addresses to
1001                   handle overlaps.  */
1002		tregs->vector = 0;
1003		tregs->format = 0;
1004		tregs->pc = regs->pc;
1005		tregs->sr = regs->sr;
1006	}
1007	return 0;
1008}
1009
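/*
 * Fix up d0/pc for an interrupted system call.  The trap instruction
 * that entered the kernel is two bytes long, so backing pc up by 2
 * makes the task re-execute it on return; -ERESTART_RESTARTBLOCK is
 * redirected to sys_restart_syscall instead.
 */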
1010static inline void
1011handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
1012{
1013	switch (regs->d0) {
1014	case -ERESTARTNOHAND:
1015		if (!has_handler)
1016			goto do_restart;
1017		regs->d0 = -EINTR;
1018		break;
1019
1020	case -ERESTART_RESTARTBLOCK:
1021		if (!has_handler) {
1022			regs->d0 = __NR_restart_syscall;
1023			regs->pc -= 2;
1024			break;
1025		}
1026		regs->d0 = -EINTR;
1027		break;
1028
1029	case -ERESTARTSYS:
1030		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
1031			regs->d0 = -EINTR;
1032			break;
1033		}
1034		fallthrough;
1035	case -ERESTARTNOINTR:
1036	do_restart:
1037		regs->d0 = regs->orig_d0;
1038		regs->pc -= 2;
1039		break;
1040	}
1041}
1042
1043/*
1044 * OK, we're invoking a handler
1045 */
1046static void
1047handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1048{
1049	sigset_t *oldset = sigmask_to_save();
1050	int err;
1051	/* are we from a system call? */
1052	if (regs->orig_d0 >= 0)
1053		/* If so, check system call restarting.. */
1054		handle_restart(regs, &ksig->ka, 1);
1055
1056	/* set up the stack frame */
1057	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1058		err = setup_rt_frame(ksig, oldset, regs);
1059	else
1060		err = setup_frame(ksig, oldset, regs);
1061
1062	signal_setup_done(err, ksig, 0);
1063
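	/*
	 * If the task was single-stepping into the handler, clear the
	 * trace bit in the saved SR and report the pending step as a
	 * SIGTRAP now.
	 */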
1064	if (test_thread_flag(TIF_DELAYED_TRACE)) {
1065		regs->sr &= ~0x8000;
1066		send_sig(SIGTRAP, current, 1);
1067	}
1068}
1069
1070/*
1071 * Note that 'init' is a special process: it doesn't get signals it doesn't
1072 * want to handle. Thus you cannot kill init with a SIGKILL, even by
1073 * mistake.
1074 */
1075static void do_signal(struct pt_regs *regs)
1076{
1077	struct ksignal ksig;
1078
1079	current->thread.esp0 = (unsigned long) regs;
1080
1081	if (get_signal(&ksig)) {
1082		/* Whee!  Actually deliver the signal.  */
1083		handle_signal(&ksig, regs);
1084		return;
1085	}
1086
1087	/* Did we come from a system call? */
1088	if (regs->orig_d0 >= 0)
1089		/* Restart the system call - no handlers present */
1090		handle_restart(regs, NULL, 0);
1091
1092	/* If there's no signal to deliver, we just restore the saved mask.  */
1093	restore_saved_sigmask();
1094}
1095
1096void do_notify_resume(struct pt_regs *regs)
1097{
1098	if (test_thread_flag(TIF_SIGPENDING))
1099		do_signal(regs);
1100
1101	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
1102		resume_user_mode_work(regs);
1103}