Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * arch/alpha/kernel/traps.c
  4 *
  5 * (C) Copyright 1994 Linus Torvalds
  6 */
  7
  8/*
  9 * This file initializes the trap entry points
 10 */
 11
 12#include <linux/cpu.h>
 13#include <linux/jiffies.h>
 14#include <linux/mm.h>
 15#include <linux/sched/signal.h>
 16#include <linux/sched/debug.h>
 17#include <linux/tty.h>
 18#include <linux/delay.h>
 19#include <linux/extable.h>
 
 20#include <linux/kallsyms.h>
 21#include <linux/ratelimit.h>
 22
 23#include <asm/gentrap.h>
 24#include <linux/uaccess.h>
 25#include <linux/unaligned.h>
 26#include <asm/sysinfo.h>
 27#include <asm/hwrpb.h>
 28#include <asm/mmu_context.h>
 29#include <asm/special_insns.h>
 30
 31#include "proto.h"
 32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 33void
 34dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
 35{
 36	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
 37	       regs->pc, regs->r26, regs->ps, print_tainted());
 38	printk("pc is at %pSR\n", (void *)regs->pc);
 39	printk("ra is at %pSR\n", (void *)regs->r26);
 40	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
 41	       regs->r0, regs->r1, regs->r2);
 42	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
 43 	       regs->r3, regs->r4, regs->r5);
 44	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
 45	       regs->r6, regs->r7, regs->r8);
 46
 47	if (r9_15) {
 48		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
 49		       r9_15[9], r9_15[10], r9_15[11]);
 50		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
 51		       r9_15[12], r9_15[13], r9_15[14]);
 52		printk("s6 = %016lx\n", r9_15[15]);
 53	}
 54
 55	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
 56	       regs->r16, regs->r17, regs->r18);
 57	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
 58 	       regs->r19, regs->r20, regs->r21);
 59 	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
 60	       regs->r22, regs->r23, regs->r24);
 61	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
 62	       regs->r25, regs->r27, regs->r28);
 63	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
 64#if 0
 65__halt();
 66#endif
 67}
 68
 69#if 0
 70static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
 71			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
 72			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
 73			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
 74#endif
 75
 76static void
 77dik_show_code(unsigned int *pc)
 78{
 79	long i;
 80
 81	printk("Code:");
 82	for (i = -6; i < 2; i++) {
 83		unsigned int insn;
 84		if (__get_user(insn, (unsigned int __user *)pc + i))
 85			break;
 86		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
 87	}
 88	printk("\n");
 89}
 90
 91static void
 92dik_show_trace(unsigned long *sp, const char *loglvl)
 93{
 94	long i = 0;
 95	printk("%sTrace:\n", loglvl);
 96	while (0x1ff8 & (unsigned long) sp) {
 97		extern char _stext[], _etext[];
 98		unsigned long tmp = *sp;
 99		sp++;
100		if (!is_kernel_text(tmp))
 
 
101			continue;
102		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
103		if (i > 40) {
104			printk("%s ...", loglvl);
105			break;
106		}
107	}
108	printk("%s\n", loglvl);
109}
110
111static int kstack_depth_to_print = 24;
112
113void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
114{
115	unsigned long *stack;
116	int i;
117
118	/*
119	 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
120	 * back trace for this cpu.
121	 */
122	if(sp==NULL)
123		sp=(unsigned long*)&sp;
124
125	stack = sp;
126	for(i=0; i < kstack_depth_to_print; i++) {
127		if (((long) stack & (THREAD_SIZE-1)) == 0)
128			break;
129		if ((i % 4) == 0) {
130			if (i)
131				pr_cont("\n");
132			printk("%s       ", loglvl);
133		} else {
134			pr_cont(" ");
135		}
136		pr_cont("%016lx", *stack++);
137	}
138	pr_cont("\n");
139	dik_show_trace(sp, loglvl);
140}
141
142void
143die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
144{
145	if (regs->ps & 8)
146		return;
147#ifdef CONFIG_SMP
148	printk("CPU %d ", hard_smp_processor_id());
149#endif
150	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
151	dik_show_regs(regs, r9_15);
152	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
153	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
154	dik_show_code((unsigned int *)regs->pc);
155
156	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
157		printk("die_if_kernel recursion detected.\n");
158		local_irq_enable();
159		while (1);
160	}
161	make_task_dead(SIGSEGV);
162}
163
164#ifndef CONFIG_MATHEMU
165static long dummy_emul(void) { return 0; }
166long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
167  = (void *)dummy_emul;
168EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
169long (*alpha_fp_emul) (unsigned long pc)
170  = (void *)dummy_emul;
171EXPORT_SYMBOL_GPL(alpha_fp_emul);
172#else
173long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
174long alpha_fp_emul (unsigned long pc);
175#endif
176
177asmlinkage void
178do_entArith(unsigned long summary, unsigned long write_mask,
179	    struct pt_regs *regs)
180{
181	long si_code = FPE_FLTINV;
 
182
183	if (summary & 1) {
184		/* Software-completion summary bit is set, so try to
185		   emulate the instruction.  If the processor supports
186		   precise exceptions, we don't have to search.  */
187		if (!amask(AMASK_PRECISE_TRAP))
188			si_code = alpha_fp_emul(regs->pc - 4);
189		else
190			si_code = alpha_fp_emul_imprecise(regs, write_mask);
191		if (si_code == 0)
192			return;
193	}
194	die_if_kernel("Arithmetic fault", regs, 0, NULL);
195
196	send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
 
 
 
 
197}
198
199asmlinkage void
200do_entIF(unsigned long type, struct pt_regs *regs)
201{
 
202	int signo, code;
203
204	if (type == 3) { /* FEN fault */
205		/* Irritating users can call PAL_clrfen to disable the
206		   FPU for the process.  The kernel will then trap in
207		   do_switch_stack and undo_switch_stack when we try
208		   to save and restore the FP registers.
209
210		   Given that GCC by default generates code that uses the
211		   FP registers, PAL_clrfen is not useful except for DoS
212		   attacks.  So turn the bleeding FPU back on and be done
213		   with it.  */
214		current_thread_info()->pcb.flags |= 1;
215		__reload_thread(&current_thread_info()->pcb);
216		return;
217	}
218	if (!user_mode(regs)) {
219		if (type == 1) {
220			const unsigned int *data
221			  = (const unsigned int *) regs->pc;
222			printk("Kernel bug at %s:%d\n",
223			       (const char *)(data[1] | (long)data[2] << 32), 
224			       data[0]);
225		}
226#ifdef CONFIG_ALPHA_WTINT
227		if (type == 4) {
228			/* If CALL_PAL WTINT is totally unsupported by the
229			   PALcode, e.g. MILO, "emulate" it by overwriting
230			   the insn.  */
231			unsigned int *pinsn
232			  = (unsigned int *) regs->pc - 1;
233			if (*pinsn == PAL_wtint) {
234				*pinsn = 0x47e01400; /* mov 0,$0 */
235				imb();
236				regs->r0 = 0;
237				return;
238			}
239		}
240#endif /* ALPHA_WTINT */
241		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
242			      regs, type, NULL);
243	}
244
245	switch (type) {
246	      case 0: /* breakpoint */
 
 
 
 
 
 
247		if (ptrace_cancel_bpt(current)) {
248			regs->pc -= 4;	/* make pc point to former bpt */
249		}
250
251		send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
252			       current);
253		return;
254
255	      case 1: /* bugcheck */
256		send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
257				      (void __user *) regs->pc, 0, current);
 
 
 
 
258		return;
259		
260	      case 2: /* gentrap */
 
 
261		switch ((long) regs->r16) {
262		case GEN_INTOVF:
263			signo = SIGFPE;
264			code = FPE_INTOVF;
265			break;
266		case GEN_INTDIV:
267			signo = SIGFPE;
268			code = FPE_INTDIV;
269			break;
270		case GEN_FLTOVF:
271			signo = SIGFPE;
272			code = FPE_FLTOVF;
273			break;
274		case GEN_FLTDIV:
275			signo = SIGFPE;
276			code = FPE_FLTDIV;
277			break;
278		case GEN_FLTUND:
279			signo = SIGFPE;
280			code = FPE_FLTUND;
281			break;
282		case GEN_FLTINV:
283			signo = SIGFPE;
284			code = FPE_FLTINV;
285			break;
286		case GEN_FLTINE:
287			signo = SIGFPE;
288			code = FPE_FLTRES;
289			break;
290		case GEN_ROPRAND:
291			signo = SIGFPE;
292			code = FPE_FLTUNK;
293			break;
294
295		case GEN_DECOVF:
296		case GEN_DECDIV:
297		case GEN_DECINV:
298		case GEN_ASSERTERR:
299		case GEN_NULPTRERR:
300		case GEN_STKOVF:
301		case GEN_STRLENERR:
302		case GEN_SUBSTRERR:
303		case GEN_RANGERR:
304		case GEN_SUBRNG:
305		case GEN_SUBRNG1:
306		case GEN_SUBRNG2:
307		case GEN_SUBRNG3:
308		case GEN_SUBRNG4:
309		case GEN_SUBRNG5:
310		case GEN_SUBRNG6:
311		case GEN_SUBRNG7:
312		default:
313			signo = SIGTRAP;
314			code = TRAP_UNK;
315			break;
316		}
317
318		send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
319				      regs->r16, current);
 
 
 
320		return;
321
322	      case 4: /* opDEC */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323		break;
324
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325	      case 5: /* illoc */
326	      default: /* unexpected instruction-fault type */
327		      ;
328	}
329
330	send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
 
 
 
 
331}
332
333/* There is an ifdef in the PALcode in MILO that enables a 
334   "kernel debugging entry point" as an unprivileged call_pal.
335
336   We don't want to have anything to do with it, but unfortunately
337   several versions of MILO included in distributions have it enabled,
338   and if we don't put something on the entry point we'll oops.  */
339
340asmlinkage void
341do_entDbg(struct pt_regs *regs)
342{
 
 
343	die_if_kernel("Instruction fault", regs, 0, NULL);
344
345	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
 
 
 
 
346}
347
348
349/*
350 * entUna has a different register layout to be reasonably simple. It
351 * needs access to all the integer registers (the kernel doesn't use
352 * fp-regs), and it needs to have them in order for simpler access.
353 *
354 * Due to the non-standard register layout (and because we don't want
355 * to handle floating-point regs), user-mode unaligned accesses are
356 * handled separately by do_entUnaUser below.
357 *
358 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
359 * on a gp-register unaligned load/store, something is _very_ wrong
360 * in the kernel anyway..
361 */
362struct allregs {
363	unsigned long regs[32];
364	unsigned long ps, pc, gp, a0, a1, a2;
365};
366
367struct unaligned_stat {
368	unsigned long count, va, pc;
369} unaligned[2];
370
371
372/* Macro for exception fixup code to access integer registers.  */
373#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
374
375
376asmlinkage void
377do_entUna(void * va, unsigned long opcode, unsigned long reg,
378	  struct allregs *regs)
379{
380	long error, tmp1, tmp2, tmp3, tmp4;
381	unsigned long pc = regs->pc - 4;
382	unsigned long *_regs = regs->regs;
383	const struct exception_table_entry *fixup;
384
385	unaligned[0].count++;
386	unaligned[0].va = (unsigned long) va;
387	unaligned[0].pc = pc;
388
389	/* We don't want to use the generic get/put unaligned macros as
390	   we want to trap exceptions.  Only if we actually get an
391	   exception will we decide whether we should have caught it.  */
392
393	switch (opcode) {
394	case 0x0c: /* ldwu */
395		__asm__ __volatile__(
396		"1:	ldq_u %1,0(%3)\n"
397		"2:	ldq_u %2,1(%3)\n"
398		"	extwl %1,%3,%1\n"
399		"	extwh %2,%3,%2\n"
400		"3:\n"
401		EXC(1b,3b,%1,%0)
402		EXC(2b,3b,%2,%0)
 
 
 
 
403			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
404			: "r"(va), "0"(0));
405		if (error)
406			goto got_exception;
407		una_reg(reg) = tmp1|tmp2;
408		return;
409
410	case 0x28: /* ldl */
411		__asm__ __volatile__(
412		"1:	ldq_u %1,0(%3)\n"
413		"2:	ldq_u %2,3(%3)\n"
414		"	extll %1,%3,%1\n"
415		"	extlh %2,%3,%2\n"
416		"3:\n"
417		EXC(1b,3b,%1,%0)
418		EXC(2b,3b,%2,%0)
 
 
 
 
419			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
420			: "r"(va), "0"(0));
421		if (error)
422			goto got_exception;
423		una_reg(reg) = (int)(tmp1|tmp2);
424		return;
425
426	case 0x29: /* ldq */
427		__asm__ __volatile__(
428		"1:	ldq_u %1,0(%3)\n"
429		"2:	ldq_u %2,7(%3)\n"
430		"	extql %1,%3,%1\n"
431		"	extqh %2,%3,%2\n"
432		"3:\n"
433		EXC(1b,3b,%1,%0)
434		EXC(2b,3b,%2,%0)
 
 
 
 
435			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
436			: "r"(va), "0"(0));
437		if (error)
438			goto got_exception;
439		una_reg(reg) = tmp1|tmp2;
440		return;
441
442	/* Note that the store sequences do not indicate that they change
443	   memory because it _should_ be affecting nothing in this context.
444	   (Otherwise we have other, much larger, problems.)  */
445	case 0x0d: /* stw */
446		__asm__ __volatile__(
447		"1:	ldq_u %2,1(%5)\n"
448		"2:	ldq_u %1,0(%5)\n"
449		"	inswh %6,%5,%4\n"
450		"	inswl %6,%5,%3\n"
451		"	mskwh %2,%5,%2\n"
452		"	mskwl %1,%5,%1\n"
453		"	or %2,%4,%2\n"
454		"	or %1,%3,%1\n"
455		"3:	stq_u %2,1(%5)\n"
456		"4:	stq_u %1,0(%5)\n"
457		"5:\n"
458		EXC(1b,5b,%2,%0)
459		EXC(2b,5b,%1,%0)
460		EXC(3b,5b,$31,%0)
461		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
462			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
463			  "=&r"(tmp3), "=&r"(tmp4)
464			: "r"(va), "r"(una_reg(reg)), "0"(0));
465		if (error)
466			goto got_exception;
467		return;
468
469	case 0x2c: /* stl */
470		__asm__ __volatile__(
471		"1:	ldq_u %2,3(%5)\n"
472		"2:	ldq_u %1,0(%5)\n"
473		"	inslh %6,%5,%4\n"
474		"	insll %6,%5,%3\n"
475		"	msklh %2,%5,%2\n"
476		"	mskll %1,%5,%1\n"
477		"	or %2,%4,%2\n"
478		"	or %1,%3,%1\n"
479		"3:	stq_u %2,3(%5)\n"
480		"4:	stq_u %1,0(%5)\n"
481		"5:\n"
482		EXC(1b,5b,%2,%0)
483		EXC(2b,5b,%1,%0)
484		EXC(3b,5b,$31,%0)
485		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
486			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
487			  "=&r"(tmp3), "=&r"(tmp4)
488			: "r"(va), "r"(una_reg(reg)), "0"(0));
489		if (error)
490			goto got_exception;
491		return;
492
493	case 0x2d: /* stq */
494		__asm__ __volatile__(
495		"1:	ldq_u %2,7(%5)\n"
496		"2:	ldq_u %1,0(%5)\n"
497		"	insqh %6,%5,%4\n"
498		"	insql %6,%5,%3\n"
499		"	mskqh %2,%5,%2\n"
500		"	mskql %1,%5,%1\n"
501		"	or %2,%4,%2\n"
502		"	or %1,%3,%1\n"
503		"3:	stq_u %2,7(%5)\n"
504		"4:	stq_u %1,0(%5)\n"
505		"5:\n"
506		EXC(1b,5b,%2,%0)
507		EXC(2b,5b,%1,%0)
508		EXC(3b,5b,$31,%0)
509		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
510			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
511			  "=&r"(tmp3), "=&r"(tmp4)
512			: "r"(va), "r"(una_reg(reg)), "0"(0));
513		if (error)
514			goto got_exception;
515		return;
516	}
517
518	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
519		pc, va, opcode, reg);
520	make_task_dead(SIGSEGV);
521
522got_exception:
523	/* Ok, we caught the exception, but we don't want it.  Is there
524	   someone to pass it along to?  */
525	if ((fixup = search_exception_tables(pc)) != 0) {
526		unsigned long newpc;
527		newpc = fixup_exception(una_reg, fixup, pc);
528
529		printk("Forwarding unaligned exception at %lx (%lx)\n",
530		       pc, newpc);
531
532		regs->pc = newpc;
533		return;
534	}
535
536	/*
537	 * Yikes!  No one to forward the exception to.
538	 * Since the registers are in a weird format, dump them ourselves.
539 	 */
540
541	printk("%s(%d): unhandled unaligned exception\n",
542	       current->comm, task_pid_nr(current));
543
544	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
545	       pc, una_reg(26), regs->ps);
546	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
547	       una_reg(0), una_reg(1), una_reg(2));
548	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
549 	       una_reg(3), una_reg(4), una_reg(5));
550	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
551	       una_reg(6), una_reg(7), una_reg(8));
552	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
553	       una_reg(9), una_reg(10), una_reg(11));
554	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
555	       una_reg(12), una_reg(13), una_reg(14));
556	printk("r15= %016lx\n", una_reg(15));
557	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
558	       una_reg(16), una_reg(17), una_reg(18));
559	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
560 	       una_reg(19), una_reg(20), una_reg(21));
561 	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
562	       una_reg(22), una_reg(23), una_reg(24));
563	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
564	       una_reg(25), una_reg(27), una_reg(28));
565	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
566
567	dik_show_code((unsigned int *)pc);
568	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
569
570	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
571		printk("die_if_kernel recursion detected.\n");
572		local_irq_enable();
573		while (1);
574	}
575	make_task_dead(SIGSEGV);
576}
577
578/*
579 * Convert an s-floating point value in memory format to the
580 * corresponding value in register format.  The exponent
581 * needs to be remapped to preserve non-finite values
582 * (infinities, not-a-numbers, denormals).
583 */
584static inline unsigned long
585s_mem_to_reg (unsigned long s_mem)
586{
587	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
588	unsigned long sign    = (s_mem >> 31) & 0x1;
589	unsigned long exp_msb = (s_mem >> 30) & 0x1;
590	unsigned long exp_low = (s_mem >> 23) & 0x7f;
591	unsigned long exp;
592
593	exp = (exp_msb << 10) | exp_low;	/* common case */
594	if (exp_msb) {
595		if (exp_low == 0x7f) {
596			exp = 0x7ff;
597		}
598	} else {
599		if (exp_low == 0x00) {
600			exp = 0x000;
601		} else {
602			exp |= (0x7 << 7);
603		}
604	}
605	return (sign << 63) | (exp << 52) | (frac << 29);
606}
607
608/*
609 * Convert an s-floating point value in register format to the
610 * corresponding value in memory format.
611 */
612static inline unsigned long
613s_reg_to_mem (unsigned long s_reg)
614{
615	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
616}
617
618/*
619 * Handle user-level unaligned fault.  Handling user-level unaligned
620 * faults is *extremely* slow and produces nasty messages.  A user
621 * program *should* fix unaligned faults ASAP.
622 *
623 * Notice that we have (almost) the regular kernel stack layout here,
624 * so finding the appropriate registers is a little more difficult
625 * than in the kernel case.
626 *
627 * Finally, we handle regular integer load/stores only.  In
628 * particular, load-linked/store-conditionally and floating point
629 * load/stores are not supported.  The former make no sense with
630 * unaligned faults (they are guaranteed to fail) and I don't think
631 * the latter will occur in any decent program.
632 *
633 * Sigh. We *do* have to handle some FP operations, because GCC will
634 * uses them as temporary storage for integer memory to memory copies.
635 * However, we need to deal with stt/ldt and sts/lds only.
636 */
637
638#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
639			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
640			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
641			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
642
643#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
644			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
645			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
646
647#define R(x)	((size_t) &((struct pt_regs *)0)->x)
648
649static int unauser_reg_offsets[32] = {
650	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
651	/* r9 ... r15 are stored in front of regs.  */
652	-64, -56, -48, -40, -32, -24, -16,	/* padding at -8 */
653	R(r16), R(r17), R(r18),
654	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
655	R(r27), R(r28), R(gp),
656	0, 0
657};
658
659#undef R
660
661asmlinkage void
662do_entUnaUser(void __user * va, unsigned long opcode,
663	      unsigned long reg, struct pt_regs *regs)
664{
665	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
666
667	unsigned long tmp1, tmp2, tmp3, tmp4;
668	unsigned long fake_reg, *reg_addr = &fake_reg;
669	int si_code;
670	long error;
671
672	/* Check the UAC bits to decide what the user wants us to do
673	   with the unaligned access.  */
674
675	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
676		if (__ratelimit(&ratelimit)) {
677			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
678			       current->comm, task_pid_nr(current),
679			       regs->pc - 4, va, opcode, reg);
680		}
681	}
682	if ((current_thread_info()->status & TS_UAC_SIGBUS))
683		goto give_sigbus;
684	/* Not sure why you'd want to use this, but... */
685	if ((current_thread_info()->status & TS_UAC_NOFIX))
686		return;
687
688	/* Don't bother reading ds in the access check since we already
689	   know that this came from the user.  Also rely on the fact that
690	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
691	if ((unsigned long)va >= TASK_SIZE)
692		goto give_sigsegv;
693
694	++unaligned[1].count;
695	unaligned[1].va = (unsigned long)va;
696	unaligned[1].pc = regs->pc - 4;
697
698	if ((1L << opcode) & OP_INT_MASK) {
699		/* it's an integer load/store */
700		if (reg < 30) {
701			reg_addr = (unsigned long *)
702			  ((char *)regs + unauser_reg_offsets[reg]);
703		} else if (reg == 30) {
704			/* usp in PAL regs */
705			fake_reg = rdusp();
706		} else {
707			/* zero "register" */
708			fake_reg = 0;
709		}
710	}
711
712	/* We don't want to use the generic get/put unaligned macros as
713	   we want to trap exceptions.  Only if we actually get an
714	   exception will we decide whether we should have caught it.  */
715
716	switch (opcode) {
717	case 0x0c: /* ldwu */
718		__asm__ __volatile__(
719		"1:	ldq_u %1,0(%3)\n"
720		"2:	ldq_u %2,1(%3)\n"
721		"	extwl %1,%3,%1\n"
722		"	extwh %2,%3,%2\n"
723		"3:\n"
724		EXC(1b,3b,%1,%0)
725		EXC(2b,3b,%2,%0)
 
 
 
 
726			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
727			: "r"(va), "0"(0));
728		if (error)
729			goto give_sigsegv;
730		*reg_addr = tmp1|tmp2;
731		break;
732
733	case 0x22: /* lds */
734		__asm__ __volatile__(
735		"1:	ldq_u %1,0(%3)\n"
736		"2:	ldq_u %2,3(%3)\n"
737		"	extll %1,%3,%1\n"
738		"	extlh %2,%3,%2\n"
739		"3:\n"
740		EXC(1b,3b,%1,%0)
741		EXC(2b,3b,%2,%0)
 
 
 
 
742			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
743			: "r"(va), "0"(0));
744		if (error)
745			goto give_sigsegv;
746		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
747		return;
748
749	case 0x23: /* ldt */
750		__asm__ __volatile__(
751		"1:	ldq_u %1,0(%3)\n"
752		"2:	ldq_u %2,7(%3)\n"
753		"	extql %1,%3,%1\n"
754		"	extqh %2,%3,%2\n"
755		"3:\n"
756		EXC(1b,3b,%1,%0)
757		EXC(2b,3b,%2,%0)
 
 
 
 
758			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
759			: "r"(va), "0"(0));
760		if (error)
761			goto give_sigsegv;
762		alpha_write_fp_reg(reg, tmp1|tmp2);
763		return;
764
765	case 0x28: /* ldl */
766		__asm__ __volatile__(
767		"1:	ldq_u %1,0(%3)\n"
768		"2:	ldq_u %2,3(%3)\n"
769		"	extll %1,%3,%1\n"
770		"	extlh %2,%3,%2\n"
771		"3:\n"
772		EXC(1b,3b,%1,%0)
773		EXC(2b,3b,%2,%0)
 
 
 
 
774			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
775			: "r"(va), "0"(0));
776		if (error)
777			goto give_sigsegv;
778		*reg_addr = (int)(tmp1|tmp2);
779		break;
780
781	case 0x29: /* ldq */
782		__asm__ __volatile__(
783		"1:	ldq_u %1,0(%3)\n"
784		"2:	ldq_u %2,7(%3)\n"
785		"	extql %1,%3,%1\n"
786		"	extqh %2,%3,%2\n"
787		"3:\n"
788		EXC(1b,3b,%1,%0)
789		EXC(2b,3b,%2,%0)
 
 
 
 
790			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
791			: "r"(va), "0"(0));
792		if (error)
793			goto give_sigsegv;
794		*reg_addr = tmp1|tmp2;
795		break;
796
797	/* Note that the store sequences do not indicate that they change
798	   memory because it _should_ be affecting nothing in this context.
799	   (Otherwise we have other, much larger, problems.)  */
800	case 0x0d: /* stw */
801		__asm__ __volatile__(
802		"1:	ldq_u %2,1(%5)\n"
803		"2:	ldq_u %1,0(%5)\n"
804		"	inswh %6,%5,%4\n"
805		"	inswl %6,%5,%3\n"
806		"	mskwh %2,%5,%2\n"
807		"	mskwl %1,%5,%1\n"
808		"	or %2,%4,%2\n"
809		"	or %1,%3,%1\n"
810		"3:	stq_u %2,1(%5)\n"
811		"4:	stq_u %1,0(%5)\n"
812		"5:\n"
813		EXC(1b,5b,%2,%0)
814		EXC(2b,5b,%1,%0)
815		EXC(3b,5b,$31,%0)
816		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
817			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
818			  "=&r"(tmp3), "=&r"(tmp4)
819			: "r"(va), "r"(*reg_addr), "0"(0));
820		if (error)
821			goto give_sigsegv;
822		return;
823
824	case 0x26: /* sts */
825		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
826		fallthrough;
827
828	case 0x2c: /* stl */
829		__asm__ __volatile__(
830		"1:	ldq_u %2,3(%5)\n"
831		"2:	ldq_u %1,0(%5)\n"
832		"	inslh %6,%5,%4\n"
833		"	insll %6,%5,%3\n"
834		"	msklh %2,%5,%2\n"
835		"	mskll %1,%5,%1\n"
836		"	or %2,%4,%2\n"
837		"	or %1,%3,%1\n"
838		"3:	stq_u %2,3(%5)\n"
839		"4:	stq_u %1,0(%5)\n"
840		"5:\n"
841		EXC(1b,5b,%2,%0)
842		EXC(2b,5b,%1,%0)
843		EXC(3b,5b,$31,%0)
844		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
845			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
846			  "=&r"(tmp3), "=&r"(tmp4)
847			: "r"(va), "r"(*reg_addr), "0"(0));
848		if (error)
849			goto give_sigsegv;
850		return;
851
852	case 0x27: /* stt */
853		fake_reg = alpha_read_fp_reg(reg);
854		fallthrough;
855
856	case 0x2d: /* stq */
857		__asm__ __volatile__(
858		"1:	ldq_u %2,7(%5)\n"
859		"2:	ldq_u %1,0(%5)\n"
860		"	insqh %6,%5,%4\n"
861		"	insql %6,%5,%3\n"
862		"	mskqh %2,%5,%2\n"
863		"	mskql %1,%5,%1\n"
864		"	or %2,%4,%2\n"
865		"	or %1,%3,%1\n"
866		"3:	stq_u %2,7(%5)\n"
867		"4:	stq_u %1,0(%5)\n"
868		"5:\n"
869		EXC(1b,5b,%2,%0)
870		EXC(2b,5b,%1,%0)
871		EXC(3b,5b,$31,%0)
872		EXC(4b,5b,$31,%0)
 
 
 
 
 
 
873			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
874			  "=&r"(tmp3), "=&r"(tmp4)
875			: "r"(va), "r"(*reg_addr), "0"(0));
876		if (error)
877			goto give_sigsegv;
878		return;
879
880	default:
881		/* What instruction were you trying to use, exactly?  */
882		goto give_sigbus;
883	}
884
885	/* Only integer loads should get here; everyone else returns early. */
886	if (reg == 30)
887		wrusp(fake_reg);
888	return;
889
890give_sigsegv:
891	regs->pc -= 4;  /* make pc point to faulting insn */
 
 
892
893	/* We need to replicate some of the logic in mm/fault.c,
894	   since we don't have access to the fault code in the
895	   exception handling return path.  */
896	if ((unsigned long)va >= TASK_SIZE)
897		si_code = SEGV_ACCERR;
898	else {
899		struct mm_struct *mm = current->mm;
900		mmap_read_lock(mm);
901		if (find_vma(mm, (unsigned long)va))
902			si_code = SEGV_ACCERR;
903		else
904			si_code = SEGV_MAPERR;
905		mmap_read_unlock(mm);
906	}
907	send_sig_fault(SIGSEGV, si_code, va, current);
 
908	return;
909
910give_sigbus:
911	regs->pc -= 4;
912	send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
 
 
 
 
913	return;
914}
915
916void
917trap_init(void)
918{
919	/* Tell PAL-code what global pointer we want in the kernel.  */
920	register unsigned long gptr __asm__("$29");
921	wrkgp(gptr);
 
 
 
 
 
922
923	wrent(entArith, 1);
924	wrent(entMM, 2);
925	wrent(entIF, 3);
926	wrent(entUna, 4);
927	wrent(entSys, 5);
928	wrent(entDbg, 6);
929}
v3.15
 
   1/*
   2 * arch/alpha/kernel/traps.c
   3 *
   4 * (C) Copyright 1994 Linus Torvalds
   5 */
   6
   7/*
   8 * This file initializes the trap entry points
   9 */
  10
 
  11#include <linux/jiffies.h>
  12#include <linux/mm.h>
  13#include <linux/sched.h>
 
  14#include <linux/tty.h>
  15#include <linux/delay.h>
  16#include <linux/module.h>
  17#include <linux/init.h>
  18#include <linux/kallsyms.h>
  19#include <linux/ratelimit.h>
  20
  21#include <asm/gentrap.h>
  22#include <asm/uaccess.h>
  23#include <asm/unaligned.h>
  24#include <asm/sysinfo.h>
  25#include <asm/hwrpb.h>
  26#include <asm/mmu_context.h>
  27#include <asm/special_insns.h>
  28
  29#include "proto.h"
  30
  31/* Work-around for some SRMs which mishandle opDEC faults.  */
  32
  33static int opDEC_fix;
  34
  35static void
  36opDEC_check(void)
  37{
  38	__asm__ __volatile__ (
  39	/* Load the address of... */
  40	"	br	$16, 1f\n"
  41	/* A stub instruction fault handler.  Just add 4 to the
  42	   pc and continue.  */
  43	"	ldq	$16, 8($sp)\n"
  44	"	addq	$16, 4, $16\n"
  45	"	stq	$16, 8($sp)\n"
  46	"	call_pal %[rti]\n"
  47	/* Install the instruction fault handler.  */
  48	"1:	lda	$17, 3\n"
  49	"	call_pal %[wrent]\n"
  50	/* With that in place, the fault from the round-to-minf fp
  51	   insn will arrive either at the "lda 4" insn (bad) or one
  52	   past that (good).  This places the correct fixup in %0.  */
  53	"	lda %[fix], 0\n"
  54	"	cvttq/svm $f31,$f31\n"
  55	"	lda %[fix], 4"
  56	: [fix] "=r" (opDEC_fix)
  57	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
  58	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
  59
  60	if (opDEC_fix)
  61		printk("opDEC fixup enabled.\n");
  62}
  63
  64void
  65dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
  66{
  67	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
  68	       regs->pc, regs->r26, regs->ps, print_tainted());
  69	printk("pc is at %pSR\n", (void *)regs->pc);
  70	printk("ra is at %pSR\n", (void *)regs->r26);
  71	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
  72	       regs->r0, regs->r1, regs->r2);
  73	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
  74 	       regs->r3, regs->r4, regs->r5);
  75	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
  76	       regs->r6, regs->r7, regs->r8);
  77
  78	if (r9_15) {
  79		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
  80		       r9_15[9], r9_15[10], r9_15[11]);
  81		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
  82		       r9_15[12], r9_15[13], r9_15[14]);
  83		printk("s6 = %016lx\n", r9_15[15]);
  84	}
  85
  86	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
  87	       regs->r16, regs->r17, regs->r18);
  88	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
  89 	       regs->r19, regs->r20, regs->r21);
  90 	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
  91	       regs->r22, regs->r23, regs->r24);
  92	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
  93	       regs->r25, regs->r27, regs->r28);
  94	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
  95#if 0
  96__halt();
  97#endif
  98}
  99
 100#if 0
 101static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
 102			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
 103			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
 104			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
 105#endif
 106
 107static void
 108dik_show_code(unsigned int *pc)
 109{
 110	long i;
 111
 112	printk("Code:");
 113	for (i = -6; i < 2; i++) {
 114		unsigned int insn;
 115		if (__get_user(insn, (unsigned int __user *)pc + i))
 116			break;
 117		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
 118	}
 119	printk("\n");
 120}
 121
 122static void
 123dik_show_trace(unsigned long *sp)
 124{
 125	long i = 0;
 126	printk("Trace:\n");
 127	while (0x1ff8 & (unsigned long) sp) {
 128		extern char _stext[], _etext[];
 129		unsigned long tmp = *sp;
 130		sp++;
 131		if (tmp < (unsigned long) &_stext)
 132			continue;
 133		if (tmp >= (unsigned long) &_etext)
 134			continue;
 135		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
 136		if (i > 40) {
 137			printk(" ...");
 138			break;
 139		}
 140	}
 141	printk("\n");
 142}
 143
 144static int kstack_depth_to_print = 24;
 145
 146void show_stack(struct task_struct *task, unsigned long *sp)
 147{
 148	unsigned long *stack;
 149	int i;
 150
 151	/*
 152	 * debugging aid: "show_stack(NULL);" prints the
 153	 * back trace for this cpu.
 154	 */
 155	if(sp==NULL)
 156		sp=(unsigned long*)&sp;
 157
 158	stack = sp;
 159	for(i=0; i < kstack_depth_to_print; i++) {
 160		if (((long) stack & (THREAD_SIZE-1)) == 0)
 161			break;
 162		if (i && ((i % 4) == 0))
 163			printk("\n       ");
 164		printk("%016lx ", *stack++);
 
 
 
 
 
 165	}
 166	printk("\n");
 167	dik_show_trace(sp);
 168}
 169
 170void
 171die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
 172{
 173	if (regs->ps & 8)
 174		return;
 175#ifdef CONFIG_SMP
 176	printk("CPU %d ", hard_smp_processor_id());
 177#endif
 178	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
 179	dik_show_regs(regs, r9_15);
 180	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 181	dik_show_trace((unsigned long *)(regs+1));
 182	dik_show_code((unsigned int *)regs->pc);
 183
 184	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
 185		printk("die_if_kernel recursion detected.\n");
 186		local_irq_enable();
 187		while (1);
 188	}
 189	do_exit(SIGSEGV);
 190}
 191
 192#ifndef CONFIG_MATHEMU
 193static long dummy_emul(void) { return 0; }
 194long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
 195  = (void *)dummy_emul;
 
 196long (*alpha_fp_emul) (unsigned long pc)
 197  = (void *)dummy_emul;
 
 198#else
 199long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
 200long alpha_fp_emul (unsigned long pc);
 201#endif
 202
 203asmlinkage void
 204do_entArith(unsigned long summary, unsigned long write_mask,
 205	    struct pt_regs *regs)
 206{
 207	long si_code = FPE_FLTINV;
 208	siginfo_t info;
 209
 210	if (summary & 1) {
 211		/* Software-completion summary bit is set, so try to
 212		   emulate the instruction.  If the processor supports
 213		   precise exceptions, we don't have to search.  */
 214		if (!amask(AMASK_PRECISE_TRAP))
 215			si_code = alpha_fp_emul(regs->pc - 4);
 216		else
 217			si_code = alpha_fp_emul_imprecise(regs, write_mask);
 218		if (si_code == 0)
 219			return;
 220	}
 221	die_if_kernel("Arithmetic fault", regs, 0, NULL);
 222
 223	info.si_signo = SIGFPE;
 224	info.si_errno = 0;
 225	info.si_code = si_code;
 226	info.si_addr = (void __user *) regs->pc;
 227	send_sig_info(SIGFPE, &info, current);
 228}
 229
 230asmlinkage void
 231do_entIF(unsigned long type, struct pt_regs *regs)
 232{
 233	siginfo_t info;
 234	int signo, code;
 235
 236	if ((regs->ps & ~IPL_MAX) == 0) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 237		if (type == 1) {
 238			const unsigned int *data
 239			  = (const unsigned int *) regs->pc;
 240			printk("Kernel bug at %s:%d\n",
 241			       (const char *)(data[1] | (long)data[2] << 32), 
 242			       data[0]);
 243		}
 244#ifdef CONFIG_ALPHA_WTINT
 245		if (type == 4) {
 246			/* If CALL_PAL WTINT is totally unsupported by the
 247			   PALcode, e.g. MILO, "emulate" it by overwriting
 248			   the insn.  */
 249			unsigned int *pinsn
 250			  = (unsigned int *) regs->pc - 1;
 251			if (*pinsn == PAL_wtint) {
 252				*pinsn = 0x47e01400; /* mov 0,$0 */
 253				imb();
 254				regs->r0 = 0;
 255				return;
 256			}
 257		}
 258#endif /* ALPHA_WTINT */
 259		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
 260			      regs, type, NULL);
 261	}
 262
 263	switch (type) {
 264	      case 0: /* breakpoint */
 265		info.si_signo = SIGTRAP;
 266		info.si_errno = 0;
 267		info.si_code = TRAP_BRKPT;
 268		info.si_trapno = 0;
 269		info.si_addr = (void __user *) regs->pc;
 270
 271		if (ptrace_cancel_bpt(current)) {
 272			regs->pc -= 4;	/* make pc point to former bpt */
 273		}
 274
 275		send_sig_info(SIGTRAP, &info, current);
 
 276		return;
 277
 278	      case 1: /* bugcheck */
 279		info.si_signo = SIGTRAP;
 280		info.si_errno = 0;
 281		info.si_code = __SI_FAULT;
 282		info.si_addr = (void __user *) regs->pc;
 283		info.si_trapno = 0;
 284		send_sig_info(SIGTRAP, &info, current);
 285		return;
 286		
 287	      case 2: /* gentrap */
 288		info.si_addr = (void __user *) regs->pc;
 289		info.si_trapno = regs->r16;
 290		switch ((long) regs->r16) {
 291		case GEN_INTOVF:
 292			signo = SIGFPE;
 293			code = FPE_INTOVF;
 294			break;
 295		case GEN_INTDIV:
 296			signo = SIGFPE;
 297			code = FPE_INTDIV;
 298			break;
 299		case GEN_FLTOVF:
 300			signo = SIGFPE;
 301			code = FPE_FLTOVF;
 302			break;
 303		case GEN_FLTDIV:
 304			signo = SIGFPE;
 305			code = FPE_FLTDIV;
 306			break;
 307		case GEN_FLTUND:
 308			signo = SIGFPE;
 309			code = FPE_FLTUND;
 310			break;
 311		case GEN_FLTINV:
 312			signo = SIGFPE;
 313			code = FPE_FLTINV;
 314			break;
 315		case GEN_FLTINE:
 316			signo = SIGFPE;
 317			code = FPE_FLTRES;
 318			break;
 319		case GEN_ROPRAND:
 320			signo = SIGFPE;
 321			code = __SI_FAULT;
 322			break;
 323
 324		case GEN_DECOVF:
 325		case GEN_DECDIV:
 326		case GEN_DECINV:
 327		case GEN_ASSERTERR:
 328		case GEN_NULPTRERR:
 329		case GEN_STKOVF:
 330		case GEN_STRLENERR:
 331		case GEN_SUBSTRERR:
 332		case GEN_RANGERR:
 333		case GEN_SUBRNG:
 334		case GEN_SUBRNG1:
 335		case GEN_SUBRNG2:
 336		case GEN_SUBRNG3:
 337		case GEN_SUBRNG4:
 338		case GEN_SUBRNG5:
 339		case GEN_SUBRNG6:
 340		case GEN_SUBRNG7:
 341		default:
 342			signo = SIGTRAP;
 343			code = __SI_FAULT;
 344			break;
 345		}
 346
 347		info.si_signo = signo;
 348		info.si_errno = 0;
 349		info.si_code = code;
 350		info.si_addr = (void __user *) regs->pc;
 351		send_sig_info(signo, &info, current);
 352		return;
 353
 354	      case 4: /* opDEC */
 355		if (implver() == IMPLVER_EV4) {
 356			long si_code;
 357
 358			/* The some versions of SRM do not handle
 359			   the opDEC properly - they return the PC of the
 360			   opDEC fault, not the instruction after as the
 361			   Alpha architecture requires.  Here we fix it up.
 362			   We do this by intentionally causing an opDEC
 363			   fault during the boot sequence and testing if
 364			   we get the correct PC.  If not, we set a flag
 365			   to correct it every time through.  */
 366			regs->pc += opDEC_fix; 
 367			
 368			/* EV4 does not implement anything except normal
 369			   rounding.  Everything else will come here as
 370			   an illegal instruction.  Emulate them.  */
 371			si_code = alpha_fp_emul(regs->pc - 4);
 372			if (si_code == 0)
 373				return;
 374			if (si_code > 0) {
 375				info.si_signo = SIGFPE;
 376				info.si_errno = 0;
 377				info.si_code = si_code;
 378				info.si_addr = (void __user *) regs->pc;
 379				send_sig_info(SIGFPE, &info, current);
 380				return;
 381			}
 382		}
 383		break;
 384
 385	      case 3: /* FEN fault */
 386		/* Irritating users can call PAL_clrfen to disable the
 387		   FPU for the process.  The kernel will then trap in
 388		   do_switch_stack and undo_switch_stack when we try
 389		   to save and restore the FP registers.
 390
 391		   Given that GCC by default generates code that uses the
 392		   FP registers, PAL_clrfen is not useful except for DoS
 393		   attacks.  So turn the bleeding FPU back on and be done
 394		   with it.  */
 395		current_thread_info()->pcb.flags |= 1;
 396		__reload_thread(&current_thread_info()->pcb);
 397		return;
 398
 399	      case 5: /* illoc */
 400	      default: /* unexpected instruction-fault type */
 401		      ;
 402	}
 403
 404	info.si_signo = SIGILL;
 405	info.si_errno = 0;
 406	info.si_code = ILL_ILLOPC;
 407	info.si_addr = (void __user *) regs->pc;
 408	send_sig_info(SIGILL, &info, current);
 409}
 410
 411/* There is an ifdef in the PALcode in MILO that enables a 
 412   "kernel debugging entry point" as an unprivileged call_pal.
 413
 414   We don't want to have anything to do with it, but unfortunately
 415   several versions of MILO included in distributions have it enabled,
 416   and if we don't put something on the entry point we'll oops.  */
 417
 418asmlinkage void
 419do_entDbg(struct pt_regs *regs)
 420{
 421	siginfo_t info;
 422
 423	die_if_kernel("Instruction fault", regs, 0, NULL);
 424
 425	info.si_signo = SIGILL;
 426	info.si_errno = 0;
 427	info.si_code = ILL_ILLOPC;
 428	info.si_addr = (void __user *) regs->pc;
 429	force_sig_info(SIGILL, &info, current);
 430}
 431
 432
 433/*
 434 * entUna has a different register layout to be reasonably simple. It
 435 * needs access to all the integer registers (the kernel doesn't use
 436 * fp-regs), and it needs to have them in order for simpler access.
 437 *
 438 * Due to the non-standard register layout (and because we don't want
 439 * to handle floating-point regs), user-mode unaligned accesses are
 440 * handled separately by do_entUnaUser below.
 441 *
 442 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 443 * on a gp-register unaligned load/store, something is _very_ wrong
 444 * in the kernel anyway..
 445 */
 446struct allregs {
 447	unsigned long regs[32];
 448	unsigned long ps, pc, gp, a0, a1, a2;
 449};
 450
 451struct unaligned_stat {
 452	unsigned long count, va, pc;
 453} unaligned[2];
 454
 455
 456/* Macro for exception fixup code to access integer registers.  */
 457#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
 458
 459
 460asmlinkage void
 461do_entUna(void * va, unsigned long opcode, unsigned long reg,
 462	  struct allregs *regs)
 463{
 464	long error, tmp1, tmp2, tmp3, tmp4;
 465	unsigned long pc = regs->pc - 4;
 466	unsigned long *_regs = regs->regs;
 467	const struct exception_table_entry *fixup;
 468
 469	unaligned[0].count++;
 470	unaligned[0].va = (unsigned long) va;
 471	unaligned[0].pc = pc;
 472
 473	/* We don't want to use the generic get/put unaligned macros as
 474	   we want to trap exceptions.  Only if we actually get an
 475	   exception will we decide whether we should have caught it.  */
 476
 477	switch (opcode) {
 478	case 0x0c: /* ldwu */
 479		__asm__ __volatile__(
 480		"1:	ldq_u %1,0(%3)\n"
 481		"2:	ldq_u %2,1(%3)\n"
 482		"	extwl %1,%3,%1\n"
 483		"	extwh %2,%3,%2\n"
 484		"3:\n"
 485		".section __ex_table,\"a\"\n"
 486		"	.long 1b - .\n"
 487		"	lda %1,3b-1b(%0)\n"
 488		"	.long 2b - .\n"
 489		"	lda %2,3b-2b(%0)\n"
 490		".previous"
 491			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 492			: "r"(va), "0"(0));
 493		if (error)
 494			goto got_exception;
 495		una_reg(reg) = tmp1|tmp2;
 496		return;
 497
 498	case 0x28: /* ldl */
 499		__asm__ __volatile__(
 500		"1:	ldq_u %1,0(%3)\n"
 501		"2:	ldq_u %2,3(%3)\n"
 502		"	extll %1,%3,%1\n"
 503		"	extlh %2,%3,%2\n"
 504		"3:\n"
 505		".section __ex_table,\"a\"\n"
 506		"	.long 1b - .\n"
 507		"	lda %1,3b-1b(%0)\n"
 508		"	.long 2b - .\n"
 509		"	lda %2,3b-2b(%0)\n"
 510		".previous"
 511			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 512			: "r"(va), "0"(0));
 513		if (error)
 514			goto got_exception;
 515		una_reg(reg) = (int)(tmp1|tmp2);
 516		return;
 517
 518	case 0x29: /* ldq */
 519		__asm__ __volatile__(
 520		"1:	ldq_u %1,0(%3)\n"
 521		"2:	ldq_u %2,7(%3)\n"
 522		"	extql %1,%3,%1\n"
 523		"	extqh %2,%3,%2\n"
 524		"3:\n"
 525		".section __ex_table,\"a\"\n"
 526		"	.long 1b - .\n"
 527		"	lda %1,3b-1b(%0)\n"
 528		"	.long 2b - .\n"
 529		"	lda %2,3b-2b(%0)\n"
 530		".previous"
 531			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 532			: "r"(va), "0"(0));
 533		if (error)
 534			goto got_exception;
 535		una_reg(reg) = tmp1|tmp2;
 536		return;
 537
 538	/* Note that the store sequences do not indicate that they change
 539	   memory because it _should_ be affecting nothing in this context.
 540	   (Otherwise we have other, much larger, problems.)  */
 541	case 0x0d: /* stw */
 542		__asm__ __volatile__(
 543		"1:	ldq_u %2,1(%5)\n"
 544		"2:	ldq_u %1,0(%5)\n"
 545		"	inswh %6,%5,%4\n"
 546		"	inswl %6,%5,%3\n"
 547		"	mskwh %2,%5,%2\n"
 548		"	mskwl %1,%5,%1\n"
 549		"	or %2,%4,%2\n"
 550		"	or %1,%3,%1\n"
 551		"3:	stq_u %2,1(%5)\n"
 552		"4:	stq_u %1,0(%5)\n"
 553		"5:\n"
 554		".section __ex_table,\"a\"\n"
 555		"	.long 1b - .\n"
 556		"	lda %2,5b-1b(%0)\n"
 557		"	.long 2b - .\n"
 558		"	lda %1,5b-2b(%0)\n"
 559		"	.long 3b - .\n"
 560		"	lda $31,5b-3b(%0)\n"
 561		"	.long 4b - .\n"
 562		"	lda $31,5b-4b(%0)\n"
 563		".previous"
 564			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 565			  "=&r"(tmp3), "=&r"(tmp4)
 566			: "r"(va), "r"(una_reg(reg)), "0"(0));
 567		if (error)
 568			goto got_exception;
 569		return;
 570
 571	case 0x2c: /* stl */
 572		__asm__ __volatile__(
 573		"1:	ldq_u %2,3(%5)\n"
 574		"2:	ldq_u %1,0(%5)\n"
 575		"	inslh %6,%5,%4\n"
 576		"	insll %6,%5,%3\n"
 577		"	msklh %2,%5,%2\n"
 578		"	mskll %1,%5,%1\n"
 579		"	or %2,%4,%2\n"
 580		"	or %1,%3,%1\n"
 581		"3:	stq_u %2,3(%5)\n"
 582		"4:	stq_u %1,0(%5)\n"
 583		"5:\n"
 584		".section __ex_table,\"a\"\n"
 585		"	.long 1b - .\n"
 586		"	lda %2,5b-1b(%0)\n"
 587		"	.long 2b - .\n"
 588		"	lda %1,5b-2b(%0)\n"
 589		"	.long 3b - .\n"
 590		"	lda $31,5b-3b(%0)\n"
 591		"	.long 4b - .\n"
 592		"	lda $31,5b-4b(%0)\n"
 593		".previous"
 594			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 595			  "=&r"(tmp3), "=&r"(tmp4)
 596			: "r"(va), "r"(una_reg(reg)), "0"(0));
 597		if (error)
 598			goto got_exception;
 599		return;
 600
 601	case 0x2d: /* stq */
 602		__asm__ __volatile__(
 603		"1:	ldq_u %2,7(%5)\n"
 604		"2:	ldq_u %1,0(%5)\n"
 605		"	insqh %6,%5,%4\n"
 606		"	insql %6,%5,%3\n"
 607		"	mskqh %2,%5,%2\n"
 608		"	mskql %1,%5,%1\n"
 609		"	or %2,%4,%2\n"
 610		"	or %1,%3,%1\n"
 611		"3:	stq_u %2,7(%5)\n"
 612		"4:	stq_u %1,0(%5)\n"
 613		"5:\n"
 614		".section __ex_table,\"a\"\n\t"
 615		"	.long 1b - .\n"
 616		"	lda %2,5b-1b(%0)\n"
 617		"	.long 2b - .\n"
 618		"	lda %1,5b-2b(%0)\n"
 619		"	.long 3b - .\n"
 620		"	lda $31,5b-3b(%0)\n"
 621		"	.long 4b - .\n"
 622		"	lda $31,5b-4b(%0)\n"
 623		".previous"
 624			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 625			  "=&r"(tmp3), "=&r"(tmp4)
 626			: "r"(va), "r"(una_reg(reg)), "0"(0));
 627		if (error)
 628			goto got_exception;
 629		return;
 630	}
 631
 632	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
 633		pc, va, opcode, reg);
 634	do_exit(SIGSEGV);
 635
 636got_exception:
 637	/* Ok, we caught the exception, but we don't want it.  Is there
 638	   someone to pass it along to?  */
 639	if ((fixup = search_exception_tables(pc)) != 0) {
 640		unsigned long newpc;
 641		newpc = fixup_exception(una_reg, fixup, pc);
 642
 643		printk("Forwarding unaligned exception at %lx (%lx)\n",
 644		       pc, newpc);
 645
 646		regs->pc = newpc;
 647		return;
 648	}
 649
 650	/*
 651	 * Yikes!  No one to forward the exception to.
 652	 * Since the registers are in a weird format, dump them ourselves.
 653 	 */
 654
 655	printk("%s(%d): unhandled unaligned exception\n",
 656	       current->comm, task_pid_nr(current));
 657
 658	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
 659	       pc, una_reg(26), regs->ps);
 660	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
 661	       una_reg(0), una_reg(1), una_reg(2));
 662	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
 663 	       una_reg(3), una_reg(4), una_reg(5));
 664	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
 665	       una_reg(6), una_reg(7), una_reg(8));
 666	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
 667	       una_reg(9), una_reg(10), una_reg(11));
 668	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
 669	       una_reg(12), una_reg(13), una_reg(14));
 670	printk("r15= %016lx\n", una_reg(15));
 671	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
 672	       una_reg(16), una_reg(17), una_reg(18));
 673	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
 674 	       una_reg(19), una_reg(20), una_reg(21));
 675 	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
 676	       una_reg(22), una_reg(23), una_reg(24));
 677	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
 678	       una_reg(25), una_reg(27), una_reg(28));
 679	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
 680
 681	dik_show_code((unsigned int *)pc);
 682	dik_show_trace((unsigned long *)(regs+1));
 683
 684	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
 685		printk("die_if_kernel recursion detected.\n");
 686		local_irq_enable();
 687		while (1);
 688	}
 689	do_exit(SIGSEGV);
 690}
 691
 692/*
 693 * Convert an s-floating point value in memory format to the
 694 * corresponding value in register format.  The exponent
 695 * needs to be remapped to preserve non-finite values
 696 * (infinities, not-a-numbers, denormals).
 697 */
 698static inline unsigned long
 699s_mem_to_reg (unsigned long s_mem)
 700{
 701	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
 702	unsigned long sign    = (s_mem >> 31) & 0x1;
 703	unsigned long exp_msb = (s_mem >> 30) & 0x1;
 704	unsigned long exp_low = (s_mem >> 23) & 0x7f;
 705	unsigned long exp;
 706
 707	exp = (exp_msb << 10) | exp_low;	/* common case */
 708	if (exp_msb) {
 709		if (exp_low == 0x7f) {
 710			exp = 0x7ff;
 711		}
 712	} else {
 713		if (exp_low == 0x00) {
 714			exp = 0x000;
 715		} else {
 716			exp |= (0x7 << 7);
 717		}
 718	}
 719	return (sign << 63) | (exp << 52) | (frac << 29);
 720}
 721
 722/*
 723 * Convert an s-floating point value in register format to the
 724 * corresponding value in memory format.
 725 */
 726static inline unsigned long
 727s_reg_to_mem (unsigned long s_reg)
 728{
 729	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
 730}
 731
 732/*
 733 * Handle user-level unaligned fault.  Handling user-level unaligned
 734 * faults is *extremely* slow and produces nasty messages.  A user
 735 * program *should* fix unaligned faults ASAP.
 736 *
 737 * Notice that we have (almost) the regular kernel stack layout here,
 738 * so finding the appropriate registers is a little more difficult
 739 * than in the kernel case.
 740 *
 741 * Finally, we handle regular integer load/stores only.  In
 742 * particular, load-linked/store-conditionally and floating point
 743 * load/stores are not supported.  The former make no sense with
 744 * unaligned faults (they are guaranteed to fail) and I don't think
 745 * the latter will occur in any decent program.
 746 *
 747 * Sigh. We *do* have to handle some FP operations, because GCC will
 748 * uses them as temporary storage for integer memory to memory copies.
 749 * However, we need to deal with stt/ldt and sts/lds only.
 750 */
 751
 752#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
 753			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
 754			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
 755			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
 756
 757#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
 758			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
 759			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
 760
 761#define R(x)	((size_t) &((struct pt_regs *)0)->x)
 762
 763static int unauser_reg_offsets[32] = {
 764	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
 765	/* r9 ... r15 are stored in front of regs.  */
 766	-56, -48, -40, -32, -24, -16, -8,
 767	R(r16), R(r17), R(r18),
 768	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
 769	R(r27), R(r28), R(gp),
 770	0, 0
 771};
 772
 773#undef R
 774
 775asmlinkage void
 776do_entUnaUser(void __user * va, unsigned long opcode,
 777	      unsigned long reg, struct pt_regs *regs)
 778{
 779	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 780
 781	unsigned long tmp1, tmp2, tmp3, tmp4;
 782	unsigned long fake_reg, *reg_addr = &fake_reg;
 783	siginfo_t info;
 784	long error;
 785
 786	/* Check the UAC bits to decide what the user wants us to do
 787	   with the unaliged access.  */
 788
 789	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
 790		if (__ratelimit(&ratelimit)) {
 791			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
 792			       current->comm, task_pid_nr(current),
 793			       regs->pc - 4, va, opcode, reg);
 794		}
 795	}
 796	if ((current_thread_info()->status & TS_UAC_SIGBUS))
 797		goto give_sigbus;
 798	/* Not sure why you'd want to use this, but... */
 799	if ((current_thread_info()->status & TS_UAC_NOFIX))
 800		return;
 801
 802	/* Don't bother reading ds in the access check since we already
 803	   know that this came from the user.  Also rely on the fact that
 804	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
 805	if (!__access_ok((unsigned long)va, 0, USER_DS))
 806		goto give_sigsegv;
 807
 808	++unaligned[1].count;
 809	unaligned[1].va = (unsigned long)va;
 810	unaligned[1].pc = regs->pc - 4;
 811
 812	if ((1L << opcode) & OP_INT_MASK) {
 813		/* it's an integer load/store */
 814		if (reg < 30) {
 815			reg_addr = (unsigned long *)
 816			  ((char *)regs + unauser_reg_offsets[reg]);
 817		} else if (reg == 30) {
 818			/* usp in PAL regs */
 819			fake_reg = rdusp();
 820		} else {
 821			/* zero "register" */
 822			fake_reg = 0;
 823		}
 824	}
 825
 826	/* We don't want to use the generic get/put unaligned macros as
 827	   we want to trap exceptions.  Only if we actually get an
 828	   exception will we decide whether we should have caught it.  */
 829
 830	switch (opcode) {
 831	case 0x0c: /* ldwu */
 832		__asm__ __volatile__(
 833		"1:	ldq_u %1,0(%3)\n"
 834		"2:	ldq_u %2,1(%3)\n"
 835		"	extwl %1,%3,%1\n"
 836		"	extwh %2,%3,%2\n"
 837		"3:\n"
 838		".section __ex_table,\"a\"\n"
 839		"	.long 1b - .\n"
 840		"	lda %1,3b-1b(%0)\n"
 841		"	.long 2b - .\n"
 842		"	lda %2,3b-2b(%0)\n"
 843		".previous"
 844			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 845			: "r"(va), "0"(0));
 846		if (error)
 847			goto give_sigsegv;
 848		*reg_addr = tmp1|tmp2;
 849		break;
 850
 851	case 0x22: /* lds */
 852		__asm__ __volatile__(
 853		"1:	ldq_u %1,0(%3)\n"
 854		"2:	ldq_u %2,3(%3)\n"
 855		"	extll %1,%3,%1\n"
 856		"	extlh %2,%3,%2\n"
 857		"3:\n"
 858		".section __ex_table,\"a\"\n"
 859		"	.long 1b - .\n"
 860		"	lda %1,3b-1b(%0)\n"
 861		"	.long 2b - .\n"
 862		"	lda %2,3b-2b(%0)\n"
 863		".previous"
 864			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 865			: "r"(va), "0"(0));
 866		if (error)
 867			goto give_sigsegv;
 868		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
 869		return;
 870
 871	case 0x23: /* ldt */
 872		__asm__ __volatile__(
 873		"1:	ldq_u %1,0(%3)\n"
 874		"2:	ldq_u %2,7(%3)\n"
 875		"	extql %1,%3,%1\n"
 876		"	extqh %2,%3,%2\n"
 877		"3:\n"
 878		".section __ex_table,\"a\"\n"
 879		"	.long 1b - .\n"
 880		"	lda %1,3b-1b(%0)\n"
 881		"	.long 2b - .\n"
 882		"	lda %2,3b-2b(%0)\n"
 883		".previous"
 884			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 885			: "r"(va), "0"(0));
 886		if (error)
 887			goto give_sigsegv;
 888		alpha_write_fp_reg(reg, tmp1|tmp2);
 889		return;
 890
 891	case 0x28: /* ldl */
 892		__asm__ __volatile__(
 893		"1:	ldq_u %1,0(%3)\n"
 894		"2:	ldq_u %2,3(%3)\n"
 895		"	extll %1,%3,%1\n"
 896		"	extlh %2,%3,%2\n"
 897		"3:\n"
 898		".section __ex_table,\"a\"\n"
 899		"	.long 1b - .\n"
 900		"	lda %1,3b-1b(%0)\n"
 901		"	.long 2b - .\n"
 902		"	lda %2,3b-2b(%0)\n"
 903		".previous"
 904			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 905			: "r"(va), "0"(0));
 906		if (error)
 907			goto give_sigsegv;
 908		*reg_addr = (int)(tmp1|tmp2);
 909		break;
 910
 911	case 0x29: /* ldq */
 912		__asm__ __volatile__(
 913		"1:	ldq_u %1,0(%3)\n"
 914		"2:	ldq_u %2,7(%3)\n"
 915		"	extql %1,%3,%1\n"
 916		"	extqh %2,%3,%2\n"
 917		"3:\n"
 918		".section __ex_table,\"a\"\n"
 919		"	.long 1b - .\n"
 920		"	lda %1,3b-1b(%0)\n"
 921		"	.long 2b - .\n"
 922		"	lda %2,3b-2b(%0)\n"
 923		".previous"
 924			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
 925			: "r"(va), "0"(0));
 926		if (error)
 927			goto give_sigsegv;
 928		*reg_addr = tmp1|tmp2;
 929		break;
 930
 931	/* Note that the store sequences do not indicate that they change
 932	   memory because it _should_ be affecting nothing in this context.
 933	   (Otherwise we have other, much larger, problems.)  */
 934	case 0x0d: /* stw */
 935		__asm__ __volatile__(
 936		"1:	ldq_u %2,1(%5)\n"
 937		"2:	ldq_u %1,0(%5)\n"
 938		"	inswh %6,%5,%4\n"
 939		"	inswl %6,%5,%3\n"
 940		"	mskwh %2,%5,%2\n"
 941		"	mskwl %1,%5,%1\n"
 942		"	or %2,%4,%2\n"
 943		"	or %1,%3,%1\n"
 944		"3:	stq_u %2,1(%5)\n"
 945		"4:	stq_u %1,0(%5)\n"
 946		"5:\n"
 947		".section __ex_table,\"a\"\n"
 948		"	.long 1b - .\n"
 949		"	lda %2,5b-1b(%0)\n"
 950		"	.long 2b - .\n"
 951		"	lda %1,5b-2b(%0)\n"
 952		"	.long 3b - .\n"
 953		"	lda $31,5b-3b(%0)\n"
 954		"	.long 4b - .\n"
 955		"	lda $31,5b-4b(%0)\n"
 956		".previous"
 957			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 958			  "=&r"(tmp3), "=&r"(tmp4)
 959			: "r"(va), "r"(*reg_addr), "0"(0));
 960		if (error)
 961			goto give_sigsegv;
 962		return;
 963
 964	case 0x26: /* sts */
 965		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
 966		/* FALLTHRU */
 967
 968	case 0x2c: /* stl */
 969		__asm__ __volatile__(
 970		"1:	ldq_u %2,3(%5)\n"
 971		"2:	ldq_u %1,0(%5)\n"
 972		"	inslh %6,%5,%4\n"
 973		"	insll %6,%5,%3\n"
 974		"	msklh %2,%5,%2\n"
 975		"	mskll %1,%5,%1\n"
 976		"	or %2,%4,%2\n"
 977		"	or %1,%3,%1\n"
 978		"3:	stq_u %2,3(%5)\n"
 979		"4:	stq_u %1,0(%5)\n"
 980		"5:\n"
 981		".section __ex_table,\"a\"\n"
 982		"	.long 1b - .\n"
 983		"	lda %2,5b-1b(%0)\n"
 984		"	.long 2b - .\n"
 985		"	lda %1,5b-2b(%0)\n"
 986		"	.long 3b - .\n"
 987		"	lda $31,5b-3b(%0)\n"
 988		"	.long 4b - .\n"
 989		"	lda $31,5b-4b(%0)\n"
 990		".previous"
 991			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
 992			  "=&r"(tmp3), "=&r"(tmp4)
 993			: "r"(va), "r"(*reg_addr), "0"(0));
 994		if (error)
 995			goto give_sigsegv;
 996		return;
 997
 998	case 0x27: /* stt */
 999		fake_reg = alpha_read_fp_reg(reg);
1000		/* FALLTHRU */
1001
1002	case 0x2d: /* stq */
1003		__asm__ __volatile__(
1004		"1:	ldq_u %2,7(%5)\n"
1005		"2:	ldq_u %1,0(%5)\n"
1006		"	insqh %6,%5,%4\n"
1007		"	insql %6,%5,%3\n"
1008		"	mskqh %2,%5,%2\n"
1009		"	mskql %1,%5,%1\n"
1010		"	or %2,%4,%2\n"
1011		"	or %1,%3,%1\n"
1012		"3:	stq_u %2,7(%5)\n"
1013		"4:	stq_u %1,0(%5)\n"
1014		"5:\n"
1015		".section __ex_table,\"a\"\n\t"
1016		"	.long 1b - .\n"
1017		"	lda %2,5b-1b(%0)\n"
1018		"	.long 2b - .\n"
1019		"	lda %1,5b-2b(%0)\n"
1020		"	.long 3b - .\n"
1021		"	lda $31,5b-3b(%0)\n"
1022		"	.long 4b - .\n"
1023		"	lda $31,5b-4b(%0)\n"
1024		".previous"
1025			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
1026			  "=&r"(tmp3), "=&r"(tmp4)
1027			: "r"(va), "r"(*reg_addr), "0"(0));
1028		if (error)
1029			goto give_sigsegv;
1030		return;
1031
1032	default:
1033		/* What instruction were you trying to use, exactly?  */
1034		goto give_sigbus;
1035	}
1036
1037	/* Only integer loads should get here; everyone else returns early. */
1038	if (reg == 30)
1039		wrusp(fake_reg);
1040	return;
1041
1042give_sigsegv:
1043	regs->pc -= 4;  /* make pc point to faulting insn */
1044	info.si_signo = SIGSEGV;
1045	info.si_errno = 0;
1046
1047	/* We need to replicate some of the logic in mm/fault.c,
1048	   since we don't have access to the fault code in the
1049	   exception handling return path.  */
1050	if (!__access_ok((unsigned long)va, 0, USER_DS))
1051		info.si_code = SEGV_ACCERR;
1052	else {
1053		struct mm_struct *mm = current->mm;
1054		down_read(&mm->mmap_sem);
1055		if (find_vma(mm, (unsigned long)va))
1056			info.si_code = SEGV_ACCERR;
1057		else
1058			info.si_code = SEGV_MAPERR;
1059		up_read(&mm->mmap_sem);
1060	}
1061	info.si_addr = va;
1062	send_sig_info(SIGSEGV, &info, current);
1063	return;
1064
1065give_sigbus:
1066	regs->pc -= 4;
1067	info.si_signo = SIGBUS;
1068	info.si_errno = 0;
1069	info.si_code = BUS_ADRALN;
1070	info.si_addr = va;
1071	send_sig_info(SIGBUS, &info, current);
1072	return;
1073}
1074
1075void
1076trap_init(void)
1077{
1078	/* Tell PAL-code what global pointer we want in the kernel.  */
1079	register unsigned long gptr __asm__("$29");
1080	wrkgp(gptr);
1081
1082	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
1083	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
1084	if (implver() == IMPLVER_EV4)
1085		opDEC_check();
1086
1087	wrent(entArith, 1);
1088	wrent(entMM, 2);
1089	wrent(entIF, 3);
1090	wrent(entUna, 4);
1091	wrent(entSys, 5);
1092	wrent(entDbg, 6);
1093}