// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include "asm/irqflags.h"
#include "asm/ptrace.h"
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/randomize_kstack.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>
#include <linux/entry-common.h>
#include <asm/asm-extable.h>
#include <asm/fpu/api.h>
#include <asm/vtime.h>
#include "entry.h"

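/*
 * The program-check old PSW points past the instruction that caused the
 * exception; the upper halfword of int_code holds the instruction length,
 * so subtracting it yields the address of the faulting instruction. For
 * program checks raised inside a transaction (bit 0x200 of int_code) the
 * address is taken from the transaction diagnostic block instead.
 */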
static inline void __user *get_trap_ip(struct pt_regs *regs)
{
	unsigned long address;

	if (regs->int_code & 0x200)
		address = current->thread.trap_tdb.data[3];
	else
		address = regs->psw.addr;
	return (void __user *) (address - (regs->int_code >> 16));
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	if (user_mode(regs)) {
		force_sig_fault(si_signo, si_code, get_trap_ip(regs));
		report_user_fault(regs, si_signo, 0);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;
	do_report_trap(regs, si_signo, si_code, str);
}
NOKPROBE_SYMBOL(do_trap);

void do_per_trap(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	force_sig_fault(SIGTRAP, TRAP_HWBKPT,
			(void __force __user *) current->thread.per_event.address);
}
NOKPROBE_SYMBOL(do_per_trap);

static void default_trap_handler(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		report_user_fault(regs, SIGSEGV, 0);
		force_exit_sig(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs)		\
{						\
	do_trap(regs, signr, sicode, str);	\
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
	      "transaction constraint exception")

static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

static void translation_specification_exception(struct pt_regs *regs)
{
	/* May never happen. */
	panic("Translation-Specification Exception");
}

static void illegal_op(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location;
	int is_uprobe_insn = 0;
	int signal = 0;

	location = get_trap_ip(regs);

	if (user_mode(regs)) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace)
				force_sig_fault(SIGTRAP, TRAP_BRKPT, location);
			else
				signal = SIGILL;
#ifdef CONFIG_UPROBES
		} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
			is_uprobe_insn = 1;
#endif
		} else
			signal = SIGILL;
	}
	/*
	 * We got either an illegal op in kernel mode, or user space trapped
	 * on a uprobes illegal instruction. See if kprobes or uprobes picks
	 * it up. If not, SIGILL.
	 */
	if (is_uprobe_insn || !user_mode(regs)) {
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);

DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");

static void vector_exception(struct pt_regs *regs)
{
	int si_code, vic;

	if (!MACHINE_HAS_VX) {
		do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
		return;
	}

	/* get vector interrupt code from fpc */
	save_fpu_regs();
	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
	switch (vic) {
	case 1: /* invalid vector operation */
		si_code = FPE_FLTINV;
		break;
	case 2: /* division by zero */
		si_code = FPE_FLTDIV;
		break;
	case 3: /* overflow */
		si_code = FPE_FLTOVF;
		break;
	case 4: /* underflow */
		si_code = FPE_FLTUND;
		break;
	case 5:	/* inexact */
		si_code = FPE_FLTRES;
		break;
	default: /* unknown cause */
		si_code = 0;
	}
	do_trap(regs, SIGFPE, si_code, "vector exception");
}

static void data_exception(struct pt_regs *regs)
{
	save_fpu_regs();
	if (current->thread.fpu.fpc & FPC_DXC_MASK)
		do_fp_trap(regs, current->thread.fpu.fpc);
	else
		do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

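/*
 * Monitor events are used to implement BUG() and WARN(). report_bug()
 * classifies the event: unknown monitor calls only get their exception
 * table fixup applied, warnings simply return, and real bugs end in die().
 */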
static void monitor_event_exception(struct pt_regs *regs)
{
	if (user_mode(regs))
		return;

	switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
	case BUG_TRAP_TYPE_NONE:
		fixup_exception(regs);
		break;
	case BUG_TRAP_TYPE_WARN:
		break;
	case BUG_TRAP_TYPE_BUG:
		die(regs, "monitor event");
		break;
	}
}

void kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
NOKPROBE_SYMBOL(kernel_stack_overflow);

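/*
 * BUG() relies on the monitor call instruction raising a monitor event
 * program check. Verify this once at boot: if the mc instruction falls
 * through instead of trapping, val is cleared and we panic.
 */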
static void __init test_monitor_call(void)
{
	int val = 1;

	if (!IS_ENABLED(CONFIG_BUG))
		return;
	asm volatile(
		"	mc	0,0\n"
		"0:	xgr	%0,%0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (val));
	if (!val)
		panic("Monitor call doesn't work!\n");
}

void __init trap_init(void)
{
	local_mcck_enable();
	test_monitor_call();
}

static void (*pgm_check_table[128])(struct pt_regs *regs);

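/*
 * Handler for program-check interruptions: pick up the interruption
 * parameters from the lowcore, handle PER events, and dispatch all other
 * program checks through pgm_check_table (defined below).
 */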
void noinstr __do_pgm_check(struct pt_regs *regs)
{
	unsigned int trapnr;
	irqentry_state_t state;

	regs->int_code = S390_lowcore.pgm_int_code;
	regs->int_parm_long = S390_lowcore.trans_exc_code;

	state = irqentry_enter(regs);

	if (user_mode(regs)) {
		update_timer_sys();
		if (!static_branch_likely(&cpu_has_bear)) {
			if (regs->last_break < 4096)
				regs->last_break = 1;
		}
		current->thread.last_break = regs->last_break;
	}

	if (S390_lowcore.pgm_code & 0x0200) {
		/* transaction abort */
		current->thread.trap_tdb = S390_lowcore.pgm_tdb;
	}

	if (S390_lowcore.pgm_code & PGM_INT_CODE_PER) {
		if (user_mode(regs)) {
			struct per_event *ev = &current->thread.per_event;

			set_thread_flag(TIF_PER_TRAP);
			ev->address = S390_lowcore.per_address;
			ev->cause = S390_lowcore.per_code_combined;
			ev->paid = S390_lowcore.per_access_id;
		} else {
			/* PER event in kernel is kprobes */
			__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
			do_per_trap(regs);
			goto out;
		}
	}

	if (!irqs_disabled_flags(regs->psw.mask))
		trace_hardirqs_on();
	__arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);

	trapnr = regs->int_code & PGM_INT_CODE_MASK;
	if (trapnr)
		pgm_check_table[trapnr](regs);
out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

/*
 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
 * line defines the function to be called corresponding to the program check
 * interruption code.
 */
static void (*pgm_check_table[128])(struct pt_regs *regs) = {
	[0x00]		= default_trap_handler,
	[0x01]		= illegal_op,
	[0x02]		= privileged_op,
	[0x03]		= execute_exception,
	[0x04]		= do_protection_exception,
	[0x05]		= addressing_exception,
	[0x06]		= specification_exception,
	[0x07]		= data_exception,
	[0x08]		= overflow_exception,
	[0x09]		= divide_exception,
	[0x0a]		= overflow_exception,
	[0x0b]		= divide_exception,
	[0x0c]		= hfp_overflow_exception,
	[0x0d]		= hfp_underflow_exception,
	[0x0e]		= hfp_significance_exception,
	[0x0f]		= hfp_divide_exception,
	[0x10]		= do_dat_exception,
	[0x11]		= do_dat_exception,
	[0x12]		= translation_specification_exception,
	[0x13]		= special_op_exception,
	[0x14]		= default_trap_handler,
	[0x15]		= operand_exception,
	[0x16]		= default_trap_handler,
	[0x17]		= default_trap_handler,
	[0x18]		= transaction_exception,
	[0x19]		= default_trap_handler,
	[0x1a]		= default_trap_handler,
	[0x1b]		= vector_exception,
	[0x1c]		= space_switch_exception,
	[0x1d]		= hfp_sqrt_exception,
	[0x1e ... 0x37]	= default_trap_handler,
	[0x38]		= do_dat_exception,
	[0x39]		= do_dat_exception,
	[0x3a]		= do_dat_exception,
	[0x3b]		= do_dat_exception,
	[0x3c]		= default_trap_handler,
	[0x3d]		= do_secure_storage_access,
	[0x3e]		= do_non_secure_storage_access,
	[0x3f]		= do_secure_storage_violation,
	[0x40]		= monitor_event_exception,
	[0x41 ... 0x7f]	= default_trap_handler,
};

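/*
 * Some handlers referenced above are only built for certain configurations.
 * Declare them as weak aliases of default_trap_handler so the table always
 * resolves to a valid function; a real implementation overrides the alias.
 */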
#define COND_TRAP(x) asm(			\
	".weak " __stringify(x) "\n\t"		\
	".set " __stringify(x) ","		\
	__stringify(default_trap_handler))

COND_TRAP(do_secure_storage_access);
COND_TRAP(do_non_secure_storage_access);
COND_TRAP(do_secure_storage_violation);
/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/ipl.h>
#include "entry.h"

void (*pgm_check_table[128])(struct pt_regs *regs);

int show_unhandled_signals = 1;

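/* General register 15 is the stack pointer; load its current value. */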
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s       ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);

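/*
 * Extract the field selected by "bits" from the PSW mask and shift it
 * right so that its least significant bit ends up at bit position 0.
 */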
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static inline void report_user_fault(struct pt_regs *regs, int signr)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%X ", regs->int_code);
	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	show_regs(regs);
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

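/*
 * The program-check old PSW points past the faulting instruction; subtract
 * the instruction length (upper halfword of int_code) to get its address.
 */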
static inline void __user *get_psw_address(struct pt_regs *regs)
{
	return (void __user *)
		((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN);
}

static void __kprobes do_trap(struct pt_regs *regs,
			      int si_signo, int si_code, char *str)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, 0,
		       regs->int_code, si_signo) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		info.si_signo = si_signo;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = get_psw_address(regs);
		force_sig_info(si_signo, &info, current);
		report_user_fault(regs, si_signo);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(regs, str);
		}
	}
}

void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr =
		(void __force __user *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}

static void default_trap_handler(struct pt_regs *regs)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die(regs, "Unknown program exception");
}

#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs)		\
{						\
	do_trap(regs, signr, sicode, str);	\
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")

static inline void do_fp_trap(struct pt_regs *regs, int fpc)
{
	int si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si_code = FPE_FLTRES;
	}
	do_trap(regs, SIGFPE, si_code, "floating point exception");
}

static void __kprobes illegal_op(struct pt_regs *regs)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal == SIGSEGV)
		do_trap(regs, signal, SEGV_MAPERR, "user address fault");
	else
#endif
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}


#ifdef CONFIG_MATHEMU
void specification_exception(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "specification exception");
}
#else
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif

static void data_exception(struct pt_regs *regs)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, current->thread.fp_regs.fpc);
	else if (signal)
		do_trap(regs, signal, ILL_ILLOPN, "data exception");
}

static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}

void __kprobes kernel_stack_overflow(struct pt_regs *regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_asce_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	/* Enable machine checks early. */
	local_mcck_enable();
}