Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2000
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
6 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
7 *
8 * Derived from "arch/i386/kernel/traps.c"
9 * Copyright (C) 1991, 1992 Linus Torvalds
10 */
11
12/*
13 * 'Traps.c' handles hardware traps and faults after we have saved some
14 * state in 'asm.s'.
15 */
16#include "asm/irqflags.h"
17#include "asm/ptrace.h"
18#include <linux/kprobes.h>
19#include <linux/kdebug.h>
20#include <linux/randomize_kstack.h>
21#include <linux/extable.h>
22#include <linux/ptrace.h>
23#include <linux/sched.h>
24#include <linux/sched/debug.h>
25#include <linux/mm.h>
26#include <linux/slab.h>
27#include <linux/uaccess.h>
28#include <linux/cpu.h>
29#include <linux/entry-common.h>
30#include <linux/kmsan.h>
31#include <asm/asm-extable.h>
32#include <asm/vtime.h>
33#include <asm/fpu.h>
34#include <asm/fault.h>
35#include "entry.h"
36
37static inline void __user *get_trap_ip(struct pt_regs *regs)
38{
39 unsigned long address;
40
41 if (regs->int_code & 0x200)
42 address = current->thread.trap_tdb.data[3];
43 else
44 address = regs->psw.addr;
45 return (void __user *) (address - (regs->int_code >> 16));
46}
47
#ifdef CONFIG_GENERIC_BUG
/* Any address may hold a BUG entry on s390; no further validation needed. */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif
54
/*
 * Deliver a trap to user space as a signal, or try to fix it up in
 * kernel mode; die() if no exception table entry applies.
 */
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
{
	if (user_mode(regs)) {
		force_sig_fault(si_signo, si_code, get_trap_ip(regs));
		report_user_fault(regs, si_signo, 0);
		return;
	}
	if (!fixup_exception(regs))
		die(regs, str);
}
65
66static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
67{
68 if (notify_die(DIE_TRAP, str, regs, 0,
69 regs->int_code, si_signo) == NOTIFY_STOP)
70 return;
71 do_report_trap(regs, si_signo, si_code, str);
72}
73NOKPROBE_SYMBOL(do_trap);
74
/*
 * Handle a PER (program event recording) trap: notify single-step
 * listeners and, if the task is ptraced, send SIGTRAP/TRAP_HWBKPT with
 * the recorded PER address.
 */
void do_per_trap(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	/* Only ptraced tasks care about PER events. */
	if (!current->ptrace)
		return;
	force_sig_fault(SIGTRAP, TRAP_HWBKPT,
		(void __force __user *) current->thread.per_event.address);
}
NOKPROBE_SYMBOL(do_per_trap);
85
86static void default_trap_handler(struct pt_regs *regs)
87{
88 if (user_mode(regs)) {
89 report_user_fault(regs, SIGSEGV, 0);
90 force_exit_sig(SIGSEGV);
91 } else
92 die(regs, "Unknown program exception");
93}
94
/*
 * Generate a simple trap handler that reports the program check with
 * the given signal, si_code and message via do_trap().
 */
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs) \
{ \
	do_trap(regs, signr, sicode, str); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
	      "transaction constraint exception")
127
128static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
129{
130 int si_code = 0;
131 /* FPC[2] is Data Exception Code */
132 if ((fpc & 0x00000300) == 0) {
133 /* bits 6 and 7 of DXC are 0 iff IEEE exception */
134 if (fpc & 0x8000) /* invalid fp operation */
135 si_code = FPE_FLTINV;
136 else if (fpc & 0x4000) /* div by 0 */
137 si_code = FPE_FLTDIV;
138 else if (fpc & 0x2000) /* overflow */
139 si_code = FPE_FLTOVF;
140 else if (fpc & 0x1000) /* underflow */
141 si_code = FPE_FLTUND;
142 else if (fpc & 0x0800) /* inexact */
143 si_code = FPE_FLTRES;
144 }
145 do_trap(regs, SIGFPE, si_code, "floating point exception");
146}
147
/*
 * A translation-specification exception indicates corrupt page tables;
 * there is no sane way to recover, so panic immediately.
 */
static void translation_specification_exception(struct pt_regs *regs)
{
	/* May never happen. */
	panic("Translation-Specification Exception");
}
153
/*
 * Handle an operation exception (illegal opcode). In user mode the
 * opcode is inspected for the ptrace breakpoint instruction and the
 * uprobes breakpoint instruction; kernel-mode illegal ops and uprobe
 * breakpoints are offered to the DIE_BPT notifier (kprobes/uprobes)
 * before falling back to SIGILL.
 */
static void illegal_op(struct pt_regs *regs)
{
	__u8 opcode[6];
	__u16 __user *location;
	int is_uprobe_insn = 0;
	int signal = 0;

	location = get_trap_ip(regs);

	if (user_mode(regs)) {
		/* Fetch the first halfword of the faulting instruction. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace)
				force_sig_fault(SIGTRAP, TRAP_BRKPT, location);
			else
				signal = SIGILL;
#ifdef CONFIG_UPROBES
		} else if (*((__u16 *) opcode) == UPROBE_SWBP_INSN) {
			is_uprobe_insn = 1;
#endif
		} else
			signal = SIGILL;
	}
	/*
	 * We got either an illegal op in kernel mode, or user space trapped
	 * on a uprobes illegal instruction. See if kprobes or uprobes picks
	 * it up. If not, SIGILL.
	 */
	if (is_uprobe_insn || !user_mode(regs)) {
		if (notify_die(DIE_BPT, "bpt", regs, 0,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}
	if (signal)
		do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
}
NOKPROBE_SYMBOL(illegal_op);
192
/* Specification exception: malformed operand/alignment; report as SIGILL. */
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
195
196static void vector_exception(struct pt_regs *regs)
197{
198 int si_code, vic;
199
200 if (!cpu_has_vx()) {
201 do_trap(regs, SIGILL, ILL_ILLOPN, "illegal operation");
202 return;
203 }
204
205 /* get vector interrupt code from fpc */
206 save_user_fpu_regs();
207 vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
208 switch (vic) {
209 case 1: /* invalid vector operation */
210 si_code = FPE_FLTINV;
211 break;
212 case 2: /* division by zero */
213 si_code = FPE_FLTDIV;
214 break;
215 case 3: /* overflow */
216 si_code = FPE_FLTOVF;
217 break;
218 case 4: /* underflow */
219 si_code = FPE_FLTUND;
220 break;
221 case 5: /* inexact */
222 si_code = FPE_FLTRES;
223 break;
224 default: /* unknown cause */
225 si_code = 0;
226 }
227 do_trap(regs, SIGFPE, si_code, "vector exception");
228}
229
230static void data_exception(struct pt_regs *regs)
231{
232 save_user_fpu_regs();
233 if (current->thread.ufpu.fpc & FPC_DXC_MASK)
234 do_fp_trap(regs, current->thread.ufpu.fpc);
235 else
236 do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
237}
238
/*
 * Handle a space-switch event: restore home-space addressing for user
 * tasks and report SIGILL.
 */
static void space_switch_exception(struct pt_regs *regs)
{
	/* Set user psw back to home space mode. */
	if (user_mode(regs))
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
247
/*
 * Handle a monitor event, used by the kernel to implement BUG()/WARN()
 * via the "mc" instruction. User-mode monitor events are ignored.
 */
static void monitor_event_exception(struct pt_regs *regs)
{
	if (user_mode(regs))
		return;

	/* Rewind by the instruction length code to locate the bug entry. */
	switch (report_bug(regs->psw.addr - (regs->int_code >> 16), regs)) {
	case BUG_TRAP_TYPE_NONE:
		/* No bug entry: try an exception table fixup. */
		fixup_exception(regs);
		break;
	case BUG_TRAP_TYPE_WARN:
		break;
	case BUG_TRAP_TYPE_BUG:
		die(regs, "monitor event");
		break;
	}
}
264
/*
 * Called from entry code when the kernel stack backchain is corrupt;
 * prints the registers and panics — there is no way to continue.
 */
void kernel_stack_overflow(struct pt_regs *regs)
{
	/*
	 * Normally regs are unpoisoned by the generic entry code, but
	 * kernel_stack_overflow() is a rare case that is called bypassing it.
	 */
	kmsan_unpoison_entry_regs(regs);
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
NOKPROBE_SYMBOL(kernel_stack_overflow);
279
/*
 * Sanity-check that the monitor call instruction traps as expected:
 * "mc 0,0" must raise a monitor event whose fixup (via the exception
 * table) skips the "lhi" that would clear val. If val ends up zero the
 * trap did not happen and BUG()/WARN() would be broken.
 */
static void __init test_monitor_call(void)
{
	int val = 1;

	if (!IS_ENABLED(CONFIG_BUG))
		return;
	asm volatile(
		"	mc	0,0\n"
		"0:	lhi	%[val],0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [val] "+d" (val));
	if (!val)
		panic("Monitor call doesn't work!\n");
}
295
/*
 * Early trap setup: enable machine-check handling in all new PSWs
 * (with low-address protection temporarily disabled so the lowcore can
 * be written), then enable machine checks and verify monitor calls.
 */
void __init trap_init(void)
{
	struct lowcore *lc = get_lowcore();
	unsigned long flags;
	struct ctlreg cr0;

	local_irq_save(flags);
	/* Low-address protection must be off to update the lowcore PSWs. */
	cr0 = local_ctl_clear_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
	psw_bits(lc->external_new_psw).mcheck = 1;
	psw_bits(lc->program_new_psw).mcheck = 1;
	psw_bits(lc->svc_new_psw).mcheck = 1;
	psw_bits(lc->io_new_psw).mcheck = 1;
	/* Restore the previous CR0 value (re-enables protection). */
	local_ctl_load(0, &cr0);
	local_irq_restore(flags);
	local_mcck_enable();
	test_monitor_call();
}
313
314static void (*pgm_check_table[128])(struct pt_regs *regs);
315
316void noinstr __do_pgm_check(struct pt_regs *regs)
317{
318 struct lowcore *lc = get_lowcore();
319 irqentry_state_t state;
320 unsigned int trapnr;
321 union teid teid;
322
323 teid.val = lc->trans_exc_code;
324 regs->int_code = lc->pgm_int_code;
325 regs->int_parm_long = teid.val;
326
327 /*
328 * In case of a guest fault, short-circuit the fault handler and return.
329 * This way the sie64a() function will return 0; fault address and
330 * other relevant bits are saved in current->thread.gmap_teid, and
331 * the fault number in current->thread.gmap_int_code. KVM will be
332 * able to use this information to handle the fault.
333 */
334 if (test_pt_regs_flag(regs, PIF_GUEST_FAULT)) {
335 current->thread.gmap_teid.val = regs->int_parm_long;
336 current->thread.gmap_int_code = regs->int_code & 0xffff;
337 return;
338 }
339
340 state = irqentry_enter(regs);
341
342 if (user_mode(regs)) {
343 update_timer_sys();
344 if (!static_branch_likely(&cpu_has_bear)) {
345 if (regs->last_break < 4096)
346 regs->last_break = 1;
347 }
348 current->thread.last_break = regs->last_break;
349 }
350
351 if (lc->pgm_code & 0x0200) {
352 /* transaction abort */
353 current->thread.trap_tdb = lc->pgm_tdb;
354 }
355
356 if (lc->pgm_code & PGM_INT_CODE_PER) {
357 if (user_mode(regs)) {
358 struct per_event *ev = ¤t->thread.per_event;
359
360 set_thread_flag(TIF_PER_TRAP);
361 ev->address = lc->per_address;
362 ev->cause = lc->per_code_combined;
363 ev->paid = lc->per_access_id;
364 } else {
365 /* PER event in kernel is kprobes */
366 __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
367 do_per_trap(regs);
368 goto out;
369 }
370 }
371
372 if (!irqs_disabled_flags(regs->psw.mask))
373 trace_hardirqs_on();
374 __arch_local_irq_ssm(regs->psw.mask & ~PSW_MASK_PER);
375
376 trapnr = regs->int_code & PGM_INT_CODE_MASK;
377 if (trapnr)
378 pgm_check_table[trapnr](regs);
379out:
380 local_irq_disable();
381 irqentry_exit(regs, state);
382}
383
/*
 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
 * line defines the function to be called corresponding to the program check
 * interruption code.
 */
static void (*pgm_check_table[128])(struct pt_regs *regs) = {
	[0x00]		= default_trap_handler,
	[0x01]		= illegal_op,
	[0x02]		= privileged_op,
	[0x03]		= execute_exception,
	[0x04]		= do_protection_exception,
	[0x05]		= addressing_exception,
	[0x06]		= specification_exception,
	[0x07]		= data_exception,
	[0x08]		= overflow_exception,
	[0x09]		= divide_exception,
	[0x0a]		= overflow_exception,
	[0x0b]		= divide_exception,
	[0x0c]		= hfp_overflow_exception,
	[0x0d]		= hfp_underflow_exception,
	[0x0e]		= hfp_significance_exception,
	[0x0f]		= hfp_divide_exception,
	[0x10]		= do_dat_exception,	/* segment translation */
	[0x11]		= do_dat_exception,	/* page translation */
	[0x12]		= translation_specification_exception,
	[0x13]		= special_op_exception,
	[0x14]		= default_trap_handler,
	[0x15]		= operand_exception,
	[0x16]		= default_trap_handler,
	[0x17]		= default_trap_handler,
	[0x18]		= transaction_exception,
	[0x19]		= default_trap_handler,
	[0x1a]		= default_trap_handler,
	[0x1b]		= vector_exception,
	[0x1c]		= space_switch_exception,
	[0x1d]		= hfp_sqrt_exception,
	[0x1e ... 0x37] = default_trap_handler,
	[0x38]		= do_dat_exception,	/* ASCE type */
	[0x39]		= do_dat_exception,	/* region first trans. */
	[0x3a]		= do_dat_exception,	/* region second trans. */
	[0x3b]		= do_dat_exception,	/* region third trans. */
	[0x3c]		= default_trap_handler,
	[0x3d]		= do_secure_storage_access,
	[0x3e]		= default_trap_handler,
	[0x3f]		= default_trap_handler,
	[0x40]		= monitor_event_exception,
	[0x41 ... 0x7f] = default_trap_handler,
};
432
/*
 * Provide a weak alias so that a handler which may not be built in this
 * configuration falls back to default_trap_handler instead of causing a
 * link error.
 */
#define COND_TRAP(x) asm(			\
	".weak " __stringify(x) "\n\t"		\
	".set  " __stringify(x) ","		\
	__stringify(default_trap_handler))

COND_TRAP(do_secure_storage_access);
1/*
2 * arch/s390/kernel/traps.c
3 *
4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
8 *
9 * Derived from "arch/i386/kernel/traps.c"
10 * Copyright (C) 1991, 1992 Linus Torvalds
11 */
12
13/*
14 * 'Traps.c' handles hardware traps and faults after we have saved some
15 * state in 'asm.s'.
16 */
17#include <linux/sched.h>
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/ptrace.h>
22#include <linux/timer.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/seq_file.h>
28#include <linux/delay.h>
29#include <linux/module.h>
30#include <linux/kdebug.h>
31#include <linux/kallsyms.h>
32#include <linux/reboot.h>
33#include <linux/kprobes.h>
34#include <linux/bug.h>
35#include <linux/utsname.h>
36#include <asm/system.h>
37#include <asm/uaccess.h>
38#include <asm/io.h>
39#include <linux/atomic.h>
40#include <asm/mathemu.h>
41#include <asm/cpcmd.h>
42#include <asm/lowcore.h>
43#include <asm/debug.h>
44#include "entry.h"
45
/* Dispatch table for program interruption codes 0x00-0x7f; filled in trap_init(). */
void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);

/* Sysctl-style toggle: report unhandled user faults to the kernel log. */
int show_unhandled_signals;

/* Read the current stack pointer (general register 15). */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

/* Word width and stack dump depth differ between 31- and 64-bit builds. */
#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
61
/*
 * For show_trace we have tree different stack to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stack and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		/* Stop when sp leaves the [low, high) window of this stack. */
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		/* Continue on the stack the interrupted context was using. */
		low = sp;
		sp = regs->gprs[15];
	}
}
106
/*
 * Print a call trace starting at @stack (or the task's/current stack
 * pointer if NULL), walking panic, async and synchronous stacks in turn.
 */
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
132
/*
 * Dump raw stack contents (up to kstack_depth_to_print words), then the
 * call trace. @sp of NULL means "the given task's stack", or the current
 * stack if @task is also NULL.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Stop at the stack page boundary. */
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk(LONG, *stack++);
	}
	printk("\n");
	/* NOTE(review): passes the original sp (possibly NULL), not the
	 * computed stack pointer — show_trace() re-resolves NULL itself. */
	show_trace(task, sp);
}
154
/*
 * Print the breaking-event address saved by the entry code (64-bit
 * only); it is stashed in regs->args[0].
 */
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
	print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
#endif
}
163
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	/* CPU, taint flags and kernel version header. */
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
180
/*
 * Extract the PSW field selected by @bits and shift it down to bit 0:
 * "(~bits + 1) & bits" isolates the lowest set bit of the mask, so the
 * division is equivalent to a right shift by the field's offset.
 */
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}
185
/*
 * Print the PSW (raw and decoded field by field), general purpose
 * registers and disassembled code around the faulting instruction.
 */
void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}
216
/*
 * Print a full register/stack report: modules, CPU/version header,
 * registers, and — for kernel-mode regs — a stack backtrace.
 */
void show_regs(struct pt_regs *regs)
{
	print_modules();
	printk("CPU: %d %s %s %.*s\n",
	       task_thread_info(current)->cpu, print_tainted(),
	       init_utsname()->release,
	       (int)strcspn(init_utsname()->version, " "),
	       init_utsname()->version);
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, current,
	       (void *) current->thread.ksp);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}
234
/* Serializes oops output from concurrent CPUs. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Terminal error path: print an oops banner and registers, notify the
 * die chain, then kill the current task (or panic in interrupt context
 * or when panic_on_oops is set). Does not return normally.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
269
270static void inline report_user_fault(struct pt_regs *regs, long int_code,
271 int signr)
272{
273 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
274 return;
275 if (!unhandled_signal(current, signr))
276 return;
277 if (!printk_ratelimit())
278 return;
279 printk("User process fault: interruption code 0x%lX ", int_code);
280 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
281 printk("\n");
282 show_regs(regs);
283}
284
/* Any address may hold a BUG entry on s390; no further validation needed. */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
289
/*
 * Common trap delivery: offer the event to the die notifier chain,
 * then either send the prepared siginfo to the user task, or — in
 * kernel mode — apply an exception table fixup, report a bug entry,
 * or die().
 */
static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str,
				     struct pt_regs *regs, siginfo_t *info)
{
	if (notify_die(DIE_TRAP, str, regs, pgm_int_code,
		       pgm_int_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		/* Remember the interruption code for the oops path. */
		tsk->thread.trap_no = pgm_int_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(regs, pgm_int_code, signr);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, pgm_int_code);
		}
	}
}
318
319static inline void __user *get_psw_address(struct pt_regs *regs,
320 long pgm_int_code)
321{
322 return (void __user *)
323 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN);
324}
325
/*
 * Handle a PER (program event recording) trap: notify single-step
 * listeners and, if the task is ptraced, deliver SIGTRAP/TRAP_HWBKPT
 * with the recorded PER address.
 */
void __kprobes do_per_trap(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
		return;
	/* Only ptraced tasks care about PER events. */
	if (!current->ptrace)
		return;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void *) current->thread.per_event.address;
	force_sig_info(SIGTRAP, &info, current);
}
340
/*
 * Catch-all for program checks without a dedicated handler: kill the
 * user task with SIGSEGV, or die() in kernel mode.
 */
static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		report_user_fault(regs, pgm_int_code, SIGSEGV);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, pgm_int_code);
}
350
/*
 * Generate a trap handler that fills in a siginfo with the faulting
 * instruction address and reports the program check via do_trap().
 */
#define DO_ERROR_INFO(name, signr, sicode, str) \
static void name(struct pt_regs *regs, long pgm_int_code, \
		 unsigned long trans_exc_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = get_psw_address(regs, pgm_int_code); \
	do_trap(pgm_int_code, signr, str, regs, &info); \
}

DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
	      "addressing exception")
DO_ERROR_INFO(execute_exception, SIGILL, ILL_ILLOPN,
	      "execute exception")
DO_ERROR_INFO(divide_exception, SIGFPE, FPE_INTDIV,
	      "fixpoint divide exception")
DO_ERROR_INFO(overflow_exception, SIGFPE, FPE_INTOVF,
	      "fixpoint overflow exception")
DO_ERROR_INFO(hfp_overflow_exception, SIGFPE, FPE_FLTOVF,
	      "HFP overflow exception")
DO_ERROR_INFO(hfp_underflow_exception, SIGFPE, FPE_FLTUND,
	      "HFP underflow exception")
DO_ERROR_INFO(hfp_significance_exception, SIGFPE, FPE_FLTRES,
	      "HFP significance exception")
DO_ERROR_INFO(hfp_divide_exception, SIGFPE, FPE_FLTDIV,
	      "HFP divide exception")
DO_ERROR_INFO(hfp_sqrt_exception, SIGFPE, FPE_FLTINV,
	      "HFP square root exception")
DO_ERROR_INFO(operand_exception, SIGILL, ILL_ILLOPN,
	      "operand exception")
DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
	      "privileged operation")
DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
	      "special operation exception")
DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
	      "translation exception")
389
/*
 * Translate the data exception code (DXC) in @fpc into a SIGFPE
 * si_code and report the trap at @location. Only DXC values with bits
 * 6-7 clear are IEEE exceptions; other values keep si_code 0.
 */
static inline void do_fp_trap(struct pt_regs *regs, void __user *location,
			      int fpc, long pgm_int_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	do_trap(pgm_int_code, SIGFPE,
		"floating point exception", regs, &si);
}
416
/*
 * Handle an operation exception (illegal opcode). In user mode the
 * opcode is checked for the ptrace breakpoint and, with CONFIG_MATHEMU,
 * dispatched to the floating point emulation helpers. Kernel-mode
 * illegal ops are offered to the DIE_BPT notifier (kprobes) first.
 */
static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fetch the first halfword of the faulting instruction. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace) {
				info.si_signo = SIGTRAP;
				info.si_errno = 0;
				info.si_code = TRAP_BRKPT;
				info.si_addr = location;
				force_sig_info(SIGTRAP, &info, current);
			} else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(pgm_int_code, signal,
			"illegal operation", regs, &info);
	}
}
496
497
#ifdef CONFIG_MATHEMU
/*
 * With floating point emulation enabled, a specification exception may
 * be an FP instruction on hardware without the facility: decode the
 * opcode and hand it to the matching math_emu_* helper; anything else
 * becomes SIGILL.
 */
void specification_exception(struct pt_regs *regs, long pgm_int_code,
			     unsigned long trans_exc_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_psw_address(regs, pgm_int_code);

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal,
			"specification exception", regs, &info);
	}
}
#else
/* Without emulation, a specification exception is a plain SIGILL. */
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
	      "specification exception");
#endif
557
/*
 * Handle a data exception: on machines with IEEE hardware the FPC is
 * stored and its DXC decides between SIGFPE and SIGILL; with
 * CONFIG_MATHEMU the faulting FP instruction may instead be emulated.
 */
static void data_exception(struct pt_regs *regs, long pgm_int_code,
			   unsigned long trans_exc_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_psw_address(regs, pgm_int_code);

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	/*
	 * NOTE(review): this unconditionally overwrites any signal value
	 * computed by the MATHEMU branch above — verify against the
	 * intended emulation semantics before relying on it.
	 */
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, pgm_int_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(pgm_int_code, signal, "data exception", regs, &info);
	}
}
640
/*
 * Handle a space-switch event: restore home-space addressing for user
 * tasks and report SIGILL.
 */
static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
				   unsigned long trans_exc_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_psw_address(regs, pgm_int_code);
	do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
}
656
/*
 * Called from entry code when the kernel stack is corrupt; prints the
 * registers and panics — there is no way to continue.
 */
void __kprobes kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
665
666/* init is done in lowcore.S and head.S */
667
668void __init trap_init(void)
669{
670 int i;
671
672 for (i = 0; i < 128; i++)
673 pgm_check_table[i] = &default_trap_handler;
674 pgm_check_table[1] = &illegal_op;
675 pgm_check_table[2] = &privileged_op;
676 pgm_check_table[3] = &execute_exception;
677 pgm_check_table[4] = &do_protection_exception;
678 pgm_check_table[5] = &addressing_exception;
679 pgm_check_table[6] = &specification_exception;
680 pgm_check_table[7] = &data_exception;
681 pgm_check_table[8] = &overflow_exception;
682 pgm_check_table[9] = ÷_exception;
683 pgm_check_table[0x0A] = &overflow_exception;
684 pgm_check_table[0x0B] = ÷_exception;
685 pgm_check_table[0x0C] = &hfp_overflow_exception;
686 pgm_check_table[0x0D] = &hfp_underflow_exception;
687 pgm_check_table[0x0E] = &hfp_significance_exception;
688 pgm_check_table[0x0F] = &hfp_divide_exception;
689 pgm_check_table[0x10] = &do_dat_exception;
690 pgm_check_table[0x11] = &do_dat_exception;
691 pgm_check_table[0x12] = &translation_exception;
692 pgm_check_table[0x13] = &special_op_exception;
693#ifdef CONFIG_64BIT
694 pgm_check_table[0x38] = &do_asce_exception;
695 pgm_check_table[0x39] = &do_dat_exception;
696 pgm_check_table[0x3A] = &do_dat_exception;
697 pgm_check_table[0x3B] = &do_dat_exception;
698#endif /* CONFIG_64BIT */
699 pgm_check_table[0x15] = &operand_exception;
700 pgm_check_table[0x1C] = &space_switch_exception;
701 pgm_check_table[0x1D] = &hfp_sqrt_exception;
702 /* Enable machine checks early. */
703 local_mcck_enable();
704}