// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

static DEFINE_STATIC_KEY_FALSE(have_store_indication);

static int __init fault_init(void)
{
	if (test_facility(75))
		static_branch_enable(&have_store_indication);
	return 0;
}
early_initcall(fault_init);

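/*
 * Note: the program check handlers below read the translation-exception
 * identification (TEID) from regs->int_parm_long. Besides the failing
 * address it carries the address-space indication evaluated by
 * get_fault_type() and, if the facility probed in fault_init() above is
 * installed, a reliable fetch/store indication (see fault_is_write()).
 */
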
/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (teid.as == PSW_BITS_AS_SECONDARY)
		return USER_FAULT;
	/* Access register mode, not used in the kernel */
	if (teid.as == PSW_BITS_AS_ACCREG)
		return USER_FAULT;
	/* Home space -> access via kernel ASCE */
	return KERNEL_FAULT;
}
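
/*
 * GMAP_FAULT can only occur with CONFIG_PGSTE (KVM): faults taken while
 * a guest mapping is active arrive with PIF_GUEST_FAULT set and are
 * resolved against the guest address space (gmap) instead of the kernel
 * or user ASCE.
 */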

static unsigned long get_fault_address(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	return teid.addr * PAGE_SIZE;
}

static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (static_branch_likely(&have_store_indication))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}
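
/*
 * Without the store-indication facility the fsi field of the TEID is not
 * reliable, so fault_is_write() conservatively reports a read. Write
 * faults are still caught via the protection-exception path, which
 * passes VM_WRITE explicitly (see do_protection_exception()).
 */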

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R1:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R2:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R3:%016lx ", entry);
		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("S:%016lx ", entry);
		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (get_kernel_nofault(entry, table))
		goto bad;
	pr_cont("P:%016lx ", entry);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
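
/*
 * The walk above reads every table entry with get_kernel_nofault(), so a
 * corrupted table origin cannot trigger a recursive program check while
 * the oops output is generated; unreadable entries are reported as "BAD".
 */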

static void dump_fault_info(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 get_fault_address(regs), teid.val);
	pr_alert("Fault in ");
	switch (teid.as) {
	case PSW_BITS_AS_HOME:
		pr_cont("home space ");
		break;
	case PSW_BITS_AS_SECONDARY:
		pr_cont("secondary space ");
		break;
	case PSW_BITS_AS_ACCREG:
		pr_cont("access register ");
		break;
	case PSW_BITS_AS_PRIMARY:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!__ratelimit(&rs))
		return;
	pr_alert("User process fault: interruption code %04x ilc:%d ",
		 regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	pr_cont("\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

static void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (user_mode(regs)) {
		if (WARN_ON_ONCE(!si_code))
			si_code = SEGV_MAPERR;
		return do_sigsegv(regs, si_code);
	}
	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if (fault_type == KERNEL_FAULT) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	if (fault_type == KERNEL_FAULT)
		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}
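
/*
 * Order matters in handle_fault_error_nolock(): user mode faults get a
 * SIGSEGV, kernel faults are first given to the exception table fixup
 * (uaccess), then to KFENCE for kernel-address reports, and only if all
 * of that fails does the fault turn into an oops.
 */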

static void handle_fault_error(struct pt_regs *regs, int si_code)
{
	struct mm_struct *mm = current->mm;

	mmap_read_unlock(mm);
	handle_fault_error_nolock(regs, si_code);
}

static void do_sigbus(struct pt_regs *regs)
{
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static void do_exception(struct pt_regs *regs, int access)
{
	struct vm_area_struct *vma;
	unsigned long address;
	struct mm_struct *mm;
	enum fault_type type;
	unsigned int flags;
	struct gmap *gmap;
	vm_fault_t fault;
	bool is_write;

	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);
	if (kprobe_page_fault(regs, 14))
		return;
	mm = current->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		return handle_fault_error_nolock(regs, 0);
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			return handle_fault_error_nolock(regs, 0);
		break;
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (unlikely(fault & VM_FAULT_ERROR))
			goto error;
		return;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
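	/*
	 * Everything up to this point is the lockless fast path:
	 * lock_vma_under_rcu() takes only the per-VMA lock, so most user
	 * space faults complete without touching the mmap_lock. Any miss
	 * (no VMA, insufficient access rights, retry required) falls back
	 * to the mmap_lock path below.
	 */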
lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *)S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
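	/*
	 * For GMAP_FAULT the guest address has just been translated to the
	 * backing user space address; the fault is handled in the host
	 * address space, and __gmap_link() at the end makes the resolved
	 * page visible in the guest mapping again.
	 */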
retry:
	vma = find_vma(mm, address);
	if (!vma)
		return handle_fault_error(regs, SEGV_MAPERR);
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return handle_fault_error(regs, SEGV_MAPERR);
		vma = expand_stack(mm, address);
		if (!vma)
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
	}
	if (unlikely(!(vma->vm_flags & access)))
		return handle_fault_error(regs, SEGV_ACCERR);
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			mmap_read_unlock(mm);
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto gmap;
		}
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mmap_read_unlock(mm);
		goto error;
	}
	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set,
			 * mmap_lock has not been released
			 */
			current->thread.gmap_pfault = 1;
			return handle_fault_error(regs, 0);
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			mmap_read_unlock(mm);
			goto error;
		}
	}
	mmap_read_unlock(mm);
	return;
error:
	if (fault & VM_FAULT_OOM) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			pagefault_out_of_memory();
	} else if (fault & VM_FAULT_SIGSEGV) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
	} else if (fault & VM_FAULT_SIGBUS) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
		BUG();
	}
}

void do_protection_exception(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!teid.b61)) {
		if (user_mode(regs)) {
			/* Low-address protection in user mode: cannot happen */
			die(regs, "Low-address protection");
		}
		/*
		 * Low-address protection in kernel mode means
		 * NULL pointer write access in kernel mode.
		 */
		return handle_fault_error_nolock(regs, 0);
	}
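	/*
	 * With the instruction-execution-protection facility installed
	 * (MACHINE_HAS_NX), TEID bit 56 indicates an attempt to execute
	 * from a non-executable page. A retry cannot fix that, hence
	 * SEGV_ACCERR without consulting the VMA.
	 */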
	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}
	do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

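/*
 * The handlers below back protected virtualization (Ultravisor): pages
 * of a protected KVM guest are inaccessible to the host and have to be
 * exported (made accessible) or imported (made secure) on demand when
 * either side touches them.
 */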
void do_secure_storage_access(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 indicates if the address is valid, if it is not the
	 * kernel should be stopped or SIGSEGV should be sent to the
	 * process. Bit 61 is not reliable without the misc UV feature,
	 * therefore this needs to be checked too.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}
		/*
		 * The kernel should never run into this case and
		 * there is no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr))
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma)
			return handle_fault_error(regs, SEGV_MAPERR);
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		unreachable();
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

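/*
 * Non-secure storage access: a protected guest touched one of its pages
 * while that page is still non-secure. gmap_convert_to_secure() imports
 * the page on demand; -EINVAL means the address cannot be made secure,
 * and the task gets a SIGSEGV.
 */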
void do_non_secure_storage_access(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
		return handle_fault_error_nolock(regs, SEGV_MAPERR);
	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
			    current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */