/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */
12
13#include <linux/kernel_stat.h>
14#include <linux/perf_event.h>
15#include <linux/signal.h>
16#include <linux/sched.h>
17#include <linux/kernel.h>
18#include <linux/errno.h>
19#include <linux/string.h>
20#include <linux/types.h>
21#include <linux/ptrace.h>
22#include <linux/mman.h>
23#include <linux/mm.h>
24#include <linux/compat.h>
25#include <linux/smp.h>
26#include <linux/kdebug.h>
27#include <linux/init.h>
28#include <linux/console.h>
29#include <linux/module.h>
30#include <linux/hardirq.h>
31#include <linux/kprobes.h>
32#include <linux/uaccess.h>
33#include <linux/hugetlb.h>
34#include <asm/asm-offsets.h>
35#include <asm/system.h>
36#include <asm/pgtable.h>
37#include <asm/irq.h>
38#include <asm/mmu_context.h>
39#include <asm/compat.h>
40#include "../kernel/entry.h"
41
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
#define __SUBCODE_MASK 0x0200
#define __PF_RES_FIELD 0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL
#endif /* CONFIG_64BIT */

#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000

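/*
 * Note: with the access-exception fetch/store-indication facilities
 * installed (facility bits 2 and 75), bits 52-53 of the translation
 * exception identification say whether the failing access was a fetch
 * or a store; the mask 0xc00 selects those bits and the value 0x400
 * indicates a store (see the store_indication test in do_exception()).
 */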
static unsigned long store_indication;

void fault_init(void)
{
	if (test_facility(2) && test_facility(75))
		store_indication = 0xc00;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(unsigned long trans_exc_code)
{
	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code &= 3;
	if (trans_exc_code == 2)
		/* Access via secondary space, set_fs setting decides */
		return current->thread.mm_segment.ar4;
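	/*
	 * Note: "user_mode" below is the global address-space mode
	 * variable (HOME_SPACE_MODE etc., set up in
	 * arch/s390/kernel/setup.c), not the user_mode(regs) macro.
	 */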
	if (user_mode == HOME_SPACE_MODE)
		/* User space if the access has been done via home space. */
		return trans_exc_code == 3;
	/*
	 * If the user space is not the home space the kernel runs in home
	 * space. Access via secondary space has already been covered,
	 * access via primary space or access register is from user space
	 * and access via home space is from the kernel.
	 */
	return trans_exc_code != 3;
}

static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr, unsigned long address)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	printk("failing address: %lX\n", address);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
				int si_code, unsigned long trans_exc_code)
{
	struct siginfo si;
	unsigned long address;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	current->thread.prot_addr = address;
	current->thread.trap_no = int_code;
	report_user_fault(regs, int_code, SIGSEGV, address);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs, long int_code,
				   unsigned long trans_exc_code)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = trans_exc_code & __FAIL_ADDR_MASK;
	if (!user_space_fault(trans_exc_code))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die("Oops", regs, int_code);
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs, long int_code,
				    unsigned long trans_exc_code)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die ("Low-address protection", regs, int_code);
		do_exit(SIGKILL);
	}

	do_no_context(regs, int_code, trans_exc_code);
}

static noinline void do_sigbus(struct pt_regs *regs, long int_code,
			       unsigned long trans_exc_code)
{
	struct task_struct *tsk = current;
	unsigned long address;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	address = trans_exc_code & __FAIL_ADDR_MASK;
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = int_code;
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, long int_code,
				    unsigned long trans_exc_code, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (regs->psw.mask & PSW_MASK_PSTATE) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, int_code, si_code, trans_exc_code);
			return;
		}
	case VM_FAULT_BADCONTEXT:
		do_no_context(regs, int_code, trans_exc_code);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs, int_code, trans_exc_code);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs, int_code, trans_exc_code);
			else
				do_sigbus(regs, int_code, trans_exc_code);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access,
			       unsigned long trans_exc_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	unsigned int flags;
	int fault;

	if (notify_page_fault(regs))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

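	/*
	 * If the fault happened while this cpu was running a KVM guest in
	 * SIE (TIF_SIE set and a guest mapping present), translate the
	 * guest address through the gmap into the corresponding user
	 * address first.
	 */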
#ifdef CONFIG_PGSTE
	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
		address = gmap_fault(address,
				     (struct gmap *) S390_lowcore.gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
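			/* handle_mm_fault() dropped mmap_sem before
			 * returning VM_FAULT_RETRY, so it has to be
			 * taken again before the retry. */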
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
				       unsigned long trans_exc_code)
{
	int fault;

	/* Protection exception is suppressing, decrement psw address. */
	regs->psw.addr -= (pgm_int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs, pgm_int_code, trans_exc_code);
		return;
	}
	fault = do_exception(regs, VM_WRITE, trans_exc_code);
	if (unlikely(fault))
		do_fault_error(regs, 4, trans_exc_code, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
				unsigned long trans_exc_code)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access, trans_exc_code);
	if (unlikely(fault))
		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
}

#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
		return;
	}

no_context:
	do_no_context(regs, pgm_int_code, trans_exc_code);
}
#endif

int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
	struct pt_regs regs;
	int access, fault;

	regs.psw.mask = psw_kernel_bits;
	if (!irqs_disabled())
		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) __builtin_return_address(0);
	regs.psw.addr |= PSW_ADDR_AMODE;
	uaddr &= PAGE_MASK;
	access = write ? VM_WRITE : VM_READ;
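	/*
	 * The low bits of the fabricated translation exception code are
	 * set to 2 (secondary space) so that user_space_fault() resolves
	 * the address space according to the current set_fs() setting.
	 */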
	fault = do_exception(&regs, access, uaddr | 2);
	if (unlikely(fault)) {
		if (fault & VM_FAULT_OOM)
			return -EFAULT;
		else if (fault & VM_FAULT_SIGBUS)
			do_sigbus(&regs, pgm_int_code, uaddr);
	}
	return fault ? -EFAULT : 0;
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

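/*
 * DIAG 0x258 with function code 0 registers the pfault handshaking with
 * the z/VM host; function code 1 (see pfault_fini) cancels it.
 */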
int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (!MACHINE_IS_VM || pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (!MACHINE_IS_VM || pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

static void pfault_interrupt(unsigned int ext_int_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_int_code >> 16;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
	if (subcode & 0x0080) {
		/* Get the token (= pid of the affected task). */
		pid = sizeof(void *) == 4 ? param32 : param64;
		rcu_read_lock();
		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return;
	} else {
		tsk = current;
	}
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep. */
			tsk->thread.pfault_wait = -1;
		}
		put_task_struct(tsk);
	} else {
		/* signal bit not set -> a real page is missing. */
		if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep. */
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
	spin_unlock(&pfault_lock);
}

static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	rc = register_external_interrupt(0x2603, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	service_subclass_irq_register();
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

#define __FAIL_ADDR_MASK -4096L
#define __SUBCODE_MASK 0x0600
#define __PF_RES_FIELD 0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

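/*
 * Note: with the access-exception fetch/store-indication facility
 * installed (facility bit 75), bits 52-53 of the translation exception
 * identification say whether the failing access was a fetch or a store;
 * the mask 0xc00 selects those bits and the value 0x400 indicates a
 * store (see the store_indication test in do_exception()).
 */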
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

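	/*
	 * The lowest two bits of the translation exception
	 * identification: 0 = primary, 1 = access register,
	 * 2 = secondary, 3 = home space.
	 */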
	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (IS_ENABLED(CONFIG_PGSTE) &&
		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		if (current->thread.mm_segment == USER_DS)
			return USER_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2) {
		/* secondary space exception */
		if (current->thread.mm_segment & 1) {
			if (current->thread.mm_segment == USER_DS_SACF)
				return USER_FAULT;
			return KERNEL_FAULT;
		}
		return VDSO_FAULT;
	}
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

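/*
 * Walk the page table for the given address, one entry per translation
 * level, printing each entry; the walk stops at the first invalid or
 * large (huge) entry.
 */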
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

const struct exception_table_entry *s390_search_extables(unsigned long addr)
{
	const struct exception_table_entry *fixup;

	fixup = search_extable(__start_dma_ex_table,
			       __stop_dma_ex_table - __start_dma_ex_table,
			       addr);
	if (!fixup)
		fixup = search_exception_tables(addr);
	return fixup;
}

static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

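/*
 * An execute fault on one of the two sigreturn svc opcodes (0x0a77 =
 * svc 119/sigreturn, 0x0aad = svc 173/rt_sigreturn) stems from a signal
 * frame on a non-executable stack; emulate the system call instead of
 * delivering a SIGSEGV.
 */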
static noinline int signal_return(struct pt_regs *regs)
{
	u16 instruction;
	int rc;

	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
	if (rc)
		return rc;
	if (instruction == 0x0a77) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x00040077;
		return 0;
	} else if (instruction == 0x0aad) {
		set_pt_regs_flag(regs, PIF_SYSCALL);
		regs->int_code = 0x000400ad;
		return 0;
	}
	return -EACCES;
}

static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_pt_regs_flag(regs, PIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		goto out;
	case VDSO_FAULT:
		fault = VM_FAULT_BADMAP;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* FAULT_FLAG_RETRY_NOWAIT has been set,
				 * mmap_lock has not been released */
				current->thread.gmap_pfault = 1;
				fault = VM_FAULT_PFAULT;
				goto out_up;
			}
			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
			flags |= FAULT_FLAG_TRIED;
			mmap_read_lock(mm);
			goto retry;
		}
	}
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
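	/*
	 * With the instruction-execution-protection facility installed
	 * (MACHINE_HAS_NX), the 0x80 bit in the translation exception
	 * identification appears to mark an instruction-fetch fault;
	 * rewrite the failing address to the page of the PSW address
	 * and report an execute access error.
	 */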
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{

	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule().  It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (tsk->state == TASK_RUNNING)
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)
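/*
 * With protected virtualization (s390 Secure Execution) a guest's pages
 * can be secure, i.e. inaccessible to the host; touching such a page
 * raises a secure storage access exception, and
 * arch_make_page_accessible() asks the ultravisor to export the page so
 * that the access can be retried.
 */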
void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	int rc;

	switch (get_fault_type(regs)) {
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	case VDSO_FAULT:
	case GMAP_FAULT:
	default:
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#else
void do_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_non_secure_storage_access(struct pt_regs *regs)
{
	default_trap_handler(regs);
}

void do_secure_storage_violation(struct pt_regs *regs)
{
	default_trap_handler(regs);
}
#endif