/*
 *  arch/s390/mm/fault.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/asm-offsets.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/compat.h>
#include "../kernel/entry.h"

#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK	0x7ffff000
#define __SUBCODE_MASK		0x0200
#define __PF_RES_FIELD		0ULL
#else /* CONFIG_64BIT */
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL
#endif /* CONFIG_64BIT */

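/*
 * Architecture-private fault codes, chosen above the generic
 * VM_FAULT_* bits so do_exception() can return both through the
 * same value.
 */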
#define VM_FAULT_BADCONTEXT	0x010000
#define VM_FAULT_BADMAP		0x020000
#define VM_FAULT_BADACCESS	0x040000

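/*
 * With the access-exception fetch/store-indication facility installed,
 * bits 52-53 of the translation exception identification are valid;
 * store_indication selects them (0xc00), and a value of 0x400 flags
 * the faulting access as a store.
 */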
static unsigned long store_indication;

void fault_init(void)
{
	if (test_facility(2) && test_facility(75))
		store_indication = 0xc00;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}


/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out.
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
	} else {
		int loglevel_save = console_loglevel;
		console_unblank();
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

/*
 * Returns the address space associated with the fault.
 * Returns 0 for kernel space and 1 for user space.
 */
static inline int user_space_fault(unsigned long trans_exc_code)
{
	/*
	 * The lowest two bits of the translation exception
	 * identification indicate which paging table was used.
	 */
	trans_exc_code &= 3;
	if (trans_exc_code == 2)
		/* Access via secondary space, set_fs setting decides */
		return current->thread.mm_segment.ar4;
	if (user_mode == HOME_SPACE_MODE)
		/* User space if the access has been done via home space. */
		return trans_exc_code == 3;
	/*
	 * If the user space is not the home space the kernel runs in home
	 * space. Access via secondary space has already been covered,
	 * access via primary space or access register is from user space
	 * and access via home space is from the kernel.
	 */
	return trans_exc_code != 3;
}

static inline void report_user_fault(struct pt_regs *regs, long int_code,
				     int signr, unsigned long address)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk("User process fault: interruption code 0x%lX ", int_code);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
	printk("\n");
	printk("failing address: %lX\n", address);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
				int si_code, unsigned long trans_exc_code)
{
	struct siginfo si;
	unsigned long address;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	current->thread.prot_addr = address;
	current->thread.trap_no = int_code;
	report_user_fault(regs, int_code, SIGSEGV, address);
	si.si_signo = SIGSEGV;
	si.si_code = si_code;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGSEGV, &si, current);
}

static noinline void do_no_context(struct pt_regs *regs, long int_code,
				   unsigned long trans_exc_code)
{
	const struct exception_table_entry *fixup;
	unsigned long address;

	/* Are we prepared to handle this kernel fault?  */
	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
	if (fixup) {
		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	address = trans_exc_code & __FAIL_ADDR_MASK;
	if (!user_space_fault(trans_exc_code))
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " at virtual kernel address %p\n", (void *)address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " at virtual user address %p\n", (void *)address);

	die("Oops", regs, int_code);
	do_exit(SIGKILL);
}

static noinline void do_low_address(struct pt_regs *regs, long int_code,
				    unsigned long trans_exc_code)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode.  */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die("Low-address protection", regs, int_code);
		do_exit(SIGKILL);
	}

	do_no_context(regs, int_code, trans_exc_code);
}

static noinline void do_sigbus(struct pt_regs *regs, long int_code,
			       unsigned long trans_exc_code)
{
	struct task_struct *tsk = current;
	unsigned long address;
	struct siginfo si;

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	address = trans_exc_code & __FAIL_ADDR_MASK;
	tsk->thread.prot_addr = address;
	tsk->thread.trap_no = int_code;
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRERR;
	si.si_addr = (void __user *) address;
	force_sig_info(SIGBUS, &si, tsk);
}

static noinline void do_fault_error(struct pt_regs *regs, long int_code,
				    unsigned long trans_exc_code, int fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (regs->psw.mask & PSW_MASK_PSTATE) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, int_code, si_code, trans_exc_code);
			return;
		}
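		/* Kernel mode accesses fall through to do_no_context(). */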
	case VM_FAULT_BADCONTEXT:
		do_no_context(regs, int_code, trans_exc_code);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs, int_code, trans_exc_code);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!(regs->psw.mask & PSW_MASK_PSTATE))
				do_no_context(regs, int_code, trans_exc_code);
			else
				do_sigbus(regs, int_code, trans_exc_code);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static inline int do_exception(struct pt_regs *regs, int access,
			       unsigned long trans_exc_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	unsigned int flags;
	int fault;

	if (notify_page_fault(regs))
		return 0;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto out;

	address = trans_exc_code & __FAIL_ADDR_MASK;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_ALLOW_RETRY;
	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);

#ifdef CONFIG_PGSTE
	if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
		address = gmap_fault(address,
				     (struct gmap *) S390_lowcore.gmap);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
#endif

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	if (is_vm_hugetlb_page(vma))
		address &= HPAGE_MASK;
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			down_read(&mm->mmap_sem);
			goto retry;
		}
	}
	/*
	 * The instruction that caused the program check will
	 * be repeated. Don't signal single step via SIGTRAP.
	 */
	clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
	fault = 0;
out_up:
	up_read(&mm->mmap_sem);
out:
	return fault;
}

void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code,
				       unsigned long trans_exc_code)
{
	int fault;

	/* Protection exception is suppressing, decrement psw address. */
	regs->psw.addr -= (pgm_int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs, pgm_int_code, trans_exc_code);
		return;
	}
	fault = do_exception(regs, VM_WRITE, trans_exc_code);
	if (unlikely(fault))
		do_fault_error(regs, 4, trans_exc_code, fault);
}

void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code,
				unsigned long trans_exc_code)
{
	int access, fault;

	access = VM_READ | VM_EXEC | VM_WRITE;
	fault = do_exception(regs, access, trans_exc_code);
	if (unlikely(fault))
		do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault);
}

#ifdef CONFIG_64BIT
void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code,
				 unsigned long trans_exc_code)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
	up_read(&mm->mmap_sem);

	if (vma) {
		update_mm(mm, current);
		return;
	}

	/* User mode accesses just cause a SIGSEGV */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code);
		return;
	}

no_context:
	do_no_context(regs, pgm_int_code, trans_exc_code);
}
#endif

int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
{
	struct pt_regs regs;
	int access, fault;

	regs.psw.mask = psw_kernel_bits;
	if (!irqs_disabled())
		regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) __builtin_return_address(0);
	regs.psw.addr |= PSW_ADDR_AMODE;
	uaddr &= PAGE_MASK;
	access = write ? VM_WRITE : VM_READ;
	fault = do_exception(&regs, access, uaddr | 2);
	if (unlikely(fault)) {
		if (fault & VM_FAULT_OOM)
			return -EFAULT;
		else if (fault & VM_FAULT_SIGBUS)
			do_sigbus(&regs, pgm_int_code, uaddr);
	}
	return fault ? -EFAULT : 0;
}

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

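/*
 * Parameter block for diag 0x258, the z/VM pseudo-page-fault handshake:
 * reffcode selects token set (0) or token cancel (1), refdwlen is the
 * block length in doublewords and refgaddr the address whose contents
 * serve as the fault token - here the pid of the current task.
 */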
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

int pfault_init(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 0,
		.refdwlen = 5,
		.refversn = 2,
		.refgaddr = __LC_CURRENT_PID,
		.refselmk = 1ULL << 48,
		.refcmpmk = 1ULL << 48,
		.reserved = __PF_RES_FIELD };
	int rc;

	if (!MACHINE_IS_VM || pfault_disable)
		return -1;
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
	return rc;
}

void pfault_fini(void)
{
	struct pfault_refbk refbk = {
		.refdiagc = 0x258,
		.reffcode = 1,
		.refdwlen = 5,
		.refversn = 2,
	};

	if (!MACHINE_IS_VM || pfault_disable)
		return;
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:\n"
		EX_TABLE(0b,0b)
		: : "a" (&refbk), "m" (refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

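/*
 * thread.pfault_wait encodes the handshake between the two external
 * interrupts: 0 = no pseudo page fault pending, 1 = initial interrupt
 * seen and the task is sleeping, -1 = the completion interrupt arrived
 * before the initial one.
 */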
static void pfault_interrupt(unsigned int ext_int_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault
	 * initial/completion signal bit. VM stores this
	 * in the 'cpu address' field associated with the
	 * external interrupt.
	 */
	subcode = ext_int_code >> 16;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
	if (subcode & 0x0080) {
		/* Get the token (= pid of the affected task). */
		pid = sizeof(void *) == 4 ? param32 : param64;
		rcu_read_lock();
		tsk = find_task_by_pid_ns(pid, &init_pid_ns);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return;
	} else {
		tsk = current;
	}
	spin_lock(&pfault_lock);
	if (subcode & 0x0080) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep. */
			tsk->thread.pfault_wait = -1;
		}
		put_task_struct(tsk);
	} else {
		/* signal bit not set -> a real page is missing. */
		if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep. */
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
		}
	}
	spin_unlock(&pfault_lock);
}

static int __cpuinit pfault_cpu_notify(struct notifier_block *self,
				       unsigned long action, void *hcpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	switch (action) {
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		spin_lock_irq(&pfault_lock);
		list_for_each_entry_safe(thread, next, &pfault_list, list) {
			thread->pfault_wait = 0;
			list_del(&thread->list);
			tsk = container_of(thread, struct task_struct, thread);
			wake_up_process(tsk);
		}
		spin_unlock_irq(&pfault_lock);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init pfault_irq_init(void)
{
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	rc = register_external_interrupt(0x2603, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	service_subclass_irq_register();
	hotcpu_notifier(pfault_cpu_notify, 0);
	return 0;

out_pfault:
	unregister_external_interrupt(0x2603, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/fault.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

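/*
 * A fault is resolved against one of three address spaces: the kernel
 * ASCE, the user ASCE, or a KVM guest mapping (gmap) ASCE.
 */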
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

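/*
 * With the access-exception fetch/store-indication facility (75)
 * installed, the fsi field of the TEID distinguishes fetch from store
 * accesses; without it, fault_is_write() conservatively reports false.
 */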
static DEFINE_STATIC_KEY_FALSE(have_store_indication);

static int __init fault_init(void)
{
	if (test_facility(75))
		static_branch_enable(&have_store_indication);
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	struct gmap *gmap;

	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		gmap = (struct gmap *)S390_lowcore.gmap;
		if (gmap && gmap->asce == regs->cr1)
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (teid.as == PSW_BITS_AS_SECONDARY)
		return USER_FAULT;
	/* Access register mode, not used in the kernel */
	if (teid.as == PSW_BITS_AS_ACCREG)
		return USER_FAULT;
	/* Home space -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static unsigned long get_fault_address(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	return teid.addr * PAGE_SIZE;
}

static __always_inline bool fault_is_write(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	if (static_branch_likely(&have_store_indication))
		return teid.fsi == TEID_FSI_STORE;
	return false;
}

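/*
 * Walk the page table for the given address below the given ASCE and
 * print one entry per translation level, stopping at the first invalid
 * or large entry.
 */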
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R1:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R2:%016lx ", entry);
		if (entry & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("R3:%016lx ", entry);
		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(entry & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (get_kernel_nofault(entry, table))
			goto bad;
		pr_cont("S:%016lx ", entry);
		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (get_kernel_nofault(entry, table))
		goto bad;
	pr_cont("P:%016lx ", entry);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 get_fault_address(regs), teid.val);
	pr_alert("Fault in ");
	switch (teid.as) {
	case PSW_BITS_AS_HOME:
		pr_cont("home space ");
		break;
	case PSW_BITS_AS_SECONDARY:
		pr_cont("secondary space ");
		break;
	case PSW_BITS_AS_ACCREG:
		pr_cont("access register ");
		break;
	case PSW_BITS_AS_PRIMARY:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce.val;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce.val;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, get_fault_address(regs));
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!__ratelimit(&rs))
		return;
	pr_alert("User process fault: interruption code %04x ilc:%d ",
		 regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	pr_cont("\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

static void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
}

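/*
 * Terminal error path, called without the mmap lock held. User-mode
 * faults get a signal; kernel-mode faults try an exception table
 * fixup and the kfence handler before dying. A si_code of 0 marks
 * call sites that should only be reached in kernel mode.
 */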
static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
{
	enum fault_type fault_type;
	unsigned long address;
	bool is_write;

	if (user_mode(regs)) {
		if (WARN_ON_ONCE(!si_code))
			si_code = SEGV_MAPERR;
		return do_sigsegv(regs, si_code);
	}
	if (fixup_exception(regs))
		return;
	fault_type = get_fault_type(regs);
	if (fault_type == KERNEL_FAULT) {
		address = get_fault_address(regs);
		is_write = fault_is_write(regs);
		if (kfence_handle_page_fault(address, is_write, regs))
			return;
	}
	if (fault_type == KERNEL_FAULT)
		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
	else
		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static void handle_fault_error(struct pt_regs *regs, int si_code)
{
	struct mm_struct *mm = current->mm;

	mmap_read_unlock(mm);
	handle_fault_error_nolock(regs, si_code);
}

static void do_sigbus(struct pt_regs *regs)
{
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *   04       Protection           ->  Write-Protection  (suppression)
 *   10       Segment translation  ->  Not present       (nullification)
 *   11       Page translation     ->  Not present       (nullification)
 *   3b       Region third trans.  ->  Not present       (nullification)
 */
static void do_exception(struct pt_regs *regs, int access)
{
	struct vm_area_struct *vma;
	unsigned long address;
	struct mm_struct *mm;
	enum fault_type type;
	unsigned int flags;
	struct gmap *gmap;
	vm_fault_t fault;
	bool is_write;

	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);
	if (kprobe_page_fault(regs, 14))
		return;
	mm = current->mm;
	address = get_fault_address(regs);
	is_write = fault_is_write(regs);
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		return handle_fault_error_nolock(regs, 0);
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			return handle_fault_error_nolock(regs, 0);
		break;
	}
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
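	/*
	 * Try to resolve the fault under the per-VMA lock first; fall
	 * back to the mmap_lock path if the VMA cannot be locked, the
	 * access is not allowed, or the fault needs to be retried.
	 */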
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (!(vma->vm_flags & access)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		if (unlikely(fault & VM_FAULT_ERROR))
			goto error;
		return;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
lock_mmap:
	mmap_read_lock(mm);
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *)S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}
retry:
	vma = find_vma(mm, address);
	if (!vma)
		return handle_fault_error(regs, SEGV_MAPERR);
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			return handle_fault_error(regs, SEGV_MAPERR);
		vma = expand_stack(mm, address);
		if (!vma)
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
	}
	if (unlikely(!(vma->vm_flags & access)))
		return handle_fault_error(regs, SEGV_ACCERR);
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			mmap_read_unlock(mm);
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		return;
	}
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto gmap;
		}
		return;
	}
	if (unlikely(fault & VM_FAULT_ERROR)) {
		mmap_read_unlock(mm);
		goto error;
	}
	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set,
			 * mmap_lock has not been released
			 */
			current->thread.gmap_pfault = 1;
			return handle_fault_error(regs, 0);
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT)
			return handle_fault_error(regs, SEGV_MAPERR);
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			mmap_read_unlock(mm);
			goto error;
		}
	}
	mmap_read_unlock(mm);
	return;
error:
	if (fault & VM_FAULT_OOM) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			pagefault_out_of_memory();
	} else if (fault & VM_FAULT_SIGSEGV) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigsegv(regs, SEGV_MAPERR);
	} else if (fault & VM_FAULT_SIGBUS) {
		if (!user_mode(regs))
			handle_fault_error_nolock(regs, 0);
		else
			do_sigbus(regs);
	} else {
		BUG();
	}
}

void do_protection_exception(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };

	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection.  This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!teid.b61)) {
		if (user_mode(regs)) {
			/* Low-address protection in user mode: cannot happen */
			die(regs, "Low-address protection");
		}
		/*
		 * Low-address protection in kernel mode means
		 * NULL pointer write access in kernel mode.
		 */
		return handle_fault_error_nolock(regs, 0);
	}
	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
		return handle_fault_error_nolock(regs, SEGV_ACCERR);
	}
	do_exception(regs, VM_WRITE);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	do_exception(regs, VM_ACCESS_FLAGS);
}
NOKPROBE_SYMBOL(do_dat_exception);

#if IS_ENABLED(CONFIG_PGSTE)

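/*
 * Handlers for the program checks raised for secure (protected
 * virtualization) storage; see the individual handlers for the
 * contexts in which each can occur.
 */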
void do_secure_storage_access(struct pt_regs *regs)
{
	union teid teid = { .val = regs->int_parm_long };
	unsigned long addr = get_fault_address(regs);
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 indicates if the address is valid, if it is not the
	 * kernel should be stopped or SIGSEGV should be sent to the
	 * process. Bit 61 is not reliable without the misc UV feature,
	 * therefore this needs to be checked too.
	 */
	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}
		/*
		 * The kernel should never run into this case and
		 * there is no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}
	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr))
			return handle_fault_error_nolock(regs, SEGV_MAPERR);
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma)
			return handle_fault_error(regs, SEGV_MAPERR);
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		unreachable();
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
		return handle_fault_error_nolock(regs, SEGV_MAPERR);
	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
	unsigned long gaddr = get_fault_address(regs);

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
			    current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */