// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (uweigand@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/compat.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/kfence.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/facility.h>
#include <asm/uv.h>
#include "../kernel/entry.h"

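/*
 * On a translation exception the translation-exception identification
 * (TEID), passed to the handlers in regs->int_parm_long, carries the
 * page-aligned failing address in its upper bits (__FAIL_ADDR_MASK)
 * while the lowest two bits encode the address space the fault
 * occurred in (see get_fault_type() and dump_fault_info() below).
 */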
#define __FAIL_ADDR_MASK	-4096L
#define __SUBCODE_MASK		0x0600
#define __PF_RES_FIELD		0x8000000000000000ULL

#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)

enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	GMAP_FAULT,
};

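/*
 * store_indication selects the TEID bits that flag the access type.
 * With facility 75 (fetch/store indication) installed, bits 0xc00 of
 * the TEID are valid and do_exception() compares them against 0x400
 * to recognize a store (write) access.
 */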
static unsigned long store_indication __read_mostly;

static int __init fault_init(void)
{
	if (test_facility(75))
		store_indication = 0xc00;
	return 0;
}
early_initcall(fault_init);

/*
 * Find out which address space caused the exception.
 */
static enum fault_type get_fault_type(struct pt_regs *regs)
{
	unsigned long trans_exc_code;

	trans_exc_code = regs->int_parm_long & 3;
	if (likely(trans_exc_code == 0)) {
		/* primary space exception */
		if (user_mode(regs))
			return USER_FAULT;
		if (!IS_ENABLED(CONFIG_PGSTE))
			return KERNEL_FAULT;
		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
			return GMAP_FAULT;
		return KERNEL_FAULT;
	}
	if (trans_exc_code == 2)
		return USER_FAULT;
	if (trans_exc_code == 1) {
		/* access register mode, not used in the kernel */
		return USER_FAULT;
	}
	/* home space exception -> access via kernel ASCE */
	return KERNEL_FAULT;
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}

static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = __va(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = __va(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}

static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}

int show_unhandled_signals = 1;

void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
{
	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
		return;
	if (!unhandled_signal(current, signr))
		return;
	if (!printk_ratelimit())
		return;
	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
	       regs->int_code & 0xffff, regs->int_code >> 17);
	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
	printk(KERN_CONT "\n");
	if (is_mm_fault)
		dump_fault_info(regs);
	show_regs(regs);
}

/*
 * Send SIGSEGV to task. This is an external routine
 * to keep the stack usage of do_page_fault small.
 */
static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
{
	report_user_fault(regs, SIGSEGV, 1);
	force_sig_fault(SIGSEGV, si_code,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_no_context(struct pt_regs *regs)
{
	if (fixup_exception(regs))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
}

static noinline void do_low_address(struct pt_regs *regs)
{
	/* Low-address protection hit in kernel mode means
	   NULL pointer write access in kernel mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
	}

	do_no_context(regs);
}

static noinline void do_sigbus(struct pt_regs *regs)
{
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR,
			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
}

static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * interruption code (int_code):
 *	04	Protection	     ->	 Write-Protection  (suppression)
 *	10	Segment translation  ->	 Not present	   (nullification)
 *	11	Page translation     ->	 Not present	   (nullification)
 *	3b	Region third trans.  ->	 Not present	   (nullification)
 */
static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
{
	struct gmap *gmap;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum fault_type type;
	unsigned long trans_exc_code;
	unsigned long address;
	unsigned int flags;
	vm_fault_t fault;
	bool is_write;

	tsk = current;
	/*
	 * The instruction that caused the program check has
	 * been nullified. Don't signal single step via SIGTRAP.
	 */
	clear_thread_flag(TIF_PER_TRAP);

	if (kprobe_page_fault(regs, 14))
		return 0;

	mm = tsk->mm;
	trans_exc_code = regs->int_parm_long;
	address = trans_exc_code & __FAIL_ADDR_MASK;
	is_write = (trans_exc_code & store_indication) == 0x400;

	/*
	 * Verify that the fault happened in user space, that
	 * we are not in an interrupt and that there is a
	 * user context.
	 */
	fault = VM_FAULT_BADCONTEXT;
	type = get_fault_type(regs);
	switch (type) {
	case KERNEL_FAULT:
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;
		goto out;
	case USER_FAULT:
	case GMAP_FAULT:
		if (faulthandler_disabled() || !mm)
			goto out;
		break;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (is_write)
		access = VM_WRITE;
	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	mmap_read_lock(mm);

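	/*
	 * For a fault taken while running a KVM guest, remember the
	 * fault details for the pfault machinery and translate the
	 * guest address into the host address that the VMA walk below
	 * operates on.
	 */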
	gmap = NULL;
	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
		gmap = (struct gmap *) S390_lowcore.gmap;
		current->thread.gmap_addr = address;
		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
		current->thread.gmap_int_code = regs->int_code & 0xffff;
		address = __gmap_translate(gmap, address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (gmap->pfault_enabled)
			flags |= FAULT_FLAG_RETRY_NOWAIT;
	}

retry:
	fault = VM_FAULT_BADMAP;
	vma = find_vma(mm, address);
	if (!vma)
		goto out_up;

	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out_up;
		if (expand_stack(vma, address))
			goto out_up;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	fault = VM_FAULT_BADACCESS;
	if (unlikely(!(vma->vm_flags & access)))
		goto out_up;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs)) {
		fault = VM_FAULT_SIGNAL;
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_up;
		goto out;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED) {
		if (gmap) {
			mmap_read_lock(mm);
			goto out_gmap;
		}
		fault = 0;
		goto out;
	}

	if (unlikely(fault & VM_FAULT_ERROR))
		goto out_up;

	if (fault & VM_FAULT_RETRY) {
		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
		    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
			/*
			 * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
			 * not been released
			 */
			current->thread.gmap_pfault = 1;
			fault = VM_FAULT_PFAULT;
			goto out_up;
		}
		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
		flags |= FAULT_FLAG_TRIED;
		mmap_read_lock(mm);
		goto retry;
	}
out_gmap:
	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
		address = __gmap_link(gmap, current->thread.gmap_addr,
				      address);
		if (address == -EFAULT) {
			fault = VM_FAULT_BADMAP;
			goto out_up;
		}
		if (address == -ENOMEM) {
			fault = VM_FAULT_OOM;
			goto out_up;
		}
	}
	fault = 0;
out_up:
	mmap_read_unlock(mm);
out:
	return fault;
}

void do_protection_exception(struct pt_regs *regs)
{
	unsigned long trans_exc_code;
	int access;
	vm_fault_t fault;

	trans_exc_code = regs->int_parm_long;
	/*
	 * Protection exceptions are suppressing, decrement psw address.
	 * The exception to this rule are aborted transactions, for these
	 * the PSW already points to the correct location.
	 */
	if (!(regs->int_code & 0x200))
		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
	/*
	 * Check for low-address protection. This needs to be treated
	 * as a special case because the translation exception code
	 * field is not guaranteed to contain valid data in this case.
	 */
	if (unlikely(!(trans_exc_code & 4))) {
		do_low_address(regs);
		return;
	}
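	/*
	 * TEID bit 0x80 together with MACHINE_HAS_NX indicates an
	 * instruction-execution-protection fault. Report the page of
	 * the PSW address as the failing address and treat the fault
	 * as a failed VM_EXEC access instead of going through
	 * do_exception().
	 */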
	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
				      (regs->psw.addr & PAGE_MASK);
		access = VM_EXEC;
		fault = VM_FAULT_BADACCESS;
	} else {
		access = VM_WRITE;
		fault = do_exception(regs, access);
	}
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);

void do_dat_exception(struct pt_regs *regs)
{
	int access;
	vm_fault_t fault;

	access = VM_ACCESS_FLAGS;
	fault = do_exception(regs, access);
	if (unlikely(fault))
		do_fault_error(regs, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);

#ifdef CONFIG_PFAULT
/*
 * 'pfault' pseudo page faults routines.
 */
static int pfault_disable;

static int __init nopfault(char *str)
{
	pfault_disable = 1;
	return 1;
}

__setup("nopfault", nopfault);

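/*
 * Request block for the diagnose 0x258 pfault calls below. reffcode
 * selects the operation (0 in pfault_init_refbk establishes pfault
 * handshaking, 1 in pfault_fini_refbk cancels it); refgaddr names the
 * lowcore field whose contents serve as the per-task token, here the
 * LPP field that holds the current pid (see pfault_interrupt()).
 */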
struct pfault_refbk {
	u16 refdiagc;
	u16 reffcode;
	u16 refdwlen;
	u16 refversn;
	u64 refgaddr;
	u64 refselmk;
	u64 refcmpmk;
	u64 reserved;
} __attribute__ ((packed, aligned(8)));

static struct pfault_refbk pfault_init_refbk = {
	.refdiagc = 0x258,
	.reffcode = 0,
	.refdwlen = 5,
	.refversn = 2,
	.refgaddr = __LC_LPP,
	.refselmk = 1ULL << 48,
	.refcmpmk = 1ULL << 48,
	.reserved = __PF_RES_FIELD
};

int pfault_init(void)
{
	int rc;

	if (pfault_disable)
		return -1;
	diag_stat_inc(DIAG_STAT_X258);
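	/*
	 * Issue the diagnose to establish pfault handshaking. If the
	 * diagnose itself program-checks (e.g. the hypervisor does not
	 * support it), the exception table entry resumes at label 1 and
	 * rc is set to 8; otherwise the jump at label 0 skips to label 2
	 * with the diagnose return code in rc.
	 */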
	asm volatile(
		"	diag	%1,%0,0x258\n"
		"0:	j	2f\n"
		"1:	la	%0,8\n"
		"2:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc)
		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
	return rc;
}

static struct pfault_refbk pfault_fini_refbk = {
	.refdiagc = 0x258,
	.reffcode = 1,
	.refdwlen = 5,
	.refversn = 2,
};

void pfault_fini(void)
{
	if (pfault_disable)
		return;
	diag_stat_inc(DIAG_STAT_X258);
	asm volatile(
		"	diag	%0,0,0x258\n"
		"0:	nopr	%%r7\n"
		EX_TABLE(0b,0b)
		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
}

static DEFINE_SPINLOCK(pfault_lock);
static LIST_HEAD(pfault_list);

#define PF_COMPLETE	0x0080

/*
 * The mechanism of our pfault code: if Linux is running as guest, runs a user
 * space process and the user space process accesses a page that the host has
 * paged out we get a pfault interrupt.
 *
 * This allows us, within the guest, to schedule a different process. Without
 * this mechanism the host would have to suspend the whole virtual cpu until
 * the page has been paged in.
 *
 * So when we get such an interrupt then we set the state of the current task
 * to uninterruptible and also set the need_resched flag. Both happens within
 * interrupt context(!). If we later on want to return to user space we
 * recognize the need_resched flag and then call schedule(). It's not very
 * obvious how this works...
 *
 * Of course we have a lot of additional fun with the completion interrupt (->
 * host signals that a page of a process has been paged in and the process can
 * continue to run). This interrupt can arrive on any cpu and, since we have
 * virtual cpus, actually appear before the interrupt that signals that a page
 * is missing.
 */
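/*
 * tsk->thread.pfault_wait encodes the handshake state: 0 means no
 * pseudo page fault is pending, 1 means the initial interrupt has put
 * the task on pfault_list and to sleep, and -1 means the completion
 * interrupt arrived before the initial one.
 */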
static void pfault_interrupt(struct ext_code ext_code,
			     unsigned int param32, unsigned long param64)
{
	struct task_struct *tsk;
	__u16 subcode;
	pid_t pid;

	/*
	 * Get the external interruption subcode & pfault initial/completion
	 * signal bit. VM stores this in the 'cpu address' field associated
	 * with the external interrupt.
	 */
	subcode = ext_code.subcode;
	if ((subcode & 0xff00) != __SUBCODE_MASK)
		return;
	inc_irq_stat(IRQEXT_PFL);
	/* Get the token (= pid of the affected task). */
	pid = param64 & LPP_PID_MASK;
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return;
	spin_lock(&pfault_lock);
	if (subcode & PF_COMPLETE) {
		/* signal bit is set -> a page has been swapped in by VM */
		if (tsk->thread.pfault_wait == 1) {
			/* Initial interrupt was faster than the completion
			 * interrupt. pfault_wait is valid. Set pfault_wait
			 * back to zero and wake up the process. This can
			 * safely be done because the task is still sleeping
			 * and can't produce new pfaults. */
			tsk->thread.pfault_wait = 0;
			list_del(&tsk->thread.list);
			wake_up_process(tsk);
			put_task_struct(tsk);
		} else {
			/* Completion interrupt was faster than initial
			 * interrupt. Set pfault_wait to -1 so the initial
			 * interrupt doesn't put the task to sleep.
			 * If the task is not running, ignore the completion
			 * interrupt since it must be a leftover of a PFAULT
			 * CANCEL operation which didn't remove all pending
			 * completion interrupts. */
			if (task_is_running(tsk))
				tsk->thread.pfault_wait = -1;
		}
	} else {
		/* signal bit not set -> a real page is missing. */
		if (WARN_ON_ONCE(tsk != current))
			goto out;
		if (tsk->thread.pfault_wait == 1) {
			/* Already on the list with a reference: put to sleep */
			goto block;
		} else if (tsk->thread.pfault_wait == -1) {
			/* Completion interrupt was faster than the initial
			 * interrupt (pfault_wait == -1). Set pfault_wait
			 * back to zero and exit. */
			tsk->thread.pfault_wait = 0;
		} else {
			/* Initial interrupt arrived before completion
			 * interrupt. Let the task sleep.
			 * An extra task reference is needed since a different
			 * cpu may set the task state to TASK_RUNNING again
			 * before the scheduler is reached. */
			get_task_struct(tsk);
			tsk->thread.pfault_wait = 1;
			list_add(&tsk->thread.list, &pfault_list);
block:
			/* Since this must be a userspace fault, there
			 * is no kernel task state to trample. Rely on the
			 * return to userspace schedule() to block. */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			set_tsk_need_resched(tsk);
			set_preempt_need_resched();
		}
	}
out:
	spin_unlock(&pfault_lock);
	put_task_struct(tsk);
}

static int pfault_cpu_dead(unsigned int cpu)
{
	struct thread_struct *thread, *next;
	struct task_struct *tsk;

	spin_lock_irq(&pfault_lock);
	list_for_each_entry_safe(thread, next, &pfault_list, list) {
		thread->pfault_wait = 0;
		list_del(&thread->list);
		tsk = container_of(thread, struct task_struct, thread);
		wake_up_process(tsk);
		put_task_struct(tsk);
	}
	spin_unlock_irq(&pfault_lock);
	return 0;
}

static int __init pfault_irq_init(void)
{
	int rc;

	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
	if (rc)
		goto out_extint;
	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
	if (rc)
		goto out_pfault;
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
				  NULL, pfault_cpu_dead);
	return 0;

out_pfault:
	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
	pfault_disable = 1;
	return rc;
}
early_initcall(pfault_irq_init);

#endif /* CONFIG_PFAULT */

#if IS_ENABLED(CONFIG_PGSTE)

void do_secure_storage_access(struct pt_regs *regs)
{
	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct page *page;
	struct gmap *gmap;
	int rc;

	/*
	 * Bit 61 tells us if the address is valid, if it's not we
	 * have a major problem and should stop the kernel or send a
	 * SIGSEGV to the process. Unfortunately bit 61 is not
	 * reliable without the misc UV feature so we need to check
	 * for that as well.
	 */
	if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
	    !test_bit_inv(61, &regs->int_parm_long)) {
		/*
		 * When this happens, userspace did something that it
		 * was not supposed to do, e.g. branching into secure
		 * memory. Trigger a segmentation fault.
		 */
		if (user_mode(regs)) {
			send_sig(SIGSEGV, current, 0);
			return;
		}

		/*
		 * The kernel should never run into this case and we
		 * have no way out of this situation.
		 */
		panic("Unexpected PGM 0x3d with TEID bit 61=0");
	}

	switch (get_fault_type(regs)) {
	case GMAP_FAULT:
		mm = current->mm;
		gmap = (struct gmap *)S390_lowcore.gmap;
		mmap_read_lock(mm);
		addr = __gmap_translate(gmap, addr);
		mmap_read_unlock(mm);
		if (IS_ERR_VALUE(addr)) {
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		fallthrough;
	case USER_FAULT:
		mm = current->mm;
		mmap_read_lock(mm);
		vma = find_vma(mm, addr);
		if (!vma) {
			mmap_read_unlock(mm);
			do_fault_error(regs, VM_FAULT_BADMAP);
			break;
		}
		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
		if (IS_ERR_OR_NULL(page)) {
			mmap_read_unlock(mm);
			break;
		}
		if (arch_make_page_accessible(page))
			send_sig(SIGSEGV, current, 0);
		put_page(page);
		mmap_read_unlock(mm);
		break;
	case KERNEL_FAULT:
		page = phys_to_page(addr);
		if (unlikely(!try_get_page(page)))
			break;
		rc = arch_make_page_accessible(page);
		put_page(page);
		if (rc)
			BUG();
		break;
	default:
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
	}
}
NOKPROBE_SYMBOL(do_secure_storage_access);

void do_non_secure_storage_access(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	if (get_fault_type(regs) != GMAP_FAULT) {
		do_fault_error(regs, VM_FAULT_BADMAP);
		WARN_ON_ONCE(1);
		return;
	}

	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
		send_sig(SIGSEGV, current, 0);
}
NOKPROBE_SYMBOL(do_non_secure_storage_access);

void do_secure_storage_violation(struct pt_regs *regs)
{
	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;

	/*
	 * If the VM has been rebooted, its address space might still contain
	 * secure pages from the previous boot.
	 * Clear the page so it can be reused.
	 */
	if (!gmap_destroy_page(gmap, gaddr))
		return;
	/*
	 * Either KVM messed up the secure guest mapping or the same
	 * page is mapped into multiple secure guests.
	 *
	 * This exception is only triggered when a guest 2 is running
	 * and can therefore never occur in kernel context.
	 */
	printk_ratelimited(KERN_WARNING
			   "Secure storage violation in task: %s, pid %d\n",
			   current->comm, current->pid);
	send_sig(SIGSEGV, current, 0);
}

#endif /* CONFIG_PGSTE */