v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *               Ulrich Weigand (uweigand@de.ibm.com)
  7 *
  8 *  Derived from "arch/i386/mm/fault.c"
  9 *    Copyright (C) 1995  Linus Torvalds
 10 */
 11
 12#include <linux/kernel_stat.h>
 13#include <linux/perf_event.h>
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/sched/debug.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/ptrace.h>
 22#include <linux/mman.h>
 23#include <linux/mm.h>
 24#include <linux/compat.h>
 25#include <linux/smp.h>
 26#include <linux/kdebug.h>
 27#include <linux/init.h>
 28#include <linux/console.h>
 29#include <linux/extable.h>
 30#include <linux/hardirq.h>
 31#include <linux/kprobes.h>
 32#include <linux/uaccess.h>
 33#include <linux/hugetlb.h>
 34#include <asm/asm-offsets.h>
 35#include <asm/diag.h>
 36#include <asm/pgtable.h>
 37#include <asm/gmap.h>
 38#include <asm/irq.h>
 39#include <asm/mmu_context.h>
 40#include <asm/facility.h>
 41#include "../kernel/entry.h"
 42
 43#define __FAIL_ADDR_MASK -4096L
 44#define __SUBCODE_MASK 0x0600
 45#define __PF_RES_FIELD 0x8000000000000000ULL
 46
 47#define VM_FAULT_BADCONTEXT	0x010000
 48#define VM_FAULT_BADMAP		0x020000
 49#define VM_FAULT_BADACCESS	0x040000
 50#define VM_FAULT_SIGNAL		0x080000
 51#define VM_FAULT_PFAULT		0x100000
 52
 53enum fault_type {
 54	KERNEL_FAULT,
 55	USER_FAULT,
 56	VDSO_FAULT,
 57	GMAP_FAULT,
 58};
 59
 60static unsigned long store_indication __read_mostly;
 61
 62static int __init fault_init(void)
 63{
 64	if (test_facility(75))
 65		store_indication = 0xc00;
 66	return 0;
 67}
 68early_initcall(fault_init);
 69
 70/*
 71 * Find out which address space caused the exception.
 72 */
 73static enum fault_type get_fault_type(struct pt_regs *regs)
 74{
 75	unsigned long trans_exc_code;
 76
 77	trans_exc_code = regs->int_parm_long & 3;
 78	if (likely(trans_exc_code == 0)) {
 79		/* primary space exception */
 80		if (IS_ENABLED(CONFIG_PGSTE) &&
 81		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
 82			return GMAP_FAULT;
 83		if (current->thread.mm_segment == USER_DS)
 84			return USER_FAULT;
 85		return KERNEL_FAULT;
 86	}
 87	if (trans_exc_code == 2) {
 88		/* secondary space exception */
 89		if (current->thread.mm_segment & 1) {
 90			if (current->thread.mm_segment == USER_DS_SACF)
 91				return USER_FAULT;
 92			return KERNEL_FAULT;
 93		}
 94		return VDSO_FAULT;
 95	}
 96	if (trans_exc_code == 1) {
 97		/* access register mode, not used in the kernel */
 98		return USER_FAULT;
 99	}
100	/* home space exception -> access via kernel ASCE */
101	return KERNEL_FAULT;
102}
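
The switch above keys entirely off the two low bits of the translation-exception identification (TEID) that the hardware stores in int_parm_long: 0 selects primary space, 1 access-register mode, 2 secondary space and 3 home space. As a rough standalone illustration of just that decode (the sample value is made up; nothing here is taken from this file):

#include <stdio.h>

/* Toy decoder mirroring get_fault_type()'s use of the two low TEID bits. */
static const char *fault_space(unsigned long trans_exc_code)
{
	switch (trans_exc_code & 3) {
	case 0: return "primary space";
	case 1: return "access register mode";
	case 2: return "secondary space";
	default: return "home space";
	}
}

int main(void)
{
	unsigned long sample_teid = 0x000003ffe0a01002UL; /* made-up TEID */

	printf("%s\n", fault_space(sample_teid)); /* prints "secondary space" */
	return 0;
}
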
103
104static int bad_address(void *p)
105{
106	unsigned long dummy;
107
108	return probe_kernel_address((unsigned long *)p, dummy);
109}
110
111static void dump_pagetable(unsigned long asce, unsigned long address)
112{
113	unsigned long *table = __va(asce & _ASCE_ORIGIN);
114
115	pr_alert("AS:%016lx ", asce);
116	switch (asce & _ASCE_TYPE_MASK) {
117	case _ASCE_TYPE_REGION1:
118		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
119		if (bad_address(table))
120			goto bad;
121		pr_cont("R1:%016lx ", *table);
122		if (*table & _REGION_ENTRY_INVALID)
123			goto out;
124		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
125		/* fallthrough */
126	case _ASCE_TYPE_REGION2:
127		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
128		if (bad_address(table))
129			goto bad;
130		pr_cont("R2:%016lx ", *table);
131		if (*table & _REGION_ENTRY_INVALID)
132			goto out;
133		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
134		/* fallthrough */
135	case _ASCE_TYPE_REGION3:
136		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
137		if (bad_address(table))
138			goto bad;
139		pr_cont("R3:%016lx ", *table);
140		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
141			goto out;
142		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
143		/* fallthrough */
144	case _ASCE_TYPE_SEGMENT:
145		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
146		if (bad_address(table))
147			goto bad;
148		pr_cont("S:%016lx ", *table);
149		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
150			goto out;
151		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
152	}
153	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
154	if (bad_address(table))
155		goto bad;
156	pr_cont("P:%016lx ", *table);
157out:
158	pr_cont("\n");
159	return;
160bad:
161	pr_cont("BAD\n");
162}
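
dump_pagetable() extracts one table index per translation level by masking the failing address and shifting it down. A standalone sketch of that arithmetic, assuming the usual s390 shift values (53/42/31/20/12) and index widths (11 bits for the upper levels, 8 bits for the page index); the file itself only uses the symbolic _REGION*/_SEGMENT/_PAGE macros, so treat these constants as assumptions:

#include <stdio.h>

#define REGION1_SHIFT	53	/* assumed s390 values, see lead-in */
#define REGION2_SHIFT	42
#define REGION3_SHIFT	31
#define SEGMENT_SHIFT	20
#define PAGE_SHIFT	12

int main(void)
{
	unsigned long addr = 0x0000aaaabbbbc123UL;	/* arbitrary example */

	/* One index per level, exactly like the mask-and-shift steps above. */
	printf("R1:%lu R2:%lu R3:%lu SX:%lu PX:%lu\n",
	       (addr >> REGION1_SHIFT) & 0x7ff,
	       (addr >> REGION2_SHIFT) & 0x7ff,
	       (addr >> REGION3_SHIFT) & 0x7ff,
	       (addr >> SEGMENT_SHIFT) & 0x7ff,
	       (addr >> PAGE_SHIFT) & 0xff);
	return 0;
}
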
163
164static void dump_fault_info(struct pt_regs *regs)
165{
166	unsigned long asce;
167
168	pr_alert("Failing address: %016lx TEID: %016lx\n",
169		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
170	pr_alert("Fault in ");
171	switch (regs->int_parm_long & 3) {
172	case 3:
173		pr_cont("home space ");
174		break;
175	case 2:
176		pr_cont("secondary space ");
177		break;
178	case 1:
179		pr_cont("access register ");
180		break;
181	case 0:
182		pr_cont("primary space ");
183		break;
184	}
185	pr_cont("mode while using ");
186	switch (get_fault_type(regs)) {
187	case USER_FAULT:
188		asce = S390_lowcore.user_asce;
189		pr_cont("user ");
190		break;
191	case VDSO_FAULT:
192		asce = S390_lowcore.vdso_asce;
193		pr_cont("vdso ");
194		break;
195	case GMAP_FAULT:
196		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
197		pr_cont("gmap ");
198		break;
199	case KERNEL_FAULT:
200		asce = S390_lowcore.kernel_asce;
201		pr_cont("kernel ");
202		break;
203	default:
204		unreachable();
205	}
206	pr_cont("ASCE.\n");
207	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
208}
209
210int show_unhandled_signals = 1;
211
212void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
213{
214	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
215		return;
216	if (!unhandled_signal(current, signr))
217		return;
218	if (!printk_ratelimit())
219		return;
220	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
221	       regs->int_code & 0xffff, regs->int_code >> 17);
222	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
223	printk(KERN_CONT "\n");
224	if (is_mm_fault)
225		dump_fault_info(regs);
226	show_regs(regs);
227}
228
229/*
230 * Send SIGSEGV to task.  This is an external routine
231 * to keep the stack usage of do_page_fault small.
232 */
233static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
234{
235	report_user_fault(regs, SIGSEGV, 1);
236	force_sig_fault(SIGSEGV, si_code,
237			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
238}
239
240const struct exception_table_entry *s390_search_extables(unsigned long addr)
241{
242	const struct exception_table_entry *fixup;
243
244	fixup = search_extable(__start_dma_ex_table,
245			       __stop_dma_ex_table - __start_dma_ex_table,
246			       addr);
247	if (!fixup)
248		fixup = search_exception_tables(addr);
249	return fixup;
250}
251
252static noinline void do_no_context(struct pt_regs *regs)
253{
254	const struct exception_table_entry *fixup;
255
256	/* Are we prepared to handle this kernel fault?  */
257	fixup = s390_search_extables(regs->psw.addr);
258	if (fixup) {
259		regs->psw.addr = extable_fixup(fixup);
260		return;
261	}
262
263	/*
264	 * Oops. The kernel tried to access some bad page. We'll have to
265	 * terminate things with extreme prejudice.
266	 */
267	if (get_fault_type(regs) == KERNEL_FAULT)
268		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
269		       " in virtual kernel address space\n");
270	else
271		printk(KERN_ALERT "Unable to handle kernel paging request"
272		       " in virtual user address space\n");
273	dump_fault_info(regs);
274	die(regs, "Oops");
275	do_exit(SIGKILL);
276}
277
278static noinline void do_low_address(struct pt_regs *regs)
279{
280	/* Low-address protection hit in kernel mode means
281	   NULL pointer write access in kernel mode.  */
282	if (regs->psw.mask & PSW_MASK_PSTATE) {
283		/* Low-address protection hit in user mode 'cannot happen'. */
284		die (regs, "Low-address protection");
285		do_exit(SIGKILL);
286	}
287
288	do_no_context(regs);
289}
290
291static noinline void do_sigbus(struct pt_regs *regs)
292{
293	/*
294	 * Send a sigbus, regardless of whether we were in kernel
295	 * or user mode.
296	 */
297	force_sig_fault(SIGBUS, BUS_ADRERR,
298			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
299}
300
301static noinline int signal_return(struct pt_regs *regs)
302{
303	u16 instruction;
304	int rc;
305
306	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
307	if (rc)
308		return rc;
309	if (instruction == 0x0a77) {
310		set_pt_regs_flag(regs, PIF_SYSCALL);
311		regs->int_code = 0x00040077;
312		return 0;
313	} else if (instruction == 0x0aad) {
314		set_pt_regs_flag(regs, PIF_SYSCALL);
315		regs->int_code = 0x000400ad;
316		return 0;
317	}
318	return -EACCES;
319}
320
321static noinline void do_fault_error(struct pt_regs *regs, int access,
322					vm_fault_t fault)
323{
324	int si_code;
325
326	switch (fault) {
327	case VM_FAULT_BADACCESS:
328		if (access == VM_EXEC && signal_return(regs) == 0)
329			break;
330		/* fallthrough */
331	case VM_FAULT_BADMAP:
332		/* Bad memory access. Check if it is kernel or user space. */
333		if (user_mode(regs)) {
334			/* User mode accesses just cause a SIGSEGV */
335			si_code = (fault == VM_FAULT_BADMAP) ?
336				SEGV_MAPERR : SEGV_ACCERR;
337			do_sigsegv(regs, si_code);
338			break;
339		}
340		/* fallthrough */
341	case VM_FAULT_BADCONTEXT:
342		/* fallthrough */
343	case VM_FAULT_PFAULT:
344		do_no_context(regs);
345		break;
346	case VM_FAULT_SIGNAL:
347		if (!user_mode(regs))
348			do_no_context(regs);
349		break;
350	default: /* fault & VM_FAULT_ERROR */
351		if (fault & VM_FAULT_OOM) {
352			if (!user_mode(regs))
353				do_no_context(regs);
354			else
355				pagefault_out_of_memory();
356		} else if (fault & VM_FAULT_SIGSEGV) {
357			/* Kernel mode? Handle exceptions or die */
358			if (!user_mode(regs))
359				do_no_context(regs);
360			else
361				do_sigsegv(regs, SEGV_MAPERR);
362		} else if (fault & VM_FAULT_SIGBUS) {
363			/* Kernel mode? Handle exceptions or die */
364			if (!user_mode(regs))
365				do_no_context(regs);
366			else
367				do_sigbus(regs);
368		} else
369			BUG();
370		break;
371	}
372}
373
374/*
375 * This routine handles page faults.  It determines the address,
376 * and the problem, and then passes it off to one of the appropriate
377 * routines.
378 *
379 * interruption code (int_code):
 380 *   04       Protection           ->  Write-Protection  (suppression)
381 *   10       Segment translation  ->  Not present       (nullification)
382 *   11       Page translation     ->  Not present       (nullification)
383 *   3b       Region third trans.  ->  Not present       (nullification)
384 */
385static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
386{
387	struct gmap *gmap;
388	struct task_struct *tsk;
389	struct mm_struct *mm;
390	struct vm_area_struct *vma;
391	enum fault_type type;
392	unsigned long trans_exc_code;
393	unsigned long address;
394	unsigned int flags;
395	vm_fault_t fault;
396
397	tsk = current;
398	/*
399	 * The instruction that caused the program check has
400	 * been nullified. Don't signal single step via SIGTRAP.
401	 */
402	clear_pt_regs_flag(regs, PIF_PER_TRAP);
403
404	if (kprobe_page_fault(regs, 14))
405		return 0;
406
407	mm = tsk->mm;
408	trans_exc_code = regs->int_parm_long;
409
410	/*
411	 * Verify that the fault happened in user space, that
 412	 * we are not in an interrupt and that there is a
413	 * user context.
414	 */
415	fault = VM_FAULT_BADCONTEXT;
416	type = get_fault_type(regs);
417	switch (type) {
418	case KERNEL_FAULT:
419		goto out;
420	case VDSO_FAULT:
421		fault = VM_FAULT_BADMAP;
422		goto out;
423	case USER_FAULT:
424	case GMAP_FAULT:
425		if (faulthandler_disabled() || !mm)
426			goto out;
427		break;
428	}
429
430	address = trans_exc_code & __FAIL_ADDR_MASK;
431	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
432	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
433	if (user_mode(regs))
434		flags |= FAULT_FLAG_USER;
435	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
436		flags |= FAULT_FLAG_WRITE;
437	down_read(&mm->mmap_sem);
438
439	gmap = NULL;
440	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
441		gmap = (struct gmap *) S390_lowcore.gmap;
442		current->thread.gmap_addr = address;
443		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
444		current->thread.gmap_int_code = regs->int_code & 0xffff;
445		address = __gmap_translate(gmap, address);
446		if (address == -EFAULT) {
447			fault = VM_FAULT_BADMAP;
448			goto out_up;
449		}
450		if (gmap->pfault_enabled)
451			flags |= FAULT_FLAG_RETRY_NOWAIT;
452	}
453
454retry:
455	fault = VM_FAULT_BADMAP;
456	vma = find_vma(mm, address);
457	if (!vma)
458		goto out_up;
459
460	if (unlikely(vma->vm_start > address)) {
461		if (!(vma->vm_flags & VM_GROWSDOWN))
462			goto out_up;
463		if (expand_stack(vma, address))
464			goto out_up;
465	}
466
467	/*
468	 * Ok, we have a good vm_area for this memory access, so
469	 * we can handle it..
470	 */
471	fault = VM_FAULT_BADACCESS;
472	if (unlikely(!(vma->vm_flags & access)))
473		goto out_up;
474
475	if (is_vm_hugetlb_page(vma))
476		address &= HPAGE_MASK;
477	/*
478	 * If for any reason at all we couldn't handle the fault,
479	 * make sure we exit gracefully rather than endlessly redo
480	 * the fault.
481	 */
482	fault = handle_mm_fault(vma, address, flags);
483	/* No reason to continue if interrupted by SIGKILL. */
484	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
485		fault = VM_FAULT_SIGNAL;
486		if (flags & FAULT_FLAG_RETRY_NOWAIT)
487			goto out_up;
488		goto out;
489	}
490	if (unlikely(fault & VM_FAULT_ERROR))
491		goto out_up;
492
493	/*
494	 * Major/minor page fault accounting is only done on the
495	 * initial attempt. If we go through a retry, it is extremely
496	 * likely that the page will be found in page cache at that point.
497	 */
498	if (flags & FAULT_FLAG_ALLOW_RETRY) {
499		if (fault & VM_FAULT_MAJOR) {
500			tsk->maj_flt++;
501			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
502				      regs, address);
503		} else {
504			tsk->min_flt++;
505			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
506				      regs, address);
507		}
508		if (fault & VM_FAULT_RETRY) {
509			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
510			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
511				/* FAULT_FLAG_RETRY_NOWAIT has been set,
512				 * mmap_sem has not been released */
513				current->thread.gmap_pfault = 1;
514				fault = VM_FAULT_PFAULT;
515				goto out_up;
516			}
517			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
518			 * of starvation. */
519			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
520				   FAULT_FLAG_RETRY_NOWAIT);
521			flags |= FAULT_FLAG_TRIED;
522			down_read(&mm->mmap_sem);
523			goto retry;
524		}
525	}
526	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
527		address =  __gmap_link(gmap, current->thread.gmap_addr,
528				       address);
529		if (address == -EFAULT) {
530			fault = VM_FAULT_BADMAP;
531			goto out_up;
532		}
533		if (address == -ENOMEM) {
534			fault = VM_FAULT_OOM;
535			goto out_up;
536		}
537	}
538	fault = 0;
539out_up:
540	up_read(&mm->mmap_sem);
541out:
542	return fault;
543}
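
The maj_flt/min_flt counters bumped in the accounting block above surface in user space through getrusage(); a small runnable probe (plain Linux C, nothing s390-specific) that provokes minor faults and reads the counters back:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;
	char *buf = malloc(1 << 20);

	if (!buf)
		return 1;
	memset(buf, 0, 1 << 20);	/* touching new pages -> minor faults */
	getrusage(RUSAGE_SELF, &ru);
	printf("minor faults: %ld, major faults: %ld\n",
	       ru.ru_minflt, ru.ru_majflt);
	free(buf);
	return 0;
}
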
544
545void do_protection_exception(struct pt_regs *regs)
546{
547	unsigned long trans_exc_code;
548	int access;
549	vm_fault_t fault;
550
551	trans_exc_code = regs->int_parm_long;
552	/*
553	 * Protection exceptions are suppressing, decrement psw address.
554	 * The exception to this rule are aborted transactions, for these
555	 * the PSW already points to the correct location.
556	 */
557	if (!(regs->int_code & 0x200))
558		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
559	/*
560	 * Check for low-address protection.  This needs to be treated
561	 * as a special case because the translation exception code
562	 * field is not guaranteed to contain valid data in this case.
563	 */
564	if (unlikely(!(trans_exc_code & 4))) {
565		do_low_address(regs);
566		return;
567	}
568	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
569		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
570					(regs->psw.addr & PAGE_MASK);
571		access = VM_EXEC;
572		fault = VM_FAULT_BADACCESS;
573	} else {
574		access = VM_WRITE;
575		fault = do_exception(regs, access);
576	}
577	if (unlikely(fault))
578		do_fault_error(regs, access, fault);
579}
580NOKPROBE_SYMBOL(do_protection_exception);
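
A worked example of the PSW rewind above: report_user_fault() prints the instruction-length code as int_code >> 17, so int_code >> 16 is twice the ILC, i.e. the faulting instruction's length in bytes, and that is the amount __rewind_psw() subtracts. The field position is inferred from those two shifts in this file, not quoted from architecture documentation:

#include <stdio.h>

int main(void)
{
	unsigned int int_code = 0x00060004;	/* hypothetical: ILC 3, code 0x04 */
	unsigned long psw_addr = 0x000000000042d006UL;	/* made-up address */
	unsigned long ilen = int_code >> 16;	/* ILC 3 -> 6 bytes */

	printf("rewound PSW address: %016lx\n", psw_addr - ilen);
	return 0;
}
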
581
582void do_dat_exception(struct pt_regs *regs)
583{
584	int access;
585	vm_fault_t fault;
586
587	access = VM_READ | VM_EXEC | VM_WRITE;
588	fault = do_exception(regs, access);
589	if (unlikely(fault))
590		do_fault_error(regs, access, fault);
591}
592NOKPROBE_SYMBOL(do_dat_exception);
593
594#ifdef CONFIG_PFAULT
595/*
596 * 'pfault' pseudo page faults routines.
597 */
598static int pfault_disable;
599
600static int __init nopfault(char *str)
601{
602	pfault_disable = 1;
603	return 1;
604}
605
606__setup("nopfault", nopfault);
607
608struct pfault_refbk {
609	u16 refdiagc;
610	u16 reffcode;
611	u16 refdwlen;
612	u16 refversn;
613	u64 refgaddr;
614	u64 refselmk;
615	u64 refcmpmk;
616	u64 reserved;
617} __attribute__ ((packed, aligned(8)));
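
refdwlen gives the block length in doublewords, so the packed layout above must come out at exactly 5 * 8 = 40 bytes (4 * u16 + 4 * u64). A compile-time check of that arithmetic against a stand-alone re-declaration (not the kernel's own type):

#include <stdint.h>

struct pfault_refbk_check {
	uint16_t refdiagc, reffcode, refdwlen, refversn;
	uint64_t refgaddr, refselmk, refcmpmk, reserved;
} __attribute__((packed, aligned(8)));

_Static_assert(sizeof(struct pfault_refbk_check) == 5 * 8,
	       "refdwlen (5 doublewords) must match the structure size");
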
618
619static struct pfault_refbk pfault_init_refbk = {
620	.refdiagc = 0x258,
621	.reffcode = 0,
622	.refdwlen = 5,
623	.refversn = 2,
624	.refgaddr = __LC_LPP,
625	.refselmk = 1ULL << 48,
626	.refcmpmk = 1ULL << 48,
627	.reserved = __PF_RES_FIELD
628};
629
630int pfault_init(void)
631{
632        int rc;
633
634	if (pfault_disable)
635		return -1;
636	diag_stat_inc(DIAG_STAT_X258);
637	asm volatile(
638		"	diag	%1,%0,0x258\n"
639		"0:	j	2f\n"
640		"1:	la	%0,8\n"
641		"2:\n"
642		EX_TABLE(0b,1b)
643		: "=d" (rc)
644		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
645        return rc;
646}
647
648static struct pfault_refbk pfault_fini_refbk = {
649	.refdiagc = 0x258,
650	.reffcode = 1,
651	.refdwlen = 5,
652	.refversn = 2,
653};
654
655void pfault_fini(void)
656{
657
658	if (pfault_disable)
659		return;
660	diag_stat_inc(DIAG_STAT_X258);
661	asm volatile(
662		"	diag	%0,0,0x258\n"
663		"0:	nopr	%%r7\n"
664		EX_TABLE(0b,0b)
665		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
666}
667
668static DEFINE_SPINLOCK(pfault_lock);
669static LIST_HEAD(pfault_list);
670
671#define PF_COMPLETE	0x0080
672
673/*
674 * The mechanism of our pfault code: if Linux is running as guest, runs a user
675 * space process and the user space process accesses a page that the host has
676 * paged out we get a pfault interrupt.
677 *
678 * This allows us, within the guest, to schedule a different process. Without
679 * this mechanism the host would have to suspend the whole virtual cpu until
680 * the page has been paged in.
681 *
682 * So when we get such an interrupt then we set the state of the current task
683 * to uninterruptible and also set the need_resched flag. Both happens within
684 * interrupt context(!). If we later on want to return to user space we
685 * recognize the need_resched flag and then call schedule().  It's not very
686 * obvious how this works...
687 *
688 * Of course we have a lot of additional fun with the completion interrupt (->
689 * host signals that a page of a process has been paged in and the process can
690 * continue to run). This interrupt can arrive on any cpu and, since we have
691 * virtual cpus, actually appear before the interrupt that signals that a page
692 * is missing.
693 */
694static void pfault_interrupt(struct ext_code ext_code,
695			     unsigned int param32, unsigned long param64)
696{
697	struct task_struct *tsk;
698	__u16 subcode;
699	pid_t pid;
700
701	/*
702	 * Get the external interruption subcode & pfault initial/completion
703	 * signal bit. VM stores this in the 'cpu address' field associated
704	 * with the external interrupt.
705	 */
706	subcode = ext_code.subcode;
707	if ((subcode & 0xff00) != __SUBCODE_MASK)
708		return;
709	inc_irq_stat(IRQEXT_PFL);
710	/* Get the token (= pid of the affected task). */
711	pid = param64 & LPP_PID_MASK;
712	rcu_read_lock();
713	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
714	if (tsk)
715		get_task_struct(tsk);
716	rcu_read_unlock();
717	if (!tsk)
718		return;
719	spin_lock(&pfault_lock);
720	if (subcode & PF_COMPLETE) {
721		/* signal bit is set -> a page has been swapped in by VM */
722		if (tsk->thread.pfault_wait == 1) {
723			/* Initial interrupt was faster than the completion
724			 * interrupt. pfault_wait is valid. Set pfault_wait
725			 * back to zero and wake up the process. This can
726			 * safely be done because the task is still sleeping
727			 * and can't produce new pfaults. */
728			tsk->thread.pfault_wait = 0;
729			list_del(&tsk->thread.list);
730			wake_up_process(tsk);
731			put_task_struct(tsk);
732		} else {
733			/* Completion interrupt was faster than initial
734			 * interrupt. Set pfault_wait to -1 so the initial
735			 * interrupt doesn't put the task to sleep.
736			 * If the task is not running, ignore the completion
737			 * interrupt since it must be a leftover of a PFAULT
738			 * CANCEL operation which didn't remove all pending
739			 * completion interrupts. */
740			if (tsk->state == TASK_RUNNING)
741				tsk->thread.pfault_wait = -1;
742		}
743	} else {
744		/* signal bit not set -> a real page is missing. */
745		if (WARN_ON_ONCE(tsk != current))
746			goto out;
747		if (tsk->thread.pfault_wait == 1) {
748			/* Already on the list with a reference: put to sleep */
749			goto block;
750		} else if (tsk->thread.pfault_wait == -1) {
751			/* Completion interrupt was faster than the initial
752			 * interrupt (pfault_wait == -1). Set pfault_wait
753			 * back to zero and exit. */
754			tsk->thread.pfault_wait = 0;
755		} else {
756			/* Initial interrupt arrived before completion
757			 * interrupt. Let the task sleep.
758			 * An extra task reference is needed since a different
759			 * cpu may set the task state to TASK_RUNNING again
760			 * before the scheduler is reached. */
761			get_task_struct(tsk);
762			tsk->thread.pfault_wait = 1;
763			list_add(&tsk->thread.list, &pfault_list);
764block:
765			/* Since this must be a userspace fault, there
766			 * is no kernel task state to trample. Rely on the
767			 * return to userspace schedule() to block. */
768			__set_current_state(TASK_UNINTERRUPTIBLE);
769			set_tsk_need_resched(tsk);
770			set_preempt_need_resched();
771		}
772	}
773out:
774	spin_unlock(&pfault_lock);
775	put_task_struct(tsk);
776}
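
The ordering games described in the comment before pfault_interrupt() reduce to a small state machine on pfault_wait: 0 idle, 1 blocked waiting for completion, -1 completion arrived first. A toy single-threaded model of just that handshake, deliberately ignoring locking, task lookup and CPU hotplug:

#include <stdio.h>

static int pfault_wait;	/* 0 idle, 1 blocked, -1 completion seen first */

static void initial_interrupt(void)
{
	if (pfault_wait == -1)
		pfault_wait = 0;	/* completion already seen: don't sleep */
	else
		pfault_wait = 1;	/* block until the completion arrives */
}

static void completion_interrupt(void)
{
	if (pfault_wait == 1)
		pfault_wait = 0;	/* wake the sleeper */
	else
		pfault_wait = -1;	/* completion overtook the initial irq */
}

int main(void)
{
	completion_interrupt();	/* out-of-order delivery */
	initial_interrupt();
	printf("state after out-of-order pair: %d\n", pfault_wait); /* 0 */
	return 0;
}
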
777
778static int pfault_cpu_dead(unsigned int cpu)
779{
780	struct thread_struct *thread, *next;
781	struct task_struct *tsk;
782
783	spin_lock_irq(&pfault_lock);
784	list_for_each_entry_safe(thread, next, &pfault_list, list) {
785		thread->pfault_wait = 0;
786		list_del(&thread->list);
787		tsk = container_of(thread, struct task_struct, thread);
788		wake_up_process(tsk);
789		put_task_struct(tsk);
790	}
791	spin_unlock_irq(&pfault_lock);
792	return 0;
793}
794
795static int __init pfault_irq_init(void)
796{
797	int rc;
798
799	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
800	if (rc)
801		goto out_extint;
802	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
803	if (rc)
804		goto out_pfault;
805	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
806	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
807				  NULL, pfault_cpu_dead);
808	return 0;
809
810out_pfault:
811	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
812out_extint:
813	pfault_disable = 1;
814	return rc;
815}
816early_initcall(pfault_irq_init);
817
818#endif /* CONFIG_PFAULT */
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *		 Ulrich Weigand (uweigand@de.ibm.com)
  7 *
  8 *  Derived from "arch/i386/mm/fault.c"
  9 *    Copyright (C) 1995  Linus Torvalds
 10 */
 11
 12#include <linux/kernel_stat.h>
 13#include <linux/mmu_context.h>
 14#include <linux/perf_event.h>
 15#include <linux/signal.h>
 16#include <linux/sched.h>
 17#include <linux/sched/debug.h>
 18#include <linux/jump_label.h>
 19#include <linux/kernel.h>
 20#include <linux/errno.h>
 21#include <linux/string.h>
 22#include <linux/types.h>
 23#include <linux/ptrace.h>
 24#include <linux/mman.h>
 25#include <linux/mm.h>
 26#include <linux/compat.h>
 27#include <linux/smp.h>
 28#include <linux/kdebug.h>
 29#include <linux/init.h>
 30#include <linux/console.h>
 31#include <linux/extable.h>
 32#include <linux/hardirq.h>
 33#include <linux/kprobes.h>
 34#include <linux/uaccess.h>
 35#include <linux/hugetlb.h>
 36#include <linux/kfence.h>
 37#include <asm/asm-extable.h>
 38#include <asm/asm-offsets.h>
 39#include <asm/ptrace.h>
 40#include <asm/fault.h>
 41#include <asm/diag.h>
 42#include <asm/gmap.h>
 43#include <asm/irq.h>
 44#include <asm/facility.h>
 45#include <asm/uv.h>
 46#include "../kernel/entry.h"
 47
 48enum fault_type {
 49	KERNEL_FAULT,
 50	USER_FAULT,
 51	GMAP_FAULT,
 52};
 53
 54static DEFINE_STATIC_KEY_FALSE(have_store_indication);
 55
 56static int __init fault_init(void)
 57{
 58	if (test_facility(75))
 59		static_branch_enable(&have_store_indication);
 60	return 0;
 61}
 62early_initcall(fault_init);
 63
 64/*
 65 * Find out which address space caused the exception.
 66 */
 67static enum fault_type get_fault_type(struct pt_regs *regs)
 68{
 69	union teid teid = { .val = regs->int_parm_long };
 70
 71	if (likely(teid.as == PSW_BITS_AS_PRIMARY)) {
 72		if (user_mode(regs))
 73			return USER_FAULT;
 74		if (!IS_ENABLED(CONFIG_PGSTE))
 75			return KERNEL_FAULT;
 76		if (test_pt_regs_flag(regs, PIF_GUEST_FAULT))
 77			return GMAP_FAULT;
 78		return KERNEL_FAULT;
 79	}
 80	if (teid.as == PSW_BITS_AS_SECONDARY)
 81		return USER_FAULT;
 82	/* Access register mode, not used in the kernel */
 83	if (teid.as == PSW_BITS_AS_ACCREG)
 84		return USER_FAULT;
 85	/* Home space -> access via kernel ASCE */
 86	return KERNEL_FAULT;
 87}
 88
 89static unsigned long get_fault_address(struct pt_regs *regs)
 90{
 91	union teid teid = { .val = regs->int_parm_long };
 92
 93	return teid.addr * PAGE_SIZE;
 94}
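
v6.8 replaces v5.4's open-coded masks (__FAIL_ADDR_MASK and the "& 3") with the union teid bitfields from asm/fault.h. A standalone approximation using plain shifts, assuming teid.addr spans the top 52 bits and teid.as the bottom two, which is what teid.addr * PAGE_SIZE and the PSW_BITS_AS_* comparisons imply:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long teid_val = 0x000003ffe0a01002UL;	/* made-up TEID */
	unsigned long addr = (teid_val >> 12) * PAGE_SIZE; /* == val & ~0xfffUL */
	unsigned int as = teid_val & 3;	/* 0 primary, 2 secondary, 3 home */

	printf("failing address: %016lx, as: %u\n", addr, as);
	return 0;
}
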
 95
 96static __always_inline bool fault_is_write(struct pt_regs *regs)
 97{
 98	union teid teid = { .val = regs->int_parm_long };
 99
100	if (static_branch_likely(&have_store_indication))
101		return teid.fsi == TEID_FSI_STORE;
102	return false;
103}
104
105static void dump_pagetable(unsigned long asce, unsigned long address)
106{
107	unsigned long entry, *table = __va(asce & _ASCE_ORIGIN);
108
109	pr_alert("AS:%016lx ", asce);
110	switch (asce & _ASCE_TYPE_MASK) {
111	case _ASCE_TYPE_REGION1:
112		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
113		if (get_kernel_nofault(entry, table))
114			goto bad;
115		pr_cont("R1:%016lx ", entry);
116		if (entry & _REGION_ENTRY_INVALID)
117			goto out;
118		table = __va(entry & _REGION_ENTRY_ORIGIN);
119		fallthrough;
120	case _ASCE_TYPE_REGION2:
121		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
122		if (get_kernel_nofault(entry, table))
123			goto bad;
124		pr_cont("R2:%016lx ", entry);
125		if (entry & _REGION_ENTRY_INVALID)
126			goto out;
127		table = __va(entry & _REGION_ENTRY_ORIGIN);
128		fallthrough;
129	case _ASCE_TYPE_REGION3:
130		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
131		if (get_kernel_nofault(entry, table))
132			goto bad;
133		pr_cont("R3:%016lx ", entry);
134		if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
135			goto out;
136		table = __va(entry & _REGION_ENTRY_ORIGIN);
137		fallthrough;
138	case _ASCE_TYPE_SEGMENT:
139		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
140		if (get_kernel_nofault(entry, table))
141			goto bad;
142		pr_cont("S:%016lx ", entry);
143		if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
144			goto out;
145		table = __va(entry & _SEGMENT_ENTRY_ORIGIN);
146	}
147	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
148	if (get_kernel_nofault(entry, table))
149		goto bad;
150	pr_cont("P:%016lx ", entry);
151out:
152	pr_cont("\n");
153	return;
154bad:
155	pr_cont("BAD\n");
156}
157
158static void dump_fault_info(struct pt_regs *regs)
159{
160	union teid teid = { .val = regs->int_parm_long };
161	unsigned long asce;
162
163	pr_alert("Failing address: %016lx TEID: %016lx\n",
164		 get_fault_address(regs), teid.val);
165	pr_alert("Fault in ");
166	switch (teid.as) {
167	case PSW_BITS_AS_HOME:
168		pr_cont("home space ");
169		break;
170	case PSW_BITS_AS_SECONDARY:
171		pr_cont("secondary space ");
172		break;
173	case PSW_BITS_AS_ACCREG:
174		pr_cont("access register ");
175		break;
176	case PSW_BITS_AS_PRIMARY:
177		pr_cont("primary space ");
178		break;
179	}
180	pr_cont("mode while using ");
181	switch (get_fault_type(regs)) {
182	case USER_FAULT:
183		asce = S390_lowcore.user_asce.val;
184		pr_cont("user ");
185		break;
186	case GMAP_FAULT:
187		asce = ((struct gmap *)S390_lowcore.gmap)->asce;
188		pr_cont("gmap ");
189		break;
190	case KERNEL_FAULT:
191		asce = S390_lowcore.kernel_asce.val;
192		pr_cont("kernel ");
193		break;
194	default:
195		unreachable();
196	}
197	pr_cont("ASCE.\n");
198	dump_pagetable(asce, get_fault_address(regs));
199}
200
201int show_unhandled_signals = 1;
202
203void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
204{
205	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
206
207	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
208		return;
209	if (!unhandled_signal(current, signr))
210		return;
211	if (!__ratelimit(&rs))
212		return;
213	pr_alert("User process fault: interruption code %04x ilc:%d ",
214		 regs->int_code & 0xffff, regs->int_code >> 17);
215	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
216	pr_cont("\n");
217	if (is_mm_fault)
218		dump_fault_info(regs);
219	show_regs(regs);
220}
221
222static void do_sigsegv(struct pt_regs *regs, int si_code)
223{
224	report_user_fault(regs, SIGSEGV, 1);
225	force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs));
226}
227
228static void handle_fault_error_nolock(struct pt_regs *regs, int si_code)
229{
230	enum fault_type fault_type;
231	unsigned long address;
232	bool is_write;
233
234	if (user_mode(regs)) {
235		if (WARN_ON_ONCE(!si_code))
236			si_code = SEGV_MAPERR;
237		return do_sigsegv(regs, si_code);
238	}
239	if (fixup_exception(regs))
240		return;
241	fault_type = get_fault_type(regs);
242	if (fault_type == KERNEL_FAULT) {
243		address = get_fault_address(regs);
244		is_write = fault_is_write(regs);
245		if (kfence_handle_page_fault(address, is_write, regs))
246			return;
247	}
248	if (fault_type == KERNEL_FAULT)
249		pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n");
250	else
251		pr_alert("Unable to handle kernel paging request in virtual user address space\n");
252	dump_fault_info(regs);
253	die(regs, "Oops");
254}
255
256static void handle_fault_error(struct pt_regs *regs, int si_code)
257{
258	struct mm_struct *mm = current->mm;
259
260	mmap_read_unlock(mm);
261	handle_fault_error_nolock(regs, si_code);
262}
263
264static void do_sigbus(struct pt_regs *regs)
265{
266	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs));
267}
268
269/*
270 * This routine handles page faults.  It determines the address,
271 * and the problem, and then passes it off to one of the appropriate
272 * routines.
273 *
274 * interruption code (int_code):
275 *   04       Protection	   ->  Write-Protection  (suppression)
276 *   10       Segment translation  ->  Not present	 (nullification)
277 *   11       Page translation	   ->  Not present	 (nullification)
278 *   3b       Region third trans.  ->  Not present	 (nullification)
279 */
280static void do_exception(struct pt_regs *regs, int access)
281{
282	struct vm_area_struct *vma;
283	unsigned long address;
284	struct mm_struct *mm;
285	enum fault_type type;
286	unsigned int flags;
287	struct gmap *gmap;
288	vm_fault_t fault;
289	bool is_write;
290
291	/*
292	 * The instruction that caused the program check has
293	 * been nullified. Don't signal single step via SIGTRAP.
294	 */
295	clear_thread_flag(TIF_PER_TRAP);
296	if (kprobe_page_fault(regs, 14))
297		return;
298	mm = current->mm;
299	address = get_fault_address(regs);
300	is_write = fault_is_write(regs);
301	type = get_fault_type(regs);
302	switch (type) {
303	case KERNEL_FAULT:
304		return handle_fault_error_nolock(regs, 0);
305	case USER_FAULT:
306	case GMAP_FAULT:
307		if (faulthandler_disabled() || !mm)
308			return handle_fault_error_nolock(regs, 0);
309		break;
310	}
311	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
312	flags = FAULT_FLAG_DEFAULT;
313	if (user_mode(regs))
314		flags |= FAULT_FLAG_USER;
315	if (is_write)
316		access = VM_WRITE;
317	if (access == VM_WRITE)
318		flags |= FAULT_FLAG_WRITE;
319	if (!(flags & FAULT_FLAG_USER))
320		goto lock_mmap;
321	vma = lock_vma_under_rcu(mm, address);
322	if (!vma)
323		goto lock_mmap;
324	if (!(vma->vm_flags & access)) {
325		vma_end_read(vma);
326		goto lock_mmap;
327	}
328	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
329	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
330		vma_end_read(vma);
331	if (!(fault & VM_FAULT_RETRY)) {
332		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
333		if (unlikely(fault & VM_FAULT_ERROR))
334			goto error;
335		return;
336	}
337	count_vm_vma_lock_event(VMA_LOCK_RETRY);
338	if (fault & VM_FAULT_MAJOR)
339		flags |= FAULT_FLAG_TRIED;
340
341	/* Quick path to respond to signals */
342	if (fault_signal_pending(fault, regs)) {
343		if (!user_mode(regs))
344			handle_fault_error_nolock(regs, 0);
345		return;
346	}
347lock_mmap:
348	mmap_read_lock(mm);
349	gmap = NULL;
350	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
351		gmap = (struct gmap *)S390_lowcore.gmap;
352		current->thread.gmap_addr = address;
353		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
354		current->thread.gmap_int_code = regs->int_code & 0xffff;
355		address = __gmap_translate(gmap, address);
356		if (address == -EFAULT)
357			return handle_fault_error(regs, SEGV_MAPERR);
358		if (gmap->pfault_enabled)
359			flags |= FAULT_FLAG_RETRY_NOWAIT;
360	}
361retry:
362	vma = find_vma(mm, address);
363	if (!vma)
364		return handle_fault_error(regs, SEGV_MAPERR);
365	if (unlikely(vma->vm_start > address)) {
366		if (!(vma->vm_flags & VM_GROWSDOWN))
367			return handle_fault_error(regs, SEGV_MAPERR);
368		vma = expand_stack(mm, address);
369		if (!vma)
370			return handle_fault_error_nolock(regs, SEGV_MAPERR);
371	}
372	if (unlikely(!(vma->vm_flags & access)))
373		return handle_fault_error(regs, SEGV_ACCERR);
374	fault = handle_mm_fault(vma, address, flags, regs);
375	if (fault_signal_pending(fault, regs)) {
376		if (flags & FAULT_FLAG_RETRY_NOWAIT)
377			mmap_read_unlock(mm);
378		if (!user_mode(regs))
379			handle_fault_error_nolock(regs, 0);
380		return;
381	}
382	/* The fault is fully completed (including releasing mmap lock) */
383	if (fault & VM_FAULT_COMPLETED) {
384		if (gmap) {
385			mmap_read_lock(mm);
386			goto gmap;
387		}
388		return;
389	}
390	if (unlikely(fault & VM_FAULT_ERROR)) {
391		mmap_read_unlock(mm);
392		goto error;
393	}
394	if (fault & VM_FAULT_RETRY) {
395		if (IS_ENABLED(CONFIG_PGSTE) && gmap &&	(flags & FAULT_FLAG_RETRY_NOWAIT)) {
396			/*
397			 * FAULT_FLAG_RETRY_NOWAIT has been set,
398			 * mmap_lock has not been released
399			 */
400			current->thread.gmap_pfault = 1;
401			return handle_fault_error(regs, 0);
402		}
403		flags &= ~FAULT_FLAG_RETRY_NOWAIT;
404		flags |= FAULT_FLAG_TRIED;
405		mmap_read_lock(mm);
406		goto retry;
407	}
408gmap:
409	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
410		address =  __gmap_link(gmap, current->thread.gmap_addr,
411				       address);
412		if (address == -EFAULT)
413			return handle_fault_error(regs, SEGV_MAPERR);
414		if (address == -ENOMEM) {
415			fault = VM_FAULT_OOM;
416			mmap_read_unlock(mm);
417			goto error;
418		}
419	}
420	mmap_read_unlock(mm);
421	return;
422error:
423	if (fault & VM_FAULT_OOM) {
424		if (!user_mode(regs))
425			handle_fault_error_nolock(regs, 0);
426		else
427			pagefault_out_of_memory();
428	} else if (fault & VM_FAULT_SIGSEGV) {
429		if (!user_mode(regs))
430			handle_fault_error_nolock(regs, 0);
431		else
432			do_sigsegv(regs, SEGV_MAPERR);
433	} else if (fault & VM_FAULT_SIGBUS) {
434		if (!user_mode(regs))
435			handle_fault_error_nolock(regs, 0);
436		else
437			do_sigbus(regs);
438	} else {
439		BUG();
440	}
441}
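
Compared to v5.4, do_exception() now tries the per-VMA lock fast path first (lock_vma_under_rcu()) and only falls back to mmap_read_lock() when the VMA cannot be used. A toy pthreads illustration of that shape; this is an analogy for the control flow only, not the kernel's locking API:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER; /* global */
static pthread_mutex_t vma_lock = PTHREAD_MUTEX_INITIALIZER;	/* per-VMA */

static void fault(int vma_allows_access)
{
	if (pthread_mutex_trylock(&vma_lock) == 0) {	/* lock_vma_under_rcu */
		if (vma_allows_access) {
			puts("handled under the per-VMA lock");
			pthread_mutex_unlock(&vma_lock);
			return;
		}
		pthread_mutex_unlock(&vma_lock);	/* vma_end_read */
	}
	pthread_rwlock_rdlock(&mmap_lock);	/* lock_mmap fallback */
	puts("handled under mmap_lock");
	pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
	fault(1);
	fault(0);
	return 0;
}
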
442
443void do_protection_exception(struct pt_regs *regs)
444{
445	union teid teid = { .val = regs->int_parm_long };
446
447	/*
448	 * Protection exceptions are suppressing, decrement psw address.
449	 * The exception to this rule are aborted transactions, for these
450	 * the PSW already points to the correct location.
451	 */
452	if (!(regs->int_code & 0x200))
453		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
454	/*
455	 * Check for low-address protection.  This needs to be treated
456	 * as a special case because the translation exception code
457	 * field is not guaranteed to contain valid data in this case.
458	 */
459	if (unlikely(!teid.b61)) {
460		if (user_mode(regs)) {
461			/* Low-address protection in user mode: cannot happen */
462			die(regs, "Low-address protection");
463		}
464		/*
465		 * Low-address protection in kernel mode means
466		 * NULL pointer write access in kernel mode.
467		 */
468		return handle_fault_error_nolock(regs, 0);
469	}
470	if (unlikely(MACHINE_HAS_NX && teid.b56)) {
471		regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK);
472		return handle_fault_error_nolock(regs, SEGV_ACCERR);
473	}
474	do_exception(regs, VM_WRITE);
475}
476NOKPROBE_SYMBOL(do_protection_exception);
477
478void do_dat_exception(struct pt_regs *regs)
479{
480	do_exception(regs, VM_ACCESS_FLAGS);
481}
482NOKPROBE_SYMBOL(do_dat_exception);
483
484#if IS_ENABLED(CONFIG_PGSTE)
485
486void do_secure_storage_access(struct pt_regs *regs)
487{
488	union teid teid = { .val = regs->int_parm_long };
489	unsigned long addr = get_fault_address(regs);
490	struct vm_area_struct *vma;
491	struct mm_struct *mm;
492	struct page *page;
493	struct gmap *gmap;
494	int rc;
495
496	/*
497	 * Bit 61 indicates if the address is valid, if it is not the
498	 * kernel should be stopped or SIGSEGV should be sent to the
499	 * process. Bit 61 is not reliable without the misc UV feature,
500	 * therefore this needs to be checked too.
501	 */
502	if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) {
503		/*
504		 * When this happens, userspace did something that it
505		 * was not supposed to do, e.g. branching into secure
506		 * memory. Trigger a segmentation fault.
507		 */
508		if (user_mode(regs)) {
509			send_sig(SIGSEGV, current, 0);
510			return;
511		}
512		/*
513		 * The kernel should never run into this case and
514		 * there is no way out of this situation.
515		 */
516		panic("Unexpected PGM 0x3d with TEID bit 61=0");
517	}
518	switch (get_fault_type(regs)) {
519	case GMAP_FAULT:
520		mm = current->mm;
521		gmap = (struct gmap *)S390_lowcore.gmap;
522		mmap_read_lock(mm);
523		addr = __gmap_translate(gmap, addr);
524		mmap_read_unlock(mm);
525		if (IS_ERR_VALUE(addr))
526			return handle_fault_error_nolock(regs, SEGV_MAPERR);
527		fallthrough;
528	case USER_FAULT:
529		mm = current->mm;
530		mmap_read_lock(mm);
531		vma = find_vma(mm, addr);
532		if (!vma)
533			return handle_fault_error(regs, SEGV_MAPERR);
534		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
535		if (IS_ERR_OR_NULL(page)) {
536			mmap_read_unlock(mm);
537			break;
538		}
539		if (arch_make_page_accessible(page))
540			send_sig(SIGSEGV, current, 0);
541		put_page(page);
542		mmap_read_unlock(mm);
543		break;
544	case KERNEL_FAULT:
545		page = phys_to_page(addr);
546		if (unlikely(!try_get_page(page)))
547			break;
548		rc = arch_make_page_accessible(page);
549		put_page(page);
550		if (rc)
551			BUG();
552		break;
553	default:
554		unreachable();
555	}
556}
557NOKPROBE_SYMBOL(do_secure_storage_access);
558
559void do_non_secure_storage_access(struct pt_regs *regs)
560{
561	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
562	unsigned long gaddr = get_fault_address(regs);
563
564	if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT))
565		return handle_fault_error_nolock(regs, SEGV_MAPERR);
566	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
567		send_sig(SIGSEGV, current, 0);
568}
569NOKPROBE_SYMBOL(do_non_secure_storage_access);
570
571void do_secure_storage_violation(struct pt_regs *regs)
572{
573	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
574	unsigned long gaddr = get_fault_address(regs);
575
576	/*
577	 * If the VM has been rebooted, its address space might still contain
578	 * secure pages from the previous boot.
579	 * Clear the page so it can be reused.
580	 */
581	if (!gmap_destroy_page(gmap, gaddr))
582		return;
583	/*
584	 * Either KVM messed up the secure guest mapping or the same
585	 * page is mapped into multiple secure guests.
586	 *
587	 * This exception is only triggered when a guest 2 is running
588	 * and can therefore never occur in kernel context.
589	 */
590	pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n",
591			    current->comm, current->pid);
592	send_sig(SIGSEGV, current, 0);
593}
594
595#endif /* CONFIG_PGSTE */