arch/s390/mm/fault.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *               Ulrich Weigand (uweigand@de.ibm.com)
  7 *
  8 *  Derived from "arch/i386/mm/fault.c"
  9 *    Copyright (C) 1995  Linus Torvalds
 10 */
 11
 12#include <linux/kernel_stat.h>
 13#include <linux/perf_event.h>
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/sched/debug.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/ptrace.h>
 22#include <linux/mman.h>
 23#include <linux/mm.h>
 24#include <linux/compat.h>
 25#include <linux/smp.h>
 26#include <linux/kdebug.h>
 27#include <linux/init.h>
 28#include <linux/console.h>
 29#include <linux/extable.h>
 30#include <linux/hardirq.h>
 31#include <linux/kprobes.h>
 32#include <linux/uaccess.h>
 33#include <linux/hugetlb.h>
 34#include <asm/asm-offsets.h>
 35#include <asm/diag.h>
 36#include <asm/gmap.h>
 37#include <asm/irq.h>
 38#include <asm/mmu_context.h>
 39#include <asm/facility.h>
 40#include <asm/uv.h>
 41#include "../kernel/entry.h"
 42
 43#define __FAIL_ADDR_MASK -4096L
 44#define __SUBCODE_MASK 0x0600
 45#define __PF_RES_FIELD 0x8000000000000000ULL
 46
 47#define VM_FAULT_BADCONTEXT	((__force vm_fault_t) 0x010000)
 48#define VM_FAULT_BADMAP		((__force vm_fault_t) 0x020000)
 49#define VM_FAULT_BADACCESS	((__force vm_fault_t) 0x040000)
 50#define VM_FAULT_SIGNAL		((__force vm_fault_t) 0x080000)
 51#define VM_FAULT_PFAULT		((__force vm_fault_t) 0x100000)
 52
 53enum fault_type {
 54	KERNEL_FAULT,
 55	USER_FAULT,
 56	VDSO_FAULT,
 57	GMAP_FAULT,
 58};
 59
 60static unsigned long store_indication __read_mostly;
 61
 62static int __init fault_init(void)
 63{
 64	if (test_facility(75))
 65		store_indication = 0xc00;
 66	return 0;
 67}
 68early_initcall(fault_init);
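/*
 * Editor's note (not part of the kernel source): facility 75 is the
 * store-indication facility. With store_indication set to 0xc00, the test
 * "(trans_exc_code & store_indication) == 0x400" in do_exception() below
 * examines the TEID bits that flag the access type, and the value 0x400
 * in those bits is treated as a store, so write faults are recognized
 * even when the access mask passed in is not VM_WRITE.
 */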
 69
 70/*
 71 * Find out which address space caused the exception.
 72 */
 73static enum fault_type get_fault_type(struct pt_regs *regs)
 74{
 75	unsigned long trans_exc_code;
 76
 77	trans_exc_code = regs->int_parm_long & 3;
 78	if (likely(trans_exc_code == 0)) {
 79		/* primary space exception */
 80		if (IS_ENABLED(CONFIG_PGSTE) &&
 81		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
 82			return GMAP_FAULT;
 83		if (current->thread.mm_segment == USER_DS)
 84			return USER_FAULT;
 85		return KERNEL_FAULT;
 86	}
 87	if (trans_exc_code == 2) {
 88		/* secondary space exception */
 89		if (current->thread.mm_segment & 1) {
 90			if (current->thread.mm_segment == USER_DS_SACF)
 91				return USER_FAULT;
 92			return KERNEL_FAULT;
 93		}
 94		return VDSO_FAULT;
 95	}
 96	if (trans_exc_code == 1) {
 97		/* access register mode, not used in the kernel */
 98		return USER_FAULT;
 99	}
100	/* home space exception -> access via kernel ASCE */
101	return KERNEL_FAULT;
102}
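/*
 * Editor's note: the low two bits of the translation-exception
 * identification (regs->int_parm_long & 3) encode which address space the
 * failing access used, matching the switch in dump_fault_info() below:
 *   0 - primary space, 1 - access-register mode,
 *   2 - secondary space, 3 - home space.
 */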
103
104static int bad_address(void *p)
105{
106	unsigned long dummy;
107
108	return get_kernel_nofault(dummy, (unsigned long *)p);
109}
110
111static void dump_pagetable(unsigned long asce, unsigned long address)
112{
113	unsigned long *table = __va(asce & _ASCE_ORIGIN);
114
115	pr_alert("AS:%016lx ", asce);
116	switch (asce & _ASCE_TYPE_MASK) {
117	case _ASCE_TYPE_REGION1:
118		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
119		if (bad_address(table))
120			goto bad;
121		pr_cont("R1:%016lx ", *table);
122		if (*table & _REGION_ENTRY_INVALID)
123			goto out;
124		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
125		fallthrough;
126	case _ASCE_TYPE_REGION2:
127		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
128		if (bad_address(table))
129			goto bad;
130		pr_cont("R2:%016lx ", *table);
131		if (*table & _REGION_ENTRY_INVALID)
132			goto out;
133		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
134		fallthrough;
135	case _ASCE_TYPE_REGION3:
136		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
137		if (bad_address(table))
138			goto bad;
139		pr_cont("R3:%016lx ", *table);
140		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
141			goto out;
142		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
143		fallthrough;
144	case _ASCE_TYPE_SEGMENT:
145		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
146		if (bad_address(table))
147			goto bad;
148		pr_cont("S:%016lx ", *table);
149		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
150			goto out;
151		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
152	}
153	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
154	if (bad_address(table))
155		goto bad;
156	pr_cont("P:%016lx ", *table);
157out:
158	pr_cont("\n");
159	return;
160bad:
161	pr_cont("BAD\n");
162}
163
164static void dump_fault_info(struct pt_regs *regs)
165{
166	unsigned long asce;
167
168	pr_alert("Failing address: %016lx TEID: %016lx\n",
169		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
170	pr_alert("Fault in ");
171	switch (regs->int_parm_long & 3) {
172	case 3:
173		pr_cont("home space ");
174		break;
175	case 2:
176		pr_cont("secondary space ");
177		break;
178	case 1:
179		pr_cont("access register ");
180		break;
181	case 0:
182		pr_cont("primary space ");
183		break;
184	}
185	pr_cont("mode while using ");
186	switch (get_fault_type(regs)) {
187	case USER_FAULT:
188		asce = S390_lowcore.user_asce;
189		pr_cont("user ");
190		break;
191	case VDSO_FAULT:
192		asce = S390_lowcore.vdso_asce;
193		pr_cont("vdso ");
194		break;
195	case GMAP_FAULT:
196		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
197		pr_cont("gmap ");
198		break;
199	case KERNEL_FAULT:
200		asce = S390_lowcore.kernel_asce;
201		pr_cont("kernel ");
202		break;
203	default:
204		unreachable();
205	}
206	pr_cont("ASCE.\n");
207	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
208}
209
210int show_unhandled_signals = 1;
211
212void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
213{
214	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
215		return;
216	if (!unhandled_signal(current, signr))
217		return;
218	if (!printk_ratelimit())
219		return;
220	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
221	       regs->int_code & 0xffff, regs->int_code >> 17);
222	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
223	printk(KERN_CONT "\n");
224	if (is_mm_fault)
225		dump_fault_info(regs);
226	show_regs(regs);
227}
228
229/*
230 * Send SIGSEGV to task.  This is an external routine
231 * to keep the stack usage of do_page_fault small.
232 */
233static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
234{
235	report_user_fault(regs, SIGSEGV, 1);
236	force_sig_fault(SIGSEGV, si_code,
237			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
238}
239
240const struct exception_table_entry *s390_search_extables(unsigned long addr)
241{
242	const struct exception_table_entry *fixup;
243
244	fixup = search_extable(__start_dma_ex_table,
245			       __stop_dma_ex_table - __start_dma_ex_table,
246			       addr);
247	if (!fixup)
248		fixup = search_exception_tables(addr);
249	return fixup;
250}
251
252static noinline void do_no_context(struct pt_regs *regs)
253{
254	const struct exception_table_entry *fixup;
255
256	/* Are we prepared to handle this kernel fault?  */
257	fixup = s390_search_extables(regs->psw.addr);
258	if (fixup && ex_handle(fixup, regs))
259		return;
260
261	/*
262	 * Oops. The kernel tried to access some bad page. We'll have to
263	 * terminate things with extreme prejudice.
264	 */
265	if (get_fault_type(regs) == KERNEL_FAULT)
266		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
267		       " in virtual kernel address space\n");
268	else
269		printk(KERN_ALERT "Unable to handle kernel paging request"
270		       " in virtual user address space\n");
271	dump_fault_info(regs);
272	die(regs, "Oops");
273	do_exit(SIGKILL);
274}
275
276static noinline void do_low_address(struct pt_regs *regs)
277{
278	/* Low-address protection hit in kernel mode means
279	   NULL pointer write access in kernel mode.  */
280	if (regs->psw.mask & PSW_MASK_PSTATE) {
281		/* Low-address protection hit in user mode 'cannot happen'. */
282		die (regs, "Low-address protection");
283		do_exit(SIGKILL);
284	}
285
286	do_no_context(regs);
287}
288
289static noinline void do_sigbus(struct pt_regs *regs)
290{
291	/*
292	 * Send a sigbus, regardless of whether we were in kernel
293	 * or user mode.
294	 */
295	force_sig_fault(SIGBUS, BUS_ADRERR,
296			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
297}
298
299static noinline int signal_return(struct pt_regs *regs)
300{
301	u16 instruction;
302	int rc;
303
304	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
305	if (rc)
306		return rc;
307	if (instruction == 0x0a77) {
308		set_pt_regs_flag(regs, PIF_SYSCALL);
309		regs->int_code = 0x00040077;
310		return 0;
311	} else if (instruction == 0x0aad) {
312		set_pt_regs_flag(regs, PIF_SYSCALL);
313		regs->int_code = 0x000400ad;
314		return 0;
315	}
316	return -EACCES;
317}
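/*
 * Editor's note: 0x0a is the SVC opcode, so the two instructions matched
 * here appear to be "svc 119" (sigreturn) and "svc 173" (rt_sigreturn).
 * An execute fault on one of them is taken to be a signal return from a
 * non-executable stack: PIF_SYSCALL is set and int_code is rewritten so
 * the system call is performed instead of delivering SIGSEGV.
 */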
318
319static noinline void do_fault_error(struct pt_regs *regs, int access,
320					vm_fault_t fault)
321{
322	int si_code;
323
324	switch (fault) {
325	case VM_FAULT_BADACCESS:
326		if (access == VM_EXEC && signal_return(regs) == 0)
327			break;
328		fallthrough;
329	case VM_FAULT_BADMAP:
330		/* Bad memory access. Check if it is kernel or user space. */
331		if (user_mode(regs)) {
332			/* User mode accesses just cause a SIGSEGV */
333			si_code = (fault == VM_FAULT_BADMAP) ?
334				SEGV_MAPERR : SEGV_ACCERR;
335			do_sigsegv(regs, si_code);
336			break;
337		}
338		fallthrough;
339	case VM_FAULT_BADCONTEXT:
340	case VM_FAULT_PFAULT:
341		do_no_context(regs);
342		break;
343	case VM_FAULT_SIGNAL:
344		if (!user_mode(regs))
345			do_no_context(regs);
346		break;
347	default: /* fault & VM_FAULT_ERROR */
348		if (fault & VM_FAULT_OOM) {
349			if (!user_mode(regs))
350				do_no_context(regs);
351			else
352				pagefault_out_of_memory();
353		} else if (fault & VM_FAULT_SIGSEGV) {
354			/* Kernel mode? Handle exceptions or die */
355			if (!user_mode(regs))
356				do_no_context(regs);
357			else
358				do_sigsegv(regs, SEGV_MAPERR);
359		} else if (fault & VM_FAULT_SIGBUS) {
360			/* Kernel mode? Handle exceptions or die */
361			if (!user_mode(regs))
362				do_no_context(regs);
363			else
364				do_sigbus(regs);
365		} else
366			BUG();
367		break;
368	}
369}
370
371/*
372 * This routine handles page faults.  It determines the address,
373 * and the problem, and then passes it off to one of the appropriate
374 * routines.
375 *
376 * interruption code (int_code):
377 *   04       Protection           ->  Write-Protection  (suppression)
378 *   10       Segment translation  ->  Not present       (nullification)
379 *   11       Page translation     ->  Not present       (nullification)
380 *   3b       Region third trans.  ->  Not present       (nullification)
381 */
382static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
383{
384	struct gmap *gmap;
385	struct task_struct *tsk;
386	struct mm_struct *mm;
387	struct vm_area_struct *vma;
388	enum fault_type type;
389	unsigned long trans_exc_code;
390	unsigned long address;
391	unsigned int flags;
392	vm_fault_t fault;
393
394	tsk = current;
395	/*
396	 * The instruction that caused the program check has
397	 * been nullified. Don't signal single step via SIGTRAP.
398	 */
399	clear_pt_regs_flag(regs, PIF_PER_TRAP);
400
401	if (kprobe_page_fault(regs, 14))
402		return 0;
403
404	mm = tsk->mm;
405	trans_exc_code = regs->int_parm_long;
406
407	/*
408	 * Verify that the fault happened in user space, that
409	 * we are not in an interrupt and that there is a 
410	 * user context.
411	 */
412	fault = VM_FAULT_BADCONTEXT;
413	type = get_fault_type(regs);
414	switch (type) {
415	case KERNEL_FAULT:
416		goto out;
417	case VDSO_FAULT:
418		fault = VM_FAULT_BADMAP;
419		goto out;
420	case USER_FAULT:
421	case GMAP_FAULT:
422		if (faulthandler_disabled() || !mm)
423			goto out;
424		break;
425	}
426
427	address = trans_exc_code & __FAIL_ADDR_MASK;
428	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
429	flags = FAULT_FLAG_DEFAULT;
430	if (user_mode(regs))
431		flags |= FAULT_FLAG_USER;
432	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
433		flags |= FAULT_FLAG_WRITE;
434	mmap_read_lock(mm);
435
436	gmap = NULL;
437	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
438		gmap = (struct gmap *) S390_lowcore.gmap;
439		current->thread.gmap_addr = address;
440		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
441		current->thread.gmap_int_code = regs->int_code & 0xffff;
442		address = __gmap_translate(gmap, address);
443		if (address == -EFAULT) {
444			fault = VM_FAULT_BADMAP;
445			goto out_up;
446		}
447		if (gmap->pfault_enabled)
448			flags |= FAULT_FLAG_RETRY_NOWAIT;
449	}
450
451retry:
452	fault = VM_FAULT_BADMAP;
453	vma = find_vma(mm, address);
454	if (!vma)
455		goto out_up;
456
457	if (unlikely(vma->vm_start > address)) {
458		if (!(vma->vm_flags & VM_GROWSDOWN))
459			goto out_up;
460		if (expand_stack(vma, address))
461			goto out_up;
462	}
463
464	/*
465	 * Ok, we have a good vm_area for this memory access, so
466	 * we can handle it..
467	 */
468	fault = VM_FAULT_BADACCESS;
469	if (unlikely(!(vma->vm_flags & access)))
470		goto out_up;
471
472	if (is_vm_hugetlb_page(vma))
473		address &= HPAGE_MASK;
474	/*
475	 * If for any reason at all we couldn't handle the fault,
476	 * make sure we exit gracefully rather than endlessly redo
477	 * the fault.
478	 */
479	fault = handle_mm_fault(vma, address, flags, regs);
480	if (fault_signal_pending(fault, regs)) {
481		fault = VM_FAULT_SIGNAL;
482		if (flags & FAULT_FLAG_RETRY_NOWAIT)
483			goto out_up;
484		goto out;
485	}
486	if (unlikely(fault & VM_FAULT_ERROR))
487		goto out_up;
488
489	if (flags & FAULT_FLAG_ALLOW_RETRY) {
490		if (fault & VM_FAULT_RETRY) {
491			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
492			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
493				/* FAULT_FLAG_RETRY_NOWAIT has been set,
494				 * mmap_lock has not been released */
495				current->thread.gmap_pfault = 1;
496				fault = VM_FAULT_PFAULT;
497				goto out_up;
498			}
499			flags &= ~FAULT_FLAG_RETRY_NOWAIT;
500			flags |= FAULT_FLAG_TRIED;
501			mmap_read_lock(mm);
502			goto retry;
503		}
504	}
505	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
506		address =  __gmap_link(gmap, current->thread.gmap_addr,
507				       address);
508		if (address == -EFAULT) {
509			fault = VM_FAULT_BADMAP;
510			goto out_up;
511		}
512		if (address == -ENOMEM) {
513			fault = VM_FAULT_OOM;
514			goto out_up;
515		}
516	}
517	fault = 0;
518out_up:
519	mmap_read_unlock(mm);
520out:
521	return fault;
522}
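/*
 * Editor's note: the FAULT_FLAG_RETRY_NOWAIT path above implements the
 * pfault handshake for KVM guests. When gmap->pfault_enabled is set and
 * the fault would have to block, handle_mm_fault() returns VM_FAULT_RETRY
 * without sleeping; do_exception() then records gmap_pfault = 1 and
 * returns VM_FAULT_PFAULT, so the caller can let the guest schedule
 * another task instead of stalling the virtual CPU (see the pfault
 * comment block further down).
 */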
523
524void do_protection_exception(struct pt_regs *regs)
525{
526	unsigned long trans_exc_code;
527	int access;
528	vm_fault_t fault;
529
530	trans_exc_code = regs->int_parm_long;
531	/*
532	 * Protection exceptions are suppressing, decrement psw address.
533	 * The exception to this rule are aborted transactions, for these
534	 * the PSW already points to the correct location.
535	 */
536	if (!(regs->int_code & 0x200))
537		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
538	/*
539	 * Check for low-address protection.  This needs to be treated
540	 * as a special case because the translation exception code
541	 * field is not guaranteed to contain valid data in this case.
542	 */
543	if (unlikely(!(trans_exc_code & 4))) {
544		do_low_address(regs);
545		return;
546	}
547	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
548		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
549					(regs->psw.addr & PAGE_MASK);
550		access = VM_EXEC;
551		fault = VM_FAULT_BADACCESS;
552	} else {
553		access = VM_WRITE;
554		fault = do_exception(regs, access);
555	}
556	if (unlikely(fault))
557		do_fault_error(regs, access, fault);
558}
559NOKPROBE_SYMBOL(do_protection_exception);
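/*
 * Editor's note: because protection exceptions are suppressing, the PSW
 * already points past the faulting instruction. regs->int_code >> 16
 * evaluates to that instruction's length in bytes (twice the ILC that
 * report_user_fault() prints via int_code >> 17), and __rewind_psw()
 * subtracts it so psw.addr points at the instruction again. Bit 0x200 in
 * int_code marks the transaction-abort variant, for which the PSW is
 * already correct and no rewind is done.
 */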
560
561void do_dat_exception(struct pt_regs *regs)
562{
563	int access;
564	vm_fault_t fault;
565
566	access = VM_ACCESS_FLAGS;
567	fault = do_exception(regs, access);
568	if (unlikely(fault))
569		do_fault_error(regs, access, fault);
570}
571NOKPROBE_SYMBOL(do_dat_exception);
572
573#ifdef CONFIG_PFAULT 
574/*
575 * 'pfault' pseudo page faults routines.
576 */
577static int pfault_disable;
578
579static int __init nopfault(char *str)
580{
581	pfault_disable = 1;
582	return 1;
583}
584
585__setup("nopfault", nopfault);
586
587struct pfault_refbk {
588	u16 refdiagc;
589	u16 reffcode;
590	u16 refdwlen;
591	u16 refversn;
592	u64 refgaddr;
593	u64 refselmk;
594	u64 refcmpmk;
595	u64 reserved;
596} __attribute__ ((packed, aligned(8)));
597
598static struct pfault_refbk pfault_init_refbk = {
599	.refdiagc = 0x258,
600	.reffcode = 0,
601	.refdwlen = 5,
602	.refversn = 2,
603	.refgaddr = __LC_LPP,
604	.refselmk = 1ULL << 48,
605	.refcmpmk = 1ULL << 48,
606	.reserved = __PF_RES_FIELD
607};
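/*
 * Editor's note: the refbk structure is the parameter block for
 * DIAG 0x258. reffcode selects the function: 0 here establishes the
 * pfault token, while pfault_fini_refbk below uses 1 to cancel it.
 * refgaddr is set to __LC_LPP, so the hypervisor takes the token from the
 * LPP lowcore field; pfault_interrupt() later recovers the pid from that
 * token with "param64 & LPP_PID_MASK".
 */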
608
609int pfault_init(void)
610{
611        int rc;
612
613	if (pfault_disable)
614		return -1;
615	diag_stat_inc(DIAG_STAT_X258);
616	asm volatile(
617		"	diag	%1,%0,0x258\n"
618		"0:	j	2f\n"
619		"1:	la	%0,8\n"
620		"2:\n"
621		EX_TABLE(0b,1b)
622		: "=d" (rc)
623		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
624        return rc;
625}
626
627static struct pfault_refbk pfault_fini_refbk = {
628	.refdiagc = 0x258,
629	.reffcode = 1,
630	.refdwlen = 5,
631	.refversn = 2,
632};
633
634void pfault_fini(void)
635{
636
637	if (pfault_disable)
638		return;
639	diag_stat_inc(DIAG_STAT_X258);
640	asm volatile(
641		"	diag	%0,0,0x258\n"
642		"0:	nopr	%%r7\n"
643		EX_TABLE(0b,0b)
644		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
645}
646
647static DEFINE_SPINLOCK(pfault_lock);
648static LIST_HEAD(pfault_list);
649
650#define PF_COMPLETE	0x0080
651
652/*
653 * The mechanism of our pfault code: if Linux is running as guest, runs a user
654 * space process and the user space process accesses a page that the host has
655 * paged out we get a pfault interrupt.
656 *
657 * This allows us, within the guest, to schedule a different process. Without
658 * this mechanism the host would have to suspend the whole virtual cpu until
659 * the page has been paged in.
660 *
661 * So when we get such an interrupt then we set the state of the current task
662 * to uninterruptible and also set the need_resched flag. Both happens within
663 * interrupt context(!). If we later on want to return to user space we
664 * recognize the need_resched flag and then call schedule().  It's not very
665 * obvious how this works...
666 *
667 * Of course we have a lot of additional fun with the completion interrupt (->
668 * host signals that a page of a process has been paged in and the process can
669 * continue to run). This interrupt can arrive on any cpu and, since we have
670 * virtual cpus, actually appear before the interrupt that signals that a page
671 * is missing.
672 */
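/*
 * Editor's note: thread.pfault_wait acts as a small state machine for the
 * races described above:
 *    0  no pfault outstanding,
 *    1  initial interrupt seen; the task is on pfault_list, holds an
 *       extra reference and is (about to be) sleeping,
 *   -1  completion interrupt arrived first; the matching initial
 *       interrupt must not put the task to sleep.
 */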
673static void pfault_interrupt(struct ext_code ext_code,
674			     unsigned int param32, unsigned long param64)
675{
676	struct task_struct *tsk;
677	__u16 subcode;
678	pid_t pid;
679
680	/*
681	 * Get the external interruption subcode & pfault initial/completion
682	 * signal bit. VM stores this in the 'cpu address' field associated
683	 * with the external interrupt.
684	 */
685	subcode = ext_code.subcode;
686	if ((subcode & 0xff00) != __SUBCODE_MASK)
687		return;
688	inc_irq_stat(IRQEXT_PFL);
689	/* Get the token (= pid of the affected task). */
690	pid = param64 & LPP_PID_MASK;
691	rcu_read_lock();
692	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
693	if (tsk)
694		get_task_struct(tsk);
695	rcu_read_unlock();
696	if (!tsk)
697		return;
698	spin_lock(&pfault_lock);
699	if (subcode & PF_COMPLETE) {
700		/* signal bit is set -> a page has been swapped in by VM */
701		if (tsk->thread.pfault_wait == 1) {
702			/* Initial interrupt was faster than the completion
703			 * interrupt. pfault_wait is valid. Set pfault_wait
704			 * back to zero and wake up the process. This can
705			 * safely be done because the task is still sleeping
706			 * and can't produce new pfaults. */
707			tsk->thread.pfault_wait = 0;
708			list_del(&tsk->thread.list);
709			wake_up_process(tsk);
710			put_task_struct(tsk);
711		} else {
712			/* Completion interrupt was faster than initial
713			 * interrupt. Set pfault_wait to -1 so the initial
714			 * interrupt doesn't put the task to sleep.
715			 * If the task is not running, ignore the completion
716			 * interrupt since it must be a leftover of a PFAULT
717			 * CANCEL operation which didn't remove all pending
718			 * completion interrupts. */
719			if (tsk->state == TASK_RUNNING)
720				tsk->thread.pfault_wait = -1;
721		}
722	} else {
723		/* signal bit not set -> a real page is missing. */
724		if (WARN_ON_ONCE(tsk != current))
725			goto out;
726		if (tsk->thread.pfault_wait == 1) {
727			/* Already on the list with a reference: put to sleep */
728			goto block;
729		} else if (tsk->thread.pfault_wait == -1) {
730			/* Completion interrupt was faster than the initial
731			 * interrupt (pfault_wait == -1). Set pfault_wait
732			 * back to zero and exit. */
733			tsk->thread.pfault_wait = 0;
734		} else {
735			/* Initial interrupt arrived before completion
736			 * interrupt. Let the task sleep.
737			 * An extra task reference is needed since a different
738			 * cpu may set the task state to TASK_RUNNING again
739			 * before the scheduler is reached. */
740			get_task_struct(tsk);
741			tsk->thread.pfault_wait = 1;
742			list_add(&tsk->thread.list, &pfault_list);
743block:
744			/* Since this must be a userspace fault, there
745			 * is no kernel task state to trample. Rely on the
746			 * return to userspace schedule() to block. */
747			__set_current_state(TASK_UNINTERRUPTIBLE);
748			set_tsk_need_resched(tsk);
749			set_preempt_need_resched();
750		}
751	}
752out:
753	spin_unlock(&pfault_lock);
754	put_task_struct(tsk);
755}
756
757static int pfault_cpu_dead(unsigned int cpu)
758{
759	struct thread_struct *thread, *next;
760	struct task_struct *tsk;
761
762	spin_lock_irq(&pfault_lock);
763	list_for_each_entry_safe(thread, next, &pfault_list, list) {
764		thread->pfault_wait = 0;
765		list_del(&thread->list);
766		tsk = container_of(thread, struct task_struct, thread);
767		wake_up_process(tsk);
768		put_task_struct(tsk);
769	}
770	spin_unlock_irq(&pfault_lock);
771	return 0;
772}
773
774static int __init pfault_irq_init(void)
775{
776	int rc;
777
778	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
779	if (rc)
780		goto out_extint;
781	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
782	if (rc)
783		goto out_pfault;
784	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
785	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
786				  NULL, pfault_cpu_dead);
787	return 0;
788
789out_pfault:
790	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
791out_extint:
792	pfault_disable = 1;
793	return rc;
794}
795early_initcall(pfault_irq_init);
796
797#endif /* CONFIG_PFAULT */
798
799#if IS_ENABLED(CONFIG_PGSTE)
800void do_secure_storage_access(struct pt_regs *regs)
801{
802	unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
803	struct vm_area_struct *vma;
804	struct mm_struct *mm;
805	struct page *page;
806	int rc;
807
808	switch (get_fault_type(regs)) {
809	case USER_FAULT:
810		mm = current->mm;
811		mmap_read_lock(mm);
812		vma = find_vma(mm, addr);
813		if (!vma) {
814			mmap_read_unlock(mm);
815			do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
816			break;
817		}
818		page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
819		if (IS_ERR_OR_NULL(page)) {
820			mmap_read_unlock(mm);
821			break;
822		}
823		if (arch_make_page_accessible(page))
824			send_sig(SIGSEGV, current, 0);
825		put_page(page);
826		mmap_read_unlock(mm);
827		break;
828	case KERNEL_FAULT:
829		page = phys_to_page(addr);
830		if (unlikely(!try_get_page(page)))
831			break;
832		rc = arch_make_page_accessible(page);
833		put_page(page);
834		if (rc)
835			BUG();
836		break;
837	case VDSO_FAULT:
838	case GMAP_FAULT:
839	default:
840		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
841		WARN_ON_ONCE(1);
842	}
843}
844NOKPROBE_SYMBOL(do_secure_storage_access);
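/*
 * Editor's note: this handler belongs to s390 protected-virtualization
 * ("secure guest") support. A secure-storage-access exception indicates
 * that a page currently owned by a secure guest was touched;
 * arch_make_page_accessible() asks the Ultravisor to make the page
 * accessible to the host, and failure on a user-space access is reported
 * as SIGSEGV.
 */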
845
846void do_non_secure_storage_access(struct pt_regs *regs)
847{
848	unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
849	struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
850
851	if (get_fault_type(regs) != GMAP_FAULT) {
852		do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
853		WARN_ON_ONCE(1);
854		return;
855	}
856
857	if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
858		send_sig(SIGSEGV, current, 0);
859}
860NOKPROBE_SYMBOL(do_non_secure_storage_access);
861
862void do_secure_storage_violation(struct pt_regs *regs)
863{
864	/*
865	 * Either KVM messed up the secure guest mapping or the same
866	 * page is mapped into multiple secure guests.
867	 *
868	 * This exception is only triggered when a guest 2 is running
869	 * and can therefore never occur in kernel context.
870	 */
871	printk_ratelimited(KERN_WARNING
872			   "Secure storage violation in task: %s, pid %d\n",
873			   current->comm, current->pid);
874	send_sig(SIGSEGV, current, 0);
875}
876
877#else
878void do_secure_storage_access(struct pt_regs *regs)
879{
880	default_trap_handler(regs);
881}
882
883void do_non_secure_storage_access(struct pt_regs *regs)
884{
885	default_trap_handler(regs);
886}
887
888void do_secure_storage_violation(struct pt_regs *regs)
889{
890	default_trap_handler(regs);
891}
892#endif
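
Editor's note: the following minimal user-space sketch (not part of either kernel version; standard POSIX C, nothing s390-specific) shows the two si_code values that do_fault_error() hands to do_sigsegv(): SEGV_ACCERR for a mapping that exists but forbids the access (VM_FAULT_BADACCESS) and SEGV_MAPERR for an unmapped address (VM_FAULT_BADMAP).

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* si_code was filled in by the kernel's do_sigsegv() */
	fprintf(stderr, "SIGSEGV at %p: %s\n", info->si_addr,
		info->si_code == SEGV_MAPERR ? "SEGV_MAPERR" :
		info->si_code == SEGV_ACCERR ? "SEGV_ACCERR" : "other");
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { 0 };
	char *p;

	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* Mapped but PROT_NONE: the VMA exists, the access is forbidden,
	 * so the write below is reported with SEGV_ACCERR. Touching a
	 * completely unmapped address would yield SEGV_MAPERR instead. */
	p = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	*p = 1;			/* faults */
	return 1;		/* not reached */
}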
arch/s390/mm/fault.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  S390 version
  4 *    Copyright IBM Corp. 1999
  5 *    Author(s): Hartmut Penner (hp@de.ibm.com)
  6 *               Ulrich Weigand (uweigand@de.ibm.com)
  7 *
  8 *  Derived from "arch/i386/mm/fault.c"
  9 *    Copyright (C) 1995  Linus Torvalds
 10 */
 11
 12#include <linux/kernel_stat.h>
 13#include <linux/perf_event.h>
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/sched/debug.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/ptrace.h>
 22#include <linux/mman.h>
 23#include <linux/mm.h>
 24#include <linux/compat.h>
 25#include <linux/smp.h>
 26#include <linux/kdebug.h>
 27#include <linux/init.h>
 28#include <linux/console.h>
 29#include <linux/extable.h>
 30#include <linux/hardirq.h>
 31#include <linux/kprobes.h>
 32#include <linux/uaccess.h>
 33#include <linux/hugetlb.h>
 34#include <asm/asm-offsets.h>
 35#include <asm/diag.h>
 36#include <asm/pgtable.h>
 37#include <asm/gmap.h>
 38#include <asm/irq.h>
 39#include <asm/mmu_context.h>
 40#include <asm/facility.h>
 41#include "../kernel/entry.h"
 42
 43#define __FAIL_ADDR_MASK -4096L
 44#define __SUBCODE_MASK 0x0600
 45#define __PF_RES_FIELD 0x8000000000000000ULL
 46
 47#define VM_FAULT_BADCONTEXT	0x010000
 48#define VM_FAULT_BADMAP		0x020000
 49#define VM_FAULT_BADACCESS	0x040000
 50#define VM_FAULT_SIGNAL		0x080000
 51#define VM_FAULT_PFAULT		0x100000
 52
 53enum fault_type {
 54	KERNEL_FAULT,
 55	USER_FAULT,
 56	VDSO_FAULT,
 57	GMAP_FAULT,
 58};
 59
 60static unsigned long store_indication __read_mostly;
 61
 62static int __init fault_init(void)
 63{
 64	if (test_facility(75))
 65		store_indication = 0xc00;
 66	return 0;
 67}
 68early_initcall(fault_init);
 69
 70static inline int notify_page_fault(struct pt_regs *regs)
 71{
 72	int ret = 0;
 73
 74	/* kprobe_running() needs smp_processor_id() */
 75	if (kprobes_built_in() && !user_mode(regs)) {
 76		preempt_disable();
 77		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 78			ret = 1;
 79		preempt_enable();
 80	}
 81	return ret;
 82}
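/*
 * Editor's note: in the v5.9 listing above this open-coded helper is
 * gone; do_exception() calls the generic kprobe_page_fault(regs, 14)
 * instead, which performs the same "fault hit while a kprobe is running
 * in kernel mode" check.
 */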
 83
 84
 85/*
 86 * Unlock any spinlocks which will prevent us from getting the
 87 * message out.
 88 */
 89void bust_spinlocks(int yes)
 90{
 91	if (yes) {
 92		oops_in_progress = 1;
 93	} else {
 94		int loglevel_save = console_loglevel;
 95		console_unblank();
 96		oops_in_progress = 0;
 97		/*
 98		 * OK, the message is on the console.  Now we call printk()
 99		 * without oops_in_progress set so that printk will give klogd
100		 * a poke.  Hold onto your hats...
101		 */
102		console_loglevel = 15;
103		printk(" ");
104		console_loglevel = loglevel_save;
105	}
106}
107
108/*
109 * Find out which address space caused the exception.
110 * Access register mode is impossible, ignore space == 3.
111 */
112static inline enum fault_type get_fault_type(struct pt_regs *regs)
113{
114	unsigned long trans_exc_code;
115
116	trans_exc_code = regs->int_parm_long & 3;
117	if (likely(trans_exc_code == 0)) {
118		/* primary space exception */
119		if (IS_ENABLED(CONFIG_PGSTE) &&
120		    test_pt_regs_flag(regs, PIF_GUEST_FAULT))
121			return GMAP_FAULT;
122		if (current->thread.mm_segment == USER_DS)
123			return USER_FAULT;
124		return KERNEL_FAULT;
125	}
126	if (trans_exc_code == 2) {
127		/* secondary space exception */
128		if (current->thread.mm_segment & 1) {
129			if (current->thread.mm_segment == USER_DS_SACF)
130				return USER_FAULT;
131			return KERNEL_FAULT;
132		}
133		return VDSO_FAULT;
134	}
135	/* home space exception -> access via kernel ASCE */
136	return KERNEL_FAULT;
137}
138
139static int bad_address(void *p)
140{
141	unsigned long dummy;
142
143	return probe_kernel_address((unsigned long *)p, dummy);
144}
145
146static void dump_pagetable(unsigned long asce, unsigned long address)
147{
148	unsigned long *table = __va(asce & _ASCE_ORIGIN);
149
150	pr_alert("AS:%016lx ", asce);
151	switch (asce & _ASCE_TYPE_MASK) {
152	case _ASCE_TYPE_REGION1:
153		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
154		if (bad_address(table))
155			goto bad;
156		pr_cont("R1:%016lx ", *table);
157		if (*table & _REGION_ENTRY_INVALID)
158			goto out;
159		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
160		/* fallthrough */
161	case _ASCE_TYPE_REGION2:
162		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
163		if (bad_address(table))
164			goto bad;
165		pr_cont("R2:%016lx ", *table);
166		if (*table & _REGION_ENTRY_INVALID)
167			goto out;
168		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
169		/* fallthrough */
170	case _ASCE_TYPE_REGION3:
171		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
172		if (bad_address(table))
173			goto bad;
174		pr_cont("R3:%016lx ", *table);
175		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
176			goto out;
177		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
178		/* fallthrough */
179	case _ASCE_TYPE_SEGMENT:
180		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
181		if (bad_address(table))
182			goto bad;
183		pr_cont("S:%016lx ", *table);
184		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
185			goto out;
186		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
187	}
188	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
189	if (bad_address(table))
190		goto bad;
191	pr_cont("P:%016lx ", *table);
192out:
193	pr_cont("\n");
194	return;
195bad:
196	pr_cont("BAD\n");
197}
198
199static void dump_fault_info(struct pt_regs *regs)
200{
201	unsigned long asce;
202
203	pr_alert("Failing address: %016lx TEID: %016lx\n",
204		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
205	pr_alert("Fault in ");
206	switch (regs->int_parm_long & 3) {
207	case 3:
208		pr_cont("home space ");
209		break;
210	case 2:
211		pr_cont("secondary space ");
212		break;
213	case 1:
214		pr_cont("access register ");
215		break;
216	case 0:
217		pr_cont("primary space ");
218		break;
219	}
220	pr_cont("mode while using ");
221	switch (get_fault_type(regs)) {
222	case USER_FAULT:
223		asce = S390_lowcore.user_asce;
224		pr_cont("user ");
225		break;
226	case VDSO_FAULT:
227		asce = S390_lowcore.vdso_asce;
228		pr_cont("vdso ");
229		break;
230	case GMAP_FAULT:
231		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
232		pr_cont("gmap ");
233		break;
234	case KERNEL_FAULT:
235		asce = S390_lowcore.kernel_asce;
236		pr_cont("kernel ");
237		break;
238	}
239	pr_cont("ASCE.\n");
240	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
241}
242
243int show_unhandled_signals = 1;
244
245void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
246{
247	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
248		return;
249	if (!unhandled_signal(current, signr))
250		return;
251	if (!printk_ratelimit())
252		return;
253	printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
254	       regs->int_code & 0xffff, regs->int_code >> 17);
255	print_vma_addr(KERN_CONT "in ", regs->psw.addr);
256	printk(KERN_CONT "\n");
257	if (is_mm_fault)
258		dump_fault_info(regs);
259	show_regs(regs);
260}
261
262/*
263 * Send SIGSEGV to task.  This is an external routine
264 * to keep the stack usage of do_page_fault small.
265 */
266static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
267{
268	struct siginfo si;
269
270	report_user_fault(regs, SIGSEGV, 1);
271	si.si_signo = SIGSEGV;
272	si.si_errno = 0;
273	si.si_code = si_code;
274	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
275	force_sig_info(SIGSEGV, &si, current);
276}
277
278static noinline void do_no_context(struct pt_regs *regs)
279{
280	const struct exception_table_entry *fixup;
281
282	/* Are we prepared to handle this kernel fault?  */
283	fixup = search_exception_tables(regs->psw.addr);
284	if (fixup) {
285		regs->psw.addr = extable_fixup(fixup);
286		return;
287	}
288
289	/*
290	 * Oops. The kernel tried to access some bad page. We'll have to
291	 * terminate things with extreme prejudice.
292	 */
293	if (get_fault_type(regs) == KERNEL_FAULT)
294		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
295		       " in virtual kernel address space\n");
296	else
297		printk(KERN_ALERT "Unable to handle kernel paging request"
298		       " in virtual user address space\n");
299	dump_fault_info(regs);
300	die(regs, "Oops");
301	do_exit(SIGKILL);
302}
303
304static noinline void do_low_address(struct pt_regs *regs)
305{
306	/* Low-address protection hit in kernel mode means
307	   NULL pointer write access in kernel mode.  */
308	if (regs->psw.mask & PSW_MASK_PSTATE) {
309		/* Low-address protection hit in user mode 'cannot happen'. */
310		die (regs, "Low-address protection");
311		do_exit(SIGKILL);
312	}
313
314	do_no_context(regs);
315}
316
317static noinline void do_sigbus(struct pt_regs *regs)
318{
319	struct task_struct *tsk = current;
320	struct siginfo si;
321
322	/*
323	 * Send a sigbus, regardless of whether we were in kernel
324	 * or user mode.
325	 */
326	si.si_signo = SIGBUS;
327	si.si_errno = 0;
328	si.si_code = BUS_ADRERR;
329	si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
330	force_sig_info(SIGBUS, &si, tsk);
331}
332
333static noinline int signal_return(struct pt_regs *regs)
334{
335	u16 instruction;
336	int rc;
337
338	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
339	if (rc)
340		return rc;
341	if (instruction == 0x0a77) {
342		set_pt_regs_flag(regs, PIF_SYSCALL);
343		regs->int_code = 0x00040077;
344		return 0;
345	} else if (instruction == 0x0aad) {
346		set_pt_regs_flag(regs, PIF_SYSCALL);
347		regs->int_code = 0x000400ad;
348		return 0;
349	}
350	return -EACCES;
351}
352
353static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
354{
355	int si_code;
356
357	switch (fault) {
358	case VM_FAULT_BADACCESS:
359		if (access == VM_EXEC && signal_return(regs) == 0)
360			break;
361	case VM_FAULT_BADMAP:
362		/* Bad memory access. Check if it is kernel or user space. */
363		if (user_mode(regs)) {
364			/* User mode accesses just cause a SIGSEGV */
365			si_code = (fault == VM_FAULT_BADMAP) ?
366				SEGV_MAPERR : SEGV_ACCERR;
367			do_sigsegv(regs, si_code);
368			break;
369		}
370	case VM_FAULT_BADCONTEXT:
371	case VM_FAULT_PFAULT:
372		do_no_context(regs);
373		break;
374	case VM_FAULT_SIGNAL:
375		if (!user_mode(regs))
376			do_no_context(regs);
377		break;
378	default: /* fault & VM_FAULT_ERROR */
379		if (fault & VM_FAULT_OOM) {
380			if (!user_mode(regs))
381				do_no_context(regs);
382			else
383				pagefault_out_of_memory();
384		} else if (fault & VM_FAULT_SIGSEGV) {
385			/* Kernel mode? Handle exceptions or die */
386			if (!user_mode(regs))
387				do_no_context(regs);
388			else
389				do_sigsegv(regs, SEGV_MAPERR);
390		} else if (fault & VM_FAULT_SIGBUS) {
391			/* Kernel mode? Handle exceptions or die */
392			if (!user_mode(regs))
393				do_no_context(regs);
394			else
395				do_sigbus(regs);
396		} else
397			BUG();
398		break;
399	}
400}
401
402/*
403 * This routine handles page faults.  It determines the address,
404 * and the problem, and then passes it off to one of the appropriate
405 * routines.
406 *
407 * interruption code (int_code):
 408 *   04       Protection           ->  Write-Protection  (suppression)
409 *   10       Segment translation  ->  Not present       (nullification)
410 *   11       Page translation     ->  Not present       (nullification)
411 *   3b       Region third trans.  ->  Not present       (nullification)
412 */
413static inline int do_exception(struct pt_regs *regs, int access)
414{
415	struct gmap *gmap;
416	struct task_struct *tsk;
417	struct mm_struct *mm;
418	struct vm_area_struct *vma;
419	enum fault_type type;
420	unsigned long trans_exc_code;
421	unsigned long address;
422	unsigned int flags;
423	int fault;
424
425	tsk = current;
426	/*
427	 * The instruction that caused the program check has
428	 * been nullified. Don't signal single step via SIGTRAP.
429	 */
430	clear_pt_regs_flag(regs, PIF_PER_TRAP);
431
432	if (notify_page_fault(regs))
433		return 0;
434
435	mm = tsk->mm;
436	trans_exc_code = regs->int_parm_long;
437
438	/*
439	 * Verify that the fault happened in user space, that
440	 * we are not in an interrupt and that there is a 
441	 * user context.
442	 */
443	fault = VM_FAULT_BADCONTEXT;
444	type = get_fault_type(regs);
445	switch (type) {
446	case KERNEL_FAULT:
447		goto out;
448	case VDSO_FAULT:
449		fault = VM_FAULT_BADMAP;
450		goto out;
451	case USER_FAULT:
452	case GMAP_FAULT:
453		if (faulthandler_disabled() || !mm)
454			goto out;
455		break;
456	}
457
458	address = trans_exc_code & __FAIL_ADDR_MASK;
459	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
460	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
461	if (user_mode(regs))
462		flags |= FAULT_FLAG_USER;
463	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
464		flags |= FAULT_FLAG_WRITE;
465	down_read(&mm->mmap_sem);
466
467	gmap = NULL;
468	if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
469		gmap = (struct gmap *) S390_lowcore.gmap;
470		current->thread.gmap_addr = address;
471		current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
472		current->thread.gmap_int_code = regs->int_code & 0xffff;
473		address = __gmap_translate(gmap, address);
474		if (address == -EFAULT) {
475			fault = VM_FAULT_BADMAP;
476			goto out_up;
477		}
478		if (gmap->pfault_enabled)
479			flags |= FAULT_FLAG_RETRY_NOWAIT;
480	}
481
482retry:
483	fault = VM_FAULT_BADMAP;
484	vma = find_vma(mm, address);
485	if (!vma)
486		goto out_up;
487
488	if (unlikely(vma->vm_start > address)) {
489		if (!(vma->vm_flags & VM_GROWSDOWN))
490			goto out_up;
491		if (expand_stack(vma, address))
492			goto out_up;
493	}
494
495	/*
496	 * Ok, we have a good vm_area for this memory access, so
497	 * we can handle it..
498	 */
499	fault = VM_FAULT_BADACCESS;
500	if (unlikely(!(vma->vm_flags & access)))
501		goto out_up;
502
503	if (is_vm_hugetlb_page(vma))
504		address &= HPAGE_MASK;
505	/*
506	 * If for any reason at all we couldn't handle the fault,
507	 * make sure we exit gracefully rather than endlessly redo
508	 * the fault.
509	 */
510	fault = handle_mm_fault(vma, address, flags);
511	/* No reason to continue if interrupted by SIGKILL. */
512	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
513		fault = VM_FAULT_SIGNAL;
514		goto out;
515	}
516	if (unlikely(fault & VM_FAULT_ERROR))
517		goto out_up;
518
519	/*
520	 * Major/minor page fault accounting is only done on the
521	 * initial attempt. If we go through a retry, it is extremely
522	 * likely that the page will be found in page cache at that point.
523	 */
524	if (flags & FAULT_FLAG_ALLOW_RETRY) {
525		if (fault & VM_FAULT_MAJOR) {
526			tsk->maj_flt++;
527			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
528				      regs, address);
529		} else {
530			tsk->min_flt++;
531			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
532				      regs, address);
533		}
534		if (fault & VM_FAULT_RETRY) {
535			if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
536			    (flags & FAULT_FLAG_RETRY_NOWAIT)) {
537				/* FAULT_FLAG_RETRY_NOWAIT has been set,
538				 * mmap_sem has not been released */
539				current->thread.gmap_pfault = 1;
540				fault = VM_FAULT_PFAULT;
541				goto out_up;
542			}
543			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
544			 * of starvation. */
545			flags &= ~(FAULT_FLAG_ALLOW_RETRY |
546				   FAULT_FLAG_RETRY_NOWAIT);
547			flags |= FAULT_FLAG_TRIED;
548			down_read(&mm->mmap_sem);
549			goto retry;
550		}
551	}
552	if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
553		address =  __gmap_link(gmap, current->thread.gmap_addr,
554				       address);
555		if (address == -EFAULT) {
556			fault = VM_FAULT_BADMAP;
557			goto out_up;
558		}
559		if (address == -ENOMEM) {
560			fault = VM_FAULT_OOM;
561			goto out_up;
562		}
563	}
564	fault = 0;
565out_up:
566	up_read(&mm->mmap_sem);
567out:
568	return fault;
569}
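/*
 * Editor's note on the differences from the v5.9 listing above: the
 * mmap_sem read lock taken here became mmap_read_lock()/mmap_read_unlock()
 * (the mmap_lock rename), and the explicit maj_flt/min_flt and
 * PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN accounting disappeared because
 * handle_mm_fault() now takes regs and does that accounting itself.
 */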
570
571void do_protection_exception(struct pt_regs *regs)
572{
573	unsigned long trans_exc_code;
574	int access, fault;
575
576	trans_exc_code = regs->int_parm_long;
577	/*
578	 * Protection exceptions are suppressing, decrement psw address.
579	 * The exception to this rule are aborted transactions, for these
580	 * the PSW already points to the correct location.
581	 */
582	if (!(regs->int_code & 0x200))
583		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
584	/*
585	 * Check for low-address protection.  This needs to be treated
586	 * as a special case because the translation exception code
587	 * field is not guaranteed to contain valid data in this case.
588	 */
589	if (unlikely(!(trans_exc_code & 4))) {
590		do_low_address(regs);
591		return;
592	}
593	if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
594		regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
595					(regs->psw.addr & PAGE_MASK);
596		access = VM_EXEC;
597		fault = VM_FAULT_BADACCESS;
598	} else {
599		access = VM_WRITE;
600		fault = do_exception(regs, access);
601	}
602	if (unlikely(fault))
603		do_fault_error(regs, access, fault);
604}
605NOKPROBE_SYMBOL(do_protection_exception);
606
607void do_dat_exception(struct pt_regs *regs)
608{
609	int access, fault;
610
611	access = VM_READ | VM_EXEC | VM_WRITE;
612	fault = do_exception(regs, access);
613	if (unlikely(fault))
614		do_fault_error(regs, access, fault);
615}
616NOKPROBE_SYMBOL(do_dat_exception);
617
618#ifdef CONFIG_PFAULT 
619/*
620 * 'pfault' pseudo page faults routines.
621 */
622static int pfault_disable;
623
624static int __init nopfault(char *str)
625{
626	pfault_disable = 1;
627	return 1;
628}
629
630__setup("nopfault", nopfault);
631
632struct pfault_refbk {
633	u16 refdiagc;
634	u16 reffcode;
635	u16 refdwlen;
636	u16 refversn;
637	u64 refgaddr;
638	u64 refselmk;
639	u64 refcmpmk;
640	u64 reserved;
641} __attribute__ ((packed, aligned(8)));
642
643int pfault_init(void)
644{
645	struct pfault_refbk refbk = {
646		.refdiagc = 0x258,
647		.reffcode = 0,
648		.refdwlen = 5,
649		.refversn = 2,
650		.refgaddr = __LC_LPP,
651		.refselmk = 1ULL << 48,
652		.refcmpmk = 1ULL << 48,
653		.reserved = __PF_RES_FIELD };
654        int rc;
655
656	if (pfault_disable)
657		return -1;
658	diag_stat_inc(DIAG_STAT_X258);
659	asm volatile(
660		"	diag	%1,%0,0x258\n"
661		"0:	j	2f\n"
662		"1:	la	%0,8\n"
663		"2:\n"
664		EX_TABLE(0b,1b)
665		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
666        return rc;
667}
668
669void pfault_fini(void)
670{
671	struct pfault_refbk refbk = {
672		.refdiagc = 0x258,
673		.reffcode = 1,
674		.refdwlen = 5,
675		.refversn = 2,
676	};
677
678	if (pfault_disable)
679		return;
680	diag_stat_inc(DIAG_STAT_X258);
681	asm volatile(
682		"	diag	%0,0,0x258\n"
683		"0:	nopr	%%r7\n"
684		EX_TABLE(0b,0b)
685		: : "a" (&refbk), "m" (refbk) : "cc");
686}
687
688static DEFINE_SPINLOCK(pfault_lock);
689static LIST_HEAD(pfault_list);
690
691#define PF_COMPLETE	0x0080
692
693/*
694 * The mechanism of our pfault code: if Linux is running as guest, runs a user
695 * space process and the user space process accesses a page that the host has
696 * paged out we get a pfault interrupt.
697 *
698 * This allows us, within the guest, to schedule a different process. Without
699 * this mechanism the host would have to suspend the whole virtual cpu until
700 * the page has been paged in.
701 *
702 * So when we get such an interrupt then we set the state of the current task
703 * to uninterruptible and also set the need_resched flag. Both happens within
704 * interrupt context(!). If we later on want to return to user space we
705 * recognize the need_resched flag and then call schedule().  It's not very
706 * obvious how this works...
707 *
708 * Of course we have a lot of additional fun with the completion interrupt (->
709 * host signals that a page of a process has been paged in and the process can
710 * continue to run). This interrupt can arrive on any cpu and, since we have
711 * virtual cpus, actually appear before the interrupt that signals that a page
712 * is missing.
713 */
714static void pfault_interrupt(struct ext_code ext_code,
715			     unsigned int param32, unsigned long param64)
716{
717	struct task_struct *tsk;
718	__u16 subcode;
719	pid_t pid;
720
721	/*
722	 * Get the external interruption subcode & pfault initial/completion
723	 * signal bit. VM stores this in the 'cpu address' field associated
724	 * with the external interrupt.
725	 */
726	subcode = ext_code.subcode;
727	if ((subcode & 0xff00) != __SUBCODE_MASK)
728		return;
729	inc_irq_stat(IRQEXT_PFL);
730	/* Get the token (= pid of the affected task). */
731	pid = param64 & LPP_PID_MASK;
732	rcu_read_lock();
733	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
734	if (tsk)
735		get_task_struct(tsk);
736	rcu_read_unlock();
737	if (!tsk)
738		return;
739	spin_lock(&pfault_lock);
740	if (subcode & PF_COMPLETE) {
741		/* signal bit is set -> a page has been swapped in by VM */
742		if (tsk->thread.pfault_wait == 1) {
743			/* Initial interrupt was faster than the completion
744			 * interrupt. pfault_wait is valid. Set pfault_wait
745			 * back to zero and wake up the process. This can
746			 * safely be done because the task is still sleeping
747			 * and can't produce new pfaults. */
748			tsk->thread.pfault_wait = 0;
749			list_del(&tsk->thread.list);
750			wake_up_process(tsk);
751			put_task_struct(tsk);
752		} else {
753			/* Completion interrupt was faster than initial
754			 * interrupt. Set pfault_wait to -1 so the initial
755			 * interrupt doesn't put the task to sleep.
756			 * If the task is not running, ignore the completion
757			 * interrupt since it must be a leftover of a PFAULT
758			 * CANCEL operation which didn't remove all pending
759			 * completion interrupts. */
760			if (tsk->state == TASK_RUNNING)
761				tsk->thread.pfault_wait = -1;
762		}
763	} else {
764		/* signal bit not set -> a real page is missing. */
765		if (WARN_ON_ONCE(tsk != current))
766			goto out;
767		if (tsk->thread.pfault_wait == 1) {
768			/* Already on the list with a reference: put to sleep */
769			goto block;
770		} else if (tsk->thread.pfault_wait == -1) {
771			/* Completion interrupt was faster than the initial
772			 * interrupt (pfault_wait == -1). Set pfault_wait
773			 * back to zero and exit. */
774			tsk->thread.pfault_wait = 0;
775		} else {
776			/* Initial interrupt arrived before completion
777			 * interrupt. Let the task sleep.
778			 * An extra task reference is needed since a different
779			 * cpu may set the task state to TASK_RUNNING again
780			 * before the scheduler is reached. */
781			get_task_struct(tsk);
782			tsk->thread.pfault_wait = 1;
783			list_add(&tsk->thread.list, &pfault_list);
784block:
785			/* Since this must be a userspace fault, there
786			 * is no kernel task state to trample. Rely on the
787			 * return to userspace schedule() to block. */
788			__set_current_state(TASK_UNINTERRUPTIBLE);
789			set_tsk_need_resched(tsk);
790			set_preempt_need_resched();
791		}
792	}
793out:
794	spin_unlock(&pfault_lock);
795	put_task_struct(tsk);
796}
797
798static int pfault_cpu_dead(unsigned int cpu)
799{
800	struct thread_struct *thread, *next;
801	struct task_struct *tsk;
802
803	spin_lock_irq(&pfault_lock);
804	list_for_each_entry_safe(thread, next, &pfault_list, list) {
805		thread->pfault_wait = 0;
806		list_del(&thread->list);
807		tsk = container_of(thread, struct task_struct, thread);
808		wake_up_process(tsk);
809		put_task_struct(tsk);
810	}
811	spin_unlock_irq(&pfault_lock);
812	return 0;
813}
814
815static int __init pfault_irq_init(void)
816{
817	int rc;
818
819	rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
820	if (rc)
821		goto out_extint;
822	rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
823	if (rc)
824		goto out_pfault;
825	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
826	cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
827				  NULL, pfault_cpu_dead);
828	return 0;
829
830out_pfault:
831	unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
832out_extint:
833	pfault_disable = 1;
834	return rc;
835}
836early_initcall(pfault_irq_init);
837
838#endif /* CONFIG_PFAULT */