arch/powerpc/mm/fault.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *
  6 *  Derived from "arch/i386/mm/fault.c"
  7 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  8 *
  9 *  Modified by Cort Dougan and Paul Mackerras.
 10 *
 11 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 12 */
 13
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/sched/task_stack.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/pagemap.h>
 22#include <linux/ptrace.h>
 23#include <linux/mman.h>
 24#include <linux/mm.h>
 25#include <linux/interrupt.h>
 26#include <linux/highmem.h>
 27#include <linux/extable.h>
 28#include <linux/kprobes.h>
 29#include <linux/kdebug.h>
 30#include <linux/perf_event.h>
 31#include <linux/ratelimit.h>
 32#include <linux/context_tracking.h>
 33#include <linux/hugetlb.h>
 34#include <linux/uaccess.h>
 35#include <linux/kfence.h>
 36#include <linux/pkeys.h>
 37
 38#include <asm/firmware.h>
 39#include <asm/interrupt.h>
 40#include <asm/page.h>
 41#include <asm/mmu.h>
 42#include <asm/mmu_context.h>
 43#include <asm/siginfo.h>
 44#include <asm/debug.h>
 45#include <asm/kup.h>
 46#include <asm/inst.h>
 47
 48
 49/*
 50 * do_page_fault error handling helpers
 51 */
 52
 53static int
 54__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
 55{
 56	/*
 57	 * If we are in kernel mode, bail out with a SEGV, this will
 58	 * be caught by the assembly which will restore the non-volatile
 59	 * registers before calling bad_page_fault()
 60	 */
 61	if (!user_mode(regs))
 62		return SIGSEGV;
 63
 64	_exception(SIGSEGV, regs, si_code, address);
 65
 66	return 0;
 67}
 68
 69static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
 70{
 71	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
 72}
 73
 74static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 75{
 76	struct mm_struct *mm = current->mm;
 77
 78	/*
 79	 * Something tried to access memory that isn't in our memory map.
 80	 * Fix it, but check if it's kernel or user first.
 81	 */
 82	mmap_read_unlock(mm);
 83
 84	return __bad_area_nosemaphore(regs, address, si_code);
 85}
 86
 87static noinline int bad_area(struct pt_regs *regs, unsigned long address)
 88{
 89	return __bad_area(regs, address, SEGV_MAPERR);
 90}
 91
 92static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 93				    struct vm_area_struct *vma)
 94{
 95	struct mm_struct *mm = current->mm;
 96	int pkey;
 97
 98	/*
 99	 * We don't try to fetch the pkey from page table because reading
100	 * page table without locking doesn't guarantee stable pte value.
101	 * Hence the pkey value that we return to userspace can be different
102	 * from the pkey that actually caused access error.
103	 *
104	 * It does *not* guarantee that the VMA we find here
105	 * was the one that we faulted on.
106	 *
107	 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
108	 * 2. T1   : set AMR to deny access to pkey=4, touches page
109	 * 3. T1   : faults...
110	 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
111	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
112	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
113	 *	     faulted on a pte with its pkey=4.
114	 */
115	pkey = vma_pkey(vma);
116
117	mmap_read_unlock(mm);
118
119	/*
120	 * If we are in kernel mode, bail out with a SEGV, this will
121	 * be caught by the assembly which will restore the non-volatile
122	 * registers before calling bad_page_fault()
123	 */
124	if (!user_mode(regs))
125		return SIGSEGV;
126
127	_exception_pkey(regs, address, pkey);
128
129	return 0;
130}
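/*
 * Illustrative userspace sketch, not part of this file: what the SIGSEGV
 * raised by bad_access_pkey() looks like from the other side. Assumes
 * pkey-capable hardware and glibc 2.27+ for the pkey_*() wrappers. The
 * kernel sets si_code = SEGV_PKUERR and reports the key taken from
 * vma_pkey(), which, per the comment above, may race with a concurrent
 * mprotect_key() and differ from the key that actually faulted.
 */
#define _GNU_SOURCE
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv(int sig, siginfo_t *info, void *ctx)
{
	if (info->si_code == SEGV_PKUERR)	/* pkey fault, see above */
		write(2, "SEGV_PKUERR\n", 12);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv, .sa_flags = SA_SIGINFO };
	char *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, 0);

	sigaction(SIGSEGV, &sa, NULL);
	pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);
	pkey_set(pkey, PKEY_DISABLE_ACCESS);	/* deny access via AMR */
	return page[0];				/* faults -> SEGV_PKUERR */
}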
131
132static noinline int bad_access(struct pt_regs *regs, unsigned long address)
133{
134	return __bad_area(regs, address, SEGV_ACCERR);
135}
136
137static int do_sigbus(struct pt_regs *regs, unsigned long address,
138		     vm_fault_t fault)
139{
140	if (!user_mode(regs))
141		return SIGBUS;
142
143	current->thread.trap_nr = BUS_ADRERR;
144#ifdef CONFIG_MEMORY_FAILURE
145	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
146		unsigned int lsb = 0; /* shutup gcc */
147
148		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
149			current->comm, current->pid, address);
150
151		if (fault & VM_FAULT_HWPOISON_LARGE)
152			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
153		if (fault & VM_FAULT_HWPOISON)
154			lsb = PAGE_SHIFT;
155
156		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
157		return 0;
158	}
159
160#endif
161	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
162	return 0;
163}
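/*
 * Illustrative userspace sketch, not part of this file: the simplest way
 * to reach the force_sig_fault(SIGBUS, BUS_ADRERR, ...) path above is to
 * touch a file-backed mapping beyond the end of the file, which makes
 * handle_mm_fault() return VM_FAULT_SIGBUS. The path below is just a
 * scratch file for the demo; error checking is omitted.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/sigbus-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	char *p;

	ftruncate(fd, 4096);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ftruncate(fd, 0);	/* no backing page behind the mapping now */
	p[0] = 1;		/* faults: SIGBUS with si_code BUS_ADRERR */
	return 0;
}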
164
165static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
166				vm_fault_t fault)
167{
168	/*
169	 * Kernel page fault interrupted by SIGKILL. We have no reason to
170	 * continue processing.
171	 */
172	if (fatal_signal_pending(current) && !user_mode(regs))
173		return SIGKILL;
174
175	/* Out of memory */
176	if (fault & VM_FAULT_OOM) {
177		/*
178		 * We ran out of memory, or some other thing happened to us that
179		 * made us unable to handle the page fault gracefully.
180		 */
181		if (!user_mode(regs))
182			return SIGSEGV;
183		pagefault_out_of_memory();
184	} else {
185		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
186			     VM_FAULT_HWPOISON_LARGE))
187			return do_sigbus(regs, addr, fault);
188		else if (fault & VM_FAULT_SIGSEGV)
189			return bad_area_nosemaphore(regs, addr);
190		else
191			BUG();
192	}
193	return 0;
194}
195
196/* Is this a bad kernel fault ? */
197static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
198			     unsigned long address, bool is_write)
199{
200	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
201
202	if (is_exec) {
203		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
204				    address >= TASK_SIZE ? "exec-protected" : "user",
205				    address,
206				    from_kuid(&init_user_ns, current_uid()));
207
208		// Kernel exec fault is always bad
209		return true;
210	}
211
212	// Kernel fault on kernel address is bad
213	if (address >= TASK_SIZE)
214		return true;
215
216	// Read/write fault blocked by KUAP is bad, it can never succeed.
217	if (bad_kuap_fault(regs, address, is_write)) {
218		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
219				    is_write ? "write" : "read", address,
220				    from_kuid(&init_user_ns, current_uid()));
221
222		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
223		if (!search_exception_tables(regs->nip))
224			return true;
225
226		// Read/write fault in a valid region (the exception table search passed
227		// above), but blocked by KUAP is bad, it can never succeed.
228		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
229	}
230
231	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
232	return false;
233}
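/*
 * Illustrative kernel-side sketch, not part of this file: why the KUAP
 * check above only fires for stray accesses. copy_to_user() opens the
 * user access window around its access (see asm/kup.h), so its faults
 * are allowed and fixed up; a raw dereference of a user pointer leaves
 * the window closed and lands in bad_kuap_fault().
 */
#include <linux/uaccess.h>

static int kuap_demo(char __user *uptr)
{
	char c = 'x';

	if (copy_to_user(uptr, &c, 1))	/* OK: uaccess opens the window */
		return -EFAULT;
	/* *uptr = 'x';  <- would be blocked by KUAP and oops */
	return 0;
}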
234
235static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
236			      struct vm_area_struct *vma)
237{
238	/*
239	 * Make sure to check the VMA so that we do not perform
240	 * faults just to hit a pkey fault as soon as we fill in a
241	 * page. Only called for current mm, hence foreign == 0
242	 */
243	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
244		return true;
245
246	return false;
247}
248
249static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
250{
251	/*
252	 * Allow execution from readable areas if the MMU does not
253	 * provide separate controls over reading and executing.
254	 *
255	 * Note: That code used to not be enabled for 4xx/BookE.
256	 * It is now, as I/D cache coherency for these is done at
257	 * set_pte_at() time and I see no reason why the test
258	 * below wouldn't be valid on those processors. This -may-
259	 * break programs compiled with a really old ABI though.
260	 */
261	if (is_exec) {
262		return !(vma->vm_flags & VM_EXEC) &&
263			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
264			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
265	}
266
267	if (is_write) {
268		if (unlikely(!(vma->vm_flags & VM_WRITE)))
269			return true;
270		return false;
271	}
272
273	/*
274	 * Check for a read fault.  This could be caused by a read on an
275	 * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
276	 */
277	if (unlikely(!(vma->vm_flags & VM_READ)))
278		return true;
279	/*
280	 * We should ideally do the vma pkey access check here. But in the
281	 * fault path, handle_mm_fault() also does the same check. To avoid
282	 * these multiple checks, we skip it here and handle access error due
283	 * to pkeys later.
284	 */
285	return false;
286}
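/*
 * Illustrative userspace sketch, not part of this file: a read from a
 * PROT_NONE mapping passes find_vma() but fails the VM_READ test in
 * access_error() above, so it is reported via bad_access() as SIGSEGV
 * with si_code = SEGV_ACCERR rather than SEGV_MAPERR.
 */
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_NONE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return p[0];	/* SIGSEGV, si_code = SEGV_ACCERR */
}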
287
288#ifdef CONFIG_PPC_SMLPAR
289static inline void cmo_account_page_fault(void)
290{
291	if (firmware_has_feature(FW_FEATURE_CMO)) {
292		u32 page_ins;
293
294		preempt_disable();
295		page_ins = be32_to_cpu(get_lppaca()->page_ins);
296		page_ins += 1 << PAGE_FACTOR;
297		get_lppaca()->page_ins = cpu_to_be32(page_ins);
298		preempt_enable();
299	}
300}
301#else
302static inline void cmo_account_page_fault(void) { }
303#endif /* CONFIG_PPC_SMLPAR */
304
305static void sanity_check_fault(bool is_write, bool is_user,
306			       unsigned long error_code, unsigned long address)
307{
308	/*
309	 * Userspace trying to access kernel address, we get PROTFAULT for that.
310	 */
311	if (is_user && address >= TASK_SIZE) {
312		if ((long)address == -1)
313			return;
314
315		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
316				   current->comm, current->pid, address,
317				   from_kuid(&init_user_ns, current_uid()));
318		return;
319	}
320
321	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
322		return;
323
324	/*
325	 * For hash translation mode, we should never get a
326	 * PROTFAULT. Any update to pte to reduce access will result in us
327	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
328	 * fault instead of DSISR_PROTFAULT.
329	 *
330	 * A pte update to relax the access will not result in a hash page table
331	 * entry invalidate and hence can result in DSISR_PROTFAULT.
332	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
333	 * the special !is_write in the below conditional.
334	 *
335	 * For platforms that don't support a coherent icache but do support a
336	 * per-page noexec bit, we set things up so that the D/I cache sync is
337	 * done via a fault. But that is handled by the low level hash fault
338	 * code (hash_page_do_lazy_icache()) and we should not reach here in
339	 * that case.
340	 *
341	 * For a wrong access that results in PROTFAULT, the vma->vm_flags
342	 * check in access_error() handles it, so we correctly fall through to
343	 * the bad_area handling.
344	 *
345	 * For embedded platforms with per-page exec support but no coherent
346	 * icache, we do get PROTFAULT and handle the D/I cache sync in
347	 * set_pte_at() while taking the noexec/prot fault. Hence this WARN_ON
348	 * is conditional on the server MMU.
349	 *
350	 * For radix, we can get a prot fault in the autonuma case, because
351	 * the radix page table will have the pages marked no-access for user.
352	 */
353	if (radix_enabled() || is_write)
354		return;
355
356	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
357}
358
359/*
360 * Define the correct "is_write" bit in error_code based
361 * on the processor family
362 */
363#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
364#define page_fault_is_write(__err)	((__err) & ESR_DST)
365#else
366#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
367#endif
368
369#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
370#define page_fault_is_bad(__err)	(0)
371#elif defined(CONFIG_PPC_8xx)
372#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
373#elif defined(CONFIG_PPC64)
374static int page_fault_is_bad(unsigned long err)
375{
376	unsigned long flag = DSISR_BAD_FAULT_64S;
377
378	/*
379	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
380	 * If byte 0, bit 3 of pi-attribute-specifier-type in
381	 * ibm,pi-features property is defined, ignore the DSI error
382	 * which is caused by the paste instruction on the
383	 * suspended NX window.
384	 */
385	if (mmu_has_feature(MMU_FTR_NX_DSI))
386		flag &= ~DSISR_BAD_COPYPASTE;
387
388	return err & flag;
389}
390#else
391#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
392#endif
393
394/*
395 * For 600- and 800-family processors, the error_code parameter is DSISR
396 * for a data fault, SRR1 for an instruction fault.
397 * For 400-family processors the error_code parameter is ESR for a data fault,
398 * 0 for an instruction fault.
399 * For 64-bit processors, the error_code parameter is DSISR for a data access
400 * fault, SRR1 & 0x08000000 for an instruction access fault.
401 *
402 * The return value is 0 if the fault was handled, or the signal
403 * number if this is a kernel fault that can't be handled here.
404 */
405static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
406			   unsigned long error_code)
407{
408	struct vm_area_struct * vma;
409	struct mm_struct *mm = current->mm;
410	unsigned int flags = FAULT_FLAG_DEFAULT;
411	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
412	int is_user = user_mode(regs);
413	int is_write = page_fault_is_write(error_code);
414	vm_fault_t fault, major = 0;
415	bool kprobe_fault = kprobe_page_fault(regs, 11);
416
417	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
418		return 0;
419
420	if (unlikely(page_fault_is_bad(error_code))) {
421		if (is_user) {
422			_exception(SIGBUS, regs, BUS_OBJERR, address);
423			return 0;
424		}
425		return SIGBUS;
426	}
427
428	/* Additional sanity check(s) */
429	sanity_check_fault(is_write, is_user, error_code, address);
430
431	/*
432	 * The kernel should never take an execute fault nor should it
433	 * take a page fault to a kernel address or a page fault to a user
434	 * address outside of dedicated places
435	 */
436	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
437		if (kfence_handle_page_fault(address, is_write, regs))
438			return 0;
439
440		return SIGSEGV;
441	}
442
443	/*
444	 * If we're in an interrupt, have no user context or are running
445	 * in a region with pagefaults disabled then we must not take the fault
446	 */
447	if (unlikely(faulthandler_disabled() || !mm)) {
448		if (is_user)
449			printk_ratelimited(KERN_ERR "Page fault in user mode"
450					   " with faulthandler_disabled()=%d"
451					   " mm=%p\n",
452					   faulthandler_disabled(), mm);
453		return bad_area_nosemaphore(regs, address);
454	}
455
456	interrupt_cond_local_irq_enable(regs);
457
458	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
459
460	/*
461	 * We want to do this outside mmap_lock, because reading code around nip
462	 * can result in fault, which will cause a deadlock when called with
463	 * mmap_lock held
464	 */
465	if (is_user)
466		flags |= FAULT_FLAG_USER;
467	if (is_write)
468		flags |= FAULT_FLAG_WRITE;
469	if (is_exec)
470		flags |= FAULT_FLAG_INSTRUCTION;
471
472	/* When running in the kernel we expect faults to occur only to
473	 * addresses in user space.  All other faults represent errors in the
474	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
475	 * erroneous fault occurring in a code path which already holds mmap_lock
476	 * we will deadlock attempting to validate the fault against the
477	 * address space.  Luckily the kernel only validly references user
478	 * space from well defined areas of code, which are listed in the
479	 * exceptions table.
480	 *
481	 * As the vast majority of faults will be valid we will only perform
482	 * the source reference check when there is a possibility of a deadlock.
483	 * Attempt to lock the address space, if we cannot we then validate the
484	 * source.  If this is invalid we can skip the address space check,
485	 * thus avoiding the deadlock.
486	 */
487	if (unlikely(!mmap_read_trylock(mm))) {
488		if (!is_user && !search_exception_tables(regs->nip))
489			return bad_area_nosemaphore(regs, address);
490
491retry:
492		mmap_read_lock(mm);
493	} else {
494		/*
495		 * The above down_read_trylock() might have succeeded in
496		 * which case we'll have missed the might_sleep() from
497		 * down_read():
498		 */
499		might_sleep();
500	}
501
502	vma = find_vma(mm, address);
503	if (unlikely(!vma))
504		return bad_area(regs, address);
505
506	if (unlikely(vma->vm_start > address)) {
507		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
508			return bad_area(regs, address);
509
510		if (unlikely(expand_stack(vma, address)))
511			return bad_area(regs, address);
512	}
513
514	if (unlikely(access_pkey_error(is_write, is_exec,
515				       (error_code & DSISR_KEYFAULT), vma)))
516		return bad_access_pkey(regs, address, vma);
517
518	if (unlikely(access_error(is_write, is_exec, vma)))
519		return bad_access(regs, address);
520
521	/*
522	 * If for any reason at all we couldn't handle the fault,
523	 * make sure we exit gracefully rather than endlessly redo
524	 * the fault.
525	 */
526	fault = handle_mm_fault(vma, address, flags, regs);
527
528	major |= fault & VM_FAULT_MAJOR;
529
530	if (fault_signal_pending(fault, regs))
531		return user_mode(regs) ? 0 : SIGBUS;
532
533	/* The fault is fully completed (including releasing mmap lock) */
534	if (fault & VM_FAULT_COMPLETED)
535		goto out;
536
537	/*
538	 * Handle the retry right now, the mmap_lock has been released in that
539	 * case.
540	 */
541	if (unlikely(fault & VM_FAULT_RETRY)) {
542		flags |= FAULT_FLAG_TRIED;
543		goto retry;
544	}
545
546	mmap_read_unlock(current->mm);
547
548	if (unlikely(fault & VM_FAULT_ERROR))
549		return mm_fault_error(regs, address, fault);
550
551out:
552	/*
553	 * Major/minor page fault accounting.
554	 */
555	if (major)
556		cmo_account_page_fault();
557
558	return 0;
559}
560NOKPROBE_SYMBOL(___do_page_fault);
561
562static __always_inline void __do_page_fault(struct pt_regs *regs)
563{
564	long err;
565
566	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
567	if (unlikely(err))
568		bad_page_fault(regs, err);
569}
570
571DEFINE_INTERRUPT_HANDLER(do_page_fault)
572{
573	__do_page_fault(regs);
574}
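/*
 * Illustrative userspace sketch, not part of this file: the si_code
 * chosen by the helpers above can be observed with SA_SIGINFO. An
 * unmapped address comes back as SEGV_MAPERR (bad_area()); a mapped
 * page with the wrong permissions as SEGV_ACCERR (bad_access()).
 */
#include <signal.h>
#include <unistd.h>

static void segv(int sig, siginfo_t *info, void *ctx)
{
	const char *msg = info->si_code == SEGV_MAPERR ?
			  "SEGV_MAPERR\n" : "SEGV_ACCERR\n";

	write(2, msg, 12);	/* write() is async-signal-safe */
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv, .sa_flags = SA_SIGINFO };

	sigaction(SIGSEGV, &sa, NULL);
	*(volatile char *)0x10 = 1;	/* unmapped low page: SEGV_MAPERR */
	return 0;
}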
575
576#ifdef CONFIG_PPC_BOOK3S_64
577/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
578void hash__do_page_fault(struct pt_regs *regs)
579{
580	__do_page_fault(regs);
581}
582NOKPROBE_SYMBOL(hash__do_page_fault);
583#endif
584
585/*
586 * bad_page_fault is called when we have a bad access from the kernel.
587 * It is called from the DSI and ISI handlers in head.S and from some
588 * of the procedures in traps.c.
589 */
590static void __bad_page_fault(struct pt_regs *regs, int sig)
591{
592	int is_write = page_fault_is_write(regs->dsisr);
593	const char *msg;
594
595	/* kernel has accessed a bad area */
596
597	if (regs->dar < PAGE_SIZE)
598		msg = "Kernel NULL pointer dereference";
599	else
600		msg = "Unable to handle kernel data access";
601
602	switch (TRAP(regs)) {
603	case INTERRUPT_DATA_STORAGE:
604	case INTERRUPT_H_DATA_STORAGE:
605		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
606			 is_write ? "write" : "read", regs->dar);
607		break;
608	case INTERRUPT_DATA_SEGMENT:
609		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
610		break;
611	case INTERRUPT_INST_STORAGE:
612	case INTERRUPT_INST_SEGMENT:
613		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
614			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
615		break;
616	case INTERRUPT_ALIGNMENT:
617		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
618			 regs->dar);
619		break;
620	default:
621		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
622			 regs->dar);
623		break;
624	}
625	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
626		regs->nip);
627
628	if (task_stack_end_corrupted(current))
629		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
630
631	die("Kernel access of bad area", regs, sig);
632}
633
634void bad_page_fault(struct pt_regs *regs, int sig)
635{
636	const struct exception_table_entry *entry;
637
638	/* Are we prepared to handle this fault?  */
639	entry = search_exception_tables(instruction_pointer(regs));
640	if (entry)
641		instruction_pointer_set(regs, extable_fixup(entry));
642	else
643		__bad_page_fault(regs, sig);
644}
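/*
 * Illustrative kernel-side sketch, not part of this file: how the
 * extable fixup in bad_page_fault() is typically exercised. get_user()
 * records an exception table entry for its access instruction; if the
 * access faults and the fault cannot be resolved, bad_page_fault()
 * finds that entry and points regs->nip at the fixup, so get_user()
 * returns -EFAULT instead of the kernel dying in __bad_page_fault().
 */
#include <linux/uaccess.h>

static int read_user_int(int __user *uptr, int *val)
{
	if (get_user(*val, uptr))	/* fault -> extable fixup -> error */
		return -EFAULT;
	return 0;
}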
645
646#ifdef CONFIG_PPC_BOOK3S_64
647DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
648{
649	bad_page_fault(regs, SIGSEGV);
650}
651
652/*
653 * In radix, segment interrupts indicate the EA is not addressable by the
654 * page table geometry, so they are always sent here.
655 *
656 * In hash, this is called if do_slb_fault returns error. Typically it is
657 * because the EA was outside the region allowed by software.
658 */
659DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
660{
661	int err = regs->result;
662
663	if (err == -EFAULT) {
664		if (user_mode(regs))
665			_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
666		else
667			bad_page_fault(regs, SIGSEGV);
668	} else if (err == -EINVAL) {
669		unrecoverable_exception(regs);
670	} else {
671		BUG();
672	}
673}
674#endif
arch/powerpc/mm/fault.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  PowerPC version
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *
  6 *  Derived from "arch/i386/mm/fault.c"
  7 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  8 *
  9 *  Modified by Cort Dougan and Paul Mackerras.
 10 *
 11 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 12 */
 13
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/sched/task_stack.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/pagemap.h>
 22#include <linux/ptrace.h>
 23#include <linux/mman.h>
 24#include <linux/mm.h>
 25#include <linux/interrupt.h>
 26#include <linux/highmem.h>
 27#include <linux/extable.h>
 28#include <linux/kprobes.h>
 29#include <linux/kdebug.h>
 30#include <linux/perf_event.h>
 31#include <linux/ratelimit.h>
 32#include <linux/context_tracking.h>
 33#include <linux/hugetlb.h>
 34#include <linux/uaccess.h>
 35
 36#include <asm/firmware.h>
 37#include <asm/page.h>
 38#include <asm/mmu.h>
 39#include <asm/mmu_context.h>
 40#include <asm/siginfo.h>
 41#include <asm/debug.h>
 42#include <asm/kup.h>
 43#include <asm/inst.h>
 44
 45
 46/*
 47 * do_page_fault error handling helpers
 48 */
 49
 50static int
 51__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
 52{
 53	/*
 54	 * If we are in kernel mode, bail out with a SEGV, this will
 55	 * be caught by the assembly which will restore the non-volatile
 56	 * registers before calling bad_page_fault()
 57	 */
 58	if (!user_mode(regs))
 59		return SIGSEGV;
 60
 61	_exception(SIGSEGV, regs, si_code, address);
 62
 63	return 0;
 64}
 65
 66static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
 67{
 68	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
 69}
 70
 71static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 72{
 73	struct mm_struct *mm = current->mm;
 74
 75	/*
 76	 * Something tried to access memory that isn't in our memory map.
 77	 * Fix it, but check if it's kernel or user first.
 78	 */
 79	mmap_read_unlock(mm);
 80
 81	return __bad_area_nosemaphore(regs, address, si_code);
 82}
 83
 84static noinline int bad_area(struct pt_regs *regs, unsigned long address)
 85{
 86	return __bad_area(regs, address, SEGV_MAPERR);
 87}
 88
 89#ifdef CONFIG_PPC_MEM_KEYS
 90static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 91				    struct vm_area_struct *vma)
 92{
 93	struct mm_struct *mm = current->mm;
 94	int pkey;
 95
 96	/*
 97	 * We don't try to fetch the pkey from page table because reading
 98	 * page table without locking doesn't guarantee stable pte value.
 99	 * Hence the pkey value that we return to userspace can be different
100	 * from the pkey that actually caused access error.
101	 *
102	 * It does *not* guarantee that the VMA we find here
103	 * was the one that we faulted on.
104	 *
105	 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
106	 * 2. T1   : set AMR to deny access to pkey=4, touches page
107	 * 3. T1   : faults...
108	 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
109	 * 5. T1   : enters fault handler, takes mmap_lock, etc...
110	 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
111	 *	     faulted on a pte with its pkey=4.
112	 */
113	pkey = vma_pkey(vma);
114
115	mmap_read_unlock(mm);
116
117	/*
118	 * If we are in kernel mode, bail out with a SEGV, this will
119	 * be caught by the assembly which will restore the non-volatile
120	 * registers before calling bad_page_fault()
121	 */
122	if (!user_mode(regs))
123		return SIGSEGV;
124
125	_exception_pkey(regs, address, pkey);
126
127	return 0;
128}
129#endif
130
131static noinline int bad_access(struct pt_regs *regs, unsigned long address)
132{
133	return __bad_area(regs, address, SEGV_ACCERR);
134}
135
136static int do_sigbus(struct pt_regs *regs, unsigned long address,
137		     vm_fault_t fault)
138{
139	if (!user_mode(regs))
140		return SIGBUS;
141
142	current->thread.trap_nr = BUS_ADRERR;
143#ifdef CONFIG_MEMORY_FAILURE
144	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
145		unsigned int lsb = 0; /* shutup gcc */
146
147		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
148			current->comm, current->pid, address);
149
150		if (fault & VM_FAULT_HWPOISON_LARGE)
151			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
152		if (fault & VM_FAULT_HWPOISON)
153			lsb = PAGE_SHIFT;
154
155		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
156		return 0;
157	}
158
159#endif
160	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
161	return 0;
162}
163
164static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
165				vm_fault_t fault)
166{
167	/*
168	 * Kernel page fault interrupted by SIGKILL. We have no reason to
169	 * continue processing.
170	 */
171	if (fatal_signal_pending(current) && !user_mode(regs))
172		return SIGKILL;
173
174	/* Out of memory */
175	if (fault & VM_FAULT_OOM) {
176		/*
177		 * We ran out of memory, or some other thing happened to us that
178		 * made us unable to handle the page fault gracefully.
179		 */
180		if (!user_mode(regs))
181			return SIGSEGV;
182		pagefault_out_of_memory();
183	} else {
184		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
185			     VM_FAULT_HWPOISON_LARGE))
186			return do_sigbus(regs, addr, fault);
187		else if (fault & VM_FAULT_SIGSEGV)
188			return bad_area_nosemaphore(regs, addr);
189		else
190			BUG();
191	}
192	return 0;
193}
194
195/* Is this a bad kernel fault ? */
196static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
197			     unsigned long address, bool is_write)
198{
199	int is_exec = TRAP(regs) == 0x400;
200
201	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
202	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
203				      DSISR_PROTFAULT))) {
204		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
205				    address >= TASK_SIZE ? "exec-protected" : "user",
206				    address,
207				    from_kuid(&init_user_ns, current_uid()));
208
209		// Kernel exec fault is always bad
210		return true;
211	}
212
213	if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
214	    !search_exception_tables(regs->nip)) {
215		pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
216				    address,
217				    from_kuid(&init_user_ns, current_uid()));
218	}
219
220	// Kernel fault on kernel address is bad
221	if (address >= TASK_SIZE)
222		return true;
223
224	// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
225	if (!search_exception_tables(regs->nip))
226		return true;
227
228	// Read/write fault in a valid region (the exception table search passed
229	// above), but blocked by KUAP is bad, it can never succeed.
230	if (bad_kuap_fault(regs, address, is_write))
231		return true;
232
233	// What's left? Kernel fault on user in well defined regions (extable
234	// matched), and allowed by KUAP in the faulting context.
235	return false;
236}
237
238#ifdef CONFIG_PPC_MEM_KEYS
239static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
240			      struct vm_area_struct *vma)
241{
242	/*
243	 * Make sure to check the VMA so that we do not perform
244	 * faults just to hit a pkey fault as soon as we fill in a
245	 * page. Only called for current mm, hence foreign == 0
246	 */
247	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
248		return true;
249
250	return false;
251}
252#endif
253
254static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
255{
256	/*
257	 * Allow execution from readable areas if the MMU does not
258	 * provide separate controls over reading and executing.
259	 *
260	 * Note: That code used to not be enabled for 4xx/BookE.
261	 * It is now, as I/D cache coherency for these is done at
262	 * set_pte_at() time and I see no reason why the test
263	 * below wouldn't be valid on those processors. This -may-
264	 * break programs compiled with a really old ABI though.
265	 */
266	if (is_exec) {
267		return !(vma->vm_flags & VM_EXEC) &&
268			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
269			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
270	}
271
272	if (is_write) {
273		if (unlikely(!(vma->vm_flags & VM_WRITE)))
274			return true;
275		return false;
276	}
277
278	if (unlikely(!vma_is_accessible(vma)))
279		return true;
280	/*
281	 * We should ideally do the vma pkey access check here. But in the
282	 * fault path, handle_mm_fault() also does the same check. To avoid
283	 * these multiple checks, we skip it here and handle access error due
284	 * to pkeys later.
285	 */
286	return false;
287}
288
289#ifdef CONFIG_PPC_SMLPAR
290static inline void cmo_account_page_fault(void)
291{
292	if (firmware_has_feature(FW_FEATURE_CMO)) {
293		u32 page_ins;
294
295		preempt_disable();
296		page_ins = be32_to_cpu(get_lppaca()->page_ins);
297		page_ins += 1 << PAGE_FACTOR;
298		get_lppaca()->page_ins = cpu_to_be32(page_ins);
299		preempt_enable();
300	}
301}
302#else
303static inline void cmo_account_page_fault(void) { }
304#endif /* CONFIG_PPC_SMLPAR */
305
306#ifdef CONFIG_PPC_BOOK3S
307static void sanity_check_fault(bool is_write, bool is_user,
308			       unsigned long error_code, unsigned long address)
309{
310	/*
311	 * Userspace trying to access kernel address, we get PROTFAULT for that.
312	 */
313	if (is_user && address >= TASK_SIZE) {
314		if ((long)address == -1)
315			return;
316
317		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
318				   current->comm, current->pid, address,
319				   from_kuid(&init_user_ns, current_uid()));
320		return;
321	}
322
323	/*
324	 * For hash translation mode, we should never get a
325	 * PROTFAULT. Any update to pte to reduce access will result in us
326	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
327	 * fault instead of DSISR_PROTFAULT.
328	 *
329	 * A pte update to relax the access will not result in a hash page table
330	 * entry invalidate and hence can result in DSISR_PROTFAULT.
331	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
332	 * the special !is_write in the below conditional.
333	 *
334	 * For platforms that don't support a coherent icache but do support a
335	 * per-page noexec bit, we set things up so that the D/I cache sync is
336	 * done via a fault. But that is handled by the low level hash fault
337	 * code (hash_page_do_lazy_icache()) and we should not reach here in
338	 * that case.
339	 *
340	 * For a wrong access that results in PROTFAULT, the vma->vm_flags
341	 * check in access_error() handles it, so we correctly fall through to
342	 * the bad_area handling.
343	 *
344	 * For embedded platforms with per-page exec support but no coherent
345	 * icache, we do get PROTFAULT and handle the D/I cache sync in
346	 * set_pte_at() while taking the noexec/prot fault. Hence this WARN_ON
347	 * is conditional on the server MMU.
348	 *
349	 * For radix, we can get a prot fault in the autonuma case, because
350	 * the radix page table will have the pages marked no-access for user.
351	 */
352	if (radix_enabled() || is_write)
353		return;
354
355	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
356}
357#else
358static void sanity_check_fault(bool is_write, bool is_user,
359			       unsigned long error_code, unsigned long address) { }
360#endif /* CONFIG_PPC_BOOK3S */
361
362/*
363 * Define the correct "is_write" bit in error_code based
364 * on the processor family
365 */
366#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
367#define page_fault_is_write(__err)	((__err) & ESR_DST)
368#define page_fault_is_bad(__err)	(0)
369#else
370#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
371#if defined(CONFIG_PPC_8xx)
372#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
373#elif defined(CONFIG_PPC64)
374#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
375#else
376#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
377#endif
378#endif
379
380/*
381 * For 600- and 800-family processors, the error_code parameter is DSISR
382 * for a data fault, SRR1 for an instruction fault. For 400-family processors
383 * the error_code parameter is ESR for a data fault, 0 for an instruction
384 * fault.
385 * For 64-bit processors, the error_code parameter is
386 *  - DSISR for a non-SLB data access fault,
387 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
388 *  - 0 for any SLB fault.
389 *
390 * The return value is 0 if the fault was handled, or the signal
391 * number if this is a kernel fault that can't be handled here.
392 */
393static int __do_page_fault(struct pt_regs *regs, unsigned long address,
394			   unsigned long error_code)
395{
396	struct vm_area_struct * vma;
397	struct mm_struct *mm = current->mm;
398	unsigned int flags = FAULT_FLAG_DEFAULT;
399 	int is_exec = TRAP(regs) == 0x400;
400	int is_user = user_mode(regs);
401	int is_write = page_fault_is_write(error_code);
402	vm_fault_t fault, major = 0;
403	bool kprobe_fault = kprobe_page_fault(regs, 11);
404
405	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
406		return 0;
407
408	if (unlikely(page_fault_is_bad(error_code))) {
409		if (is_user) {
410			_exception(SIGBUS, regs, BUS_OBJERR, address);
411			return 0;
412		}
413		return SIGBUS;
414	}
415
416	/* Additional sanity check(s) */
417	sanity_check_fault(is_write, is_user, error_code, address);
418
419	/*
420	 * The kernel should never take an execute fault nor should it
421	 * take a page fault to a kernel address or a page fault to a user
422	 * address outside of dedicated places
423	 */
424	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
425		return SIGSEGV;
426
427	/*
428	 * If we're in an interrupt, have no user context or are running
429	 * in a region with pagefaults disabled then we must not take the fault
430	 */
431	if (unlikely(faulthandler_disabled() || !mm)) {
432		if (is_user)
433			printk_ratelimited(KERN_ERR "Page fault in user mode"
434					   " with faulthandler_disabled()=%d"
435					   " mm=%p\n",
436					   faulthandler_disabled(), mm);
437		return bad_area_nosemaphore(regs, address);
438	}
439
440	/* We restore the interrupt state now */
441	if (!arch_irq_disabled_regs(regs))
442		local_irq_enable();
443
444	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
445
446	/*
447	 * We want to do this outside mmap_lock, because reading code around nip
448	 * can result in fault, which will cause a deadlock when called with
449	 * mmap_lock held
450	 */
451	if (is_user)
452		flags |= FAULT_FLAG_USER;
453	if (is_write)
454		flags |= FAULT_FLAG_WRITE;
455	if (is_exec)
456		flags |= FAULT_FLAG_INSTRUCTION;
457
458	/* When running in the kernel we expect faults to occur only to
459	 * addresses in user space.  All other faults represent errors in the
460	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
461	 * erroneous fault occurring in a code path which already holds mmap_lock
462	 * we will deadlock attempting to validate the fault against the
463	 * address space.  Luckily the kernel only validly references user
464	 * space from well defined areas of code, which are listed in the
465	 * exceptions table.
466	 *
467	 * As the vast majority of faults will be valid we will only perform
468	 * the source reference check when there is a possibility of a deadlock.
469	 * Attempt to lock the address space, if we cannot we then validate the
470	 * source.  If this is invalid we can skip the address space check,
471	 * thus avoiding the deadlock.
472	 */
473	if (unlikely(!mmap_read_trylock(mm))) {
474		if (!is_user && !search_exception_tables(regs->nip))
475			return bad_area_nosemaphore(regs, address);
476
477retry:
478		mmap_read_lock(mm);
479	} else {
480		/*
481		 * The above down_read_trylock() might have succeeded in
482		 * which case we'll have missed the might_sleep() from
483		 * down_read():
484		 */
485		might_sleep();
486	}
487
488	vma = find_vma(mm, address);
489	if (unlikely(!vma))
490		return bad_area(regs, address);
491
492	if (unlikely(vma->vm_start > address)) {
493		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
494			return bad_area(regs, address);
495
496		if (unlikely(expand_stack(vma, address)))
497			return bad_area(regs, address);
498	}
499
500#ifdef CONFIG_PPC_MEM_KEYS
501	if (unlikely(access_pkey_error(is_write, is_exec,
502				       (error_code & DSISR_KEYFAULT), vma)))
503		return bad_access_pkey(regs, address, vma);
504#endif /* CONFIG_PPC_MEM_KEYS */
505
506	if (unlikely(access_error(is_write, is_exec, vma)))
507		return bad_access(regs, address);
508
509	/*
510	 * If for any reason at all we couldn't handle the fault,
511	 * make sure we exit gracefully rather than endlessly redo
512	 * the fault.
513	 */
514	fault = handle_mm_fault(vma, address, flags, regs);
515
516	major |= fault & VM_FAULT_MAJOR;
517
518	if (fault_signal_pending(fault, regs))
519		return user_mode(regs) ? 0 : SIGBUS;
520
521	/*
522	 * Handle the retry right now, the mmap_lock has been released in that
523	 * case.
524	 */
525	if (unlikely(fault & VM_FAULT_RETRY)) {
526		if (flags & FAULT_FLAG_ALLOW_RETRY) {
527			flags |= FAULT_FLAG_TRIED;
528			goto retry;
529		}
530	}
531
532	mmap_read_unlock(current->mm);
533
534	if (unlikely(fault & VM_FAULT_ERROR))
535		return mm_fault_error(regs, address, fault);
536
537	/*
538	 * Major/minor page fault accounting.
539	 */
540	if (major)
541		cmo_account_page_fault();
542
543	return 0;
544}
545NOKPROBE_SYMBOL(__do_page_fault);
546
547int do_page_fault(struct pt_regs *regs, unsigned long address,
548		  unsigned long error_code)
549{
550	enum ctx_state prev_state = exception_enter();
551	int rc = __do_page_fault(regs, address, error_code);
552	exception_exit(prev_state);
553	return rc;
554}
555NOKPROBE_SYMBOL(do_page_fault);
556
557/*
558 * bad_page_fault is called when we have a bad access from the kernel.
559 * It is called from the DSI and ISI handlers in head.S and from some
560 * of the procedures in traps.c.
561 */
562void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
563{
564	const struct exception_table_entry *entry;
565	int is_write = page_fault_is_write(regs->dsisr);
566
567	/* Are we prepared to handle this fault?  */
568	if ((entry = search_exception_tables(regs->nip)) != NULL) {
569		regs->nip = extable_fixup(entry);
570		return;
571	}
572
573	/* kernel has accessed a bad area */
574
575	switch (TRAP(regs)) {
576	case 0x300:
577	case 0x380:
578	case 0xe00:
579		pr_alert("BUG: %s on %s at 0x%08lx\n",
580			 regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
581			 "Unable to handle kernel data access",
582			 is_write ? "write" : "read", regs->dar);
583		break;
584	case 0x400:
585	case 0x480:
586		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
587			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
588		break;
589	case 0x600:
590		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
591			 regs->dar);
592		break;
593	default:
594		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
595			 regs->dar);
596		break;
597	}
598	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
599		regs->nip);
600
601	if (task_stack_end_corrupted(current))
602		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
603
604	die("Kernel access of bad area", regs, sig);
605}