// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/kfence.h>

#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

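/*
 * copy_from_kernel_nofault() may only probe kernel addresses: the
 * source must lie at or above TASK_SIZE, and the range must not wrap
 * past the end of the address space.
 */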
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
	unsigned long addr = (unsigned long)unsafe_src;

	return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}

#ifdef CONFIG_MMU

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pgd = pgd_offset(mm, addr);
	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		if (!pte)
			break;

		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while (0);

	pr_cont("\n");
}
#else					/* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

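/*
 * A fault counts as a write only if FSR_WRITE is set and the abort was
 * not raised by a cache maintenance operation (FSR_CM).
 */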
static inline bool is_write_fault(unsigned int fsr)
{
	return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}

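/*
 * True for translation faults in both descriptor formats: the
 * no-level status under LPAE, or the L1/L2 status codes otherwise.
 */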
static inline bool is_translation_fault(unsigned int fsr)
{
	int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
	if ((fs & FS_MMU_NOLL_MASK) == FS_TRANS_NOLL)
		return true;
#else
	if (fs == FS_L1_TRANS || fs == FS_L2_TRANS)
		return true;
#endif
	return false;
}

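/*
 * Report an unrecoverable kernel-mode fault: dump the page tables for
 * the faulting address, oops, and kill the current task.
 */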
static void die_kernel_fault(const char *msg, struct mm_struct *mm,
			     unsigned long addr, unsigned int fsr,
			     struct pt_regs *regs)
{
	bust_spinlocks(1);
	pr_alert("8<--- cut here ---\n");
	pr_alert("Unable to handle kernel %s at virtual address %08lx when %s\n",
		 msg, addr, fsr & FSR_LNX_PF ? "execute" :
		 fsr & FSR_WRITE ? "write" : "read");

	show_pte(KERN_ALERT, mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	const char *msg;
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (is_translation_fault(fsr) &&
		    kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, mm, addr, fsr, regs);
}

/*
 * Something tried to access memory that isn't in our memory map.
 * User mode accesses just cause a SIGSEGV.
 */
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
		int code, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (addr > TASK_SIZE)
		harden_branch_predictor();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		pr_err("8<--- cut here ---\n");
		pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(KERN_ERR, tsk->mm, addr);
		show_regs(regs);
	}
#endif
#ifndef CONFIG_KUSER_HELPERS
	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
		printk_ratelimited(KERN_DEBUG
				   "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				   tsk->comm, addr);
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr);
}

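/*
 * A fault outside any valid mapping: raise SIGSEGV for user mode,
 * otherwise hand off to the kernel fault path.
 */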
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

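/*
 * True for permission faults in both descriptor formats, mirroring
 * is_translation_fault() above.
 */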
static inline bool is_permission_fault(unsigned int fsr)
{
	int fs = fsr_fs(fsr);
#ifdef CONFIG_ARM_LPAE
	if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL)
		return true;
#else
	if (fs == FS_L1_PERM || fs == FS_L2_PERM)
		return true;
#endif
	return false;
}

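/*
 * Main page fault handler: user faults first try the lockless
 * per-VMA path, then fall back to the mmap_lock slow path.
 */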
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	unsigned long vm_flags = VM_ACCESS_FLAGS;

	if (kprobe_page_fault(regs, fsr))
		return 0;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	if (is_write_fault(fsr)) {
		flags |= FAULT_FLAG_WRITE;
		vm_flags = VM_WRITE;
	}

	if (fsr & FSR_LNX_PF) {
		vm_flags = VM_EXEC;

		if (is_permission_fault(fsr) && !user_mode(regs))
			die_kernel_fault("execution of memory",
					 mm, addr, fsr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

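	/* Fast path: attempt the fault under the per-VMA lock. */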
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:

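	/* Slow path: take the mmap read lock and retry the fault. */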
retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto bad_area;
	}

	/*
	 * OK, we have a good vm_area for this memory access; check that
	 * the permissions on the VMA allow the fault which occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		fault = VM_FAULT_BADACCESS;
	else
		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return 0;

	if (!(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	mmap_read_unlock(mm);
done:

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

bad_area:
	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page table contains the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);

	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() on the entry that actually corresponds to the address,
	 * not on the first of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

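/*
 * One entry per fault status value: the handler to try, plus the
 * signal and si_code to raise if the handler leaves the fault
 * unhandled.
 */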
struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

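/*
 * Install a custom handler, signal and name for data aborts with
 * fault status 'nr'.
 */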
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(KERN_ALERT, current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

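/*
 * As hook_fault_code(), but for the prefetch abort (IFSR) table.
 */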
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

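/*
 * Dispatch a prefetch abort to the relevant handler.
 */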
asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

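/*
 * Point the async external abort slot at the benign handler above,
 * unmask aborts so any pending one is taken now, then restore do_bad.
 */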
void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif