v3.15
 
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, fsr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
	return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk(KERN_ALERT "pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk(KERN_ALERT "[%08lx] *pgd=%08llx",
			addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		printk(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	printk("\n");
}
#else					/* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	printk(KERN_ALERT
		"Unable to handle kernel %s at virtual address %08lx\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map.
 * User mode accesses just cause a SIGSEGV.
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
		unsigned int fsr, unsigned int sig, int code,
		struct pt_regs *regs)
{
	struct siginfo si;

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(tsk->mm, addr);
		show_regs(regs);
	}
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	si.si_signo = sig;
	si.si_errno = 0;
	si.si_code = code;
	si.si_addr = (void __user *)addr;
	force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if (fsr & FSR_WRITE)
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	int fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault, sig, code;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (fsr & FSR_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded, in
		 * which case we'll have missed the might_sleep() from
		 * down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(tsk, addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() for the entry that really corresponds to the address,
	 * not for the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
	struct siginfo info;

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
	struct siginfo info;

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	info.si_signo = inf->sig;
	info.si_errno = 0;
	info.si_code  = inf->code;
	info.si_addr  = (void __user *)addr;
	arm_notify_die("", regs, &info, ifsr, 0);
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif
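
Note (not part of fault.c): exceptions_init() above is the in-tree pattern for installing entries in the fsr_info dispatch table. As a minimal sketch of the same pattern from platform code, the handler and fault status number (16) below are hypothetical; only hook_fault_code(), its signature, and the "return 0 means handled" convention come from the listing above.

/* Hypothetical board code: hook a data-abort status the same way
 * exceptions_init() does. Status 16 and the handler are illustrative. */
static int board_external_abort(unsigned long addr, unsigned int fsr,
				struct pt_regs *regs)
{
	pr_err("board: external abort at 0x%08lx (fsr 0x%03x)\n", addr, fsr);
	return 0;	/* 0 = handled; non-zero falls through to arm_notify_die() */
}

static int __init board_hook_aborts(void)
{
	/* If the handler declines, deliver SIGBUS/BUS_ADRERR to the task. */
	hook_fault_code(16, board_external_abort, SIGBUS, BUS_ADRERR,
			"board external abort");
	return 0;
}
arch_initcall(board_hook_aborts);
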
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	printk("%spgd = %p\n", lvl, mm->pgd);
	pgd = pgd_offset(mm, addr);
	printk("%s[%08lx] *pgd=%08llx", lvl, addr, (long long)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_map(pmd, addr);
		pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
		pr_cont(", *ppte=%08llx",
		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
		pte_unmap(pte);
	} while(0);

	pr_cont("\n");
}
#else					/* CONFIG_MMU */
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif					/* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		  struct pt_regs *regs)
{
	/*
	 * Are we prepared to handle this kernel fault?
	 */
	if (fixup_exception(regs))
		return;

	/*
	 * No handler, we'll have to terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("8<--- cut here ---\n");
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(KERN_ALERT, mm, addr);
	die("Oops", regs, fsr);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map.
 * User mode accesses just cause a SIGSEGV.
 */
static void
__do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig,
		int code, struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (addr > TASK_SIZE)
		harden_branch_predictor();

#ifdef CONFIG_DEBUG_USER
	if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
	    ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
		pr_err("8<--- cut here ---\n");
		pr_err("%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
		       tsk->comm, sig, addr, fsr);
		show_pte(KERN_ERR, tsk->mm, addr);
		show_regs(regs);
	}
#endif
#ifndef CONFIG_KUSER_HELPERS
	if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000))
		printk_ratelimited(KERN_DEBUG
				   "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n",
				   tsk->comm, addr);
#endif

	tsk->thread.address = addr;
	tsk->thread.error_code = fsr;
	tsk->thread.trap_no = 14;
	force_sig_fault(sig, code, (void __user *)addr);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (user_mode(regs))
		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
	else
		__do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;

	return vma->vm_flags & mask ? false : true;
}

static vm_fault_t __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
		unsigned int flags, struct task_struct *tsk)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	vma = find_vma(mm, addr);
	fault = VM_FAULT_BADMAP;
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > addr))
		goto check_stack;

	/*
	 * Ok, we have a good vm_area for this
	 * memory access, so we can handle it.
	 */
good_area:
	if (access_error(fsr, vma)) {
		fault = VM_FAULT_BADACCESS;
		goto out;
	}

	return handle_mm_fault(vma, addr & PAGE_MASK, flags);

check_stack:
	/* Don't allow expansion below FIRST_USER_ADDRESS */
	if (vma->vm_flags & VM_GROWSDOWN &&
	    addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
		goto good_area;
out:
	return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	int sig, code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (kprobe_page_fault(regs, fsr))
		return 0;

	tsk = current;
	mm  = tsk->mm;

	/* Enable interrupts if they were enabled in the parent context. */
	if (interrupts_enabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		flags |= FAULT_FLAG_WRITE;

	/*
	 * As per x86, we may deadlock here.  However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
			goto no_context;
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded, in
		 * which case we'll have missed the might_sleep() from
		 * down_read().
		 */
		might_sleep();
#ifdef CONFIG_DEBUG_VM
		if (!user_mode(regs) &&
		    !search_exception_tables(regs->ARM_pc))
			goto no_context;
#endif
	}

	fault = __do_page_fault(mm, addr, fsr, flags, tsk);

	/* If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					regs, addr);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					regs, addr);
		}
		if (fault & VM_FAULT_RETRY) {
			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);

	/*
	 * Handle the "normal" case first - VM_FAULT_MAJOR
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we
	 * have no context to handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed)
		 */
		pagefault_out_of_memory();
		return 0;
	}

	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to
		 * successfully fix up this page fault.
		 */
		sig = SIGBUS;
		code = BUS_ADRERR;
	} else {
		/*
		 * Something tried to access memory that
		 * isn't in our memory map.
		 */
		sig = SIGSEGV;
		code = fault == VM_FAULT_BADACCESS ?
			SEGV_ACCERR : SEGV_MAPERR;
	}

	__do_user_fault(addr, fsr, sig, code, regs);
	return 0;

no_context:
	__do_kernel_fault(mm, addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fix up the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	unsigned int index;
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	if (addr < TASK_SIZE)
		return do_page_fault(addr, fsr, regs);

	if (user_mode(regs))
		goto bad_area;

	index = pgd_index(addr);

	pgd = cpu_get_pgd() + index;
	pgd_k = init_mm.pgd + index;

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	pud = pud_offset(pgd, addr);
	pud_k = pud_offset(pgd_k, addr);

	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
	/*
	 * Only one hardware entry per PMD with LPAE.
	 */
	index = 0;
#else
	/*
	 * On ARM one Linux PGD entry contains two hardware entries (see page
	 * tables layout in pgtable.h). We normally guarantee that we always
	 * fill both L1 entries. But create_mapping() doesn't follow the rule.
	 * It can create individual L1 entries, so here we have to check
	 * pmd_none() for the entry that really corresponds to the address,
	 * not for the first entry of the pair.
	 */
	index = (addr >> SECTION_SHIFT) & 1;
#endif
	if (pmd_none(pmd_k[index]))
		goto bad_area;

	copy_pmd(pmd, pmd_k);
	return 0;

bad_area:
	do_bad_area(addr, fsr, regs);
	return 0;
}
#else					/* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
		     struct pt_regs *regs)
{
	return 0;
}
#endif					/* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	do_bad_area(addr, fsr, regs);
	return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	return 1;
}

struct fsr_info {
	int	(*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
		BUG();

	fsr_info[nr].fn   = fn;
	fsr_info[nr].sig  = sig;
	fsr_info[nr].code = code;
	fsr_info[nr].name = name;
}

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);

	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
		return;

	pr_alert("8<--- cut here ---\n");
	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
		inf->name, fsr, addr);
	show_pte(KERN_ALERT, current->mm, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       fsr, 0);
}

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
		 int sig, int code, const char *name)
{
	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
		BUG();

	ifsr_info[nr].fn   = fn;
	ifsr_info[nr].sig  = sig;
	ifsr_info[nr].code = code;
	ifsr_info[nr].name = name;
}

asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);

	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
		return;

	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
		inf->name, ifsr, addr);

	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
		       ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
				      struct pt_regs *regs)
{
	pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
		"first unmask, this is most likely caused by a "
		"firmware/bootloader bug.\n", fsr);

	return 0;
}

void __init early_abt_enable(void)
{
	fsr_info[FSR_FS_AEA].fn = early_abort_handler;
	local_abt_enable();
	fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
				"I-cache maintenance fault");
	}

	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
		/*
		 * TODO: Access flag faults introduced in ARMv6K.
		 * Runtime check for 'K' extension is needed
		 */
		hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
		hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
				"section access flag fault");
	}

	return 0;
}

arch_initcall(exceptions_init);
#endif
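
End note (a standalone userspace sketch, not part of fault.c): in both versions, do_page_fault() maps VM_FAULT_BADACCESS to SIGSEGV/SEGV_ACCERR, an unmapped address to SIGSEGV/SEGV_MAPERR, and VM_FAULT_SIGBUS to SIGBUS/BUS_ADRERR, and __do_user_fault() delivers that pair in siginfo. The test program below assumes only POSIX sigaction() and mmap(); it triggers the SEGV_ACCERR path by writing to a read-only page (access_error() refuses the VM_WRITE check). Touching an unmapped address instead would report SEGV_MAPERR.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Report the si_code the kernel's __do_user_fault() delivered. */
static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	const char *code =
		si->si_code == SEGV_MAPERR ? "SEGV_MAPERR (no mapping)" :
		si->si_code == SEGV_ACCERR ? "SEGV_ACCERR (bad permissions)" :
		"other";
	/* printf() is not async-signal-safe; kept here for demo brevity */
	printf("signal %d at %p: %s\n", sig, si->si_addr, code);
	_exit(0);
}

int main(void)
{
	struct sigaction sa;
	char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	/* A read-only anonymous mapping: writing to it makes access_error()
	 * fail the VM_WRITE check, i.e. VM_FAULT_BADACCESS -> SEGV_ACCERR. */
	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* faults here */
	return 0;
}
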