v5.4
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

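/*
 * Send the given fault signal (SIGSEGV or SIGBUS) to the current task,
 * with the faulting address filled in as si_addr.
 */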
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

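/*
 * Sync the pgd/pud/pmd entries covering 'address' from the reference
 * page table (init_mm.pgd) into 'pgd'. Returns the synced kernel pmd,
 * or NULL if the fault cannot be resolved this way.
 */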
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

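/*
 * Print the oops banner, the faulting PC and the page table walk for
 * an unhandled kernel-mode fault.
 */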
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

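/*
 * Release mmap_sem and send SIGBUS for the faulting address; kernel
 * mode faults are handed to no_context() first.
 */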
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

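/*
 * Pick apart the error bits returned by handle_mm_fault(): fatal
 * signals, OOM, SIGBUS and SIGSEGV. Returns 1 if the error was fully
 * handled here, 0 if the caller should continue.
 */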
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

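/*
 * Check the faulting access against the vma protections; returns 1
 * if the write/exec/read is not permitted, 0 if it is.
 */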
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

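/* Anything at or above TASK_SIZE is treated as a kernel-space fault. */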
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}
v3.15
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

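/*
 * Give a registered kprobe fault handler first crack at a kernel-mode
 * fault; returns 1 if the kprobe handled it.
 */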
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

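/*
 * Fill out a siginfo for the fault and force the signal on 'tsk'.
 */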
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;

	force_sig_info(si_signo, &info, tsk);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}