Linux v5.4: arch/sh/mm/fault.c
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
	    set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}
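
Aside: a minimal userspace sketch (not part of the kernel file above; the file name segv_demo.c is made up for illustration) of what the SIGSEGV paths look like from the receiving end. In the code above, bad_area() reports SEGV_MAPERR (no mapping at all) and bad_area_access_error() reports SEGV_ACCERR (mapping exists but the access is not permitted), with the faulting address delivered in si_addr via force_sig_fault(). Note that printf() in a signal handler is not async-signal-safe, which is tolerable only in a throwaway demo like this.

/* segv_demo.c: build with cc -o segv_demo segv_demo.c */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void handler(int sig, siginfo_t *si, void *ctx)
{
	/* si_addr carries the address the kernel passed to force_sig_fault() */
	printf("SIGSEGV at %p, si_code=%s\n", si->si_addr,
	       si->si_code == SEGV_MAPERR ? "SEGV_MAPERR" :
	       si->si_code == SEGV_ACCERR ? "SEGV_ACCERR" : "other");
	siglongjmp(env, 1);	/* also restores the saved signal mask */
}

int main(void)
{
	struct sigaction sa;
	char *ro;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	if (!sigsetjmp(env, 1))
		*(volatile int *)8 = 1;		/* unmapped page: expect SEGV_MAPERR */

	ro = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ro != MAP_FAILED && !sigsetjmp(env, 1))
		ro[0] = 1;			/* read-only page: expect SEGV_ACCERR */

	return 0;
}
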
Linux v6.9.4: arch/sh/mm/fault.c
/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	pr_alert("pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
		 (u64)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		if (PTRS_PER_P4D != 1)
			pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
			        (u64)p4d_val(*p4d));

		if (p4d_none(*p4d))
			break;

		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
				(u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
				(u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
			(u64)pte_val(*pte));
	} while (0);

	pr_cont("\n");
}

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
	    set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
		 address < PAGE_SIZE ? "NULL pointer dereference"
				     : "paging request",
		 address);
	pr_alert("PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_lock first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		mmap_read_unlock(current->mm);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return the
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);
}
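
Aside: the show_pte() walk above resolves a virtual address level by level (pgd, then p4d/pud/pmd where a level is not folded, then pte). The result of that same translation is exported to userspace through /proc/PID/pagemap: one 64-bit entry per virtual page, with bit 63 flagging "present" and bits 0-54 holding the page frame number (the kernel reports the PFN as 0 to readers without CAP_SYS_ADMIN). A minimal sketch, assuming a Linux host and a made-up file name pagemap_demo.c; run it as root to see a nonzero PFN:

/* pagemap_demo.c: build with cc -o pagemap_demo pagemap_demo.c */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int target = 42;			/* any mapped object will do */
	uintptr_t vaddr = (uintptr_t)&target;
	uint64_t entry;
	off_t off;
	int fd;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* one u64 entry per virtual page, indexed by page number */
	off = (off_t)(vaddr / page) * sizeof(entry);
	if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry)) {
		perror("pread");
		return 1;
	}
	close(fd);

	printf("vaddr %#lx: present=%d pfn=%#llx\n",
	       (unsigned long)vaddr, (int)(entry >> 63),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}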