/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}
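
/*
 * Illustrative only: given the printks above, the dump for a bad kernel
 * pointer comes out along the lines of (addresses and entry values here
 * are made up for the example):
 *
 *	pgd = 8c40c000
 *	[c0845100] *pgd=000000001041d001, *pte=0000000000000000
 *
 * with *pud/*pmd entries printed only on configurations where those
 * levels are not folded (PTRS_PER_PUD/PTRS_PER_PMD != 1).
 */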

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}
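
/*
 * Note on the lazy scheme implemented above: vmalloc/ioremap updates only
 * touch the reference tables in init_mm.pgd, and each process's page
 * tables pick the new entries up here, on first fault, by copying the
 * missing pud/pmd slots across. Only the upper levels are copied; once
 * the pmd entries match, the pte level is shared with init_mm, so a
 * single copy per pmd suffices for all subsequent accesses.
 */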

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif
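
/*
 * Presumably the point of the wider limit: store queue accesses are
 * translated through the TLB like ordinary virtual addresses, so with
 * CONFIG_SH_STORE_QUEUES the kernel fault window is extended up to
 * P3_ADDR_MAX so that vmalloc_fault() can also service TLB misses taken
 * on the store queue mappings, which sit beyond VMALLOC_END.
 */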

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
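
/*
 * Calling convention note: vmalloc_fault() returns 0 when it has fixed
 * the fault up by syncing against init_mm, and -1 when the address is
 * out of range or the reference tables have no mapping either, in which
 * case do_page_fault() falls through to the normal error paths.
 */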

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}
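
/*
 * For reference, the message assembled above reads, e.g. (address is
 * illustrative):
 *
 *	BUG: unable to handle kernel NULL pointer dereference at 00000004
 *	PC: <resolved symbol for regs->pc>
 *
 * followed by the show_pte() walk of the current page tables.
 */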

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}
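
/*
 * mm_fault_error() returns 1 when the fault has been fully dealt with
 * (signal queued, OOM killer invoked, or no_context() taken) and the
 * caller must bail out; it returns 0 only for a bare VM_FAULT_RETRY,
 * in which case do_page_fault() retries with FAULT_FLAG_TRIED. In all
 * the terminal cases mmap_sem has already been dropped by the time we
 * return.
 */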

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
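
/*
 * Summarising the checks above (1 = access error):
 *
 *	write fault, vma lacks VM_WRITE			-> 1
 *	ITLB miss (exec), vma lacks VM_EXEC		-> 1
 *	read, vma lacks VM_READ|VM_EXEC|VM_WRITE	-> 1
 *	anything else					-> 0
 */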

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}