/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>	/* For VMALLOC_END */
#include <linux/kdebug.h>

int show_unhandled_signals = 1;

/*
 * This routine handles page faults.  It determines the faulting address
 * and the kind of problem, then passes it off to the appropriate routine.
 */
static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
	unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
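	/* Width of an address in hex digits (8 on 32-bit, 16 on 64-bit), for the %0*lx formats below. */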
	const int field = sizeof(unsigned long) * 2;
	int si_code;
	vm_fault_t fault;
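	/* FAULT_FLAG_DEFAULT: allow retry, killable, interruptible. */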
	unsigned int flags = FAULT_FLAG_DEFAULT;

	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

#if 0
	printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
	       current->comm, current->pid, field, address, write,
	       field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
	/*
	 * Give any registered kprobes fault handler a chance to claim
	 * the fault first.
	 */
	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return;
#endif

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif

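	/*
	 * On 64-bit kernels a vmalloc-range fault heads straight for the
	 * fixup/oops path; only 32-bit kernels lazily synchronize the
	 * top-level kernel page-table entries in vmalloc_fault below.
	 */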
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
		goto VMALLOC_FAULT_TARGET;
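	/* Module space, when mapped as a separate region, is demand-faulted the same way. */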
#ifdef MODULE_START
	if (unlikely(address >= MODULE_START && address < MODULE_END))
		goto VMALLOC_FAULT_TARGET;
#endif

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

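	/* Count the fault once, before the retry loop, so retries aren't double-counted. */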
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
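	/* Take the mmap lock for reading; the VMA layout must not change under us. */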
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
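	/*
	 * find_vma() returns the first VMA ending above the address; it may
	 * still start above it, in which case only a stack VMA may be grown
	 * down to cover the access.
	 */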
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it.
 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
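		/*
		 * With RIXI the hardware reports execute (XI) and read (RI)
		 * inhibit violations separately, so check VM_EXEC and VM_READ
		 * explicitly instead of treating everything as a plain read.
		 */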
		if (cpu_has_rixi) {
			if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ) &&
			    exception_epc(regs) != address) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),
					  current->comm, current->pid,
					  field, address, write,
					  field, regs->cp0_epc);
#endif
				goto bad_area;
			}
		} else {
			if (unlikely(!vma_is_accessible(vma)))
				goto bad_area;
		}
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

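	/*
	 * If a signal arrived during the fault (a fatal one, or any signal
	 * for a user-mode fault), bail out; handle_mm_fault() has already
	 * dropped the mmap lock on the VM_FAULT_RETRY path.
	 */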
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
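	/*
	 * Major/minor fault accounting happens inside handle_mm_fault()
	 * now that it is passed regs; only the retry handling is left here.
	 */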
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

/*
 * Something tried to access memory that isn't in our memory map.
 * Fix it, but check if it's kernel or user first.
 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.cp0_badvaddr = address;
		tsk->thread.error_code = write;
		if (show_unhandled_signals &&
		    unhandled_signal(tsk, SIGSEGV) &&
		    __ratelimit(&ratelimit_state)) {
			pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
				tsk->comm,
				write ? "write access to" : "read access from",
				field, address);
			pr_info("epc = %0*lx in", field,
				(unsigned long) regs->cp0_epc);
			print_vma_addr(KERN_CONT " ", regs->cp0_epc);
			pr_cont("\n");
			pr_info("ra = %0*lx in", field,
				(unsigned long) regs->regs[31]);
			print_vma_addr(KERN_CONT " ", regs->regs[31]);
			pr_cont("\n");
		}
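		/* Cause register bits 6:2 hold the ExcCode for this exception. */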
		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		current->thread.cp0_baduaddr = address;
		return;
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
	       "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
	       raw_smp_processor_id(), field, address, field, regs->cp0_epc,
	       field, regs->regs[31]);
	die("Oops", regs);

out_of_memory:
	/*
	 * We ran out of memory: call the OOM killer and return to
	 * userspace, which will retry the fault or kill us if we
	 * got oom-killed.
	 */
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a SIGBUS, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
	printk("do_page_fault() #3: sending SIGBUS to %s for "
	       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
	       tsk->comm,
	       write ? "write access to" : "read access from",
	       field, address,
	       field, (unsigned long) regs->cp0_epc,
	       field, (unsigned long) regs->regs[31]);
#endif
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	tsk->thread.cp0_badvaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

	return;
#ifndef CONFIG_64BIT
vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

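		/*
		 * Walk both tables in step, copying missing entries from the
		 * reference table; on 32-bit MIPS the intermediate p4d/pud
		 * levels are folded, so much of this collapses onto the pgd
		 * entry itself.
		 */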
		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif
}

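/*
 * Entry point from the exception handler: bracket the real work with
 * exception_enter()/exception_exit() so context tracking (NO_HZ_FULL)
 * knows the CPU has left user mode.
 */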
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
	unsigned long write, unsigned long address)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	__do_page_fault(regs, write, address);
	exception_exit(prev_state);
}