/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

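/*
 * Report a fault the kernel cannot recover from: log the offending
 * address together with the current task's MMU context and page-table
 * root, then die via die_if_kernel() when the fault came from kernel
 * mode.
 */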
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
	       (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
		(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

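/*
 * Called from assembly when a fault hits one of the user-copy
 * routines (the _to_ / _from_ macros referenced below).
 * search_extables_range() classifies the exception-table entry
 * covering ret_pc; for entries that only fix up one direction we
 * decode the faulting instruction (bit 21 is the store bit of the
 * op3 field, op3 == 0x0f is SWAP) to decide whether the fixup
 * handler can take it.  If nothing can, the fault is fatal.
 */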
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

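/*
 * Rate-limited report of a user-space fault, in the same style as the
 * x86 segfault message: command, pid, fault address, pc, return
 * address and stack pointer.
 */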
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

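/*
 * Fill in the siginfo for a fault and force-deliver the signal to the
 * current task, logging it first when that is enabled.
 */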
static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info(sig, &info, current);
}

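/*
 * Recover the address to report in si_addr.  For a text fault that is
 * simply the pc; for a data fault we fetch the faulting load/store
 * and compute its effective address.
 */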
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

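/*
 * Main fault entry point from the trap handlers.  @text_fault means
 * the fault was on an instruction fetch, in which case the faulting
 * address is the pc itself; @write means the access was a store.
 */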
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

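	/*
	 * Take mmap_sem and look up the vma covering the fault.  When
	 * handle_mm_fault() asks for a retry we come back to this
	 * label with FAULT_FLAG_ALLOW_RETRY cleared, so the second
	 * attempt blocks until the page is actually ready.
	 */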
retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned int __memset_start[];
			extern const unsigned int __memset_end[];
			extern const unsigned int __csum_partial_copy_start[];
			extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
			       regs->pc, fixup, g2);
#endif
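			/*
			 * The fixup code for memset() and
			 * csum_partial_copy() expects the fault
			 * address and the faulting pc, passed in
			 * %i4 and %i5.
			 */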
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/*
 * This always deals with user addresses.  It is used by the register
 * window fault handlers below to fault in the pages backing the
 * user's stack, so unlike do_sparc_fault() it reports failures
 * against tsk->thread.kregs rather than a passed-in trap frame.
 */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, flags)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

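/*
 * The SPARC ABI requires %sp to stay doubleword (8 byte) aligned;
 * a misaligned stack pointer earns the task a SIGILL.
 */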
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

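/*
 * A register-window spill/fill touched a user stack page that is not
 * mapped in.  The window save area is 16 words (0x40 bytes), so if it
 * crosses a page boundary (sp + 0x38 lands on a different page than
 * sp) both pages must be faulted in.  An overflow writes the window
 * out and therefore needs write access; underflow and window-return
 * only read it.
 */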
void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}