// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_lock is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes out from
 * under us, we disable interrupts around the time that we look at the
 * pte; kswapd will then have to wait for its SMP IPI response from us
 * (vmtruncate likewise).  This saves us having to take the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;

        if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
                goto out;
        p4dp = p4d_offset(pgdp, tpc);
        if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
                goto out;
        pudp = pud_offset(p4dp, tpc);
        if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
                goto out;

        /* This disables preemption for us as well. */
        local_irq_disable();

        pmdp = pmd_offset(pudp, tpc);
again:
        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
                goto out_irq_enable;

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (is_hugetlb_pmd(*pmdp)) {
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
                pa += tpc & ~HPAGE_MASK;

                /* Use phys bypass so we don't pollute dtlb/dcache. */
                __asm__ __volatile__("lduwa [%1] %2, %0"
                                     : "=r" (insn)
                                     : "r" (pa), "i" (ASI_PHYS_USE_EC));
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, tpc);
                if (!ptep)
                        goto again;
                pte = *ptep;
                if (pte_present(pte)) {
                        pa  = (pte_pfn(pte) << PAGE_SHIFT);
                        pa += (tpc & ~PAGE_MASK);

                        /* Use phys bypass so we don't pollute dtlb/dcache. */
                        __asm__ __volatile__("lduwa [%1] %2, %0"
                                             : "=r" (insn)
                                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
                }
                pte_unmap(ptep);
        }
out_irq_enable:
        local_irq_enable();
out:
        return insn;
}

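/*
 * Worked example of the probe above (values illustrative, not from the
 * original source): with sparc64's 8K base pages (PAGE_SHIFT == 13), a
 * user tpc of 0x11008 whose pte maps pfn 0x1234 yields
 *
 *      pa = (0x1234 << 13) | (0x11008 & 0x1fff) = 0x2469008
 *
 * and the "lduwa ... ASI_PHYS_USE_EC" reads the instruction word
 * straight from physical memory, bypassing the D-TLB and D-cache so the
 * probe leaves no translation or cache footprint.  Disabling IRQs is
 * what makes this safe without the pte lock: a remote CPU tearing down
 * this mapping must first get our ack to its TLB-flush IPI, which
 * cannot happen while our interrupts are off.
 */
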
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->tpc);

        printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned long fault_addr, unsigned int insn,
                             int fault_code)
{
        unsigned long addr;

        if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
        } else {
                /* If we were able to probe the faulting instruction, use it
                 * to compute a precise fault address.  Otherwise use the fault
                 * time provided address which may only have page granularity.
                 */
                if (insn)
                        addr = compute_effective_address(regs, insn, 0);
                else
                        addr = fault_addr;
        }

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code, addr, current);

        force_sig_fault(sig, code, (void __user *) addr);
}

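/*
 * Illustrative example (instruction and register values assumed, not
 * from the source): an "stx %o0, [%g1 + 0x18]" that faults with
 * %g1 == 0x20000 may be reported by the MMU with only page granularity,
 * but once the instruction word is available
 * compute_effective_address() can reconstruct the exact target,
 * 0x20018, which is what lands in si_addr above.
 */
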
static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

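/*
 * Note on the (regs->tpc & 0x3) test above: SPARC instructions are
 * always 32 bits wide and word aligned, so a tpc of zero or with either
 * low bit set cannot name a real instruction and the helper gives up
 * rather than probe a bogus address.
 */
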
static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                                      int fault_code, unsigned int insn,
                                      unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (thus insn is zero), that
         * is fine.  We will just gun down the process with a signal
         * in that case.
         */

        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}

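/*
 * Worked example of the no-fault-load decode above (encodings per the
 * SPARC V9 manual; the sample instruction is assumed for illustration).
 * All format-3 memory ops have op == 3, i.e. bits 31:30 set, and bit 23
 * selects the alternate-space forms, which is what
 * (insn & 0xc0800000) == 0xc0800000 tests.  For "lda [%g1] 0x82, %g2"
 * the i bit (bit 13, 0x2000) is clear, so the ASI comes from the
 * immediate field in bits 12:5; with i set it would come from the %asi
 * register, held in bits 31:24 of %tstate.  The (asi & 0xf2) == 0x82
 * test then accepts the no-fault ASI family: ASI_PNF (0x82), ASI_SNF
 * (0x83) and their little-endian variants ASI_PNFL (0x8a) / ASI_SNFL
 * (0x8b).  Bit 24 distinguishes the FP forms (op3 >= 0x20), routed to
 * handle_ldf_stq(), from the integer ones handled by handle_ld_nf().
 */
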
static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
        static int times;

        if (times++ < 10)
                printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
                       "64-bit TPC [%lx]\n",
                       current->comm, current->pid,
                       regs->tpc);
        show_regs(regs);
}

asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code;
        vm_fault_t fault;
        unsigned long address, mm_rss;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        fault_code = get_thread_fault_code();

        if (kprobe_page_fault(regs, 0))
                goto exit_exception;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();

        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV)) {
                        if (unlikely((regs->tpc >> 32) != 0)) {
                                bogus_32bit_fault_tpc(regs);
                                goto intr_or_no_mm;
                        }
                }
                if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
        }
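
        /* For a compat (TIF_32BIT) task everything must fit in the low
         * 4GB: e.g. a reported tpc of 0x100001234 (values illustrative)
         * has a nonzero upper half and can only be garbage, so it is
         * logged and the fault treated as unhandleable rather than used
         * to walk the 32-bit address space.
         */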

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        goto exit_exception;
                }
        } else
                flags |= FAULT_FLAG_USER;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto intr_or_no_mm;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (!mmap_read_trylock(mm)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }

retry:
                mmap_read_lock(mm);
        }

        if (fault_code & FAULT_CODE_BAD_RA)
                goto do_sigbus;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault causing
         * load/store/atomic was a write or not, it only says that there
         * was no match.  So in such a case we (carefully) read the
         * instruction to try and figure this out.  It's an optimization
         * so it's ok if we can't do this.
         *
         * Special hack, window spill/fill knows the exact fault type.
         */
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
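
        /* A concrete decode of the check above (sample encoding assumed
         * for illustration): "stw %g2, [%g1]" has op == 3 (bits 31:30)
         * and op3 == 0x04, whose bit 2 becomes bit 21 of the
         * instruction, so (insn & 0xc0200000) == 0xc0200000 holds and
         * the miss is treated as a write.  PREFETCH (op3 0x2d) and
         * PREFETCHA (op3 0x3d) also carry bit 21, but both match the
         * excluded pattern (insn & 0x01780000) == 0x01680000, so they
         * are not misflagged as stores.
         */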
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        vma = expand_stack(mm, address);
        if (!vma)
                goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took a ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                WARN(address != regs->tpc,
                     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
                WARN_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);

                flags |= FAULT_FLAG_WRITE;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs)) {
                if (regs->tstate & TSTATE_PRIV) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }
                goto exit_exception;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                goto lock_released;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /* No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */

                goto retry;
        }
        mmap_read_unlock(mm);

lock_released:
        mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
                        tsb_grow(mm, MM_TSB_HUGE, mm_rss);
                else
                        hugetlb_setup(regs);
        }
#endif
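
        /* Rough arithmetic behind the TSB resizing above (sparc64
         * defaults assumed: 8K base pages and an 8M HPAGE_SIZE built
         * from two 4M hardware mappings, so REAL_HPAGE_PER_HPAGE == 2).
         * Each THP pte stands in for HPAGE_SIZE / PAGE_SIZE == 1024
         * base-page slots, which is why it is subtracted from the rss
         * fed to the base TSB, while each hugepage contributes two real
         * 4M TTEs to the huge TSB, hence the "* REAL_HPAGE_PER_HPAGE".
         */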
exit_exception:
        exception_exit(prev_state);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        mmap_read_unlock(mm);
bad_area_nosemaphore:
        insn = get_fault_insn(regs, insn);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        mmap_read_unlock(mm);
        if (!(regs->tstate & TSTATE_PRIV)) {
                pagefault_out_of_memory();
                goto exit_exception;
        }
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        mmap_read_unlock(mm);

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}