v3.1
  1/*
  2 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
  3 *
  4 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
  5 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  6 */
  7
  8#include <asm/head.h>
  9
 10#include <linux/string.h>
 11#include <linux/types.h>
 12#include <linux/sched.h>
 13#include <linux/ptrace.h>
 14#include <linux/mman.h>
 15#include <linux/signal.h>
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/init.h>
 19#include <linux/perf_event.h>
 20#include <linux/interrupt.h>
 21#include <linux/kprobes.h>
 22#include <linux/kdebug.h>
 23#include <linux/percpu.h>
 24
 25#include <asm/page.h>
 26#include <asm/pgtable.h>
 27#include <asm/openprom.h>
 28#include <asm/oplib.h>
 29#include <asm/uaccess.h>
 30#include <asm/asi.h>
 31#include <asm/lsu.h>
 32#include <asm/sections.h>
 33#include <asm/mmu_context.h>
 34
 35int show_unhandled_signals = 1;
 36
 37static inline __kprobes int notify_page_fault(struct pt_regs *regs)
 38{
 39	int ret = 0;
 40
 41	/* kprobe_running() needs smp_processor_id() */
 42	if (kprobes_built_in() && !user_mode(regs)) {
 43		preempt_disable();
 44		if (kprobe_running() && kprobe_fault_handler(regs, 0))
 45			ret = 1;
 46		preempt_enable();
 47	}
 48	return ret;
 49}
 50
 51static void __kprobes unhandled_fault(unsigned long address,
 52				      struct task_struct *tsk,
 53				      struct pt_regs *regs)
 54{
 55	if ((unsigned long) address < PAGE_SIZE) {
 56		printk(KERN_ALERT "Unable to handle kernel NULL "
 57		       "pointer dereference\n");
 58	} else {
 59		printk(KERN_ALERT "Unable to handle kernel paging request "
 60		       "at virtual address %016lx\n", (unsigned long)address);
 61	}
 62	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
 63	       (tsk->mm ?
 64		CTX_HWBITS(tsk->mm->context) :
 65		CTX_HWBITS(tsk->active_mm->context)));
 66	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
 67	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
 68		          (unsigned long) tsk->active_mm->pgd));
 69	die_if_kernel("Oops", regs);
 70}
 71
 72static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 73{
 74	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 75	       regs->tpc);
 76	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
 77	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
 78	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
 79	dump_stack();
 80	unhandled_fault(regs->tpc, current, regs);
 81}
 82
 83/*
 84 * We now make sure that mmap_sem is held in all paths that call 
 85 * this. Additionally, to prevent kswapd from ripping ptes from
 86 * under us, disable interrupts around the time that we look at the
 87 * pte; kswapd will then have to wait for its SMP IPI response from
 88 * us. vmtruncate likewise. This saves us having to take the pte lock.
 89 */
 90static unsigned int get_user_insn(unsigned long tpc)
 91{
 92	pgd_t *pgdp = pgd_offset(current->mm, tpc);
 93	pud_t *pudp;
 94	pmd_t *pmdp;
 95	pte_t *ptep, pte;
 96	unsigned long pa;
 97	u32 insn = 0;
 98	unsigned long pstate;
 99
100	if (pgd_none(*pgdp))
101		goto outret;
102	pudp = pud_offset(pgdp, tpc);
103	if (pud_none(*pudp))
104		goto outret;
105	pmdp = pmd_offset(pudp, tpc);
106	if (pmd_none(*pmdp))
107		goto outret;
108
109	/* This disables preemption for us as well. */
110	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
111	__asm__ __volatile__("wrpr %0, %1, %%pstate"
112				: : "r" (pstate), "i" (PSTATE_IE));
113	ptep = pte_offset_map(pmdp, tpc);
114	pte = *ptep;
115	if (!pte_present(pte))
116		goto out;
117
118	pa  = (pte_pfn(pte) << PAGE_SHIFT);
119	pa += (tpc & ~PAGE_MASK);
120
121	/* Use phys bypass so we don't pollute dtlb/dcache. */
122	__asm__ __volatile__("lduwa [%1] %2, %0"
123			     : "=r" (insn)
124			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
125
126out:
127	pte_unmap(ptep);
128	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
129outret:
130	return insn;
131}
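/* Editor's sketch, not part of fault.c.  The two raw pstate writes above
 * rely on a SPARC V9 detail: wrpr stores the XOR of its source operands.
 * "wrpr %pstate_val, PSTATE_IE, %pstate" therefore toggles the
 * interrupt-enable bit (relied on here to clear it), and the closing
 * "wrpr %pstate_val, 0x0, %pstate" restores the saved value unchanged.
 * The PSTATE_IE stand-in and the sample pstate value below are
 * illustrative assumptions only.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PSTATE_IE	0x0002ULL	/* stand-in for PSTATE_IE */

static uint64_t sketch_wrpr(uint64_t rs1, uint64_t operand2)
{
	return rs1 ^ operand2;		/* wrpr writes rs1 XOR operand2 */
}

int main(void)
{
	uint64_t saved = 0x0016ULL;	/* hypothetical pstate with IE set */
	uint64_t now;

	now = sketch_wrpr(saved, SKETCH_PSTATE_IE);	/* interrupts off */
	assert((now & SKETCH_PSTATE_IE) == 0);
	now = sketch_wrpr(saved, 0x0);			/* restore saved value */
	assert(now == saved);
	return 0;
}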
132
133static inline void
134show_signal_msg(struct pt_regs *regs, int sig, int code,
135		unsigned long address, struct task_struct *tsk)
136{
137	if (!unhandled_signal(tsk, sig))
138		return;
139
140	if (!printk_ratelimit())
141		return;
142
143	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
144	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
145	       tsk->comm, task_pid_nr(tsk), address,
146	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
147	       (void *)regs->u_regs[UREG_FP], code);
148
149	print_vma_addr(KERN_CONT " in ", regs->tpc);
150
151	printk(KERN_CONT "\n");
152}
153
154extern unsigned long compute_effective_address(struct pt_regs *, unsigned int, unsigned int);
155
156static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
157			     unsigned int insn, int fault_code)
158{
159	unsigned long addr;
160	siginfo_t info;
161
162	info.si_code = code;
163	info.si_signo = sig;
164	info.si_errno = 0;
165	if (fault_code & FAULT_CODE_ITLB)
166		addr = regs->tpc;
167	else
168		addr = compute_effective_address(regs, insn, 0);
169	info.si_addr = (void __user *) addr;
170	info.si_trapno = 0;
171
172	if (unlikely(show_unhandled_signals))
173		show_signal_msg(regs, sig, code, addr, current);
174
175	force_sig_info(sig, &info, current);
176}
177
178extern int handle_ldf_stq(u32, struct pt_regs *);
179extern int handle_ld_nf(u32, struct pt_regs *);
180
181static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
182{
183	if (!insn) {
184		if (!regs->tpc || (regs->tpc & 0x3))
185			return 0;
186		if (regs->tstate & TSTATE_PRIV) {
187			insn = *(unsigned int *) regs->tpc;
188		} else {
189			insn = get_user_insn(regs->tpc);
190		}
191	}
192	return insn;
193}
194
195static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
196				      int fault_code, unsigned int insn,
197				      unsigned long address)
198{
199	unsigned char asi = ASI_P;
200 
201	if ((!insn) && (regs->tstate & TSTATE_PRIV))
202		goto cannot_handle;
203
204	/* If the user insn could not be read (thus insn is zero), that
205	 * is fine.  We will just gun down the process with a signal
206	 * in that case.
207	 */
208
209	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
210	    (insn & 0xc0800000) == 0xc0800000) {
211		if (insn & 0x2000)
212			asi = (regs->tstate >> 24);
213		else
214			asi = (insn >> 5);
215		if ((asi & 0xf2) == 0x82) {
216			if (insn & 0x1000000) {
217				handle_ldf_stq(insn, regs);
218			} else {
219				/* This was a non-faulting load. Just clear the
220				 * destination register(s) and continue with the next
221				 * instruction. -jj
222				 */
223				handle_ld_nf(insn, regs);
224			}
225			return;
226		}
227	}
228		
229	/* Is this in ex_table? */
230	if (regs->tstate & TSTATE_PRIV) {
231		const struct exception_table_entry *entry;
232
233		entry = search_exception_tables(regs->tpc);
234		if (entry) {
235			regs->tpc = entry->fixup;
236			regs->tnpc = regs->tpc + 4;
237			return;
238		}
239	} else {
240		/* The si_code was set to make clear whether
241		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
242		 */
243		do_fault_siginfo(si_code, SIGSEGV, regs, insn, fault_code);
244		return;
245	}
246
247cannot_handle:
248	unhandled_fault (address, current, regs);
249}
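/* Editor's sketch, not part of fault.c.  It spells out the decoding that
 * do_kernel_fault() above uses to recognise non-faulting loads.  Field
 * positions follow the SPARC V9 format-3 encoding; the sample word and the
 * main() harness are illustrative assumptions only.
 */
#include <stdint.h>
#include <stdio.h>

static void sketch_decode(uint32_t insn, uint64_t tstate)
{
	uint8_t asi;

	/* op == 3 (bits 31:30) and op3 bit 4 (insn bit 23) set: a load or
	 * store that names an alternate address space.
	 */
	if ((insn & 0xc0800000) != 0xc0800000) {
		printf("not a load/store-alternate instruction\n");
		return;
	}

	if (insn & 0x2000)			/* i bit set: ASI from %asi,   */
		asi = (tstate >> 24) & 0xff;	/* saved in tstate bits 31:24  */
	else					/* i bit clear: immediate ASI  */
		asi = (insn >> 5) & 0xff;	/* field in bits 12:5          */

	/* 0x82/0x83/0x8a/0x8b are the non-faulting ASIs (primary/secondary
	 * plus their little-endian forms); the kernel's test accepts them.
	 */
	if ((asi & 0xf2) == 0x82) {
		if (insn & 0x1000000)	/* op3 bit 5: FP/quad alternate form */
			printf("would be handled by handle_ldf_stq()\n");
		else			/* integer non-faulting load */
			printf("would be handled by handle_ld_nf()\n");
	} else {
		printf("ASI 0x%02x is not a non-faulting ASI\n", asi);
	}
}

int main(void)
{
	/* Hypothetical word intended to encode "lduwa [%g1] 0x82, %g2". */
	sketch_decode(0xc4805040, 0);
	return 0;
}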
250
251static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
252{
253	static int times;
254
255	if (times++ < 10)
256		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
257		       "64-bit TPC [%lx]\n",
258		       current->comm, current->pid,
259		       regs->tpc);
260	show_regs(regs);
261}
262
263static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
264							 unsigned long addr)
265{
266	static int times;
267
268	if (times++ < 10)
269		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process "
270		       "reports 64-bit fault address [%lx]\n",
271		       current->comm, current->pid, addr);
272	show_regs(regs);
273}
274
275asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
276{
277	struct mm_struct *mm = current->mm;
278	struct vm_area_struct *vma;
279	unsigned int insn = 0;
280	int si_code, fault_code, fault;
281	unsigned long address, mm_rss;
282
283	fault_code = get_thread_fault_code();
284
285	if (notify_page_fault(regs))
286		return;
287
288	si_code = SEGV_MAPERR;
289	address = current_thread_info()->fault_address;
290
291	if ((fault_code & FAULT_CODE_ITLB) &&
292	    (fault_code & FAULT_CODE_DTLB))
293		BUG();
294
295	if (test_thread_flag(TIF_32BIT)) {
296		if (!(regs->tstate & TSTATE_PRIV)) {
297			if (unlikely((regs->tpc >> 32) != 0)) {
298				bogus_32bit_fault_tpc(regs);
299				goto intr_or_no_mm;
300			}
301		}
302		if (unlikely((address >> 32) != 0)) {
303			bogus_32bit_fault_address(regs, address);
304			goto intr_or_no_mm;
305		}
306	}
307
308	if (regs->tstate & TSTATE_PRIV) {
309		unsigned long tpc = regs->tpc;
310
311		/* Sanity check the PC. */
312		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
313		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
314			/* Valid, no problems... */
315		} else {
316			bad_kernel_pc(regs, address);
317			return;
318		}
319	}
320
321	/*
322	 * If we're in an interrupt or have no user
323	 * context, we must not take the fault..
324	 */
325	if (in_atomic() || !mm)
326		goto intr_or_no_mm;
327
328	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
329
330	if (!down_read_trylock(&mm->mmap_sem)) {
331		if ((regs->tstate & TSTATE_PRIV) &&
332		    !search_exception_tables(regs->tpc)) {
333			insn = get_fault_insn(regs, insn);
334			goto handle_kernel_fault;
335		}
336		down_read(&mm->mmap_sem);
337	}
338
339	vma = find_vma(mm, address);
340	if (!vma)
341		goto bad_area;
342
343	/* Pure DTLB misses do not tell us whether the fault-causing
344	 * load/store/atomic was a write or not; it only says that there
345	 * was no match.  So in such a case we (carefully) read the
346	 * instruction to try and figure this out.  It's an optimization
347	 * so it's ok if we can't do this.
348	 *
349	 * Special hack, window spill/fill knows the exact fault type.
350	 */
351	if (((fault_code &
352	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
353	    (vma->vm_flags & VM_WRITE) != 0) {
354		insn = get_fault_insn(regs, 0);
355		if (!insn)
356			goto continue_fault;
357		/* All loads, stores and atomics have bits 30 and 31 both set
358		 * in the instruction.  Bit 21 is set in all stores, but we
359		 * have to avoid prefetches which also have bit 21 set.
360		 */
361		if ((insn & 0xc0200000) == 0xc0200000 &&
362		    (insn & 0x01780000) != 0x01680000) {
363			/* Don't bother updating thread struct value,
364			 * because update_mmu_cache only cares which tlb
365			 * the access came from.
366			 */
367			fault_code |= FAULT_CODE_WRITE;
368		}
369	}
370continue_fault:
371
372	if (vma->vm_start <= address)
373		goto good_area;
374	if (!(vma->vm_flags & VM_GROWSDOWN))
375		goto bad_area;
376	if (!(fault_code & FAULT_CODE_WRITE)) {
377		/* Non-faulting loads shouldn't expand stack. */
378		insn = get_fault_insn(regs, insn);
379		if ((insn & 0xc0800000) == 0xc0800000) {
380			unsigned char asi;
381
382			if (insn & 0x2000)
383				asi = (regs->tstate >> 24);
384			else
385				asi = (insn >> 5);
386			if ((asi & 0xf2) == 0x82)
387				goto bad_area;
388		}
389	}
390	if (expand_stack(vma, address))
391		goto bad_area;
392	/*
393	 * Ok, we have a good vm_area for this memory access, so
394	 * we can handle it..
395	 */
396good_area:
397	si_code = SEGV_ACCERR;
398
399	/* If we took an ITLB miss on a non-executable page, catch
400	 * that here.
401	 */
402	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
403		BUG_ON(address != regs->tpc);
404		BUG_ON(regs->tstate & TSTATE_PRIV);
405		goto bad_area;
406	}
407
408	if (fault_code & FAULT_CODE_WRITE) {
409		if (!(vma->vm_flags & VM_WRITE))
410			goto bad_area;
411
412		/* Spitfire has an icache which does not snoop
413		 * processor stores.  Later processors do...
414		 */
415		if (tlb_type == spitfire &&
416		    (vma->vm_flags & VM_EXEC) != 0 &&
417		    vma->vm_file != NULL)
418			set_thread_fault_code(fault_code |
419					      FAULT_CODE_BLKCOMMIT);
420	} else {
421		/* Allow reads even for write-only mappings */
422		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
423			goto bad_area;
424	}
425
426	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
427	if (unlikely(fault & VM_FAULT_ERROR)) {
428		if (fault & VM_FAULT_OOM)
429			goto out_of_memory;
430		else if (fault & VM_FAULT_SIGBUS)
431			goto do_sigbus;
432		BUG();
433	}
434	if (fault & VM_FAULT_MAJOR) {
435		current->maj_flt++;
436		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
437	} else {
438		current->min_flt++;
439		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
440	}
441	up_read(&mm->mmap_sem);
442
443	mm_rss = get_mm_rss(mm);
444#ifdef CONFIG_HUGETLB_PAGE
445	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
446#endif
447	if (unlikely(mm_rss >
448		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
449		tsb_grow(mm, MM_TSB_BASE, mm_rss);
450#ifdef CONFIG_HUGETLB_PAGE
451	mm_rss = mm->context.huge_pte_count;
452	if (unlikely(mm_rss >
453		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
454		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
455#endif
456	return;
457
458	/*
459	 * Something tried to access memory that isn't in our memory map..
460	 * Fix it, but check if it's kernel or user first..
461	 */
462bad_area:
463	insn = get_fault_insn(regs, insn);
464	up_read(&mm->mmap_sem);
465
466handle_kernel_fault:
467	do_kernel_fault(regs, si_code, fault_code, insn, address);
468	return;
469
470/*
471 * We ran out of memory, or some other thing happened to us that made
472 * us unable to handle the page fault gracefully.
473 */
474out_of_memory:
475	insn = get_fault_insn(regs, insn);
476	up_read(&mm->mmap_sem);
477	if (!(regs->tstate & TSTATE_PRIV)) {
478		pagefault_out_of_memory();
479		return;
480	}
481	goto handle_kernel_fault;
482
483intr_or_no_mm:
484	insn = get_fault_insn(regs, 0);
485	goto handle_kernel_fault;
486
487do_sigbus:
488	insn = get_fault_insn(regs, insn);
489	up_read(&mm->mmap_sem);
490
491	/*
492	 * Send a sigbus, regardless of whether we were in kernel
493	 * or user mode.
494	 */
495	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, insn, fault_code);
496
497	/* Kernel mode? Handle exceptions or die */
498	if (regs->tstate & TSTATE_PRIV)
499		goto handle_kernel_fault;
500}
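The write-detection test in the DTLB-miss path above (kernel lines 361-362 of the v3.1 listing) leans on two magic masks: bits 31 and 30 mark a memory operation, bit 21 is set in every store opcode, and the second mask filters out PREFETCH and PREFETCHA, which also carry bit 21 but never write memory. The sketch below is editorial, not part of fault.c; the two sample encodings are assumptions intended to represent an stw and a prefetch.

#include <stdint.h>
#include <stdio.h>

static int sketch_insn_is_store(uint32_t insn)
{
	return (insn & 0xc0200000) == 0xc0200000 &&	/* memory op with op3 bit 2 set */
	       (insn & 0x01780000) != 0x01680000;	/* but not PREFETCH/PREFETCHA   */
}

int main(void)
{
	/* Hypothetical encodings of "stw %g1, [%g2]" and "prefetch [%g2], 1". */
	printf("stw      -> %d\n", sketch_insn_is_store(0xc2208000));	/* prints 1 */
	printf("prefetch -> %d\n", sketch_insn_is_store(0xc3688000));	/* prints 0 */
	return 0;
}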
v4.10.11
  1/*
  2 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
  3 *
  4 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
  5 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  6 */
  7
  8#include <asm/head.h>
  9
 10#include <linux/string.h>
 11#include <linux/types.h>
 12#include <linux/sched.h>
 13#include <linux/ptrace.h>
 14#include <linux/mman.h>
 15#include <linux/signal.h>
 16#include <linux/mm.h>
 17#include <linux/extable.h>
 18#include <linux/init.h>
 19#include <linux/perf_event.h>
 20#include <linux/interrupt.h>
 21#include <linux/kprobes.h>
 22#include <linux/kdebug.h>
 23#include <linux/percpu.h>
 24#include <linux/context_tracking.h>
 25#include <linux/uaccess.h>
 26
 27#include <asm/page.h>
 28#include <asm/pgtable.h>
 29#include <asm/openprom.h>
 30#include <asm/oplib.h>
 31#include <asm/asi.h>
 32#include <asm/lsu.h>
 33#include <asm/sections.h>
 34#include <asm/mmu_context.h>
 35#include <asm/setup.h>
 36
 37int show_unhandled_signals = 1;
 38
 39static inline __kprobes int notify_page_fault(struct pt_regs *regs)
 40{
 41	int ret = 0;
 42
 43	/* kprobe_running() needs smp_processor_id() */
 44	if (kprobes_built_in() && !user_mode(regs)) {
 45		preempt_disable();
 46		if (kprobe_running() && kprobe_fault_handler(regs, 0))
 47			ret = 1;
 48		preempt_enable();
 49	}
 50	return ret;
 51}
 52
 53static void __kprobes unhandled_fault(unsigned long address,
 54				      struct task_struct *tsk,
 55				      struct pt_regs *regs)
 56{
 57	if ((unsigned long) address < PAGE_SIZE) {
 58		printk(KERN_ALERT "Unable to handle kernel NULL "
 59		       "pointer dereference\n");
 60	} else {
 61		printk(KERN_ALERT "Unable to handle kernel paging request "
 62		       "at virtual address %016lx\n", (unsigned long)address);
 63	}
 64	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
 65	       (tsk->mm ?
 66		CTX_HWBITS(tsk->mm->context) :
 67		CTX_HWBITS(tsk->active_mm->context)));
 68	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
 69	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
 70		          (unsigned long) tsk->active_mm->pgd));
 71	die_if_kernel("Oops", regs);
 72}
 73
 74static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 75{
 76	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
 77	       regs->tpc);
 78	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
 79	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
 80	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
 81	dump_stack();
 82	unhandled_fault(regs->tpc, current, regs);
 83}
 84
 85/*
 86 * We now make sure that mmap_sem is held in all paths that call 
 87 * this. Additionally, to prevent kswapd from ripping ptes from
 88 * under us, disable interrupts around the time that we look at the
 89 * pte; kswapd will then have to wait for its SMP IPI response from
 90 * us. vmtruncate likewise. This saves us having to take the pte lock.
 91 */
 92static unsigned int get_user_insn(unsigned long tpc)
 93{
 94	pgd_t *pgdp = pgd_offset(current->mm, tpc);
 95	pud_t *pudp;
 96	pmd_t *pmdp;
 97	pte_t *ptep, pte;
 98	unsigned long pa;
 99	u32 insn = 0;
100
101	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
102		goto out;
103	pudp = pud_offset(pgdp, tpc);
104	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
105		goto out;
106
107	/* This disables preemption for us as well. */
108	local_irq_disable();
109
110	pmdp = pmd_offset(pudp, tpc);
111	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
112		goto out_irq_enable;
113
114#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
115	if (is_hugetlb_pmd(*pmdp)) {
116		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
117		pa += tpc & ~HPAGE_MASK;
118
119		/* Use phys bypass so we don't pollute dtlb/dcache. */
120		__asm__ __volatile__("lduwa [%1] %2, %0"
121				     : "=r" (insn)
122				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
123	} else
124#endif
125	{
126		ptep = pte_offset_map(pmdp, tpc);
127		pte = *ptep;
128		if (pte_present(pte)) {
129			pa  = (pte_pfn(pte) << PAGE_SHIFT);
130			pa += (tpc & ~PAGE_MASK);
131
132			/* Use phys bypass so we don't pollute dtlb/dcache. */
133			__asm__ __volatile__("lduwa [%1] %2, %0"
134					     : "=r" (insn)
135					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
136		}
137		pte_unmap(ptep);
138	}
139out_irq_enable:
140	local_irq_enable();
141out:
142	return insn;
143}
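/* Editor's sketch, not part of fault.c.  The hugetlb/THP branch above
 * computes the instruction's physical address the same way as the
 * base-page branch: the pfn is shifted by PAGE_SHIFT in both cases, and
 * only the mask supplying the in-mapping offset changes (HPAGE_MASK
 * versus PAGE_MASK).  The shifts used here, 13 for sparc64's 8KB base
 * pages and 23 as an assumed HPAGE_SHIFT, are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t sketch_insn_pa(uint64_t pfn, uint64_t tpc, unsigned int map_shift)
{
	return (pfn << 13) +				/* pfn << PAGE_SHIFT     */
	       (tpc & ((1ULL << map_shift) - 1));	/* tpc & ~{PAGE,HPAGE}_MASK */
}

int main(void)
{
	uint64_t tpc = 0x700123a64ULL;	/* hypothetical user %tpc */

	/* Base-page branch: pte_pfn(pte), offset within an 8KB page. */
	printf("base page: 0x%llx\n",
	       (unsigned long long)sketch_insn_pa(0x12345, tpc, 13));
	/* Huge-page branch: pmd_pfn(*pmdp), offset within the huge page. */
	printf("huge page: 0x%llx\n",
	       (unsigned long long)sketch_insn_pa(0x12000, tpc, 23));
	return 0;
}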
144
145static inline void
146show_signal_msg(struct pt_regs *regs, int sig, int code,
147		unsigned long address, struct task_struct *tsk)
148{
149	if (!unhandled_signal(tsk, sig))
150		return;
151
152	if (!printk_ratelimit())
153		return;
154
155	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
156	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
157	       tsk->comm, task_pid_nr(tsk), address,
158	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
159	       (void *)regs->u_regs[UREG_FP], code);
160
161	print_vma_addr(KERN_CONT " in ", regs->tpc);
162
163	printk(KERN_CONT "\n");
164}
165
166static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
167			     unsigned long fault_addr, unsigned int insn,
168			     int fault_code)
169{
170	unsigned long addr;
171	siginfo_t info;
172
173	info.si_code = code;
174	info.si_signo = sig;
175	info.si_errno = 0;
176	if (fault_code & FAULT_CODE_ITLB) {
177		addr = regs->tpc;
178	} else {
179		/* If we were able to probe the faulting instruction, use it
180		 * to compute a precise fault address.  Otherwise use the fault
181		 * time provided address which may only have page granularity.
182		 */
183		if (insn)
184			addr = compute_effective_address(regs, insn, 0);
185		else
186			addr = fault_addr;
187	}
188	info.si_addr = (void __user *) addr;
189	info.si_trapno = 0;
190
191	if (unlikely(show_unhandled_signals))
192		show_signal_msg(regs, sig, code, addr, current);
193
194	force_sig_info(sig, &info, current);
195}
196
197static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
198{
199	if (!insn) {
200		if (!regs->tpc || (regs->tpc & 0x3))
201			return 0;
202		if (regs->tstate & TSTATE_PRIV) {
203			insn = *(unsigned int *) regs->tpc;
204		} else {
205			insn = get_user_insn(regs->tpc);
206		}
207	}
208	return insn;
209}
210
211static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
212				      int fault_code, unsigned int insn,
213				      unsigned long address)
214{
215	unsigned char asi = ASI_P;
216 
217	if ((!insn) && (regs->tstate & TSTATE_PRIV))
218		goto cannot_handle;
219
220	/* If the user insn could not be read (thus insn is zero), that
221	 * is fine.  We will just gun down the process with a signal
222	 * in that case.
223	 */
224
225	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
226	    (insn & 0xc0800000) == 0xc0800000) {
227		if (insn & 0x2000)
228			asi = (regs->tstate >> 24);
229		else
230			asi = (insn >> 5);
231		if ((asi & 0xf2) == 0x82) {
232			if (insn & 0x1000000) {
233				handle_ldf_stq(insn, regs);
234			} else {
235				/* This was a non-faulting load. Just clear the
236				 * destination register(s) and continue with the next
237				 * instruction. -jj
238				 */
239				handle_ld_nf(insn, regs);
240			}
241			return;
242		}
243	}
244		
245	/* Is this in ex_table? */
246	if (regs->tstate & TSTATE_PRIV) {
247		const struct exception_table_entry *entry;
248
249		entry = search_exception_tables(regs->tpc);
250		if (entry) {
251			regs->tpc = entry->fixup;
252			regs->tnpc = regs->tpc + 4;
253			return;
254		}
255	} else {
256		/* The si_code was set to make clear whether
257		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
258		 */
259		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
260		return;
261	}
262
263cannot_handle:
264	unhandled_fault (address, current, regs);
265}
266
267static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
268{
269	static int times;
270
271	if (times++ < 10)
272		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
273		       "64-bit TPC [%lx]\n",
274		       current->comm, current->pid,
275		       regs->tpc);
276	show_regs(regs);
277}
278
279asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
280{
281	enum ctx_state prev_state = exception_enter();
282	struct mm_struct *mm = current->mm;
283	struct vm_area_struct *vma;
284	unsigned int insn = 0;
285	int si_code, fault_code, fault;
286	unsigned long address, mm_rss;
287	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
288
289	fault_code = get_thread_fault_code();
290
291	if (notify_page_fault(regs))
292		goto exit_exception;
293
294	si_code = SEGV_MAPERR;
295	address = current_thread_info()->fault_address;
296
297	if ((fault_code & FAULT_CODE_ITLB) &&
298	    (fault_code & FAULT_CODE_DTLB))
299		BUG();
300
301	if (test_thread_flag(TIF_32BIT)) {
302		if (!(regs->tstate & TSTATE_PRIV)) {
303			if (unlikely((regs->tpc >> 32) != 0)) {
304				bogus_32bit_fault_tpc(regs);
305				goto intr_or_no_mm;
306			}
307		}
308		if (unlikely((address >> 32) != 0))
309			goto intr_or_no_mm;
310	}
311
312	if (regs->tstate & TSTATE_PRIV) {
313		unsigned long tpc = regs->tpc;
314
315		/* Sanity check the PC. */
316		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
317		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
318			/* Valid, no problems... */
319		} else {
320			bad_kernel_pc(regs, address);
321			goto exit_exception;
322		}
323	} else
324		flags |= FAULT_FLAG_USER;
325
326	/*
327	 * If we're in an interrupt or have no user
328	 * context, we must not take the fault..
329	 */
330	if (faulthandler_disabled() || !mm)
331		goto intr_or_no_mm;
332
333	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
334
335	if (!down_read_trylock(&mm->mmap_sem)) {
336		if ((regs->tstate & TSTATE_PRIV) &&
337		    !search_exception_tables(regs->tpc)) {
338			insn = get_fault_insn(regs, insn);
339			goto handle_kernel_fault;
340		}
341
342retry:
343		down_read(&mm->mmap_sem);
344	}
345
346	if (fault_code & FAULT_CODE_BAD_RA)
347		goto do_sigbus;
348
349	vma = find_vma(mm, address);
350	if (!vma)
351		goto bad_area;
352
353	/* Pure DTLB misses do not tell us whether the fault-causing
354	 * load/store/atomic was a write or not; it only says that there
355	 * was no match.  So in such a case we (carefully) read the
356	 * instruction to try and figure this out.  It's an optimization
357	 * so it's ok if we can't do this.
358	 *
359	 * Special hack, window spill/fill knows the exact fault type.
360	 */
361	if (((fault_code &
362	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
363	    (vma->vm_flags & VM_WRITE) != 0) {
364		insn = get_fault_insn(regs, 0);
365		if (!insn)
366			goto continue_fault;
367		/* All loads, stores and atomics have bits 30 and 31 both set
368		 * in the instruction.  Bit 21 is set in all stores, but we
369		 * have to avoid prefetches which also have bit 21 set.
370		 */
371		if ((insn & 0xc0200000) == 0xc0200000 &&
372		    (insn & 0x01780000) != 0x01680000) {
373			/* Don't bother updating thread struct value,
374			 * because update_mmu_cache only cares which tlb
375			 * the access came from.
376			 */
377			fault_code |= FAULT_CODE_WRITE;
378		}
379	}
380continue_fault:
381
382	if (vma->vm_start <= address)
383		goto good_area;
384	if (!(vma->vm_flags & VM_GROWSDOWN))
385		goto bad_area;
386	if (!(fault_code & FAULT_CODE_WRITE)) {
387		/* Non-faulting loads shouldn't expand stack. */
388		insn = get_fault_insn(regs, insn);
389		if ((insn & 0xc0800000) == 0xc0800000) {
390			unsigned char asi;
391
392			if (insn & 0x2000)
393				asi = (regs->tstate >> 24);
394			else
395				asi = (insn >> 5);
396			if ((asi & 0xf2) == 0x82)
397				goto bad_area;
398		}
399	}
400	if (expand_stack(vma, address))
401		goto bad_area;
402	/*
403	 * Ok, we have a good vm_area for this memory access, so
404	 * we can handle it..
405	 */
406good_area:
407	si_code = SEGV_ACCERR;
408
409	/* If we took an ITLB miss on a non-executable page, catch
410	 * that here.
411	 */
412	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
413		WARN(address != regs->tpc,
414		     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
415		WARN_ON(regs->tstate & TSTATE_PRIV);
416		goto bad_area;
417	}
418
419	if (fault_code & FAULT_CODE_WRITE) {
420		if (!(vma->vm_flags & VM_WRITE))
421			goto bad_area;
422
423		/* Spitfire has an icache which does not snoop
424		 * processor stores.  Later processors do...
425		 */
426		if (tlb_type == spitfire &&
427		    (vma->vm_flags & VM_EXEC) != 0 &&
428		    vma->vm_file != NULL)
429			set_thread_fault_code(fault_code |
430					      FAULT_CODE_BLKCOMMIT);
431
432		flags |= FAULT_FLAG_WRITE;
433	} else {
434		/* Allow reads even for write-only mappings */
435		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
436			goto bad_area;
437	}
438
439	fault = handle_mm_fault(vma, address, flags);
440
441	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
442		goto exit_exception;
443
444	if (unlikely(fault & VM_FAULT_ERROR)) {
445		if (fault & VM_FAULT_OOM)
446			goto out_of_memory;
447		else if (fault & VM_FAULT_SIGSEGV)
448			goto bad_area;
449		else if (fault & VM_FAULT_SIGBUS)
450			goto do_sigbus;
451		BUG();
452	}
453
454	if (flags & FAULT_FLAG_ALLOW_RETRY) {
455		if (fault & VM_FAULT_MAJOR) {
456			current->maj_flt++;
457			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
458				      1, regs, address);
459		} else {
460			current->min_flt++;
461			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
462				      1, regs, address);
463		}
464		if (fault & VM_FAULT_RETRY) {
465			flags &= ~FAULT_FLAG_ALLOW_RETRY;
466			flags |= FAULT_FLAG_TRIED;
467
468			/* No need to up_read(&mm->mmap_sem) as we would
469			 * have already released it in __lock_page_or_retry
470			 * in mm/filemap.c.
471			 */
472
473			goto retry;
474		}
475	}
476	up_read(&mm->mmap_sem);
477
478	mm_rss = get_mm_rss(mm);
479#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
480	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
481#endif
482	if (unlikely(mm_rss >
483		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
484		tsb_grow(mm, MM_TSB_BASE, mm_rss);
485#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
486	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
487	mm_rss *= REAL_HPAGE_PER_HPAGE;
488	if (unlikely(mm_rss >
489		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
490		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
491			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
492		else
493			hugetlb_setup(regs);
494
495	}
496#endif
497exit_exception:
498	exception_exit(prev_state);
499	return;
500
501	/*
502	 * Something tried to access memory that isn't in our memory map..
503	 * Fix it, but check if it's kernel or user first..
504	 */
505bad_area:
506	insn = get_fault_insn(regs, insn);
507	up_read(&mm->mmap_sem);
508
509handle_kernel_fault:
510	do_kernel_fault(regs, si_code, fault_code, insn, address);
511	goto exit_exception;
512
513/*
514 * We ran out of memory, or some other thing happened to us that made
515 * us unable to handle the page fault gracefully.
516 */
517out_of_memory:
518	insn = get_fault_insn(regs, insn);
519	up_read(&mm->mmap_sem);
520	if (!(regs->tstate & TSTATE_PRIV)) {
521		pagefault_out_of_memory();
522		goto exit_exception;
523	}
524	goto handle_kernel_fault;
525
526intr_or_no_mm:
527	insn = get_fault_insn(regs, 0);
528	goto handle_kernel_fault;
529
530do_sigbus:
531	insn = get_fault_insn(regs, insn);
532	up_read(&mm->mmap_sem);
533
534	/*
535	 * Send a sigbus, regardless of whether we were in kernel
536	 * or user mode.
537	 */
538	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
539
540	/* Kernel mode? Handle exceptions or die */
541	if (regs->tstate & TSTATE_PRIV)
542		goto handle_kernel_fault;
543}
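A notable difference between the two versions is the retry protocol around handle_mm_fault() (kernel lines 287, 342, 439 and 454-475 in the v4.10.11 listing above). The sketch below is editorial, not kernel code: the flag values and the stub fault handler are stand-ins, chosen only to show the shape of the single-retry loop.

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x01	/* stand-in values, not the kernel's */
#define FAULT_FLAG_KILLABLE	0x02
#define FAULT_FLAG_TRIED	0x04
#define VM_FAULT_RETRY		0x10

/* Stand-in for handle_mm_fault(): pretend the first attempt had to drop
 * mmap_sem and asks to be retried, and that the retry succeeds.
 */
static unsigned int sketch_handle_mm_fault(unsigned int flags)
{
	return (flags & FAULT_FLAG_ALLOW_RETRY) ? VM_FAULT_RETRY : 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	unsigned int fault;

retry:
	/* down_read(&mm->mmap_sem) is taken here in the real handler */
	fault = sketch_handle_mm_fault(flags);
	if (fault & VM_FAULT_RETRY) {
		/* mmap_sem was already dropped inside the fault path, so the
		 * handler only strips ALLOW_RETRY, marks the fault as TRIED,
		 * and goes around exactly once more.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	/* up_read(&mm->mmap_sem) on the way out in the real handler */
	printf("final flags: %#x\n", flags);
	return 0;
}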