v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * fault.c:  Page fault handlers for the Sparc.
  4 *
  5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  7 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  8 */
  9
 10#include <asm/head.h>
 11
 12#include <linux/string.h>
 13#include <linux/types.h>
 14#include <linux/sched.h>
 15#include <linux/ptrace.h>
 16#include <linux/mman.h>
 17#include <linux/threads.h>
 18#include <linux/kernel.h>
 19#include <linux/signal.h>
 20#include <linux/mm.h>
 21#include <linux/smp.h>
 22#include <linux/perf_event.h>
 23#include <linux/interrupt.h>
 24#include <linux/kdebug.h>
 25#include <linux/uaccess.h>
 26
 27#include <asm/page.h>
 28#include <asm/pgtable.h>
 29#include <asm/openprom.h>
 30#include <asm/oplib.h>
 31#include <asm/setup.h>
 32#include <asm/smp.h>
 33#include <asm/traps.h>
 34
 35#include "mm_32.h"
 36
 37int show_unhandled_signals = 1;
 38
 39static void __noreturn unhandled_fault(unsigned long address,
 40				       struct task_struct *tsk,
 41				       struct pt_regs *regs)
 42{
 43	if ((unsigned long) address < PAGE_SIZE) {
 44		printk(KERN_ALERT
 45		    "Unable to handle kernel NULL pointer dereference\n");
 46	} else {
 47		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
 48		       address);
 49	}
 50	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
 51		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
 52	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
 53		(tsk->mm ? (unsigned long) tsk->mm->pgd :
 54			(unsigned long) tsk->active_mm->pgd));
 55	die_if_kernel("Oops", regs);
 56}
 57
 58asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 59			    unsigned long address)
 60{
 61	struct pt_regs regs;
 62	unsigned long g2;
 63	unsigned int insn;
 64	int i;
 65
 66	i = search_extables_range(ret_pc, &g2);
 67	switch (i) {
 68	case 3:
 69		/* load & store will be handled by fixup */
 70		return 3;
 71
 72	case 1:
 73		/* store will be handled by fixup, load will bump out */
 74		/* for _to_ macros */
 75		insn = *((unsigned int *) pc);
 76		if ((insn >> 21) & 1)
 77			return 1;
 78		break;
 79
 80	case 2:
 81		/* load will be handled by fixup, store will bump out */
 82		/* for _from_ macros */
 83		insn = *((unsigned int *) pc);
 84		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
 85			return 2;
 86		break;
 87
 88	default:
 89		break;
 90	}
 91
 92	memset(&regs, 0, sizeof(regs));
 93	regs.pc = pc;
 94	regs.npc = pc + 4;
 95	__asm__ __volatile__(
 96		"rd %%psr, %0\n\t"
 97		"nop\n\t"
 98		"nop\n\t"
 99		"nop\n" : "=r" (regs.psr));
100	unhandled_fault(address, current, &regs);
101
102	/* Not reached */
103	return 0;
104}
105
106static inline void
107show_signal_msg(struct pt_regs *regs, int sig, int code,
108		unsigned long address, struct task_struct *tsk)
109{
110	if (!unhandled_signal(tsk, sig))
111		return;
112
113	if (!printk_ratelimit())
114		return;
115
116	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
117	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
118	       tsk->comm, task_pid_nr(tsk), address,
119	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
120	       (void *)regs->u_regs[UREG_FP], code);
121
122	print_vma_addr(KERN_CONT " in ", regs->pc);
123
124	printk(KERN_CONT "\n");
125}
126
127static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
128			       unsigned long addr)
129{
130	siginfo_t info;
131
132	info.si_signo = sig;
133	info.si_code = code;
134	info.si_errno = 0;
135	info.si_addr = (void __user *) addr;
136	info.si_trapno = 0;
137
138	if (unlikely(show_unhandled_signals))
139		show_signal_msg(regs, sig, info.si_code,
140				addr, current);
141
142	force_sig_info (sig, &info, current);
143}
144
145static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
146{
147	unsigned int insn;
148
149	if (text_fault)
150		return regs->pc;
151
152	if (regs->psr & PSR_PS)
153		insn = *(unsigned int *) regs->pc;
154	else
155		__get_user(insn, (unsigned int *) regs->pc);
156
157	return safe_compute_effective_address(regs, insn);
158}
159
160static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
161				      int text_fault)
162{
163	unsigned long addr = compute_si_addr(regs, text_fault);
164
165	__do_fault_siginfo(code, sig, regs, addr);
166}
167
168asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
169			       unsigned long address)
170{
171	struct vm_area_struct *vma;
172	struct task_struct *tsk = current;
173	struct mm_struct *mm = tsk->mm;
174	unsigned int fixup;
175	unsigned long g2;
176	int from_user = !(regs->psr & PSR_PS);
177	int fault, code;
178	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
179
180	if (text_fault)
181		address = regs->pc;
182
183	/*
184	 * We fault-in kernel-space virtual memory on-demand. The
185	 * 'reference' page table is init_mm.pgd.
186	 *
187	 * NOTE! We MUST NOT take any locks for this case. We may
188	 * be in an interrupt or a critical region, and should
189	 * only copy the information from the master page table,
190	 * nothing more.
191	 */
192	code = SEGV_MAPERR;
193	if (address >= TASK_SIZE)
194		goto vmalloc_fault;
195
196	/*
197	 * If we're in an interrupt or have no user
198	 * context, we must not take the fault..
199	 */
200	if (pagefault_disabled() || !mm)
201		goto no_context;
202
203	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
204
205retry:
206	down_read(&mm->mmap_sem);
207
208	if (!from_user && address >= PAGE_OFFSET)
209		goto bad_area;
210
211	vma = find_vma(mm, address);
212	if (!vma)
213		goto bad_area;
214	if (vma->vm_start <= address)
215		goto good_area;
216	if (!(vma->vm_flags & VM_GROWSDOWN))
217		goto bad_area;
218	if (expand_stack(vma, address))
219		goto bad_area;
220	/*
221	 * Ok, we have a good vm_area for this memory access, so
222	 * we can handle it..
223	 */
224good_area:
225	code = SEGV_ACCERR;
226	if (write) {
227		if (!(vma->vm_flags & VM_WRITE))
228			goto bad_area;
229	} else {
230		/* Allow reads even for write-only mappings */
231		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
232			goto bad_area;
233	}
234
235	if (from_user)
236		flags |= FAULT_FLAG_USER;
237	if (write)
238		flags |= FAULT_FLAG_WRITE;
239
240	/*
241	 * If for any reason at all we couldn't handle the fault,
242	 * make sure we exit gracefully rather than endlessly redo
243	 * the fault.
244	 */
245	fault = handle_mm_fault(vma, address, flags);
246
247	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
248		return;
249
250	if (unlikely(fault & VM_FAULT_ERROR)) {
251		if (fault & VM_FAULT_OOM)
252			goto out_of_memory;
253		else if (fault & VM_FAULT_SIGSEGV)
254			goto bad_area;
255		else if (fault & VM_FAULT_SIGBUS)
256			goto do_sigbus;
257		BUG();
258	}
259
260	if (flags & FAULT_FLAG_ALLOW_RETRY) {
261		if (fault & VM_FAULT_MAJOR) {
262			current->maj_flt++;
263			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
264				      1, regs, address);
265		} else {
266			current->min_flt++;
267			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
268				      1, regs, address);
269		}
270		if (fault & VM_FAULT_RETRY) {
271			flags &= ~FAULT_FLAG_ALLOW_RETRY;
272			flags |= FAULT_FLAG_TRIED;
273
274			/* No need to up_read(&mm->mmap_sem) as we would
275			 * have already released it in __lock_page_or_retry
276			 * in mm/filemap.c.
277			 */
278
279			goto retry;
280		}
281	}
282
283	up_read(&mm->mmap_sem);
284	return;
285
286	/*
287	 * Something tried to access memory that isn't in our memory map..
288	 * Fix it, but check if it's kernel or user first..
289	 */
290bad_area:
291	up_read(&mm->mmap_sem);
292
293bad_area_nosemaphore:
294	/* User mode accesses just cause a SIGSEGV */
295	if (from_user) {
296		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
297		return;
298	}
299
300	/* Is this in ex_table? */
301no_context:
302	g2 = regs->u_regs[UREG_G2];
303	if (!from_user) {
304		fixup = search_extables_range(regs->pc, &g2);
305		/* Values below 10 are reserved for other things */
306		if (fixup > 10) {
307			extern const unsigned int __memset_start[];
308			extern const unsigned int __memset_end[];
309			extern const unsigned int __csum_partial_copy_start[];
310			extern const unsigned int __csum_partial_copy_end[];
311
312#ifdef DEBUG_EXCEPTIONS
313			printk("Exception: PC<%08lx> faddr<%08lx>\n",
314			       regs->pc, address);
315			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
316				regs->pc, fixup, g2);
317#endif
318			if ((regs->pc >= (unsigned long)__memset_start &&
319			     regs->pc < (unsigned long)__memset_end) ||
320			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
321			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
322				regs->u_regs[UREG_I4] = address;
323				regs->u_regs[UREG_I5] = regs->pc;
324			}
325			regs->u_regs[UREG_G2] = g2;
326			regs->pc = fixup;
327			regs->npc = regs->pc + 4;
328			return;
329		}
330	}
331
332	unhandled_fault(address, tsk, regs);
333	do_exit(SIGKILL);
334
335/*
336 * We ran out of memory, or some other thing happened to us that made
337 * us unable to handle the page fault gracefully.
338 */
339out_of_memory:
340	up_read(&mm->mmap_sem);
341	if (from_user) {
342		pagefault_out_of_memory();
343		return;
344	}
345	goto no_context;
346
347do_sigbus:
348	up_read(&mm->mmap_sem);
349	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
350	if (!from_user)
351		goto no_context;
352
353vmalloc_fault:
354	{
355		/*
356		 * Synchronize this task's top level page-table
357		 * with the 'reference' page table.
358		 */
359		int offset = pgd_index(address);
360		pgd_t *pgd, *pgd_k;
361		pmd_t *pmd, *pmd_k;
362
363		pgd = tsk->active_mm->pgd + offset;
364		pgd_k = init_mm.pgd + offset;
365
366		if (!pgd_present(*pgd)) {
367			if (!pgd_present(*pgd_k))
368				goto bad_area_nosemaphore;
369			pgd_val(*pgd) = pgd_val(*pgd_k);
370			return;
371		}
372
373		pmd = pmd_offset(pgd, address);
374		pmd_k = pmd_offset(pgd_k, address);
375
376		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
377			goto bad_area_nosemaphore;
378
379		*pmd = *pmd_k;
380		return;
381	}
382}
383
384/* This always deals with user addresses. */
385static void force_user_fault(unsigned long address, int write)
386{
387	struct vm_area_struct *vma;
388	struct task_struct *tsk = current;
389	struct mm_struct *mm = tsk->mm;
390	unsigned int flags = FAULT_FLAG_USER;
391	int code;
392
393	code = SEGV_MAPERR;
394
395	down_read(&mm->mmap_sem);
396	vma = find_vma(mm, address);
397	if (!vma)
398		goto bad_area;
399	if (vma->vm_start <= address)
400		goto good_area;
401	if (!(vma->vm_flags & VM_GROWSDOWN))
402		goto bad_area;
403	if (expand_stack(vma, address))
404		goto bad_area;
405good_area:
406	code = SEGV_ACCERR;
407	if (write) {
408		if (!(vma->vm_flags & VM_WRITE))
409			goto bad_area;
410		flags |= FAULT_FLAG_WRITE;
411	} else {
412		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
413			goto bad_area;
414	}
415	switch (handle_mm_fault(vma, address, flags)) {
416	case VM_FAULT_SIGBUS:
417	case VM_FAULT_OOM:
418		goto do_sigbus;
419	}
420	up_read(&mm->mmap_sem);
421	return;
422bad_area:
423	up_read(&mm->mmap_sem);
424	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
425	return;
426
427do_sigbus:
428	up_read(&mm->mmap_sem);
429	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
430}
431
432static void check_stack_aligned(unsigned long sp)
433{
434	if (sp & 0x7UL)
435		force_sig(SIGILL, current);
436}
437
438void window_overflow_fault(void)
439{
440	unsigned long sp;
441
442	sp = current_thread_info()->rwbuf_stkptrs[0];
443	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
444		force_user_fault(sp + 0x38, 1);
445	force_user_fault(sp, 1);
446
447	check_stack_aligned(sp);
448}
449
450void window_underflow_fault(unsigned long sp)
451{
452	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
453		force_user_fault(sp + 0x38, 0);
454	force_user_fault(sp, 0);
455
456	check_stack_aligned(sp);
457}
458
459void window_ret_fault(struct pt_regs *regs)
460{
461	unsigned long sp;
462
463	sp = regs->u_regs[UREG_FP];
464	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
465		force_user_fault(sp + 0x38, 0);
466	force_user_fault(sp, 0);
467
468	check_stack_aligned(sp);
469}
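Both user-fault paths in the listing above report failures through __do_fault_siginfo(), which fills siginfo with the signal number, a si_code of SEGV_MAPERR (no mapping at the address) or SEGV_ACCERR (a mapping exists but the access is not permitted), and the faulting address in si_addr. The short userspace program below is a minimal sketch of how those fields look from the receiving end; it is not part of fault.c, and the handler details and mapping size are illustrative assumptions (any POSIX system with SA_SIGINFO should behave similarly).

/* Illustrative only -- not kernel code.  Writing to a read-only
 * anonymous mapping exercises the code = SEGV_ACCERR path in
 * do_sparc_fault(); an unmapped address would report SEGV_MAPERR. */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* Formatting inside a signal handler is a demo shortcut; only
	 * write() below is guaranteed async-signal-safe. */
	char buf[96];
	const char *why = (info->si_code == SEGV_MAPERR) ? "SEGV_MAPERR" :
			  (info->si_code == SEGV_ACCERR) ? "SEGV_ACCERR" :
							   "other";
	int len = snprintf(buf, sizeof(buf), "SIGSEGV at %p (%s)\n",
			   info->si_addr, why);
	if (len > 0)
		write(STDERR_FILENO, buf, (size_t)len);
	_exit(1);
}

int main(void)
{
	struct sigaction sa;
	char *p;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 'x';	/* read-only page: expect SEGV_ACCERR in the handler */
	return 0;
}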
v3.5.6
  1/*
  2 * fault.c:  Page fault handlers for the Sparc.
  3 *
  4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
  6 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  7 */
  8
  9#include <asm/head.h>
 10
 11#include <linux/string.h>
 12#include <linux/types.h>
 13#include <linux/sched.h>
 14#include <linux/ptrace.h>
 15#include <linux/mman.h>
 16#include <linux/threads.h>
 17#include <linux/kernel.h>
 18#include <linux/signal.h>
 19#include <linux/mm.h>
 20#include <linux/smp.h>
 21#include <linux/perf_event.h>
 22#include <linux/interrupt.h>
 23#include <linux/kdebug.h>
 24
 25#include <asm/page.h>
 26#include <asm/pgtable.h>
 27#include <asm/openprom.h>
 28#include <asm/oplib.h>
 29#include <asm/smp.h>
 30#include <asm/traps.h>
 31#include <asm/uaccess.h>
 32
 33int show_unhandled_signals = 1;
 34
 35/* At boot time we determine these two values necessary for setting
 36 * up the segment maps and page table entries (pte's).
 37 */
 38
 39int num_contexts;
 40
 41/* Return how much physical memory we have.  */
 42unsigned long probe_memory(void)
 43{
 44	unsigned long total = 0;
 45	int i;
 46
 47	for (i = 0; sp_banks[i].num_bytes; i++)
 48		total += sp_banks[i].num_bytes;
 49
 50	return total;
 51}
 52
 53static void unhandled_fault(unsigned long, struct task_struct *,
 54		struct pt_regs *) __attribute__ ((noreturn));
 55
 56static void __noreturn unhandled_fault(unsigned long address,
 57				       struct task_struct *tsk,
 58				       struct pt_regs *regs)
 59{
 60	if ((unsigned long) address < PAGE_SIZE) {
 61		printk(KERN_ALERT
 62		    "Unable to handle kernel NULL pointer dereference\n");
 63	} else {
 64		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
 65		       address);
 66	}
 67	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
 68		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
 69	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
 70		(tsk->mm ? (unsigned long) tsk->mm->pgd :
 71			(unsigned long) tsk->active_mm->pgd));
 72	die_if_kernel("Oops", regs);
 73}
 74
 75asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
 76			    unsigned long address)
 77{
 78	struct pt_regs regs;
 79	unsigned long g2;
 80	unsigned int insn;
 81	int i;
 82
 83	i = search_extables_range(ret_pc, &g2);
 84	switch (i) {
 85	case 3:
 86		/* load & store will be handled by fixup */
 87		return 3;
 88
 89	case 1:
 90		/* store will be handled by fixup, load will bump out */
 91		/* for _to_ macros */
 92		insn = *((unsigned int *) pc);
 93		if ((insn >> 21) & 1)
 94			return 1;
 95		break;
 96
 97	case 2:
 98		/* load will be handled by fixup, store will bump out */
 99		/* for _from_ macros */
100		insn = *((unsigned int *) pc);
101		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
102			return 2;
103		break;
104
105	default:
106		break;
107	}
108
109	memset(&regs, 0, sizeof(regs));
110	regs.pc = pc;
111	regs.npc = pc + 4;
112	__asm__ __volatile__(
113		"rd %%psr, %0\n\t"
114		"nop\n\t"
115		"nop\n\t"
116		"nop\n" : "=r" (regs.psr));
117	unhandled_fault(address, current, &regs);
118
119	/* Not reached */
120	return 0;
121}
122
123static inline void
124show_signal_msg(struct pt_regs *regs, int sig, int code,
125		unsigned long address, struct task_struct *tsk)
126{
127	if (!unhandled_signal(tsk, sig))
128		return;
129
130	if (!printk_ratelimit())
131		return;
132
133	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
134	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
135	       tsk->comm, task_pid_nr(tsk), address,
136	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
137	       (void *)regs->u_regs[UREG_FP], code);
138
139	print_vma_addr(KERN_CONT " in ", regs->pc);
140
141	printk(KERN_CONT "\n");
142}
143
144static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
145			       unsigned long addr)
146{
147	siginfo_t info;
148
149	info.si_signo = sig;
150	info.si_code = code;
151	info.si_errno = 0;
152	info.si_addr = (void __user *) addr;
153	info.si_trapno = 0;
154
155	if (unlikely(show_unhandled_signals))
156		show_signal_msg(regs, sig, info.si_code,
157				addr, current);
158
159	force_sig_info (sig, &info, current);
160}
161
162extern unsigned long safe_compute_effective_address(struct pt_regs *,
163						    unsigned int);
164
165static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
166{
167	unsigned int insn;
168
169	if (text_fault)
170		return regs->pc;
171
172	if (regs->psr & PSR_PS)
173		insn = *(unsigned int *) regs->pc;
174	else
175		__get_user(insn, (unsigned int *) regs->pc);
176
177	return safe_compute_effective_address(regs, insn);
178}
179
180static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
181				      int text_fault)
182{
183	unsigned long addr = compute_si_addr(regs, text_fault);
184
185	__do_fault_siginfo(code, sig, regs, addr);
186}
187
188asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
189			       unsigned long address)
190{
191	struct vm_area_struct *vma;
192	struct task_struct *tsk = current;
193	struct mm_struct *mm = tsk->mm;
194	unsigned int fixup;
195	unsigned long g2;
196	int from_user = !(regs->psr & PSR_PS);
197	int fault, code;
198	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
199			      (write ? FAULT_FLAG_WRITE : 0));
200
201	if (text_fault)
202		address = regs->pc;
203
204	/*
205	 * We fault-in kernel-space virtual memory on-demand. The
206	 * 'reference' page table is init_mm.pgd.
207	 *
208	 * NOTE! We MUST NOT take any locks for this case. We may
209	 * be in an interrupt or a critical region, and should
210	 * only copy the information from the master page table,
211	 * nothing more.
212	 */
213	code = SEGV_MAPERR;
214	if (address >= TASK_SIZE)
215		goto vmalloc_fault;
216
217	/*
218	 * If we're in an interrupt or have no user
219	 * context, we must not take the fault..
220	 */
221	if (in_atomic() || !mm)
222		goto no_context;
223
224	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
225
226retry:
227	down_read(&mm->mmap_sem);
228
229	if (!from_user && address >= PAGE_OFFSET)
230		goto bad_area;
231
232	vma = find_vma(mm, address);
233	if (!vma)
234		goto bad_area;
235	if (vma->vm_start <= address)
236		goto good_area;
237	if (!(vma->vm_flags & VM_GROWSDOWN))
238		goto bad_area;
239	if (expand_stack(vma, address))
240		goto bad_area;
241	/*
242	 * Ok, we have a good vm_area for this memory access, so
243	 * we can handle it..
244	 */
245good_area:
246	code = SEGV_ACCERR;
247	if (write) {
248		if (!(vma->vm_flags & VM_WRITE))
249			goto bad_area;
250	} else {
251		/* Allow reads even for write-only mappings */
252		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
253			goto bad_area;
254	}
255
256	/*
257	 * If for any reason at all we couldn't handle the fault,
258	 * make sure we exit gracefully rather than endlessly redo
259	 * the fault.
260	 */
261	fault = handle_mm_fault(mm, vma, address, flags);
262
263	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
264		return;
265
266	if (unlikely(fault & VM_FAULT_ERROR)) {
267		if (fault & VM_FAULT_OOM)
268			goto out_of_memory;
269		else if (fault & VM_FAULT_SIGBUS)
270			goto do_sigbus;
271		BUG();
272	}
273
274	if (flags & FAULT_FLAG_ALLOW_RETRY) {
275		if (fault & VM_FAULT_MAJOR) {
276			current->maj_flt++;
277			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
278				      1, regs, address);
279		} else {
280			current->min_flt++;
281			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
282				      1, regs, address);
283		}
284		if (fault & VM_FAULT_RETRY) {
285			flags &= ~FAULT_FLAG_ALLOW_RETRY;
286
287			/* No need to up_read(&mm->mmap_sem) as we would
288			 * have already released it in __lock_page_or_retry
289			 * in mm/filemap.c.
290			 */
291
292			goto retry;
293		}
294	}
295
296	up_read(&mm->mmap_sem);
297	return;
298
299	/*
300	 * Something tried to access memory that isn't in our memory map..
301	 * Fix it, but check if it's kernel or user first..
302	 */
303bad_area:
304	up_read(&mm->mmap_sem);
305
306bad_area_nosemaphore:
307	/* User mode accesses just cause a SIGSEGV */
308	if (from_user) {
309		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
310		return;
311	}
312
313	/* Is this in ex_table? */
314no_context:
315	g2 = regs->u_regs[UREG_G2];
316	if (!from_user) {
317		fixup = search_extables_range(regs->pc, &g2);
318		/* Values below 10 are reserved for other things */
319		if (fixup > 10) {
320			extern const unsigned __memset_start[];
321			extern const unsigned __memset_end[];
322			extern const unsigned __csum_partial_copy_start[];
323			extern const unsigned __csum_partial_copy_end[];
324
325#ifdef DEBUG_EXCEPTIONS
326			printk("Exception: PC<%08lx> faddr<%08lx>\n",
327			       regs->pc, address);
328			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
329				regs->pc, fixup, g2);
330#endif
331			if ((regs->pc >= (unsigned long)__memset_start &&
332			     regs->pc < (unsigned long)__memset_end) ||
333			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
334			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
335				regs->u_regs[UREG_I4] = address;
336				regs->u_regs[UREG_I5] = regs->pc;
337			}
338			regs->u_regs[UREG_G2] = g2;
339			regs->pc = fixup;
340			regs->npc = regs->pc + 4;
341			return;
342		}
343	}
344
345	unhandled_fault(address, tsk, regs);
346	do_exit(SIGKILL);
347
348/*
349 * We ran out of memory, or some other thing happened to us that made
350 * us unable to handle the page fault gracefully.
351 */
352out_of_memory:
353	up_read(&mm->mmap_sem);
354	if (from_user) {
355		pagefault_out_of_memory();
356		return;
357	}
358	goto no_context;
359
360do_sigbus:
361	up_read(&mm->mmap_sem);
362	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
363	if (!from_user)
364		goto no_context;
365
366vmalloc_fault:
367	{
368		/*
369		 * Synchronize this task's top level page-table
370		 * with the 'reference' page table.
371		 */
372		int offset = pgd_index(address);
373		pgd_t *pgd, *pgd_k;
374		pmd_t *pmd, *pmd_k;
375
376		pgd = tsk->active_mm->pgd + offset;
377		pgd_k = init_mm.pgd + offset;
378
379		if (!pgd_present(*pgd)) {
380			if (!pgd_present(*pgd_k))
381				goto bad_area_nosemaphore;
382			pgd_val(*pgd) = pgd_val(*pgd_k);
383			return;
384		}
385
386		pmd = pmd_offset(pgd, address);
387		pmd_k = pmd_offset(pgd_k, address);
388
389		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
390			goto bad_area_nosemaphore;
391
392		*pmd = *pmd_k;
393		return;
394	}
395}
396
397/* This always deals with user addresses. */
398static void force_user_fault(unsigned long address, int write)
399{
400	struct vm_area_struct *vma;
401	struct task_struct *tsk = current;
402	struct mm_struct *mm = tsk->mm;
403	int code;
404
405	code = SEGV_MAPERR;
406
407	down_read(&mm->mmap_sem);
408	vma = find_vma(mm, address);
409	if (!vma)
410		goto bad_area;
411	if (vma->vm_start <= address)
412		goto good_area;
413	if (!(vma->vm_flags & VM_GROWSDOWN))
414		goto bad_area;
415	if (expand_stack(vma, address))
416		goto bad_area;
417good_area:
418	code = SEGV_ACCERR;
419	if (write) {
420		if (!(vma->vm_flags & VM_WRITE))
421			goto bad_area;
422	} else {
423		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
424			goto bad_area;
425	}
426	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
427	case VM_FAULT_SIGBUS:
428	case VM_FAULT_OOM:
429		goto do_sigbus;
430	}
431	up_read(&mm->mmap_sem);
432	return;
433bad_area:
434	up_read(&mm->mmap_sem);
435	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
436	return;
437
438do_sigbus:
439	up_read(&mm->mmap_sem);
440	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
441}
442
443static void check_stack_aligned(unsigned long sp)
444{
445	if (sp & 0x7UL)
446		force_sig(SIGILL, current);
447}
448
449void window_overflow_fault(void)
450{
451	unsigned long sp;
452
453	sp = current_thread_info()->rwbuf_stkptrs[0];
454	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
455		force_user_fault(sp + 0x38, 1);
456	force_user_fault(sp, 1);
457
458	check_stack_aligned(sp);
459}
460
461void window_underflow_fault(unsigned long sp)
462{
463	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
464		force_user_fault(sp + 0x38, 0);
465	force_user_fault(sp, 0);
466
467	check_stack_aligned(sp);
468}
469
470void window_ret_fault(struct pt_regs *regs)
471{
472	unsigned long sp;
473
474	sp = regs->u_regs[UREG_FP];
475	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
476		force_user_fault(sp + 0x38, 0);
477	force_user_fault(sp, 0);
478
479	check_stack_aligned(sp);
480}
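In both versions, window_overflow_fault(), window_underflow_fault() and window_ret_fault() fault in a second page only when sp and sp + 0x38 (the highest offset they probe in the register-window save area) land on different pages. The snippet below is a minimal standalone sketch of that page-crossing test; crosses_page() is an illustrative helper name, and the 4 KiB PAGE_SIZE is an assumption standing in for the kernel's PAGE_MASK on sparc32.

/* Illustrative only -- not kernel code.  Mirrors the test
 * ((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK) used above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumed; sparc32 uses 4 KiB pages */
#define PAGE_MASK (~(PAGE_SIZE - 1))

static int crosses_page(unsigned long sp)
{
	return ((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK);
}

int main(void)
{
	/* Save area well inside one page: one force_user_fault() call
	 * on sp is enough. */
	printf("sp=0x%05lx crosses=%d\n", 0x10000UL, crosses_page(0x10000UL));

	/* sp + 0x38 spills onto the next page: the handlers also call
	 * force_user_fault(sp + 0x38, ...) before touching sp itself. */
	printf("sp=0x%05lx crosses=%d\n", 0x10fd0UL, crosses_page(0x10fd0UL));
	return 0;
}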