v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>

#include "mm_32.h"

int show_unhandled_signals = 1;

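/*
 * unhandled_fault() is the terminal path for faults the kernel cannot
 * resolve: it prints the faulting address (flagging addresses below
 * PAGE_SIZE as likely NULL pointer dereferences) along with the current
 * context and pgd, then hands off to die_if_kernel("Oops", regs).
 */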
static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, code,
				addr, current);

	force_sig_fault(sig, code, (void __user *) addr);
}

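/*
 * compute_si_addr() recovers the effective address to report in the
 * siginfo.  For a text fault the faulting address is simply the PC.  For
 * data faults the faulting instruction is fetched (directly in kernel
 * mode, via __get_user() for user mode) and decoded by
 * safe_compute_effective_address() to rebuild the memory operand address.
 */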
static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

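/*
 * do_sparc_fault() is the C entry point for MMU faults.  The overall flow:
 * faults on kernel addresses above TASK_SIZE are resolved by copying the
 * top-level entries from the init_mm reference page table (vmalloc_fault);
 * faults taken with page faults disabled or without a user mm go straight
 * to the kernel exception-table fixup path (no_context); everything else
 * is a normal VMA lookup under the mmap read lock followed by
 * handle_mm_fault(), with the usual retry, OOM, SIGSEGV and SIGBUS
 * outcomes.
 */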
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int from_user = !(regs->psr & PSR_PS);
	int code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (pagefault_disabled() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	if (from_user)
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	if (!from_user) {
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (entry) {
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
				regs->pc, entry->fixup);
#endif
			regs->pc = entry->fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	mmap_read_unlock(mm);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);

		p4d_k = p4d_offset(pgd_k, address);
		pud_k = pud_offset(p4d_k, address);
		pmd_k = pmd_offset(pud_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

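/*
 * force_user_fault() is a stripped-down fault handler used by the register
 * window handlers below: it resolves a fault on a user stack address (or
 * delivers SIGSEGV/SIGBUS) without any of the kernel-address or
 * exception-table handling done by do_sparc_fault().
 */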
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int flags = FAULT_FLAG_USER;
	int code;

	code = SEGV_MAPERR;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(vma, address, flags, NULL)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	mmap_read_unlock(mm);
	return;
bad_area:
	mmap_read_unlock(mm);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	mmap_read_unlock(mm);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

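/*
 * The handlers below run when spilling or filling a SPARC register window
 * to or from the user stack faults.  Each one pre-faults the 64-byte window
 * save area at %sp (touching both pages when the area straddles a page
 * boundary, hence the sp + 0x38 probe), then checks that the stack pointer
 * has the 8-byte alignment the window spill/fill code expects, raising
 * SIGILL if it does not.
 */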
static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}
v3.5.6
 
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_contexts;

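/*
 * probe_memory() reports the total physical memory by summing the
 * sp_banks[] table, which is presumably filled in earlier from the
 * OpenBoot PROM's list of available physical memory banks.
 */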
/* Return how much physical memory we have.  */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void __noreturn unhandled_fault(unsigned long address,
				       struct task_struct *tsk,
				       struct pt_regs *regs)
{
	if ((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
		       address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

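/*
 * lookup_fault() handles faults taken inside hand-written kernel routines
 * (it is asmlinkage and reached from low-level fixup code, seemingly the
 * user-copy helpers, rather than from do_sparc_fault()).  It looks up
 * ret_pc in the exception range table and returns 1, 2 or 3 to tell the
 * caller which fixup strategy applies, peeking at the faulting load/store
 * instruction to distinguish the copy-to-user and copy-from-user cases.
 * If no fixup is registered, the fault is fatal and unhandled_fault() is
 * invoked with a pt_regs reconstructed on the spot.
 */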
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof(regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info (sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

	if (regs->psr & PSR_PS)
		insn = *(unsigned int *) regs->pc;
	else
		__get_user(insn, (unsigned int *) regs->pc);

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;
	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
			      (write ? FAULT_FLAG_WRITE : 0));

	if (text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	down_read(&mm->mmap_sem);

	if (!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			current->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
				      1, regs, address);
		} else {
			current->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
				      1, regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		/* Values below 10 are reserved for other things */
		if (fixup > 10) {
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n",
			       regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault(address, tsk, regs);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		*pmd = *pmd_k;
		return;
	}
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}