v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}
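/*
 * The walk above is the four-level layout this file assumes
 * (pgd -> pud -> pmd -> pte); it bails out at the first missing
 * level, so a hole anywhere reports the page as not present.
 */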

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long mask;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
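        /*
         * The ISR "execute" and "write" bits are shifted into the same bit
         * positions used by VM_EXEC and VM_WRITE, so the access type can be
         * checked below with a plain "(vma->vm_flags & mask) != mask" test.
         */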

        /* mmap_sem is performance critical... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If fault is in region 5 and we are in the kernel, we may already
         * have the mmap_sem (pfn_valid macro is called during mmap). There
         * is no vma for region 5 addresses anyway, so skip getting the
         * semaphore and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle the kprobes on user space access instructions
         */
        if (kprobe_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end or NULL.
         *
         * We may find no vma at all: the last vm area could be the register
         * backing store that needs to expand upwards; in that case vma will
         * be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

        if (((isr >> IA64_ISR_R_BIIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
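                        /*
                         * With ALLOW_RETRY cleared and TRIED set, a second
                         * attempt can block on the page but can no longer be
                         * asked to retry, so this loop runs at most twice.
                         */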

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                force_sig_fault(signal, code, (void __user *) address,
                                0, __ISR_VALID, isr);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale. If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}
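
For the user-mode path above, force_sig_fault() is what turns the handler's
"signal" and "code" values into the siginfo a process can observe. As a quick
illustration, here is a minimal user-space sketch (not part of the kernel
tree, and not ia64-specific) showing how si_code separates the SEGV_MAPERR
default ("no mapping", set at the top of the handler) from SEGV_ACCERR
("bad permissions", set at good_area); it triggers the latter by writing to
a PROT_NONE mapping, which has a vma but fails the permission check:

#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
        /* Keep to async-signal-safe calls; si_code tells the cases apart. */
        const char *msg = (si->si_code == SEGV_ACCERR)
                ? "SEGV_ACCERR: mapping exists, permissions wrong\n"
                : "SEGV_MAPERR: no mapping at the faulting address\n";
        write(STDERR_FILENO, msg, strlen(msg));
        _exit(0);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = segv_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);

        /* A PROT_NONE page reaches good_area and fails the permission
         * check: expect SEGV_ACCERR from the handler above. */
        char *p = mmap(NULL, 4096, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p != MAP_FAILED)
                *p = 1;
        return 1;
}
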
v4.6
 
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        return 0;
}
#endif
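/*
 * (In the v5.4 listing above, this open-coded helper is gone and the
 * handler calls the generic kprobe_page_fault() instead.)
 */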

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        /* mmap_sem is performance critical... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If fault is in region 5 and we are in the kernel, we may already
         * have the mmap_sem (pfn_valid macro is called during mmap). There
         * is no vma for region 5 addresses anyway, so skip getting the
         * semaphore and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle the kprobes on user space access instructions
         */
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
retry:
        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end or NULL.
         *
         * We may find no vma at all: the last vm area could be the register
         * backing store that needs to expand upwards; in that case vma will
         * be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(mm, vma, address, flags);

        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR)
                        current->maj_flt++;
                else
                        current->min_flt++;
                if (fault & VM_FAULT_RETRY) {
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale. If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}
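
The two listings differ mainly in their interface to generic code: v5.4
carries an SPDX license tag, pulls in <linux/sched/signal.h> and
<linux/extable.h>, calls the generic kprobe_page_fault() instead of the
open-coded notify_page_fault(), uses the vm_fault_t type and the
handle_mm_fault(vma, ...) signature that dropped the mm argument, and
reports user faults through force_sig_fault() rather than filling a
struct siginfo by hand for force_sig_info(). The ia64-specific logic
(region-5 handling, register backing store expansion, and the speculative
load "ed" bit) is unchanged between the two versions.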