v3.1
  1/*
  2 * MMU fault handling support.
  3 *
  4 * Copyright (C) 1998-2002 Hewlett-Packard Co
  5 *	David Mosberger-Tang <davidm@hpl.hp.com>
  6 */
  7#include <linux/sched.h>
  8#include <linux/kernel.h>
  9#include <linux/mm.h>
 10#include <linux/interrupt.h>
 11#include <linux/kprobes.h>
 12#include <linux/kdebug.h>
 13#include <linux/prefetch.h>
 14
 15#include <asm/pgtable.h>
 16#include <asm/processor.h>
 17#include <asm/system.h>
 18#include <asm/uaccess.h>
 19
 20extern int die(char *, struct pt_regs *, long);
 21
 22#ifdef CONFIG_KPROBES
 23static inline int notify_page_fault(struct pt_regs *regs, int trap)
 24{
 25	int ret = 0;
 26
 27	if (!user_mode(regs)) {
 28		/* kprobe_running() needs smp_processor_id() */
 29		preempt_disable();
 30		if (kprobe_running() && kprobe_fault_handler(regs, trap))
 31			ret = 1;
 32		preempt_enable();
 33	}
 34
 35	return ret;
 36}
 37#else
 38static inline int notify_page_fault(struct pt_regs *regs, int trap)
 39{
 40	return 0;
 41}
 42#endif
 43
 44/*
 45 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 46 * (inside region 5, on ia64) and that page is present.
 47 */
 48static int
 49mapped_kernel_page_is_present (unsigned long address)
 50{
 51	pgd_t *pgd;
 52	pud_t *pud;
 53	pmd_t *pmd;
 54	pte_t *ptep, pte;
 55
 56	pgd = pgd_offset_k(address);
 57	if (pgd_none(*pgd) || pgd_bad(*pgd))
 58		return 0;
 59
 60	pud = pud_offset(pgd, address);
 61	if (pud_none(*pud) || pud_bad(*pud))
 62		return 0;
 63
 64	pmd = pmd_offset(pud, address);
 65	if (pmd_none(*pmd) || pmd_bad(*pmd))
 66		return 0;
 67
 68	ptep = pte_offset_kernel(pmd, address);
 69	if (!ptep)
 70		return 0;
 71
 72	pte = *ptep;
 73	return pte_present(pte);
 74}
 75
 76void __kprobes
 77ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 78{
 79	int signal = SIGSEGV, code = SEGV_MAPERR;
 80	struct vm_area_struct *vma, *prev_vma;
 81	struct mm_struct *mm = current->mm;
 82	struct siginfo si;
 83	unsigned long mask;
 84	int fault;
 85
 86	/* mmap_sem is performance critical.... */
 87	prefetchw(&mm->mmap_sem);
 88
 89	/*
 90	 * If we're in an interrupt or have no user context, we must not take the fault.
 91	 */
 92	if (in_atomic() || !mm)
 93		goto no_context;
 94
 95#ifdef CONFIG_VIRTUAL_MEM_MAP
 96	/*
 97	 * If fault is in region 5 and we are in the kernel, we may already
 98	 * have the mmap_sem (pfn_valid macro is called during mmap). There
 99	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
100	 * and go directly to the exception handling code.
101	 */
102
103	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
104		goto bad_area_no_up;
105#endif
106
107	/*
108	 * This is to handle the kprobes on user space access instructions
109	 */
110	if (notify_page_fault(regs, TRAP_BRKPT))
111		return;
112
113	down_read(&mm->mmap_sem);
114
115	vma = find_vma_prev(mm, address, &prev_vma);
116	if (!vma && !prev_vma )
117		goto bad_area;
118
119        /*
120         * find_vma_prev() returns vma such that address < vma->vm_end or NULL
121         *
122         * May find no vma, but could be that the last vm area is the
123         * register backing store that needs to expand upwards, in
124         * this case vma will be null, but prev_vma will be non-null
125         */
126        if (( !vma && prev_vma ) || (address < vma->vm_start) )
127		goto check_expansion;
128
129  good_area:
130	code = SEGV_ACCERR;
131
132	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
133
134#	define VM_READ_BIT	0
135#	define VM_WRITE_BIT	1
136#	define VM_EXEC_BIT	2
137
138#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
139	    || (1 << VM_EXEC_BIT) != VM_EXEC)
140#		error File is out of sync with <linux/mm.h>.  Please update.
141#	endif
142
143	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
144		goto bad_area;
145
146	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
147		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
148
149	if ((vma->vm_flags & mask) != mask)
150		goto bad_area;
151
152	/*
153	 * If for any reason at all we couldn't handle the fault, make
154	 * sure we exit gracefully rather than endlessly redo the
155	 * fault.
156	 */
157	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
158	if (unlikely(fault & VM_FAULT_ERROR)) {
159		/*
160		 * We ran out of memory, or some other thing happened
161		 * to us that made us unable to handle the page fault
162		 * gracefully.
163		 */
164		if (fault & VM_FAULT_OOM) {
165			goto out_of_memory;
166		} else if (fault & VM_FAULT_SIGBUS) {
167			signal = SIGBUS;
168			goto bad_area;
169		}
170		BUG();
171	}
172	if (fault & VM_FAULT_MAJOR)
173		current->maj_flt++;
174	else
175		current->min_flt++;
176	up_read(&mm->mmap_sem);
177	return;
178
179  check_expansion:
180	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
181		if (!vma)
182			goto bad_area;
183		if (!(vma->vm_flags & VM_GROWSDOWN))
184			goto bad_area;
185		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
186		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
187			goto bad_area;
188		if (expand_stack(vma, address))
189			goto bad_area;
190	} else {
191		vma = prev_vma;
192		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
193		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
194			goto bad_area;
195		/*
196		 * Since the register backing store is accessed sequentially,
197		 * we disallow growing it by more than a page at a time.
198		 */
199		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
200			goto bad_area;
201		if (expand_upwards(vma, address))
202			goto bad_area;
203	}
204	goto good_area;
205
206  bad_area:
207	up_read(&mm->mmap_sem);
208#ifdef CONFIG_VIRTUAL_MEM_MAP
209  bad_area_no_up:
210#endif
211	if ((isr & IA64_ISR_SP)
212	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
213	{
214		/*
215		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
216		 * bit in the psr to ensure forward progress.  (Target register will get a
217		 * NaT for ld.s, lfetch will be canceled.)
218		 */
219		ia64_psr(regs)->ed = 1;
220		return;
221	}
222	if (user_mode(regs)) {
223		si.si_signo = signal;
224		si.si_errno = 0;
225		si.si_code = code;
226		si.si_addr = (void __user *) address;
227		si.si_isr = isr;
228		si.si_flags = __ISR_VALID;
229		force_sig_info(signal, &si, current);
230		return;
231	}
232
233  no_context:
234	if ((isr & IA64_ISR_SP)
235	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
236	{
237		/*
238		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
239		 * bit in the psr to ensure forward progress.  (Target register will get a
240		 * NaT for ld.s, lfetch will be canceled.)
241		 */
242		ia64_psr(regs)->ed = 1;
243		return;
244	}
245
246	/*
247	 * Since we have no vma's for region 5, we might get here even if the address is
248	 * valid, due to the VHPT walker inserting a non present translation that becomes
249	 * stale. If that happens, the non present fault handler already purged the stale
250	 * translation, which fixed the problem. So, we check to see if the translation is
251	 * valid, and return if it is.
252	 */
253	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
254		return;
255
256	if (ia64_done_with_exception(regs))
257		return;
258
259	/*
260	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
261	 * with extreme prejudice.
262	 */
263	bust_spinlocks(1);
264
265	if (address < PAGE_SIZE)
266		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
267	else
268		printk(KERN_ALERT "Unable to handle kernel paging request at "
269		       "virtual address %016lx\n", address);
270	if (die("Oops", regs, isr))
271		regs = NULL;
272	bust_spinlocks(0);
273	if (regs)
274		do_exit(SIGKILL);
275	return;
276
277  out_of_memory:
278	up_read(&mm->mmap_sem);
279	if (!user_mode(regs))
280		goto no_context;
281	pagefault_out_of_memory();
282}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * MMU fault handling support.
  4 *
  5 * Copyright (C) 1998-2002 Hewlett-Packard Co
  6 *	David Mosberger-Tang <davidm@hpl.hp.com>
  7 */
  8#include <linux/sched/signal.h>
  9#include <linux/kernel.h>
 10#include <linux/mm.h>
 11#include <linux/extable.h>
 12#include <linux/interrupt.h>
 13#include <linux/kprobes.h>
 14#include <linux/kdebug.h>
 15#include <linux/prefetch.h>
 16#include <linux/uaccess.h>
 17#include <linux/perf_event.h>
 18
 19#include <asm/processor.h>
 20#include <asm/exception.h>
 21
 22extern int die(char *, struct pt_regs *, long);
 23
 24/*
 25 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 26 * (inside region 5, on ia64) and that page is present.
 27 */
 28static int
 29mapped_kernel_page_is_present (unsigned long address)
 30{
 31	pgd_t *pgd;
 32	p4d_t *p4d;
 33	pud_t *pud;
 34	pmd_t *pmd;
 35	pte_t *ptep, pte;
 36
 37	pgd = pgd_offset_k(address);
 38	if (pgd_none(*pgd) || pgd_bad(*pgd))
 39		return 0;
 40
 41	p4d = p4d_offset(pgd, address);
 42	if (p4d_none(*p4d) || p4d_bad(*p4d))
 43		return 0;
 44
 45	pud = pud_offset(p4d, address);
 46	if (pud_none(*pud) || pud_bad(*pud))
 47		return 0;
 48
 49	pmd = pmd_offset(pud, address);
 50	if (pmd_none(*pmd) || pmd_bad(*pmd))
 51		return 0;
 52
 53	ptep = pte_offset_kernel(pmd, address);
 54	if (!ptep)
 55		return 0;
 56
 57	pte = *ptep;
 58	return pte_present(pte);
 59}
 60
 61#	define VM_READ_BIT	0
 62#	define VM_WRITE_BIT	1
 63#	define VM_EXEC_BIT	2
 64
 65void __kprobes
 66ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 67{
 68	int signal = SIGSEGV, code = SEGV_MAPERR;
 69	struct vm_area_struct *vma, *prev_vma;
 70	struct mm_struct *mm = current->mm;
 71	unsigned long mask;
 72	vm_fault_t fault;
 73	unsigned int flags = FAULT_FLAG_DEFAULT;
 74
 75	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
 76		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 77
 78	/* mmap_lock is performance critical.... */
 79	prefetchw(&mm->mmap_lock);
 80
 81	/*
 82	 * If we're in an interrupt or have no user context, we must not take the fault.
 83	 */
 84	if (faulthandler_disabled() || !mm)
 85		goto no_context;
 86
 87#ifdef CONFIG_VIRTUAL_MEM_MAP
 88	/*
 89	 * If fault is in region 5 and we are in the kernel, we may already
 90	 * have the mmap_lock (pfn_valid macro is called during mmap). There
 91	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
 92	 * and go directly to the exception handling code.
 93	 */
 94
 95	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
 96		goto bad_area_no_up;
 97#endif
 98
 99	/*
100	 * This is to handle the kprobes on user space access instructions
101	 */
102	if (kprobe_page_fault(regs, TRAP_BRKPT))
103		return;
104
105	if (user_mode(regs))
106		flags |= FAULT_FLAG_USER;
107	if (mask & VM_WRITE)
108		flags |= FAULT_FLAG_WRITE;
109
110	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
111retry:
112	mmap_read_lock(mm);
113
114	vma = find_vma_prev(mm, address, &prev_vma);
115	if (!vma && !prev_vma )
116		goto bad_area;
117
118        /*
119         * find_vma_prev() returns vma such that address < vma->vm_end or NULL
120         *
121         * May find no vma, but could be that the last vm area is the
122         * register backing store that needs to expand upwards, in
123         * this case vma will be null, but prev_vma will ne non-null
124         */
125        if (( !vma && prev_vma ) || (address < vma->vm_start) )
126		goto check_expansion;
127
128  good_area:
129	code = SEGV_ACCERR;
130
131	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
132
133#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
134	    || (1 << VM_EXEC_BIT) != VM_EXEC)
135#		error File is out of sync with <linux/mm.h>.  Please update.
136#	endif
137
138	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
139		goto bad_area;
140
141	if ((vma->vm_flags & mask) != mask)
142		goto bad_area;
143
144	/*
145	 * If for any reason at all we couldn't handle the fault, make
146	 * sure we exit gracefully rather than endlessly redo the
147	 * fault.
148	 */
149	fault = handle_mm_fault(vma, address, flags, regs);
150
151	if (fault_signal_pending(fault, regs))
152		return;
153
154	if (unlikely(fault & VM_FAULT_ERROR)) {
155		/*
156		 * We ran out of memory, or some other thing happened
157		 * to us that made us unable to handle the page fault
158		 * gracefully.
159		 */
160		if (fault & VM_FAULT_OOM) {
161			goto out_of_memory;
162		} else if (fault & VM_FAULT_SIGSEGV) {
163			goto bad_area;
164		} else if (fault & VM_FAULT_SIGBUS) {
165			signal = SIGBUS;
166			goto bad_area;
167		}
168		BUG();
169	}
170
171	if (flags & FAULT_FLAG_ALLOW_RETRY) {
172		if (fault & VM_FAULT_RETRY) {
173			flags |= FAULT_FLAG_TRIED;
174
175			 /* No need to mmap_read_unlock(mm) as we would
176			 * have already released it in __lock_page_or_retry
177			 * in mm/filemap.c.
178			 */
179
180			goto retry;
181		}
182	}
183
184	mmap_read_unlock(mm);
185	return;
186
187  check_expansion:
188	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
189		if (!vma)
190			goto bad_area;
191		if (!(vma->vm_flags & VM_GROWSDOWN))
192			goto bad_area;
193		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
194		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
195			goto bad_area;
196		if (expand_stack(vma, address))
197			goto bad_area;
198	} else {
199		vma = prev_vma;
200		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
201		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
202			goto bad_area;
203		/*
204		 * Since the register backing store is accessed sequentially,
205		 * we disallow growing it by more than a page at a time.
206		 */
207		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
208			goto bad_area;
209		if (expand_upwards(vma, address))
210			goto bad_area;
211	}
212	goto good_area;
213
214  bad_area:
215	mmap_read_unlock(mm);
216#ifdef CONFIG_VIRTUAL_MEM_MAP
217  bad_area_no_up:
218#endif
219	if ((isr & IA64_ISR_SP)
220	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
221	{
222		/*
223		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
224		 * bit in the psr to ensure forward progress.  (Target register will get a
225		 * NaT for ld.s, lfetch will be canceled.)
226		 */
227		ia64_psr(regs)->ed = 1;
228		return;
229	}
230	if (user_mode(regs)) {
231		force_sig_fault(signal, code, (void __user *) address,
232				0, __ISR_VALID, isr);
233		return;
234	}
235
236  no_context:
237	if ((isr & IA64_ISR_SP)
238	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
239	{
240		/*
241		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
242		 * bit in the psr to ensure forward progress.  (Target register will get a
243		 * NaT for ld.s, lfetch will be canceled.)
244		 */
245		ia64_psr(regs)->ed = 1;
246		return;
247	}
248
249	/*
250	 * Since we have no vma's for region 5, we might get here even if the address is
251	 * valid, due to the VHPT walker inserting a non present translation that becomes
252	 * stale. If that happens, the non present fault handler already purged the stale
253	 * translation, which fixed the problem. So, we check to see if the translation is
254	 * valid, and return if it is.
255	 */
256	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
257		return;
258
259	if (ia64_done_with_exception(regs))
260		return;
261
262	/*
263	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
264	 * with extreme prejudice.
265	 */
266	bust_spinlocks(1);
267
268	if (address < PAGE_SIZE)
269		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
270	else
271		printk(KERN_ALERT "Unable to handle kernel paging request at "
272		       "virtual address %016lx\n", address);
273	if (die("Oops", regs, isr))
274		regs = NULL;
275	bust_spinlocks(0);
276	if (regs)
277		do_exit(SIGKILL);
278	return;
279
280  out_of_memory:
281	mmap_read_unlock(mm);
282	if (!user_mode(regs))
283		goto no_context;
284	pagefault_out_of_memory();
285}