v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma )
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards; in
	 * this case vma will be NULL, but prev_vma will be non-NULL
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale. If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		make_task_dead(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}
v3.5.6
 
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (!user_mode(regs)) {
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	struct siginfo si;
	unsigned long mask;
	int fault;

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If fault is in region 5 and we are in the kernel, we may already
	 * have the mmap_sem (pfn_valid macro is called during mmap). There
	 * is no vma for region 5 addr's anyway, so skip getting the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (notify_page_fault(regs, TRAP_BRKPT))
		return;

	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma )
		goto bad_area;

	/*
	 * find_vma_prev() returns vma such that address < vma->vm_end or NULL
	 *
	 * May find no vma, but could be that the last vm area is the
	 * register backing store that needs to expand upwards; in
	 * this case vma will be NULL, but prev_vma will be non-NULL
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	mask = (  (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;
	up_read(&mm->mmap_sem);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		si.si_signo = signal;
		si.si_errno = 0;
		si.si_code = code;
		si.si_addr = (void __user *) address;
		si.si_isr = isr;
		si.si_flags = __ISR_VALID;
		force_sig_info(signal, &si, current);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non present translation that becomes
	 * stale. If that happens, the non present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}
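
Both versions ultimately hand userspace a SIGSEGV whose si_code records which branch was taken: SEGV_MAPERR when no vma covered the address, SEGV_ACCERR once a vma was found but the permission check failed. The older handler fills a struct siginfo by hand and calls force_sig_info(); the newer one calls force_sig_fault(). Below is a small, architecture-independent userspace sketch of what that distinction looks like on the receiving side; the file name, the read-only mapping, and the on_segv() helper are purely illustrative.

/* segv-code-demo.c -- illustration only; any Linux target, not ia64-specific. */
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *si, void *uctx)
{
	/* si_code is what the kernel fault handler chose: MAPERR vs. ACCERR. */
	const char *msg =
		si->si_code == SEGV_MAPERR ? "SEGV_MAPERR: address not mapped\n" :
		si->si_code == SEGV_ACCERR ? "SEGV_ACCERR: mapped, bad permissions\n" :
					     "SIGSEGV: other si_code\n";
	write(STDOUT_FILENO, msg, strlen(msg));
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	/* A present but read-only anonymous mapping: writing to it should come
	 * back as SEGV_ACCERR (the good_area -> permission-check path above). */
	volatile char *p = mmap(NULL, 4096, PROT_READ,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* faults; the handler prints the si_code and exits */
	return 1;	/* not reached */
}
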