arch/ia64/mm/fault.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d) || p4d_bad(*p4d))
                return 0;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}
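
/*
 * Note: the walk above deliberately uses the kernel page table
 * (pgd_offset_k()); the no_context path below relies on it to tell a
 * genuinely bad region-5 access apart from a stale VHPT translation
 * that the not-present fault handler has already purged.
 */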

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long mask;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
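        /*
         * Example: a store fault sets IA64_ISR_W, so mask becomes
         * (1 << VM_WRITE_BIT) == VM_WRITE; an instruction fetch sets
         * IA64_ISR_X, yielding VM_EXEC.  The bits are repacked here so
         * that the access check below can compare mask directly against
         * vma->vm_flags.
         */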

        /* mmap_lock is performance critical.... */
        prefetchw(&mm->mmap_lock);
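        /*
         * (prefetchw fetches the lock's cacheline in exclusive state,
         * since mmap_read_lock() below will write to the rwsem count.)
         */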

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        /*
         * This is to handle the kprobes on user space access instructions
         */
        if (kprobe_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
        mmap_read_lock(mm);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end or NULL
         *
         * May find no vma, but it could be that the last vm area is the
         * register backing store that needs to expand upwards; in that
         * case vma will be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;
115
116  good_area:
117	code = SEGV_ACCERR;
118
119	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
120
 
 
 
 
121#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
122	    || (1 << VM_EXEC_BIT) != VM_EXEC)
123#		error File is out of sync with <linux/mm.h>.  Please update.
124#	endif
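        /*
         * The build-time check above protects the shift trick used for
         * mask: if <linux/mm.h> ever renumbered VM_READ/VM_WRITE/VM_EXEC,
         * the repacked ISR bits would silently stop matching vm_flags.
         */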

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs))
                return;

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /* No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */

                goto retry;
        }
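        /*
         * FAULT_FLAG_TRIED records that one attempt already happened;
         * handle_mm_fault() dropped mmap_lock while it waited (see the
         * comment above), which is why the retry path re-takes the lock
         * at the retry label.
         */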

        mmap_read_unlock(mm);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
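                /*
                 * Example: assuming 16KB pages and an 8-byte long, the
                 * largest address accepted here is vma->vm_end + 16376,
                 * i.e. the last register slot of the page just above
                 * vm_end.
                 */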
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        mmap_read_unlock(mm);
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                force_sig_fault(signal, code, (void __user *) address,
                                0, __ISR_VALID, isr);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale. If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                make_task_dead(SIGKILL);
        return;

  out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}
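
The access check in ia64_do_page_fault() hinges on repacking two ISR bits so they line up with the VM_* flags. The following user-space sketch shows the same technique in isolation; the ISR_*_BIT positions and VM_* values are hypothetical stand-ins for the kernel's constants, and it assumes a 64-bit long, as on ia64:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's constants. */
#define VM_READ         0x1UL                   /* 1 << VM_READ_BIT  */
#define VM_WRITE        0x2UL                   /* 1 << VM_WRITE_BIT */
#define VM_EXEC         0x4UL                   /* 1 << VM_EXEC_BIT  */
#define ISR_X_BIT       32                      /* execute-fault bit */
#define ISR_W_BIT       33                      /* write-fault bit   */

/* Repack the fault's execute/write bits into VM_* bit positions. */
static unsigned long fault_mask(unsigned long isr)
{
        return (((isr >> ISR_X_BIT) & 1UL) << 2)        /* VM_EXEC_BIT  */
             | (((isr >> ISR_W_BIT) & 1UL) << 1);       /* VM_WRITE_BIT */
}

int main(void)
{
        unsigned long isr = 1UL << ISR_W_BIT;           /* pretend: write fault */
        unsigned long vm_flags = VM_READ | VM_WRITE;    /* a writable mapping   */
        unsigned long mask = fault_mask(isr);

        /* Same test as the handler: every required right must be present. */
        printf("%s\n", (vm_flags & mask) == mask ? "access ok" : "SIGSEGV");
        return 0;
}
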
arch/ia64/mm/fault.c (v3.1)
 
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>

extern int die(char *, struct pt_regs *, long);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, trap))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
        return 0;
}
#endif

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        struct siginfo si;
        unsigned long mask;
        int fault;

        /* mmap_sem is performance critical.... */
        prefetchw(&mm->mmap_sem);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (in_atomic() || !mm)
                goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
        /*
         * If fault is in region 5 and we are in the kernel, we may already
         * have the mmap_sem (pfn_valid macro is called during mmap). There
         * is no vma for region 5 addr's anyway, so skip getting the semaphore
         * and go directly to the exception handling code.
         */

        if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
                goto bad_area_no_up;
#endif

        /*
         * This is to handle the kprobes on user space access instructions
         */
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;

        down_read(&mm->mmap_sem);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns vma such that address < vma->vm_end or NULL
         *
         * May find no vma, but it could be that the last vm area is the
         * register backing store that needs to expand upwards; in that
         * case vma will be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#       define VM_READ_BIT      0
#       define VM_WRITE_BIT     1
#       define VM_EXEC_BIT      2

#       if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
            || (1 << VM_EXEC_BIT) != VM_EXEC)
#               error File is out of sync with <linux/mm.h>.  Please update.
#       endif

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }
        if (fault & VM_FAULT_MAJOR)
                current->maj_flt++;
        else
                current->min_flt++;
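        /*
         * Per-task fault accounting is done by hand in this version; in
         * the v6.2 code above it moved into handle_mm_fault(), which now
         * takes regs and also drives the perf software events.
         */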
        up_read(&mm->mmap_sem);
        return;

  check_expansion:
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
        if (user_mode(regs)) {
                si.si_signo = signal;
                si.si_errno = 0;
                si.si_code = code;
                si.si_addr = (void __user *) address;
                si.si_isr = isr;
                si.si_flags = __ISR_VALID;
                force_sig_info(signal, &si, current);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault, set the "ed"
                 * bit in the psr to ensure forward progress.  (Target register will get a
                 * NaT for ld.s, lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non present translation that becomes
         * stale. If that happens, the non present fault handler already purged the stale
         * translation, which fixed the problem. So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops. The kernel tried to access some bad page. We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                do_exit(SIGKILL);
        return;

  out_of_memory:
        up_read(&mm->mmap_sem);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}