// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

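	/*
	 * Walk the kernel page table top-down (pgd -> p4d -> pud -> pmd -> pte);
	 * a none/bad entry at any level means the address has no valid mapping.
	 */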
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

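/*
 * Fault-type bits derived from the ISR.  These are defined to match the
 * bit positions of VM_READ, VM_WRITE and VM_EXEC in <linux/mm.h>; the
 * preprocessor check in ia64_do_page_fault() below enforces this.
 */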
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

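	/*
	 * Build the access mask from the ISR: shift the X (execute) and
	 * W (write) interruption status bits into the VM_EXEC and
	 * VM_WRITE positions so the mask can be compared directly
	 * against vma->vm_flags below.
	 */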
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	/*
	 * This handles kprobes on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end,
	 * or NULL if there is none.
	 *
	 * We may find no vma even though the address is valid: the last
	 * vm area may be the register backing store, which needs to
	 * expand upwards.  In that case vma will be NULL but prev_vma
	 * will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

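	/*
	 * A read fault is rejected only if the vma allows neither read
	 * nor write access; a writable mapping is treated as readable
	 * here.
	 */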
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

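	/*
	 * If a fatal signal arrived while the fault was being handled,
	 * handle_mm_fault() returned VM_FAULT_RETRY with the mmap lock
	 * already released, so we can return without unlocking.
	 */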
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
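	/*
	 * Two ways a fault just outside the known vmas can still be valid:
	 * the previous vma is the register backing store (VM_GROWSUP),
	 * which grows upward, or the faulting vma is an ordinary stack
	 * (VM_GROWSDOWN), which grows downward.
	 */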
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
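	/*
	 * A user-mode fault with no usable vma (or insufficient
	 * permissions): deliver the signal, passing the ISR along in
	 * the ia64-specific siginfo fields.
	 */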
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vma's for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem.  So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	mmap_read_unlock(mm);
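	/*
	 * Kernel-mode OOM goes through the no_context oops path; for a
	 * user fault, let the OOM machinery decide whether to kill the
	 * task or let the fault be retried.
	 */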
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}