// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

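/*
 * Local shorthand for the bit positions of VM_READ, VM_WRITE and VM_EXEC in
 * vma->vm_flags; the #if check in ia64_do_page_fault() below verifies that
 * they still match the definitions in <linux/mm.h>.
 */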
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

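/*
 * Main page-fault entry point.  ADDRESS is the faulting virtual address, ISR
 * is the interruption status register describing the access (read, write,
 * execute and speculation bits), and REGS is the register state saved when
 * the fault was taken.
 */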
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

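	/*
	 * Build the required-access mask from the ISR: the X bit (instruction
	 * fetch) maps to VM_EXEC and the W bit (write) maps to VM_WRITE.  The
	 * mask is checked against vma->vm_flags once the vma has been found.
	 */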
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_sem is performance critical.... */
	prefetchw(&mm->mmap_sem);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_sem (the pfn_valid macro is called during mmap).  There
	 * is no vma for region 5 addresses anyway, so skip taking the semaphore
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle kprobes on user-space access instructions.
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns the vma such that address < vma->vm_end,
	 * or NULL if there is none.
	 *
	 * We may find no vma and still have a non-NULL prev_vma: the last
	 * VM area can be the register backing store, which needs to expand
	 * upwards.  In that case vma is NULL but prev_vma is non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

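	/*
	 * A read access is acceptable if the vma allows either read or write:
	 * ia64 page-level protections cannot grant write permission without
	 * also granting read, so a write-only mapping is still readable.
	 */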
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

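	/*
	 * The fault was handled, or the mm layer dropped mmap_sem and asked
	 * for a retry.  Account the fault against the task and, in the retry
	 * case, repeat the lookup exactly once with FAULT_FLAG_TRIED set.
	 */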
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/* No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

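	/*
	 * The faulting address is not inside any existing vma.  It can still
	 * be valid in two cases: it lies below a VM_GROWSDOWN vma (a stack
	 * that is expanded downwards), or it sits exactly at the end of a
	 * VM_GROWSUP vma (the register backing store, which is expanded
	 * upwards by at most one page at a time).  Anything else is a
	 * genuine bad area.
	 */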
  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	up_read(&mm->mmap_sem);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; an lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

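	/*
	 * Kernel-mode faults end up here.  Before declaring an oops, check in
	 * turn for a speculative access (fixed up by setting psr.ed), a
	 * region 5 translation that is in fact present (a stale VHPT entry
	 * already repaired elsewhere), and an exception-table fixup.
	 */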
  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault; set the "ed"
		 * bit in the psr to ensure forward progress.  (The target register will
		 * get a NaT for ld.s; an lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale.  If that happens, the non-present fault handler has already purged the
	 * stale translation, which fixed the problem.  So we check whether the translation
	 * is valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}