// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;

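        /*
         * Walk the kernel page table top-down; a missing or bad entry at
         * any level means no translation exists, so the page cannot be
         * present.
         */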
        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return 0;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d) || p4d_bad(*p4d))
                return 0;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return 0;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        ptep = pte_offset_kernel(pmd, address);
        if (!ptep)
                return 0;

        pte = *ptep;
        return pte_present(pte);
}

# define VM_READ_BIT    0
# define VM_WRITE_BIT   1
# define VM_EXEC_BIT    2
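
/*
 * These bit positions must mirror the VM_READ/VM_WRITE/VM_EXEC flag
 * values from <linux/mm.h>; the preprocessor check inside
 * ia64_do_page_fault() below fails the build if they ever drift apart.
 */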

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
        int signal = SIGSEGV, code = SEGV_MAPERR;
        struct vm_area_struct *vma, *prev_vma;
        struct mm_struct *mm = current->mm;
        unsigned long mask;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

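        /*
         * The ISR's X and W bits say whether the faulting access was an
         * instruction fetch or a write; build the corresponding VM_EXEC/
         * VM_WRITE mask to check against the vma below.  A read fault is
         * handled separately via IA64_ISR_R_BIT, since a read is also
         * satisfied by a writable mapping.
         */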
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

        /* mmap_lock is performance critical.... */
        prefetchw(&mm->mmap_lock);

        /*
         * If we're in an interrupt or have no user context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        /*
         * This is to handle kprobes on user-space access instructions.
         */
        if (kprobe_page_fault(regs, TRAP_BRKPT))
                return;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (mask & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
        mmap_read_lock(mm);

        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma && !prev_vma)
                goto bad_area;

        /*
         * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
         *
         * We may find no vma at all if the last vm area is the register
         * backing store that needs to expand upwards; in that case vma
         * will be NULL, but prev_vma will be non-NULL.
         */
        if ((!vma && prev_vma) || (address < vma->vm_start))
                goto check_expansion;

  good_area:
        code = SEGV_ACCERR;

        /* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
      || (1 << VM_EXEC_BIT) != VM_EXEC)
#  error File is out of sync with <linux/mm.h>.  Please update.
# endif

        if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
                goto bad_area;

        if ((vma->vm_flags & mask) != mask)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

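        /*
         * A fatal signal interrupted the fault; bail out now.  The signal
         * will be handled on the way back to user space.
         */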
        if (fault_signal_pending(fault, regs))
                return;

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We ran out of memory, or some other thing happened
                 * to us that made us unable to handle the page fault
                 * gracefully.
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
                } else if (fault & VM_FAULT_SIGSEGV) {
                        goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
                }
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /* No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */

                goto retry;
        }

        mmap_read_unlock(mm);
        return;

  check_expansion:
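        /*
         * The address falls outside every existing vma.  It may still be
         * valid if a growable vma can be expanded to cover it: either the
         * register backing store (prev_vma, VM_GROWSUP) growing upward, or
         * an ordinary stack vma (VM_GROWSDOWN) growing downward.
         */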
        if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
                if (!vma)
                        goto bad_area;
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        goto bad_area;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                if (expand_stack(vma, address))
                        goto bad_area;
        } else {
                vma = prev_vma;
                if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
                    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
                        goto bad_area;
                /*
                 * Since the register backing store is accessed sequentially,
                 * we disallow growing it by more than a page at a time.
                 */
                if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
                        goto bad_area;
                if (expand_upwards(vma, address))
                        goto bad_area;
        }
        goto good_area;

  bad_area:
        mmap_read_unlock(mm);
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }
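        /*
         * A user-mode access to an unmapped or protected address: deliver
         * SIGSEGV (or SIGBUS), attaching the ia64 ISR value to the siginfo
         * so the signal handler can inspect the faulting access.
         */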
        if (user_mode(regs)) {
                force_sig_fault(signal, code, (void __user *) address,
                                0, __ISR_VALID, isr);
                return;
        }

  no_context:
        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
        {
                /*
                 * This fault was due to a speculative load or lfetch.fault; set the "ed"
                 * bit in the psr to ensure forward progress.  (The target register will
                 * get a NaT for ld.s; lfetch will be canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

        /*
         * Since we have no vma's for region 5, we might get here even if the address is
         * valid, due to the VHPT walker inserting a non-present translation that becomes
         * stale.  If that happens, the non-present fault handler already purged the stale
         * translation, which fixed the problem.  So, we check to see if the translation is
         * valid, and return if it is.
         */
        if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
                return;

        if (ia64_done_with_exception(regs))
                return;

        /*
         * Oops.  The kernel tried to access some bad page.  We'll have to terminate things
         * with extreme prejudice.
         */
        bust_spinlocks(1);

        if (address < PAGE_SIZE)
                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
        else
                printk(KERN_ALERT "Unable to handle kernel paging request at "
                       "virtual address %016lx\n", address);
        if (die("Oops", regs, isr))
                regs = NULL;
        bust_spinlocks(0);
        if (regs)
                make_task_dead(SIGKILL);
        return;

  out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
}