// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/kfence.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

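/*
 * Print the oops banner for a fatal kernel-mode fault at @addr, emit the
 * register dump via die(), and kill the faulting task.  Does not return
 * to the faulting context.
 */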
static void die_kernel_fault(const char *msg, unsigned long addr,
		struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
		 addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

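/*
 * Handle a fault taken in kernel mode.  If an exception table fixup exists
 * for the faulting instruction (e.g. a uaccess helper), branch to it.
 * Otherwise let KFENCE claim the address if it can, and die with a message
 * describing the bad access.
 */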
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE)
		msg = "NULL pointer dereference";
	else {
		if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, addr, regs);
}

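/*
 * Handle the VM_FAULT_ERROR cases returned by handle_mm_fault(): invoke the
 * OOM machinery for user-mode OOM faults, deliver SIGBUS for bus errors,
 * and fall back to no_context() for faults taken in kernel mode.
 */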
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to userspace
		 * (which will retry the fault, or kill us if we got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

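/*
 * Called with mmap_lock held for read; drops it before delivering SIGSEGV
 * or falling back to no_context() for kernel-mode faults.
 */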
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

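/*
 * Resolve a kernel-space fault in the vmalloc (and, on 64-bit, module)
 * region by copying the missing top-level entry from init_mm.pgd into the
 * active page table.  The lower levels are shared with the reference page
 * table, so only the PGD slot is written; the remaining levels are merely
 * checked for presence.
 */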
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud_k;
	p4d_t *p4d_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

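/*
 * Check the faulting access against the VMA's permissions: instruction
 * fetches need VM_EXEC, loads need VM_READ (or VM_WRITE, since writable
 * implies readable), and stores need VM_WRITE.  Returns true on a
 * permission mismatch.
 */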
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

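	/*
	 * Give kprobes first chance to claim faults hit while a probe handler
	 * or single-stepped instruction is running.
	 */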
	if (kprobe_page_fault(regs, cause))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

#ifdef CONFIG_64BIT
	/*
	 * Modules in 64bit kernels lie in their own virtual region which is not
	 * in the vmalloc region, but dealing with page faults in this region
	 * or the vmalloc region amounts to doing the same thing: checking that
	 * the mapping exists in init_mm.pgd and updating user page table, so
	 * just use vmalloc_fault.
	 */
	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}
#endif
	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		tsk->thread.bad_cause = cause;
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

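	/*
	 * Kernel accesses to user memory must go through the uaccess helpers,
	 * which set SR_SUM around the access.  A kernel-mode fault on a user
	 * address with SR_SUM clear therefore indicates a stray dereference
	 * of a user pointer, which is fatal.
	 */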
	if (!user_mode(regs) && addr < TASK_SIZE &&
	    unlikely(!(regs->status & SR_SUM)))
		die_kernel_fault("access to user memory without uaccess routines",
				addr, regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		tsk->thread.bad_cause = cause;
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}
NOKPROBE_SYMBOL(do_page_fault);