// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Lennox Wu <lennox.wu@sunplusct.com>
 * Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
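	/*
	 * FAULT_FLAG_DEFAULT expands to FAULT_FLAG_ALLOW_RETRY |
	 * FAULT_FLAG_KILLABLE | FAULT_FLAG_INTERRUPTIBLE, i.e. the fault
	 * may be retried and waits may be aborted by (fatal) signals.
	 */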
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

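	/*
	 * The low-level trap entry code saved the scause and stval CSRs
	 * into pt_regs as 'cause' and 'badaddr'.
	 */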
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

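	/*
	 * The VMA lookup below must run under mmap_lock held for read;
	 * if a fault attempt returns VM_FAULT_RETRY, the core MM code
	 * drops the lock and we come back here to take it again.
	 */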
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

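	/*
	 * Map the exception cause to the access type it implies and
	 * check it against the permissions of the VMA we found.
	 */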
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
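	/*
	 * Since regs is passed down, handle_mm_fault() also takes care
	 * of the major/minor fault accounting and the corresponding
	 * perf events for us.
	 */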
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
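	/* fixup_exception() looks up the faulting PC in the exception table. */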
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

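		/*
		 * Where page-table levels are folded (e.g. there is no
		 * separate p4d or pud level on Sv39), the *_offset()
		 * helpers below just pass the entry through unchanged.
		 */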
		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}