/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (!pgd_present(*pgd_k))
		goto bad_area;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	siginfo_t info;
	int fault, ret;
	int write = regs->ecr_cause & ECR_C_PROTV_STORE;	/* ST/EX */
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START) {
		ret = handle_kernel_vaddr_fault(address);
		if (unlikely(ret))
			goto bad_area_nosemaphore;
		else
			return;
	}

	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
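
	/*
	 * Look up the vma covering the faulting address under mmap_sem
	 * (read); an address just below a VM_GROWSDOWN vma may still be
	 * valid if the stack can be expanded to cover it.
	 */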
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	info.si_code = SEGV_ACCERR;

	/* Handle protection violation, execute on heap or stack */

	if ((regs->ecr_vec == ECR_V_PROTV) &&
	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		goto bad_area;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
	if (unlikely(fatal_signal_pending(current))) {
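		/* VM_FAULT_RETRY means core mm already dropped mmap_sem */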
		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
			up_read(&mm->mmap_sem);
		if (user_mode(regs))
			return;
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/* To avoid updating stats twice for retry case */
			if (fault & VM_FAULT_MAJOR) {
				tsk->maj_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
					      regs, address);
			} else {
				tsk->min_flt++;
				perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
					      regs, address);
			}

			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		/* Fault Handled Gracefully */
		up_read(&mm->mmap_sem);
		return;
	}

	if (fault & VM_FAULT_OOM)
		goto out_of_memory;
	else if (fault & VM_FAULT_SIGSEGV)
		goto bad_area;
	else if (fault & VM_FAULT_SIGBUS)
		goto do_sigbus;

	/* no man's land */
	BUG();

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.fault_address = address;
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void __user *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code)
	 */
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);

out_of_memory:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		pagefault_out_of_memory();
		return;
	}

	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	if (!user_mode(regs))
		goto no_context;

	tsk->thread.fault_address = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGBUS, &info, tsk);
}
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

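	/*
	 * Walk this task's tables and the kernel's reference tables in
	 * lockstep, copying down any level that is present in swapper
	 * but missing here.
	 */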
	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
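	/* user accesses to kernel addresses fall through to the SIGSEGV path */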
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

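	/* decode the access type (store vs. instruction fetch) from the ECR */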
	if (regs->ecr.cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr.vec == ECR_V_PROTV) &&
		 (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
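	/*
	 * lock_mm_and_find_vma() takes mmap_lock (reader), finds the vma
	 * and grows the stack if need be; on failure it returns NULL with
	 * the lock already dropped
	 */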
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * Major/minor fault accounting was already done by the core mm
	 * (handle_mm_fault() was passed regs); in case of retry we only
	 * land here once
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
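	/* Is there an exception table fixup for this kernel access? */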
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}