// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

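/*
 * Look up the faulting PC in the kernel exception table; if a fixup
 * entry exists (e.g. for a faulting uaccess instruction), execution is
 * redirected to the recorded continuation address instead of oopsing.
 */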
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
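/*
 * "write" is non-zero for a write access. "mmu_meh" is assumed to hold
 * the C-SKY MEH (MMU entry-high) register value, whose page-number bits
 * give the faulting virtual address; the later rework below reads it
 * directly via read_mmu_entryhi().
 */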
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	int fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
				regs);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		tsk->thread.trap_no = trap_no(regs);
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	tsk->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die(regs, "Oops");

out_of_memory:
	tsk->thread.trap_no = trap_no(regs);

	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 */
	pagefault_out_of_memory();
	return;

do_sigbus:
	tsk->thread.trap_no = trap_no(regs);

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}
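
/*
 * What follows appears to be a later rework of the same handler: the
 * monolithic do_page_fault() above is split into small helpers and moved
 * to the modern fault-handling API (FAULT_FLAG_*, fault_signal_pending(),
 * VM_FAULT_COMPLETED).
 */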
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

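/*
 * Derive the access type from the hardware trap vector. A write is
 * assumed to be signalled either by a store-side TLB miss
 * (VEC_TLBINVALIDS) or by a write to a clean/read-only page
 * (VEC_TLBMODIFIED).
 */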
static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

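/*
 * Cores without ldex/stex emulate cmpxchg with a plain load/store pair
 * (csky_cmpxchg_ldw/csky_cmpxchg_stw). If the store half takes a
 * VEC_TLBMODIFIED fault, the emulation is assumed to need restarting
 * from the load so the compare-and-swap remains atomic, hence the PC is
 * rolled back to csky_cmpxchg_ldw. With CONFIG_CPU_HAS_LDSTEX this
 * fixup is a no-op.
 */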
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif

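/* Kernel-mode fault: try an exception-table fixup, otherwise oops. */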
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

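/*
 * handle_mm_fault() reported an error: forward OOM to the OOM killer,
 * raise SIGBUS for bus errors, and fall back to no_context() for
 * kernel-mode faults.
 */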
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

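/*
 * Kernel-mode fault in vmalloc space: copy the relevant entries from
 * init_mm's master page table into the active page table, then drop the
 * stale TLB entry. No locks may be taken on this path.
 */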
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

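/* Check the fault type against the vma's access permissions. */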
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

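	/*
	 * BIT(6) of regs->sr is assumed to be the C-SKY PSR interrupt-enable
	 * (IE) bit of the faulting context.
	 */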
	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}