// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

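/*
 * fixup_exception() - recover from a fault on a whitelisted kernel
 * instruction.
 *
 * Uaccess helpers record each potentially-faulting instruction together
 * with a landing pad in the __ex_table section, roughly like this
 * (a sketch, not the exact csky macro):
 *
 *	1:	ldw	%0, (%1)	// may fault on a bad user pointer
 *	...
 *	.section __ex_table, "a"
 *	.long	1b, 2f			// insn, fixup
 *	.previous
 *
 * search_exception_tables() maps the faulting PC back to its entry;
 * branching to ->fixup lets get_user()/put_user() style code return
 * -EFAULT instead of oopsing.
 */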
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

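/*
 * Classify the fault by trap vector: both a store to a page with no
 * valid entry (VEC_TLBINVALIDS) and a store to a read-only page
 * (VEC_TLBMODIFIED) indicate a write access.
 */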
static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

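/*
 * Cores without hardware load/store-exclusive emulate cmpxchg with a
 * plain ldw/stw pair between the csky_cmpxchg_ldw and csky_cmpxchg_stw
 * addresses.  If the store half takes a TLB-modified fault, rewind the
 * PC to the load so the compare reruns against current memory once the
 * PTE is writable; otherwise the emulation could complete on stale
 * data.  With hardware ldex/stex (CONFIG_CPU_HAS_LDSTEX) no fixup is
 * needed.
 */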
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
}
#endif

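/*
 * Kernel fault with no way to hand it to userspace: try an
 * exception-table fixup first, otherwise oops and kill the task.
 */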
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

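/*
 * Translate the VM_FAULT_* error bits into an action: OOM handling,
 * SIGBUS, or (for kernel-mode faults) an exception fixup or oops.
 */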
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

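/*
 * Kernel mappings in the vmalloc range are created only in init_mm.pgd
 * and faulted into other page tables lazily.  Copy the relevant
 * top-level entries from that reference table; no locks may be taken
 * here because this can run in interrupt context.
 */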
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

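/* Check the faulting access type against the permissions of the VMA. */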
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/*
	 * Enable interrupts if they were enabled in the parent context
	 * (bit 6 of the saved PSR is the interrupt-enable bit).
	 */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
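	/*
	 * lock_mm_and_find_vma() takes the mmap read lock and looks up
	 * (and, if needed, expands) the VMA covering addr.  On failure
	 * it returns NULL with the lock already released, which is why
	 * the error path below is the "nosemaphore" one.
	 */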
retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR))
		mm_fault_error(regs, addr, fault);
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

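/*
 * Older variant of the fixup path: here the exception-table entry
 * stores the continuation address in ->nextinsn rather than ->fixup.
 */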
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	vm_fault_t fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	si_code = SEGV_MAPERR;
#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
			      address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
			      address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
	die_if_kernel("Oops", regs, write);

out_of_memory:
	/* Drop mmap_sem, which is still held when we jump here from good_area. */
	up_read(&mm->mmap_sem);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}