// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address (vmalloc/pkmap/fixmap).
 * Refer to asm/processor.h for the System Memory Map.
 *
 * This copies any missing page-table entries (pgd/p4d/pud/pmd) from the
 * swapper pgdir into the task pgdir, so the lower-level tables and pages
 * end up shared with the kernel's reference page table.
 */
static noinline int handle_kernel_vaddr_fault(unsigned long address)
{
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pgd_t *pgd, *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

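        /*
         * Walk the task's and the kernel's ('reference') page tables in
         * lockstep: at each level, bail out if the reference entry is
         * absent (genuinely bad address), otherwise copy it into the
         * task's table when it is missing there.
         */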
        pgd = pgd_offset(current->active_mm, address);
        pgd_k = pgd_offset_k(address);

        if (pgd_none(*pgd_k))
                goto bad_area;
        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (p4d_none(*p4d_k))
                goto bad_area;
        if (!p4d_present(*p4d))
                set_p4d(p4d, *p4d_k);

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (pud_none(*pud_k))
                goto bad_area;
        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (pmd_none(*pmd_k))
                goto bad_area;
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);

        /* XXX: create the TLB entry here */
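        /*
         * Returning 0 makes do_page_fault() return right away; the
         * faulting instruction then re-executes and the TLB refill
         * finds the newly copied entry.
         */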
        return 0;

bad_area:
        return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int sig, si_code = SEGV_MAPERR;
        unsigned int write = 0, exec = 0, mask;
        vm_fault_t fault = VM_FAULT_SIGSEGV;    /* handle_mm_fault() output */
        unsigned int flags;                     /* handle_mm_fault() input */

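        /*
         * si_code defaults to SEGV_MAPERR (no mapping at all); it is
         * upgraded to SEGV_ACCERR (mapping exists but permission denied)
         * or BUS_ADRERR on the corresponding error paths below.
         */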
        /*
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        if (address >= VMALLOC_START && !user_mode(regs)) {
                if (unlikely(handle_kernel_vaddr_fault(address)))
                        goto no_context;
                else
                        return;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

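        /*
         * Classify the access from the Exception Cause Register:
         * a protection violation on store (or ST/EX) is a write fault,
         * one on instruction fetch is an exec fault; anything else is
         * treated as a read.
         */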
        if (regs->ecr_cause & ECR_C_PROTV_STORE)        /* ST/EX */
                write = 1;
        else if ((regs->ecr_vec == ECR_V_PROTV) &&
                 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
                exec = 1;

        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
        mmap_read_lock(mm);

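        /*
         * find_vma() returns the first VMA ending above @address; if the
         * address sits below its start, it is only valid when the VMA
         * can grow down (a stack) and expand_stack() succeeds.
         */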
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (unlikely(address < vma->vm_start)) {
                if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
                        goto bad_area;
        }

        /*
         * vm_area is good, now check permissions for this memory access
         */
        mask = VM_READ;
        if (write)
                mask = VM_WRITE;
        if (exec)
                mask = VM_EXEC;

        if (!(vma->vm_flags & mask)) {
                si_code = SEGV_ACCERR;
                goto bad_area;
        }

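        /*
         * handle_mm_fault() does the heavy lifting: it allocates the page
         * and fills in the page tables. It may drop mmap_lock on its own
         * (VM_FAULT_RETRY / VM_FAULT_COMPLETED) or return error bits that
         * are dealt with below.
         */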
        fault = handle_mm_fault(vma, address, flags, regs);

        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        /*
         * Fault retry: the core mm has already dropped mmap_lock, and
         * FAULT_FLAG_TRIED makes the retry block instead of returning
         * VM_FAULT_RETRY again.
         */
        if (unlikely(fault & VM_FAULT_RETRY)) {
                flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

bad_area:
        mmap_read_unlock(mm);

        /*
         * No error bits set: the fault was handled gracefully
         * (in case of retry we only land here once)
         */
        if (likely(!(fault & VM_FAULT_ERROR)))
                return;

        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                pagefault_out_of_memory();
                return;
        }

        if (fault & VM_FAULT_SIGBUS) {
                sig = SIGBUS;
                si_code = BUS_ADRERR;
        } else {
                sig = SIGSEGV;
        }

        tsk->thread.fault_address = address;
        force_sig_fault(sig, si_code, (void __user *)address);
        return;

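        /*
         * Kernel-mode fault with no user context to signal: give the
         * exception-table fixup handlers (e.g. the uaccess helpers)
         * a chance to recover, otherwise die with an Oops.
         */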
no_context:
        if (fixup_exception(regs))
                return;

        die("Oops", regs, address);
}