v6.8
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

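/*
 * Resolve a kernel-mode fault via the exception tables: if the faulting
 * instruction has a registered fixup (e.g. a uaccess helper), redirect
 * the PC to the fixup code instead of oopsing.
 */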
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

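/*
 * The hardware reports the fault kind via the trap vector. Both
 * VEC_TLBINVALIDS (TLB miss on a store) and VEC_TLBMODIFIED (write to a
 * page whose TLB entry is not marked writable/dirty) indicate a write.
 */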
static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

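/*
 * CPUs without ldex/stex emulate cmpxchg with a plain ldw/stw pair in
 * assembly (csky_cmpxchg_ldw/csky_cmpxchg_stw). If a write-protect fault
 * hits the stw, roll the PC back to the ldw so the whole load-compare-store
 * sequence is rerun once the fault has been resolved.
 */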
#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif

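/*
 * Fault taken in kernel mode with no user context to blame: try an
 * exception-table fixup first, otherwise oops and kill the task.
 */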
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

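/*
 * The fault could not be serviced: hand VM_FAULT_OOM to the OOM killer
 * (or oops in kernel mode) and turn VM_FAULT_SIGBUS into a SIGBUS for
 * user space.
 */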
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

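/*
 * Called with the mmap lock already released (lock_mm_and_find_vma()
 * drops it on failure, and the access_error path unlocks explicitly
 * before calling here), hence the _nosemaphore suffix.
 */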
static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

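/*
 * Kernel mappings in the vmalloc range live only in init_mm; copy the
 * relevant top-level entries into the current page table on first touch.
 * This can run from interrupt context, so it must not take any locks.
 */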
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

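/* Check that the VMA actually permits this kind of access. */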
static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
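	/*
	 * lock_mm_and_find_vma() takes the mmap read lock, finds the VMA
	 * covering addr and expands the stack if the address warrants it;
	 * on failure it returns NULL with the lock already dropped, which
	 * is why the !vma path below does not unlock.
	 */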
retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		mmap_read_unlock(mm);
		bad_area_nosemaphore(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}
v6.2
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}

static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:
		return true;
	case VEC_TLBMODIFIED:
		return true;
	}

	return false;
}

#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	flush_tlb_one(addr);
}

static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sr & BIT(6)))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
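	/*
	 * Take the mmap read lock and look up the VMA by hand: an address
	 * just below a VM_GROWSDOWN vma is treated as a stack access and
	 * grows the stack via expand_stack() before being handled.
	 */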
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}