v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"
/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END)))
		goto vmalloc_fault;

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

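	/*
	 * find_vma() returns the lowest VMA with vm_end > addr; if addr
	 * still lies below vma->vm_start, the access is legal only for a
	 * stack VMA (VM_GROWSDOWN), which expand_stack() grows downward
	 * to cover addr.
	 */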
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma))
		goto bad_area;
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;
	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, call the OOM killer, and return to
	 * userspace (which will retry the fault, or kill us if we got
	 * oom-killed).
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	do_trap(regs, SIGBUS, BUS_ADRERR, addr);
	return;

vmalloc_fault:
	{
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		p4d_t *p4d, *p4d_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;
		int index;

		/* User mode accesses just cause a SIGSEGV */
		if (user_mode(regs))
			return do_trap(regs, SIGSEGV, code, addr);

		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk->active_mm->pgd" here.
		 * We might be inside an interrupt in the middle
		 * of a task switch.
		 */
		index = pgd_index(addr);
		pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		/*
		 * Since the vmalloc area is global, it is unnecessary
		 * to copy individual PTEs
		 */
		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		/*
		 * Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		/*
		 * The kernel assumes that TLBs don't cache invalid
		 * entries, but in RISC-V, SFENCE.VMA specifies an
		 * ordering constraint, not a cache flush; it is
		 * necessary even after writing invalid entries.
		 */
		local_flush_tlb_page(addr);

		return;
	}
}
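
The fault codes chosen above feed directly into the siginfo a user process receives: SEGV_MAPERR when no VMA covers the address, SEGV_ACCERR when a VMA exists but its permissions forbid the access. A minimal user-space sketch of observing that distinction (plain POSIX APIs; the handler and variable names are illustrative, not from this file):

#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>

static const char accerr[] = "SEGV_ACCERR (mapping exists, permission denied)\n";
static const char maperr[] = "SEGV_MAPERR (no mapping at this address)\n";

static void segv_handler(int sig, siginfo_t *info, void *ucontext)
{
	/* do_trap() chose si_code; only async-signal-safe calls here. */
	if (info->si_code == SEGV_ACCERR)
		write(STDERR_FILENO, accerr, sizeof(accerr) - 1);
	else
		write(STDERR_FILENO, maperr, sizeof(maperr) - 1);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };
	char *p;

	sigaction(SIGSEGV, &sa, NULL);

	/* The VMA exists but lacks VM_WRITE, so the store below goes
	 * through good_area with code = SEGV_ACCERR. */
	p = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;
	return 0;
}
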
v6.9.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */


#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/kfence.h>
#include <linux/entry-common.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

static void die_kernel_fault(const char *msg, unsigned long addr,
		struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n", msg,
		addr);

	bust_spinlocks(0);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	const char *msg;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (addr < PAGE_SIZE)
		msg = "NULL pointer dereference";
	else {
		if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
			return;

		msg = "paging request";
	}

	die_kernel_fault(msg, addr, regs);
}

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void
bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void
bad_area(struct pt_regs *regs, struct mm_struct *mm, int code,
	 unsigned long addr)
{
	mmap_read_unlock(mm);

	bad_area_nosemaphore(regs, code, addr);
}

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud_k;
	p4d_t *p4d_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(pgdp_get(pgd_k))) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, pgdp_get(pgd_k));

	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(p4dp_get(p4d_k))) {
		no_context(regs, addr);
		return;
	}

	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(pudp_get(pud_k))) {
		no_context(regs, addr);
		return;
	}
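	/*
	 * A leaf entry at this level maps a huge page covering addr;
	 * there is no lower-level table to synchronize, only the TLB
	 * flush below is needed.
	 */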
	if (pud_leaf(pudp_get(pud_k)))
		goto flush_tlb;

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(pmdp_get(pmd_k))) {
		no_context(regs, addr);
		return;
	}
	if (pmd_leaf(pmdp_get(pmd_k)))
		goto flush_tlb;

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(ptep_get(pte_k))) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
flush_tlb:
	local_flush_tlb_page(addr);
}

static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			return true;
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			return true;
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			return true;
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
void handle_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

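	/*
	 * Let kprobes claim the fault first: if it was raised while a
	 * kprobe was active (e.g. from within a probe handler),
	 * kprobe_page_fault() fixes it up and nothing more is needed.
	 */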
	if (kprobe_page_fault(regs, cause))
		return;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
	    unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (!regs_irqs_disabled(regs))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		tsk->thread.bad_cause = cause;
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

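	/*
	 * sstatus.SUM must be set (by the uaccess helpers) before the
	 * kernel may touch user memory; a kernel-mode fault on a user
	 * address with SUM clear is therefore a stray access, not a
	 * legitimate uaccess that can be fixed up.
	 */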
	if (!user_mode(regs) && addr < TASK_SIZE && unlikely(!(regs->status & SR_SUM))) {
		if (fixup_exception(regs))
			return;

		die_kernel_fault("access to user memory without uaccess routines", addr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

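	/*
	 * Fast path: try to serve the fault under the per-VMA lock, so
	 * unrelated faults in the same mm do not contend on mmap_lock.
	 * Any failure (no VMA, access error, need to retry) falls back
	 * to the classic mmap_lock path below.
	 */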
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_error(cause, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
	if (fault & VM_FAULT_MAJOR)
		flags |= FAULT_FLAG_TRIED;

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}
lock_mmap:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		tsk->thread.bad_cause = cause;
		bad_area_nosemaphore(regs, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		tsk->thread.bad_cause = cause;
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

done:
	if (unlikely(fault & VM_FAULT_ERROR)) {
		tsk->thread.bad_cause = cause;
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}
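
Both versions funnel legitimate faults into handle_mm_fault(), which is what populates anonymous mappings on first touch. A small self-contained user-space sketch that makes those demand faults visible (standard Linux/POSIX APIs; the exact count is approximate, since the C runtime itself also takes faults):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <unistd.h>

#define NPAGES 64

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	struct rusage before, after;
	char *buf;
	int i;

	/* mmap() only installs a VMA; no page tables are populated yet. */
	buf = mmap(NULL, NPAGES * page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	getrusage(RUSAGE_SELF, &before);
	for (i = 0; i < NPAGES; i++)
		buf[i * page] = 1;	/* first touch: one minor fault per page */
	getrusage(RUSAGE_SELF, &after);

	printf("minor faults taken: %ld\n", after.ru_minflt - before.ru_minflt);
	munmap(buf, NPAGES * page);
	return 0;
}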