// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/signal.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/version.h>
#include <linux/vt_kern.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/traps.h>
#include <asm/page.h>

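/*
 * Exception-table fixup: if the faulting instruction (regs->pc) has an
 * entry in the kernel exception table (as generated for the uaccess
 * helpers), redirect execution to its registered fixup address and
 * report success so the caller can skip the normal fault handling.
 */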
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->nextinsn;

		return 1;
	}

	return 0;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, then passes it off to one of the appropriate routines.
 */
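/*
 * regs is the trap frame; write is assumed to be non-zero when the
 * access that faulted was a store. mmu_meh is taken here to be the
 * C-SKY MEH (MMU Entry High) register value, whose page-aligned part
 * is the faulting virtual address.
 */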
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
			      unsigned long mmu_meh)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int si_code;
	int fault;
	unsigned long address = mmu_meh & PAGE_MASK;

	si_code = SEGV_MAPERR;

#ifndef CONFIG_CPU_HAS_TLBI
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START) &&
	    unlikely(address <= VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = __pgd_offset(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		unsigned long pgd_base;

		pgd_base = (unsigned long)__va(get_pgd());
		pgd = (pgd_t *)pgd_base + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		pud = (pud_t *)pgd;
		pud_k = (pud_t *)pgd_k;
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
#endif

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
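	/*
	 * Look up the VMA covering the faulting address. If the address
	 * falls just below the start of a VM_GROWSDOWN mapping (typically
	 * the user stack), try to expand that mapping downwards to cover
	 * it; anything else is an invalid access.
	 */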
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
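	/*
	 * The mapping exists, so any failure from here on is a permission
	 * problem rather than a missing mapping: report SEGV_ACCERR.
	 */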
	si_code = SEGV_ACCERR;

	if (write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		BUG();
	}
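	/*
	 * Account the fault: major if I/O was needed to satisfy it,
	 * minor otherwise, both in the task counters and for perf.
	 */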
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
			      address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
			      address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

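	/* Kernel-mode faults fall through to the fixup / oops path below. */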
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual address 0x%08lx, pc: 0x%08lx\n",
		 address, regs->pc);
	die_if_kernel("Oops", regs, write);

out_of_memory:
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed).
	 * The mmap_sem read lock is still held on this path and would
	 * otherwise be leaked, so drop it before invoking the OOM path.
	 */
	up_read(&mm->mmap_sem);
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
}