v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Page Fault Handling for ARC (TLB Miss / ProtV)
  3 *
  4 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  5 */
  6
  7#include <linux/signal.h>
  8#include <linux/interrupt.h>
  9#include <linux/sched/signal.h>
 10#include <linux/errno.h>
 11#include <linux/ptrace.h>
 12#include <linux/uaccess.h>
 13#include <linux/kdebug.h>
 14#include <linux/perf_event.h>
 15#include <linux/mm_types.h>
 16#include <asm/mmu.h>
 17
 18/*
 19 * Handling kernel virtual address faults is required for vmalloc/pkmap/fixmap.
 20 * Refer to asm/processor.h for the System Memory Map.
 21 *
 22 * This simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 23 * from the swapper pgdir to the task pgdir; the 2nd level table/page is thus shared.
 24 */
 25noinline static int handle_kernel_vaddr_fault(unsigned long address)
 26{
 27	/*
 28	 * Synchronize this task's top level page-table
 29	 * with the 'reference' page table.
 30	 */
 31	pgd_t *pgd, *pgd_k;
 32	p4d_t *p4d, *p4d_k;
 33	pud_t *pud, *pud_k;
 34	pmd_t *pmd, *pmd_k;
 35
 36	pgd = pgd_offset_fast(current->active_mm, address);
 37	pgd_k = pgd_offset_k(address);
 38
 39	if (!pgd_present(*pgd_k))
 40		goto bad_area;
 41
 42	p4d = p4d_offset(pgd, address);
 43	p4d_k = p4d_offset(pgd_k, address);
 44	if (!p4d_present(*p4d_k))
 45		goto bad_area;
 46
 47	pud = pud_offset(p4d, address);
 48	pud_k = pud_offset(p4d_k, address);
 49	if (!pud_present(*pud_k))
 50		goto bad_area;
 51
 52	pmd = pmd_offset(pud, address);
 53	pmd_k = pmd_offset(pud_k, address);
 54	if (!pmd_present(*pmd_k))
 55		goto bad_area;
 56
 57	set_pmd(pmd, *pmd_k);
 58
 59	/* XXX: create the TLB entry here */
 60	return 0;
 61
 62bad_area:
 63	return 1;
 64}
 65
 66void do_page_fault(unsigned long address, struct pt_regs *regs)
 67{
 68	struct vm_area_struct *vma = NULL;
 69	struct task_struct *tsk = current;
 70	struct mm_struct *mm = tsk->mm;
 71	int sig, si_code = SEGV_MAPERR;
 72	unsigned int write = 0, exec = 0, mask;
 73	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
 74	unsigned int flags;			/* handle_mm_fault() input */
 75
 76	/*
 77	 * NOTE! We MUST NOT take any locks for this case. We may
 78	 * be in an interrupt or a critical region, and should
 79	 * only copy the information from the master page table,
 80	 * nothing more.
 81	 */
 82	if (address >= VMALLOC_START && !user_mode(regs)) {
 83		if (unlikely(handle_kernel_vaddr_fault(address)))
 84			goto no_context;
 85		else
 86			return;
 87	}
 88
 89	/*
 90	 * If we're in an interrupt or have no user
 91	 * context, we must not take the fault..
 92	 */
 93	if (faulthandler_disabled() || !mm)
 94		goto no_context;
 95
 96	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
 97		write = 1;
 98	else if ((regs->ecr_vec == ECR_V_PROTV) &&
 99	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
100		exec = 1;
101
102	flags = FAULT_FLAG_DEFAULT;
103	if (user_mode(regs))
104		flags |= FAULT_FLAG_USER;
105	if (write)
106		flags |= FAULT_FLAG_WRITE;
107
108	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
109retry:
110	mmap_read_lock(mm);
111
112	vma = find_vma(mm, address);
113	if (!vma)
114		goto bad_area;
115	if (unlikely(address < vma->vm_start)) {
116		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
117			goto bad_area;
118	}
119
120	/*
121	 * vm_area is good, now check permissions for this memory access
122	 */
123	mask = VM_READ;
124	if (write)
125		mask = VM_WRITE;
126	if (exec)
127		mask = VM_EXEC;
128
129	if (!(vma->vm_flags & mask)) {
130		si_code = SEGV_ACCERR;
131		goto bad_area;
132	}
133
134	fault = handle_mm_fault(vma, address, flags, regs);
135
136	/* Quick path to respond to signals */
137	if (fault_signal_pending(fault, regs)) {
138		if (!user_mode(regs))
139			goto no_context;
140		return;
141	}
142
143	/*
144	 * Fault retry nuances, mmap_lock already relinquished by core mm
145	 */
146	if (unlikely((fault & VM_FAULT_RETRY) &&
147		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
148		flags |= FAULT_FLAG_TRIED;
149		goto retry;
150	}
151
152bad_area:
153	mmap_read_unlock(mm);
154
155	/*
156	 * Major/minor page fault accounting
157	 * (in case of retry we only land here once)
158	 */
159	if (likely(!(fault & VM_FAULT_ERROR)))
160		/* Normal return path: fault Handled Gracefully */
161		return;
162
163	if (!user_mode(regs))
164		goto no_context;
165
166	if (fault & VM_FAULT_OOM) {
167		pagefault_out_of_memory();
168		return;
169	}
170
171	if (fault & VM_FAULT_SIGBUS) {
172		sig = SIGBUS;
173		si_code = BUS_ADRERR;
174	}
175	else {
176		sig = SIGSEGV;
177	}
178
179	tsk->thread.fault_address = address;
180	force_sig_fault(sig, si_code, (void __user *)address);
181	return;
182
183no_context:
184	if (fixup_exception(regs))
185		return;
186
187	die("Oops", regs, address);
188}
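
The si_code classification above is what user space eventually sees: an address with no VMA is reported as SIGSEGV/SEGV_MAPERR, a mapped address that fails the vm_flags mask check as SIGSEGV/SEGV_ACCERR, and a VM_FAULT_SIGBUS result as SIGBUS/BUS_ADRERR. As a rough, architecture-independent illustration (a hypothetical user-space test, not part of the kernel tree), the following sketch installs a SA_SIGINFO handler and provokes the first two cases:

/*
 * Hypothetical user-space sketch (not kernel code): observe the si_code
 * values that do_page_fault() reports via force_sig_fault().
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* SEGV_MAPERR: no VMA for the address; SEGV_ACCERR: VMA lacks permission */
	printf("SIGSEGV at %p, si_code=%s\n", info->si_addr,
	       info->si_code == SEGV_ACCERR ? "SEGV_ACCERR" :
	       info->si_code == SEGV_MAPERR ? "SEGV_MAPERR" : "other");
	_exit(0);
}

int main(int argc, char **argv)
{
	struct sigaction sa = { .sa_sigaction = segv_handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGSEGV, &sa, NULL);

	if (argc > 1 && !strcmp(argv[1], "accerr")) {
		/* Mapped read-only, then written: permission check fails -> SEGV_ACCERR */
		char *p = mmap(NULL, 4096, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p != MAP_FAILED)
			*p = 1;
	} else {
		/* No mapping at all: find_vma() finds nothing -> SEGV_MAPERR */
		*(volatile char *)0 = 1;
	}
	return 1;
}

Run with no argument it should report SEGV_MAPERR; run with "accerr" it should report SEGV_ACCERR.
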
v3.15
 
  1/* Page Fault Handling for ARC (TLB Miss / ProtV)
  2 *
  3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4 *
  5 * This program is free software; you can redistribute it and/or modify
  6 * it under the terms of the GNU General Public License version 2 as
  7 * published by the Free Software Foundation.
  8 */
  9
 10#include <linux/signal.h>
 11#include <linux/interrupt.h>
 12#include <linux/sched.h>
 13#include <linux/errno.h>
 14#include <linux/ptrace.h>
 15#include <linux/uaccess.h>
 16#include <linux/kdebug.h>
 17#include <asm/pgalloc.h>
 18#include <asm/mmu.h>
 19
 20static int handle_vmalloc_fault(unsigned long address)
 21{
 22	/*
 23	 * Synchronize this task's top level page-table
 24	 * with the 'reference' page table.
 25	 */
 26	pgd_t *pgd, *pgd_k;
 27	pud_t *pud, *pud_k;
 28	pmd_t *pmd, *pmd_k;
 29
 30	pgd = pgd_offset_fast(current->active_mm, address);
 31	pgd_k = pgd_offset_k(address);
 32
 33	if (!pgd_present(*pgd_k))
 34		goto bad_area;
 35
 36	pud = pud_offset(pgd, address);
 37	pud_k = pud_offset(pgd_k, address);
 38	if (!pud_present(*pud_k))
 39		goto bad_area;
 40
 41	pmd = pmd_offset(pud, address);
 42	pmd_k = pmd_offset(pud_k, address);
 43	if (!pmd_present(*pmd_k))
 44		goto bad_area;
 45
 46	set_pmd(pmd, *pmd_k);
 47
 48	/* XXX: create the TLB entry here */
 49	return 0;
 50
 51bad_area:
 52	return 1;
 53}
 54
 55void do_page_fault(unsigned long address, struct pt_regs *regs)
 56{
 57	struct vm_area_struct *vma = NULL;
 58	struct task_struct *tsk = current;
 59	struct mm_struct *mm = tsk->mm;
 60	siginfo_t info;
 61	int fault, ret;
 62	int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
 63	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 64
 65	/*
 66	 * We fault-in kernel-space virtual memory on-demand. The
 67	 * 'reference' page table is init_mm.pgd.
 68	 *
 69	 * NOTE! We MUST NOT take any locks for this case. We may
 70	 * be in an interrupt or a critical region, and should
 71	 * only copy the information from the master page table,
 72	 * nothing more.
 73	 */
 74	if (address >= VMALLOC_START && address <= VMALLOC_END) {
 75		ret = handle_vmalloc_fault(address);
 76		if (unlikely(ret))
 77			goto bad_area_nosemaphore;
 78		else
 79			return;
 80	}
 81
 82	info.si_code = SEGV_MAPERR;
 83
 84	/*
 85	 * If we're in an interrupt or have no user
 86	 * context, we must not take the fault..
 87	 */
 88	if (in_atomic() || !mm)
 89		goto no_context;
 90
 91	if (user_mode(regs))
 92		flags |= FAULT_FLAG_USER;
 93retry:
 94	down_read(&mm->mmap_sem);
 95	vma = find_vma(mm, address);
 96	if (!vma)
 97		goto bad_area;
 98	if (vma->vm_start <= address)
 99		goto good_area;
100	if (!(vma->vm_flags & VM_GROWSDOWN))
101		goto bad_area;
102	if (expand_stack(vma, address))
103		goto bad_area;
104
105	/*
106	 * Ok, we have a good vm_area for this memory access, so
107	 * we can handle it..
108	 */
109good_area:
110	info.si_code = SEGV_ACCERR;
111
112	/* Handle protection violation, execute on heap or stack */
113
114	if ((regs->ecr_vec == ECR_V_PROTV) &&
115	    (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
116		goto bad_area;
117
118	if (write) {
119		if (!(vma->vm_flags & VM_WRITE))
120			goto bad_area;
121		flags |= FAULT_FLAG_WRITE;
122	} else {
123		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
124			goto bad_area;
125	}
126
127	/*
128	 * If for any reason at all we couldn't handle the fault,
129	 * make sure we exit gracefully rather than endlessly redo
130	 * the fault.
131	 */
132	fault = handle_mm_fault(mm, vma, address, flags);
133
 134	/* If the page fault was interrupted by SIGKILL, exit the page fault "early" */
135	if (unlikely(fatal_signal_pending(current))) {
136		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
137			up_read(&mm->mmap_sem);
138		if (user_mode(regs))
139			return;
140	}
141
142	if (likely(!(fault & VM_FAULT_ERROR))) {
143		if (flags & FAULT_FLAG_ALLOW_RETRY) {
144			/* To avoid updating stats twice for retry case */
145			if (fault & VM_FAULT_MAJOR)
146				tsk->maj_flt++;
147			else
148				tsk->min_flt++;
149
150			if (fault & VM_FAULT_RETRY) {
151				flags &= ~FAULT_FLAG_ALLOW_RETRY;
152				flags |= FAULT_FLAG_TRIED;
153				goto retry;
154			}
155		}
156
157		/* Fault Handled Gracefully */
158		up_read(&mm->mmap_sem);
159		return;
160	}
161
162	/* TBD: switch to pagefault_out_of_memory() */
163	if (fault & VM_FAULT_OOM)
164		goto out_of_memory;
165	else if (fault & VM_FAULT_SIGBUS)
166		goto do_sigbus;
167
168	/* no man's land */
169	BUG();
170
171	/*
172	 * Something tried to access memory that isn't in our memory map..
173	 * Fix it, but check if it's kernel or user first..
174	 */
175bad_area:
176	up_read(&mm->mmap_sem);
177
178bad_area_nosemaphore:
179	/* User mode accesses just cause a SIGSEGV */
180	if (user_mode(regs)) {
181		tsk->thread.fault_address = address;
182		info.si_signo = SIGSEGV;
183		info.si_errno = 0;
184		/* info.si_code has been set above */
185		info.si_addr = (void __user *)address;
186		force_sig_info(SIGSEGV, &info, tsk);
187		return;
188	}
189
190no_context:
191	/* Are we prepared to handle this kernel fault?
192	 *
193	 * (The kernel has valid exception-points in the source
194 *  when it accesses user-memory. When it fails in one
195	 *  of those points, we find it in a table and do a jump
196	 *  to some fixup code that loads an appropriate error
197	 *  code)
198	 */
199	if (fixup_exception(regs))
200		return;
201
202	die("Oops", regs, address);
203
204out_of_memory:
205	up_read(&mm->mmap_sem);
206
207	if (user_mode(regs)) {
208		pagefault_out_of_memory();
209		return;
210	}
211
212	goto no_context;
213
214do_sigbus:
215	up_read(&mm->mmap_sem);
216
217	if (!user_mode(regs))
218		goto no_context;
219
220	tsk->thread.fault_address = address;
221	info.si_signo = SIGBUS;
222	info.si_errno = 0;
223	info.si_code = BUS_ADRERR;
224	info.si_addr = (void __user *)address;
225	force_sig_info(SIGBUS, &info, tsk);
226}
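
The do_sigbus path above is reached when handle_mm_fault() returns VM_FAULT_SIGBUS, which happens, for example, when a file-backed mapping is touched beyond the end of the file. A minimal user-space sketch (hypothetical, not part of the kernel tree) that should reach this path by mapping an empty file and reading its first page:

/*
 * Hypothetical user-space sketch (not kernel code): provoke the do_sigbus
 * path by accessing a file-backed mapping past end-of-file.
 */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void bus_handler(int sig, siginfo_t *info, void *ctx)
{
	printf("SIGBUS at %p, si_code=%s\n", info->si_addr,
	       info->si_code == BUS_ADRERR ? "BUS_ADRERR" : "other");
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = bus_handler, .sa_flags = SA_SIGINFO };
	char tmpl[] = "/tmp/sigbus-demo-XXXXXX";
	int fd = mkstemp(tmpl);		/* zero-length temporary file */
	char *p;

	if (fd < 0)
		return 1;
	unlink(tmpl);
	sigaction(SIGBUS, &sa, NULL);

	/*
	 * The mapping is one page long but no byte of the file backs it, so
	 * the first access makes handle_mm_fault() return VM_FAULT_SIGBUS,
	 * which the fault handler turns into SIGBUS with BUS_ADRERR.
	 */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	return p[0];			/* faults here */
}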