v6.2: arch/arc/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
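	/*
	 * Returning without pre-creating the TLB entry is still correct:
	 * the faulting access simply re-executes, and the TLB refill
	 * path then finds the freshly synced page tables.
	 */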
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

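	/*
	 * Classify the access from the Exception Cause Register (ECR):
	 * a store (ST/EX) marks it as a write, a ProtV instruction
	 * fetch marks it as exec; anything else is treated as a read.
	 */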
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
	         (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
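	/*
	 * handle_mm_fault() may return VM_FAULT_RETRY with mmap_lock
	 * already dropped (e.g. while waiting on page I/O); we then
	 * loop back here with FAULT_FLAG_TRIED set.
	 */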
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);

	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

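/*
 * Kernel-mode fault with no user context to blame: if an exception
 * table entry covers the faulting instruction (e.g. a uaccess helper),
 * branch to its fixup; otherwise this is a kernel bug, so Oops.
 */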
no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}
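
Relative to v6.2, the v6.8 version below differs in two main ways: the open-coded mmap_read_lock() / find_vma() / expand_stack() sequence is replaced by the generic lock_mm_and_find_vma() helper, which handles stack expansion itself and releases the lock on failure (hence the new bad_area_nosemaphore label), and the ECR bits are now read through the structured regs->ecr field (regs->ecr.cause, regs->ecr.vec) instead of the flat ecr_cause/ecr_vec members. The newer version also includes <asm/entry.h>.
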
v6.8: arch/arc/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>

/*
 * kernel virtual address is required to implement vmalloc/pkmap/fixmap
 * Refer to asm/processor.h for System Memory Map
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

	if (pgd_none(*pgd_k))
		goto bad_area;
	if (!pgd_present(*pgd))
		set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		goto bad_area;
	if (!p4d_present(*p4d))
		set_p4d(p4d, *p4d_k);

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (pud_none(*pud_k))
		goto bad_area;
	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (pmd_none(*pmd_k))
		goto bad_area;
	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (regs->ecr.cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr.vec == ECR_V_PROTV) &&
	         (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
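	/*
	 * lock_mm_and_find_vma() takes mmap_read_lock, looks up the VMA
	 * and grows the stack if needed; on failure it returns NULL
	 * with the lock already released, which is why the error path
	 * below is bad_area_nosemaphore rather than bad_area.
	 */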
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area_nosemaphore;

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/*
	 * Fault retry nuances, mmap_lock already relinquished by core mm
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}
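
For reference, the sketch below illustrates the locking contract that lock_mm_and_find_vma() provides to callers such as the v6.8 handler above: on success the returned VMA covers the address and mmap_lock is held for read; on failure it returns NULL with the lock already released. This is a condensed illustration, not the actual mm/memory.c implementation (the real helper is considerably more careful about lock acquisition on kernel-mode faults, which is what it uses regs for), and the _sketch name is ours:

/*
 * Condensed sketch of the caller-visible behaviour of
 * lock_mm_and_find_vma(); not the real mm/memory.c code.
 */
static struct vm_area_struct *lock_mm_and_find_vma_sketch(struct mm_struct *mm,
							  unsigned long addr,
							  struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	vma = find_vma(mm, addr);
	if (vma && vma->vm_start <= addr)
		return vma;			/* success: read lock held */

	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
		mmap_read_unlock(mm);
		return NULL;			/* failure: lock released */
	}

	/*
	 * Stack expansion cannot be done under the read lock: re-take
	 * the lock for write, expand, then downgrade back to read.
	 */
	mmap_read_unlock(mm);
	mmap_write_lock(mm);
	vma = find_vma(mm, addr);
	if (vma && (vma->vm_start <= addr ||
		    ((vma->vm_flags & VM_GROWSDOWN) &&
		     !expand_stack_locked(vma, addr)))) {
		mmap_write_downgrade(mm);
		return vma;			/* success: read lock held */
	}
	mmap_write_unlock(mm);
	return NULL;				/* failure: lock released */
}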