v6.8
/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}
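
handle_tlbmiss() returns 0 when the faulting address could be refilled straight from the page tables, and 1 whenever the generic fault path has to take over (kernel address outside the P3 range, a missing or non-present PTE, or a write to a non-writable PTE). The actual caller is the SH low-level exception code in assembly; the fragment below is only a minimal sketch of that return-value contract, using a hypothetical dispatcher name and assuming do_page_fault() as the slow-path handler.

/*
 * Illustrative sketch only, not the real SH entry path: it shows how a
 * nonzero return from handle_tlbmiss() falls through to the full
 * page-fault handler. tlb_miss_dispatch() is a hypothetical name.
 */
static void tlb_miss_dispatch(struct pt_regs *regs, unsigned long error_code,
			      unsigned long address)
{
	/* Fast path: PTE found, TLB refilled, nothing else to do. */
	if (handle_tlbmiss(regs, error_code, address) == 0)
		return;

	/* Slow path: let the generic fault code resolve (or signal) it. */
	do_page_fault(regs, error_code, address);
}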
v6.13.7
/*
 * TLB miss handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/tlb.h>

/*
 * Called with interrupts disabled.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

	p4d = p4d_offset(pgd, address);
	if (p4d_none_or_clear_bad(p4d))
		return 1;
	pud = pud_offset(p4d, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(error_code && !pte_write(entry)))
		return 1;

	if (error_code)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (error_code == FAULT_CODE_INITIAL)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	set_thread_fault_code(error_code);
	update_mmu_cache(NULL, address, pte);

	return 0;
}
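
Apart from the added #include <asm/tlb.h>, the v6.13.7 listing is identical to the v6.8 one; the body of handle_tlbmiss() is unchanged between the two versions shown here.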