/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
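/*
 * vmalloc_seq counts changes to the kernel's vmalloc/ioremap mappings.
 * On !LPAE, each mm carries its own copy of the kernel page table
 * entries covering that region, so if this mm's count has fallen behind
 * init_mm's, __check_vmalloc_seq() brings the copies back in sync
 * before the mm is used. (On LPAE the kernel mappings are shared, so no
 * sync is needed.)
 */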
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

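/*
 * A context.id of 0 means "no ASID allocated yet"; a real ASID and
 * generation are handed out lazily by check_and_switch_context() the
 * first time this mm is switched in.
 */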
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

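/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance issued by one
 * core may not reach the others, so affected kernels fall back to IPIs.
 * a15_erratum_get_cpumask() builds the mask of CPUs that may still hold
 * TLB entries for this mm, so the IPI-based workaround can target just
 * those.
 */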
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

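/*
 * activate_mm() is what the core kernel calls (e.g. at exec time) to
 * make "next" the current address space; on ARM it is simply a
 * switch_mm() with no associated task, hence the NULL tsk.
 */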
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#ifdef CONFIG_VMAP_STACK
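/*
 * With CONFIG_VMAP_STACK, kernel stacks live in the vmalloc area. A
 * kernel thread that borrows this mm (lazy TLB) must find its stack
 * already mapped in this mm's page tables, since a fault cannot be
 * taken on the stack itself, so sync the vmalloc mappings eagerly here.
 */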
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>

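/*
 * "kvm" here is short for kernel virtual memory, not the hypervisor:
 * kvm_seq counts changes to the kernel's ioremap/vmalloc mappings, and
 * __check_kvm_seq() copies any new entries from init_mm into this mm's
 * page tables (the older name for what later became vmalloc_seq).
 */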
void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 *  31                         7          0
 *  +-------------------------+-----------+
 *  |      process ID         |   ASID    |
 *  +-------------------------+-----------+
 *  |              context ID             |
 *  +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)
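/*
 * Example: with ASID_BITS == 8, a context.id of 0x00000305 encodes
 * generation 3 in bits [31:8] and ASID 5 in bits [7:0].
 * ASID_FIRST_VERSION (0x100) is the first generation handed out; once
 * cpu_last_asid moves on to a newer generation, the check in
 * check_context() below forces a fresh ASID to be allocated.
 */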

extern unsigned int cpu_last_asid;
#ifdef CONFIG_SMP
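/*
 * current_mm records the mm that is live on each CPU; the ASID rollover
 * code in arch/arm/mm/context.c uses it to hand the currently running
 * mm a fresh ASID via IPI when the generation wraps.
 */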
DECLARE_PER_CPU(struct mm_struct *, current_mm);
#endif

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
	/*
	 * This code is executed with interrupts enabled. Therefore,
	 * mm->context.id cannot be updated to the latest ASID version
	 * on a different CPU (and condition below not triggered)
	 * without first getting an IPI to reset the context. The
	 * alternative is to take a read_lock on mm->context.id_lock
	 * (after changing its type to rwlock_t).
	 */
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

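/*
 * The comma expression below makes init_new_context() evaluate to 0
 * (success), as the generic fork code expects an int return, while
 * __init_new_context() itself returns void.
 */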
#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)

#else

static inline void check_context(struct mm_struct *mm)
{
#ifdef CONFIG_MMU
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
#endif
}

#define init_new_context(tsk,mm)	0

#endif

#define destroy_context(mm)		do { } while(0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
#ifdef CONFIG_SMP
		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
		*crt_mm = next;
#endif
		check_context(next);
		cpu_switch_mm(next->pgd, next);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit or the unmapping of it would cause total havoc.
 * (the macro is used as remove_vma() is static to mm/mmap.c)
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		mm->mmap_cache = NULL; \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
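/*
 * 0xffff0000 is the "high vectors" page: the kernel maps the CPU
 * exception vectors (and the kuser helpers) there in every process,
 * which is the page the fake VMA above exposes to ptrace.
 */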

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

#endif