/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

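/*
 * When top-level kernel page-table entries covering the vmalloc/ioremap
 * space change, init_mm.context.vmalloc_seq is bumped; __check_vmalloc_seq()
 * (implemented in arch/arm/mm/ioremap.c) copies the missing kernel entries
 * into this mm and brings its vmalloc_seq up to date. LPAE kernels can skip
 * the check below: their lower-level kernel page tables are shared with
 * init_mm, so new vmalloc mappings become visible to every mm automatically.
 */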
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

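/*
 * A fresh mm starts out with ASID 0, which the allocator treats as
 * "no ASID assigned yet": check_and_switch_context() above hands out a
 * real ASID the first time the mm is switched in, and again after an
 * ASID-generation rollover.
 */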
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

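/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance may not be seen
 * by all cores. With the workaround enabled, a15_erratum_get_cpumask()
 * reports which CPUs must be flushed explicitly (via IPI) for this mm;
 * without it, the stub below makes the call a no-op.
 */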
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */
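
/*
 * Illustrative only: a sketch of how the deferred switch above pairs with
 * the scheduler on these non-ASID UP systems. The call sites named here are
 * the usual ones (context_switch() and the post-lock-switch hook), not
 * additional API defined by this header.
 */
#if 0	/* example only, never compiled */
	/* in context_switch(), with IRQs disabled: */
	switch_mm(prev_mm, next_mm, next);	/* may just set switch_pending */
	/* ...runqueue lock dropped, IRQs enabled again... */
	finish_arch_post_lock_switch();		/* performs the cpu_switch_mm() */
#endif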

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

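/*
 * Illustration (not extra API): on exec, the core kernel installs the new
 * mm with activate_mm(), which on ARM is simply switch_mm() with no task
 * argument.
 */
#if 0	/* example only, never compiled */
	/* roughly what fs/exec.c:exec_mmap() does: */
	activate_mm(tsk->active_mm, new_mm);	/* -> switch_mm(..., NULL) */
#endif
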
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

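	/*
	 * Only do the real switch when this CPU is new to the mm or the mm
	 * itself changed. On VIVT caches, switching flushes this CPU's
	 * cache/TLB entries for the old mm, so it can be dropped from the
	 * old mm's mask and maintenance for it need not reach us again.
	 */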
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}
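
/*
 * With vmap'ed stacks, a kernel thread entering lazy-TLB mode keeps running
 * on whatever mm it borrows, and its own stack lives in vmalloc space. Sync
 * the borrowed mm's vmalloc mappings first so the stack is guaranteed to be
 * mapped in the page tables we stay on.
 */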
#ifdef CONFIG_VMAP_STACK
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif