/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 *
 * Changelog:
 *	27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

void __check_vmalloc_seq(struct mm_struct *mm);
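
/*
 * Background sketch (added for exposition, not part of the original file):
 * init_mm.context.vmalloc_seq is bumped whenever the kernel's vmalloc page
 * tables grow, and each mm keeps its own copy recording how much of that
 * mapping it has seen. When the counters disagree on a switch,
 * __check_vmalloc_seq() copies the missing kernel page table entries into
 * this mm's page tables before it is used.
 */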

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}
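
/*
 * Background sketch (added for exposition, not part of the original file):
 * a new mm starts with context.id == 0, which the ASID allocator treats as
 * "no ASID assigned yet"; check_and_switch_context() hands out a real ASID
 * (with a generation number in the upper bits) the first time the mm is
 * switched in, and again after an ASID-space rollover.
 */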

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else /* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */
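
/*
 * Call-sequence sketch (added for exposition, not part of the original
 * file): on a UP, non-ASID system the scheduler switches mms with IRQs
 * disabled, so the expensive VIVT cache flush in cpu_switch_mm() is
 * deferred until interrupts are enabled again:
 *
 *	context_switch()                    IRQs off, rq lock held
 *	  switch_mm(prev, next, tsk)
 *	    check_and_switch_context(next)  sets context.switch_pending
 *	  ...                               rq lock dropped, IRQs on again
 *	  finish_arch_post_lock_switch()    cpu_switch_mm() finally runs
 */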

#endif /* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

#endif /* CONFIG_CPU_HAS_ASID */

#define destroy_context(mm)		do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
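
/*
 * Usage note (added for exposition, not part of the original file):
 * activate_mm() is invoked outside a regular context switch, e.g. when
 * exec installs a fresh address space for the current task; there is no
 * task being scheduled in at that point, hence the NULL tsk argument
 * passed through to switch_mm().
 */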

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}
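
/*
 * Background sketch (added for exposition, not part of the original file):
 * mm_cpumask(mm) tracks which CPUs may hold TLB or cache state for an mm.
 * cpumask_test_and_set_cpu() both marks this CPU as a user of "next" and
 * tells us whether we are new to it; on VIVT caches, where the switch
 * flushes all of "prev"'s state from this CPU, the previous mm's bit can
 * be cleared again so that later TLB shootdowns skip this CPU.
 */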

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif