/*
 * include/asm-s390/mmu_context.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

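/*
 * Initialize a fresh mm context: reset the flush state and set up the
 * ASCE (address space control element) bits describing the top-level
 * page table, which on 64 bit is a region-third table.  Whether the
 * context gets extended page tables (PGSTEs, needed by KVM guests) is
 * inherited from the creating process via alloc_pgste, see the comment
 * below.
 */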
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
        if (current->mm && current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
                 * page table allocation and the page table operations will
                 * look at has_pgste to distinguish normal and extended page
                 * tables. The only way to create extended page tables is to
                 * set alloc_pgste and then create a new context (e.g. dup_mm).
                 * The page table allocation is called after init_new_context
                 * and if has_pgste is set, it will create extended page
                 * tables.
                 */
                mm->context.has_pgste = 1;
                mm->context.alloc_pgste = 1;
        } else {
                mm->context.has_pgste = 0;
                mm->context.alloc_pgste = 0;
        }
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}

#define destroy_context(mm)             do { } while (0)

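/*
 * lctl loads the 32-bit control registers used in ESA/390 mode, lctlg
 * loads the full 64-bit control registers of z/Architecture; pick the
 * opcode that matches the build.
 */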
#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif

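/*
 * Make the page tables of "mm" the current user address space: build
 * the ASCE from asce_bits and the physical address of the top-level
 * page table, then load it into control register 1 (primary space) or
 * control register 13 (home space), depending on the addressing mode
 * user space runs in.
 */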
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
        pgd_t *pgd = mm->pgd;

        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
        if (user_mode != HOME_SPACE_MODE) {
                /* Load primary space page table origin. */
                asm volatile(LCTL_OPCODE" 1,1,%0\n"
                             : : "m" (S390_lowcore.user_asce) );
        } else
                /* Load home space page table origin. */
                asm volatile(LCTL_OPCODE" 13,13,%0"
                             : : "m" (S390_lowcore.user_asce) );
        set_fs(current->thread.mm_segment);
}

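/*
 * Switch this CPU to the address space of "next": mark the CPU in the
 * mm's cpumask, load the new ASCE via update_mm() and keep the attach
 * counters balanced.  A TLB flush that was deferred while the mm was
 * detached (flush_mm set) is carried out now.
 */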
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
        if (next->context.flush_mm)
                __tlb_flush_mm(next);
}

#define enter_lazy_tlb(mm,tsk)  do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}

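/*
 * A new mm always starts with the maximum address space size; if the
 * parent runs with a smaller asce_limit (a 31-bit compat task, for
 * instance), downgrade the child's page table to the same limit.
 */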
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
        if (oldmm->context.asce_limit < mm->context.asce_limit)
                crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */
/*
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>

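/*
 * Initialize a fresh mm context: no CPUs attached yet, no TLB flush
 * pending, and an ASCE describing the top-level page table (a
 * region-third table on 64 bit).  Extended (PGSTE) page tables are
 * disabled to begin with; has_pgste starts out as zero.
 */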
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
        mm->context.has_pgste = 0;
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}

#define destroy_context(mm)             do { } while (0)

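/*
 * Point the lowcore user ASCE at the page tables of "mm".  With
 * load_primary set the new ASCE is also loaded into control register 1
 * (the primary space ASCE) right away; otherwise only the lowcore copy
 * is updated.
 */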
static inline void update_user_asce(struct mm_struct *mm, int load_primary)
{
        pgd_t *pgd = mm->pgd;

        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
        if (load_primary)
                __ctl_load(S390_lowcore.user_asce, 1, 1);
        set_fs(current->thread.mm_segment);
}

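/*
 * Detach the user address space: let the user ASCE point at the kernel
 * ASCE, optionally load it into control register 1 (primary space) and
 * always into control register 7 (secondary space), so no user page
 * table remains attached to this CPU.
 */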
static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
{
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        if (load_primary)
                __ctl_load(S390_lowcore.user_asce, 1, 1);
        __ctl_load(S390_lowcore.user_asce, 7, 7);
}

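/*
 * Ensure control register 1 (the primary space ASCE) holds the kernel
 * ASCE while executing in the kernel; TIF_ASCE marks the task so the
 * user ASCE is put back in place later, outside of this header.
 */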
static inline void update_primary_asce(struct task_struct *tsk)
{
        unsigned long asce;

        __ctl_store(asce, 1, 1);
        if (asce != S390_lowcore.kernel_asce)
                __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        set_tsk_thread_flag(tsk, TIF_ASCE);
}

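/*
 * Switch to the address space of "next".  The upper 16 bits of
 * attach_count are used by the TLB flush code to signal a flush in
 * progress; if one is pending, loading the user ASCE is postponed, the
 * task is flagged with TIF_TLB_WAIT and the attach is completed later
 * in finish_arch_post_lock_switch().  On machines with local-clearing
 * TLB instructions (MACHINE_HAS_TLB_LC) cpu_attach_mask tracks which
 * CPUs currently have the mm attached.
 */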
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        update_primary_asce(tsk);
        if (prev == next)
                return;
        if (MACHINE_HAS_TLB_LC)
                cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
        if (atomic_inc_return(&next->context.attach_count) >> 16) {
                /* Delay update_user_asce until all TLB flushes are done. */
                set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
                /* Clear old ASCE by loading the kernel ASCE. */
                clear_user_asce(next, 0);
        } else {
                cpumask_set_cpu(cpu, mm_cpumask(next));
                update_user_asce(next, 0);
                if (next->context.flush_mm)
                        /* Flush pending TLBs */
                        __tlb_flush_mm(next);
        }
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

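/*
 * Called once the scheduler has released the run queue lock.  If the
 * attach was deferred in switch_mm() (TIF_TLB_WAIT), wait until no TLB
 * flush is in flight anymore, then attach the CPU, load the user ASCE
 * and perform any flush that is still pending for this mm.
 */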
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;

        if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
                return;
        preempt_disable();
        clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
        while (atomic_read(&mm->context.attach_count) >> 16)
                cpu_relax();

        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        update_user_asce(mm, 0);
        if (mm->context.flush_mm)
                __tlb_flush_mm(mm);
        preempt_enable();
}

#define enter_lazy_tlb(mm,tsk)  do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
        if (oldmm->context.asce_limit < mm->context.asce_limit)
                crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */