// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFNs
 */
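/*
 * Flush ranges below are therefore rounded out to TLB_ENTRY_SIZE
 * boundaries, so both pages mapped by a single jTLB entry are covered.
 */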
#define TLB_ENTRY_SIZE (PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK (PAGE_MASK << 1)

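/* Invalidate every TLB entry, for every address space. */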
void flush_tlb_all(void)
{
	tlb_invalid_all();
}

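/*
 * Invalidate all entries belonging to @mm's ASID.  Without the hardware
 * TLBI instructions there is no per-ASID invalidate here, so fall back
 * to flushing the whole TLB.
 */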
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.asids %0 \n"
		"sync.i \n"
		:
		: "r" (cpu_asid(mm))
		: "memory");
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we need to change the ASID field to invalidate the I-uTLB
 * and D-uTLB as well.
 */
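/*
 * Note: when the old and new ASIDs are equal, a different value is
 * written to ENTRYHI first so that the ASID field really changes; that
 * change is presumably what makes the micro TLBs drop their entries
 * before the original ASID is written back.
 */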
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

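/*
 * Invalidate the mappings of @vma's address space in [start, end).  The
 * range is first rounded out to whole TLB_ENTRY_SIZE (two-page) entries.
 */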
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end += TLB_ENTRY_SIZE - 1;
	end &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
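	/* Hardware TLBI: invalidate by VA + ASID, one two-page entry per step. */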
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vas %0 \n"
			:
			: "r" (start | newpid)
			: "memory");

		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
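	/*
	 * No TLBI instructions: probe the jTLB for each two-page step and
	 * invalidate any matching entry by index, with interrupts disabled.
	 */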
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

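/*
 * Invalidate kernel mappings in [start, end).  Kernel addresses are not
 * ASID-qualified here: the TLBI path uses tlbi.vaas (all address spaces)
 * and the fallback path just reuses whatever ASID is currently in ENTRYHI.
 */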
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end += TLB_ENTRY_SIZE - 1;
	end &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vaas %0 \n"
			:
			: "r" (start)
			: "memory");

		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

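/* Invalidate the single two-page TLB entry covering @addr in @vma's mm. */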
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vas %0 \n"
		"sync.i \n"
		:
		: "r" (addr | newpid)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

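/* Invalidate the two-page TLB entry covering a single kernel address. */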
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vaas %0 \n"
		"sync.i \n"
		:
		: "r" (addr)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);