/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>

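/* Flush the ATC entry for a single kernel page. */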
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		set_fc(SUPER_DATA);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fc(USER_DATA);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}

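/* Flush a single ATC entry on the 68040/68060. */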
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

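/* Flush the ATC entry for one user page. */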
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

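/* Flushing a kernel address range simply flushes everything. */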
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

#else	/* CONFIG_SUN3 */

/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
			goto next;
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
	sun3_put_context(oldctx);
}

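/* Flush a kernel address range by flushing everything. */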
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif	/* CONFIG_SUN3 */

#else /* !CONFIG_MMU */

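/* Without an MMU there are no TLB entries to flush; these stubs just BUG(). */
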
/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */