[Page-scrape navigation residue removed.]

arch/m68k/include/asm/tlbflush.h — m68k TLB flush primitives, as of Linux v5.9:
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _M68K_TLBFLUSH_H
  3#define _M68K_TLBFLUSH_H
  4
  5#ifdef CONFIG_MMU
  6#ifndef CONFIG_SUN3
  7
  8#include <asm/current.h>
  9#include <asm/mcfmmu.h>
 10
/*
 * Flush the ATC (TLB) entry for a single kernel page at @addr.
 *
 * ColdFire has no per-page flush here: writing MMUOR_CNL to MMUOR
 * clears the whole TLB.  On 040/060 the address space is switched to
 * KERNEL_DS around the pflush so the flush targets the kernel mapping
 * of @addr (presumably pflush honours the FC selected via set_fs();
 * confirm against the 68040 manual).  On 020/030 the "#4,#4"
 * immediates are the FC/mask operands of pflush — NOTE(review): this
 * reading (match supervisor entries) is inferred from the operands
 * only; verify against the 68030 manual.
 */
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);	/* no finer-grained op: clear all */
	} else if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
 26
/*
 * flush all user-space atc entries.
 *
 * ColdFire falls back to a full TLB clear (MMUOR_CNL).  040/060 use
 * "pflushan" (flush user/non-global entries).  020/030 use
 * "pflush #0,#4" — NOTE(review): FC #0 with mask #4 appears to match
 * entries with the supervisor FC bit clear, i.e. user space; confirm
 * against the 68030 manual.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}
 42
/* Flush the single 040/060 ATC entry that maps @addr (per-page pflush). */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
 50
/*
 * Flush the ATC entry mapping a single page at @addr.
 *
 * ColdFire again clears the whole TLB.  The caller is responsible for
 * having the intended address space selected first (see
 * flush_tlb_page(), which brackets this with force_uaccess_begin()).
 */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
 60
#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 *
 * 040/060 use the unqualified "pflusha"; 020/030 likewise flush
 * everything with "pflusha"; ColdFire clears the TLB via MMUOR_CNL.
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}
 78
 79static inline void flush_tlb_mm(struct mm_struct *mm)
 80{
 81	if (mm == current->active_mm)
 82		__flush_tlb();
 83}
 84
/*
 * Flush the ATC entry for the user page at @addr in @vma's address
 * space — only needed when that address space is live on this CPU.
 * force_uaccess_begin() selects the user address space around the
 * per-page flush so it targets the user mapping (see __flush_tlb_one()).
 */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = force_uaccess_begin();

		__flush_tlb_one(addr);
		force_uaccess_end(old_fs);
	}
}
 94
 95static inline void flush_tlb_range(struct vm_area_struct *vma,
 96				   unsigned long start, unsigned long end)
 97{
 98	if (vma->vm_mm == current->active_mm)
 99		__flush_tlb();
100}
101
/* No ranged kernel-flush primitive either: flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
106
107#else
108
109
110/* Reserved PMEGs. */
111extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
112extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
113extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
114extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
115
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
       unsigned long addr;
       unsigned char ctx, oldctx;

       /* Walk every user segment in each of the 8 hardware contexts
	  and invalidate its segmap entry, restoring the original
	  context afterwards. */
       oldctx = sun3_get_context();
       for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
	       for(ctx = 0; ctx < 8; ctx++) {
		       sun3_put_context(ctx);
		       sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	       }
       }

       sun3_put_context(oldctx);
       /* erase all of the userspace pmeg maps, we've clobbered them
	  all anyway */
       /* NOTE(review): SUN3_INVALID_PMEG doubles as the loop bound
	  here, i.e. the count of allocatable PMEGs — confirm. */
       for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
	       if(pmeg_alloc[addr] == 1) {
		       pmeg_alloc[addr] = 0;
		       pmeg_ctx[addr] = 0;
		       pmeg_vaddr[addr] = 0;
	       }
       }

}
143
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
     unsigned char oldctx;
     unsigned char seg;
     unsigned long i;

     /* Switch to mm's hardware context, invalidate each mapped user
	segment and clear its pmeg bookkeeping, then restore the
	previous context. */
     oldctx = sun3_get_context();
     sun3_put_context(mm->context);

     for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
	     seg = sun3_get_segmap(i);
	     if(seg == SUN3_INVALID_PMEG)
		     continue;

	     sun3_put_segmap(i, SUN3_INVALID_PMEG);
	     pmeg_alloc[seg] = 0;
	     pmeg_ctx[seg] = 0;
	     pmeg_vaddr[seg] = 0;
     }

     sun3_put_context(oldctx);

}
168
/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	/* Round @addr down to its PMEG (segment) boundary: the whole
	   segment containing the page gets invalidated, not just the
	   one page. */
	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
	{
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap (addr,  SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);

}
/* Flush a range of pages from TLB. */

static inline void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	/* Align the start down to a PMEG boundary and walk the range
	   one segment at a time in mm's context. */
	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while(start < end)
	{
		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
		     goto next;
		/* Only clear the bookkeeping when the PMEG really
		   belongs to this mm's context... */
		if(pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		/* ...but the segmap entry is invalidated either way.
		   NOTE(review): intentional?  The context was switched
		   to mm above, so a mismatch may be impossible —
		   confirm.  Also: oldctx is not restored on exit here,
		   unlike the other sun3 helpers — verify callers. */
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
	next:
		start += SUN3_PMEG_SIZE;
	}
}
217
/* No ranged flush on sun3 either: flush all user mappings. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
222
/* Flush kernel page from TLB. */
/* Invalidates the whole PMEG (segment) containing @addr. */
static inline void flush_tlb_kernel_page (unsigned long addr)
{
	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
228
229#endif
230
231#else /* !CONFIG_MMU */
232
/*
 * flush all user-space atc entries.
 *
 * !CONFIG_MMU: there is no TLB, so every flush primitive below is a
 * BUG() stub — none of them must ever be reached on a nommu kernel.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

/*
 * NOTE(review): this stub takes a struct mm_struct * where the
 * CONFIG_MMU variants of flush_tlb_range() take a
 * struct vm_area_struct * — looks like a stale signature.  Harmless
 * while the body is BUG(), but verify against generic callers.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}
276
277#endif /* CONFIG_MMU */
278
279#endif /* _M68K_TLBFLUSH_H */
The same header as of Linux v4.6, shown for comparison (pre-SPDX; flush_tlb_page() still uses get_fs()/set_fs() directly):
  1#ifndef _M68K_TLBFLUSH_H
  2#define _M68K_TLBFLUSH_H
  3
  4#ifdef CONFIG_MMU
  5#ifndef CONFIG_SUN3
  6
  7#include <asm/current.h>
  8#include <asm/mcfmmu.h>
  9
/*
 * Flush the ATC entry for a single kernel page at @addr.
 * ColdFire clears the whole TLB (MMUOR_CNL); 040/060 switch to
 * KERNEL_DS around the pflush so it targets the kernel mapping
 * (presumably pflush honours the FC set via set_fs() — confirm);
 * 020/030 pass FC/mask immediates "#4,#4" — NOTE(review): verify
 * against the 68030 manual.
 */
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflush #0,#4");
	}
}

/* Flush the single 040/060 ATC entry that maps @addr. */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

/* Flush the ATC entry mapping one page; caller selects the address
 * space first (see flush_tlb_page()).  ColdFire clears everything. */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_COLDFIRE)
		mmu_write(MMUOR, MMUOR_CNL);
	else if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else if (CPU_IS_020_OR_030)
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_COLDFIRE) {
		mmu_write(MMUOR, MMUOR_CNL);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	} else if (CPU_IS_020_OR_030) {
		__asm__ __volatile__("pflusha");
	}
}

/* Flush user entries for @mm only when it is live on this CPU. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

/*
 * Flush one user page: switch to USER_DS around the per-page flush so
 * it targets the user mapping, then restore the previous fs.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}

/* No ranged primitive: drop all user entries when the range is live. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

/* No ranged kernel flush either: flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}
105
106#else
107
108
109/* Reserved PMEGs. */
110extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
111extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
112extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
113extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
114
115/* Flush all userspace mappings one by one...  (why no flush command,
116   sun?) */
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
       unsigned long addr;
       unsigned char ctx, oldctx;

       /* Invalidate every user segment in all 8 hardware contexts,
	  restoring the original context afterwards. */
       oldctx = sun3_get_context();
       for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
	       for(ctx = 0; ctx < 8; ctx++) {
		       sun3_put_context(ctx);
		       sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	       }
       }

       sun3_put_context(oldctx);
       /* erase all of the userspace pmeg maps, we've clobbered them
	  all anyway */
       /* NOTE(review): SUN3_INVALID_PMEG doubles as the loop bound,
	  i.e. the count of allocatable PMEGs — confirm. */
       for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
	       if(pmeg_alloc[addr] == 1) {
		       pmeg_alloc[addr] = 0;
		       pmeg_ctx[addr] = 0;
		       pmeg_vaddr[addr] = 0;
	       }
       }

}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
{
     unsigned char oldctx;
     unsigned char seg;
     unsigned long i;

     /* Switch to mm's context, invalidate each mapped segment and its
	pmeg bookkeeping, then restore the previous context. */
     oldctx = sun3_get_context();
     sun3_put_context(mm->context);

     for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
	     seg = sun3_get_segmap(i);
	     if(seg == SUN3_INVALID_PMEG)
		     continue;

	     sun3_put_segmap(i, SUN3_INVALID_PMEG);
	     pmeg_alloc[seg] = 0;
	     pmeg_ctx[seg] = 0;
	     pmeg_vaddr[seg] = 0;
     }

     sun3_put_context(oldctx);

}

/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page (struct vm_area_struct *vma,
				   unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	/* The whole PMEG (segment) containing the page is invalidated,
	   not just the one page. */
	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
	{
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap (addr,  SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);

}
/* Flush a range of pages from TLB. */

static inline void flush_tlb_range (struct vm_area_struct *vma,
		      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while(start < end)
	{
		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
		     goto next;
		/* Bookkeeping is cleared only on a context match, but
		   the segmap entry is invalidated either way.
		   NOTE(review): oldctx is never restored on exit here,
		   unlike the other sun3 helpers — verify callers. */
		if(pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
	next:
		start += SUN3_PMEG_SIZE;
	}
}

/* No ranged kernel flush primitive: flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
/* Invalidates the whole PMEG (segment) containing @addr. */
static inline void flush_tlb_kernel_page (unsigned long addr)
{
	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}
227
228#endif
229
230#else /* !CONFIG_MMU */
231
/*
 * flush all user-space atc entries.
 *
 * !CONFIG_MMU: no TLB exists, so every flush primitive below is a
 * BUG() stub — none of them must ever be reached on a nommu kernel.
 */
static inline void __flush_tlb(void)
{
	BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	BUG();
}

/*
 * NOTE(review): takes struct mm_struct * where the CONFIG_MMU
 * variants of flush_tlb_range() take struct vm_area_struct * — looks
 * like a stale signature; harmless while the body is BUG(), but
 * verify against generic callers.
 */
static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
	BUG();
}
275
276#endif /* CONFIG_MMU */
277
278#endif /* _M68K_TLBFLUSH_H */