v5.4: arch/mips/include/asm/mmu_context.h
/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/smp.h>
#include <linux/slab.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/dsemul.h>
#include <asm/ginvt.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>

#include <asm-generic/mm_hooks.h>

#define htw_set_pwbase(pgd)						\
do {									\
	if (cpu_has_htw) {						\
		write_c0_pwbase(pgd);					\
		back_to_back_c0_hazard();				\
	}								\
} while (0)

extern void tlbmiss_handler_setup_pgd(unsigned long);
extern char tlbmiss_handler_setup_pgd_end[];

/* Note: This is also implemented with uasm in arch/mips/kvm/entry.c */
#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
	htw_set_pwbase((unsigned long)pgd);				\
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_xcontext((unsigned long) smp_processor_id() <<	\
			  SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
		TLBMISS_HANDLER_RESTORE();				\
	} while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_RESTORE()					\
	write_c0_context((unsigned long) smp_processor_id() <<		\
			 SMP_CPUID_REGSHIFT)

#define TLBMISS_HANDLER_SETUP()						\
	TLBMISS_HANDLER_RESTORE();					\
	back_to_back_c0_hazard();					\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
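
/*
 * Illustrative sketch (not part of the original file): conceptually, a
 * fast refill path recovers this CPU's pgd by using the CPU id that
 * TLBMISS_HANDLER_RESTORE() stashed in the context register as an index
 * into pgd_current[]. The real handlers are generated with uasm (see
 * the note above TLBMISS_HANDLER_SETUP_PGD); example_refill_pgd() is a
 * hypothetical name.
 */
#if 0
static inline pgd_t *example_refill_pgd(void)
{
	unsigned long cpu = read_c0_context() >> SMP_CPUID_REGSHIFT;

	return (pgd_t *)pgd_current[cpu];
}
#endif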
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The ginvt instruction will invalidate wired entries when its type field
 * targets anything other than the entire TLB. That means that if we were to
 * allow the kernel to create wired entries with the MMID of current->active_mm
 * then those wired entries could be invalidated when we later use ginvt to
 * invalidate TLB entries with that MMID.
 *
 * In order to prevent ginvt from trashing wired entries, we reserve one MMID
 * for use by the kernel when creating wired entries. This MMID will never be
 * assigned to a struct mm, and we'll never target it with a ginvt instruction.
 */
#define MMID_KERNEL_WIRED	0
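
/*
 * Illustrative sketch (not part of the original file): invalidating all
 * TLB entries tagged with a user MMID via GINVT, mirroring the sequence
 * used by drop_mmu_context() below. Because MMID_KERNEL_WIRED is never
 * handed out to a struct mm, it is never passed here, so wired kernel
 * entries survive; example_ginvt_user_mmid() is a hypothetical name.
 */
#if 0
static inline void example_ginvt_user_mmid(u32 mmid)
{
	u32 old_mmid = read_c0_memorymapid();

	write_c0_memorymapid(mmid);	/* never MMID_KERNEL_WIRED */
	mtc0_tlbw_hazard();
	ginvt_mmid();		/* invalidate entries tagged with mmid */
	sync_ginv();
	write_c0_memorymapid(old_mmid);
	instruction_hazard();
}
#endif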

/*
 * All upper bits unused by the hardware are treated as a software
 * ASID extension (a version number).
 */
static inline u64 asid_version_mask(unsigned int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);

	return ~(u64)(asid_mask | (asid_mask - 1));
}

static inline u64 asid_first_version(unsigned int cpu)
{
	return ~asid_version_mask(cpu) + 1;
}
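
/*
 * Worked example (illustrative, not part of the original file): on a
 * CPU whose hardware ASID is 8 bits wide, cpu_asid_mask() == 0xff, so
 * asid_version_mask() == ~0xff and asid_first_version() == 0x100: the
 * version counter lives above bit 7 and advances in steps of 0x100.
 */
#if 0
static inline void asid_layout_example(unsigned int cpu)
{
	WARN_ON(asid_version_mask(cpu) != ~(u64)0xff);
	WARN_ON(asid_first_version(cpu) != 0x100);
}
#endif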

static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm)
{
	if (cpu_has_mmid)
		return atomic64_read(&mm->context.mmid);

	return mm->context.asid[cpu];
}

static inline void set_cpu_context(unsigned int cpu,
				   struct mm_struct *mm, u64 ctx)
{
	if (cpu_has_mmid)
		atomic64_set(&mm->context.mmid, ctx);
	else
		mm->context.asid[cpu] = ctx;
}

#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
#define cpu_asid(cpu, mm) \
	(cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern void get_new_mmu_context(struct mm_struct *mm);
extern void check_mmu_context(struct mm_struct *mm);
extern void check_switch_mmu_context(struct mm_struct *mm);

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	if (cpu_has_mmid) {
		set_cpu_context(0, mm, 0);
	} else {
		for_each_possible_cpu(i)
			set_cpu_context(i, mm, 0);
	}

	mm->context.bd_emupage_allocmap = NULL;
	spin_lock_init(&mm->context.bd_emupage_lock);
	init_waitqueue_head(&mm->context.bd_emupage_queue);

	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	htw_stop();
	check_switch_mmu_context(next);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));
	htw_start();

	local_irq_restore(flags);
}
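
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the kind of IPI TLB flush routine the mm_cpumask() update above keeps
 * honest. Only CPUs still set in mm_cpumask(mm) are interrupted;
 * example_local_flush() is an assumed callback, not a real kernel API.
 */
#if 0
static void example_flush_tlb_mm_others(struct mm_struct *mm)
{
	/* Run the flush on each CPU in mm_cpumask(mm) except the caller. */
	smp_call_function_many(mm_cpumask(mm), example_local_flush, mm, 1);
}
#endif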

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	dsemul_mm_cleanup(mm);
}

#define activate_mm(prev, next)	switch_mm(prev, next, current)
#define deactivate_mm(tsk, mm)	do { } while (0)

static inline void
drop_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u32 old_mmid;
	u64 ctx;

	local_irq_save(flags);

	cpu = smp_processor_id();
	ctx = cpu_context(cpu, mm);

	if (!ctx) {
		/* no-op */
	} else if (cpu_has_mmid) {
		/*
		 * Globally invalidating TLB entries associated with the MMID
		 * is pretty cheap using the GINVT instruction, so we'll do
		 * that rather than incur the overhead of allocating a new
		 * MMID. The latter would be especially difficult since MMIDs
		 * are global & other CPUs may be actively using ctx.
		 */
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu]));
		mtc0_tlbw_hazard();
		ginvt_mmid();
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		/*
		 * mm is currently active, so we can't really drop it.
		 * Instead we bump the ASID.
		 */
		htw_stop();
		get_new_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		htw_start();
	} else {
		/* will get a new context next time */
		set_cpu_context(cpu, mm, 0);
	}

	local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */
v3.15: arch/mips/include/asm/mmu_context.h
/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>

#define TLBMISS_HANDLER_SETUP_PGD(pgd)					\
do {									\
	extern void tlbmiss_handler_setup_pgd(unsigned long);		\
	tlbmiss_handler_setup_pgd((unsigned long)(pgd));		\
} while (0)

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_SETUP()						\
	do {								\
		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);		\
		write_c0_xcontext((unsigned long) smp_processor_id() <<	\
						SMP_CPUID_REGSHIFT);	\
	} while (0)

#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers
 * to the current pgd for each processor. Also, the proc. id is stuffed
 * into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP()						\
	write_c0_context((unsigned long) smp_processor_id() <<		\
						SMP_CPUID_REGSHIFT);	\
	back_to_back_c0_hazard();					\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC	0x40
#define ASID_MASK	0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC	0x10
#define ASID_MASK	0xff0

#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1
#define ASID_MASK	0xff

#endif

#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * All upper bits unused by the hardware are treated as a software
 * ASID extension (a version number).
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
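
/*
 * Worked example (illustrative, not part of the original file): in the
 * classic 8-bit case (ASID_MASK == 0xff), ASID_VERSION_MASK == ~0xffUL
 * and ASID_FIRST_VERSION == 0x100, so the software version counts in
 * steps of 0x100 above the hardware ASID bits.
 */
#if 0
static inline void asid_layout_example(void)
{
	WARN_ON(ASID_VERSION_MASK != ~0xffUL);
	WARN_ON(ASID_FIRST_VERSION != 0x100UL);
}
#endif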

#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	extern void kvm_local_flush_tlb_all(void);
	unsigned long asid = asid_cache(cpu);

	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
#ifdef CONFIG_KVM
		kvm_local_flush_tlb_all();      /* start new asid cycle */
#else
		local_flush_tlb_all();	/* start new asid cycle */
#endif
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
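
/*
 * Illustrative walk-through (not part of the original file) of the
 * rollover above, for the classic 8-bit case (ASID_INC == 1,
 * ASID_MASK == 0xff):
 */
#if 0
	asid = 0x1ff + ASID_INC;	/* 0x200: low 8 ASID bits are zero */
	/* -> local_flush_tlb_all() starts a new ASID cycle */
	/* asid != 0, so the version bits stand: next ASIDs 0x200..0x2ff */
	/* only a full wrap to 0 resets asid to ASID_FIRST_VERSION (0x100) */
#endif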

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		cpu_context(i, mm) = 0;

	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
	local_irq_save(flags);
	mtflags = dvpe();
#else /* Not SMTC */
	local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the EntryHi ASID being replaced happens to be
	 * the value flagged at ASID recycling time as having
	 * an extended life, clear the bit showing it being
	 * in use by this "CPU", and if that's the last bit,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/*
	 * Tread softly on EntryHi, and so long as we support
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}
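
/*
 * Illustrative walk-through (not part of the original file) of the
 * version check in switch_mm() above, classic 8-bit case: a context of
 * 0x1a3 (version 1, ASID 0xa3) seen against an asid_cache of 0x2c7
 * (version 2) is stale, so next gets a fresh ASID before its old 0xa3
 * could alias a version-2 ASID already handed to some other mm.
 */
#if 0
	/* (0x1a3 ^ 0x2c7) == 0x364; 0x364 & ASID_VERSION_MASK != 0 */
#endif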

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = read_c0_entryhi() & ASID_MASK;
	if (smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if (smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			 cpu_asid(cpu, next));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

	local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it.  Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	/* Can't use spinlock because called from TLB flush within DVPE */
	unsigned int prevvpe;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
		get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
		oldasid = (read_c0_entryhi() & ASID_MASK);
		if (smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if (smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				| cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
		write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
	} else {
		/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
		cpu_context(cpu, mm) = 0;
#else /* SMTC */
		int i;

		/* SMTC shares the TLB (and ASIDs) across VPEs */
		for_each_online_cpu(i) {
			if ((smtc_status & SMTC_TLB_SHARED) ||
			    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
				cpu_context(i, mm) = 0;
		}
#endif /* CONFIG_MIPS_MT_SMTC */
	}

	local_irq_restore(flags);
}

#endif /* _ASM_MMU_CONTEXT_H */