v3.1
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/init.h>
 12#include <linux/sched.h>
 13#include <linux/smp.h>
 14#include <linux/mm.h>
 15#include <linux/hugetlb.h>
 16
 17#include <asm/cpu.h>
 18#include <asm/bootinfo.h>
 19#include <asm/mmu_context.h>
 20#include <asm/pgtable.h>
 21#include <asm/system.h>
 22
 23extern void build_tlb_refill_handler(void);
 24
 25/*
 26 * Make sure all entries differ.  If they're not different
 27 * MIPS32 will take revenge ...
 28 */
 29#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
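   /*
    * Note (added): CKSEG0 is an unmapped region, so an EntryHi built this way
    * can never match a real translation; shifting the index by (PAGE_SHIFT + 1)
    * also keeps every dummy VPN2 distinct, avoiding machine checks from
    * duplicate TLB entries.
    */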
 30
 31/* Atomicity and interruptibility */
 32#ifdef CONFIG_MIPS_MT_SMTC
 33
 34#include <asm/smtc.h>
 35#include <asm/mipsmtregs.h>
 36
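   /*
    * Note (added): ENTER_CRITICAL/EXIT_CRITICAL deliberately use unbalanced
    * braces; the block opened here is closed by EXIT_CRITICAL, so mvpflags
    * stays in scope across the critical section.  Under SMTC the TLB is
    * shared, so dvpe()/evpe() also stop and restart the other VPEs in
    * addition to masking local interrupts.
    */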
 37#define ENTER_CRITICAL(flags) \
 38	{ \
 39	unsigned int mvpflags; \
 40	local_irq_save(flags);\
 41	mvpflags = dvpe()
 42#define EXIT_CRITICAL(flags) \
 43	evpe(mvpflags); \
 44	local_irq_restore(flags); \
 45	}
 46#else
 47
 48#define ENTER_CRITICAL(flags) local_irq_save(flags)
 49#define EXIT_CRITICAL(flags) local_irq_restore(flags)
 50
 51#endif /* CONFIG_MIPS_MT_SMTC */
 52
 53#if defined(CONFIG_CPU_LOONGSON2)
 54/*
 55 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 56 * unfortunately, the ITLB is not totally transparent to software.
 57 */
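   /*
    * Note (added): write_c0_diag(4) sets the ITLB-flush bit in the Loongson2
    * diagnostic register; the per-VMA variant only needs it for executable
    * mappings (VM_EXEC), since only those can land in the ITLB.
    */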
 58#define FLUSH_ITLB write_c0_diag(4);
 59
 60#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
 61
 62#else
 63
 64#define FLUSH_ITLB
 65#define FLUSH_ITLB_VM(vma)
 66
 67#endif
 68
 69void local_flush_tlb_all(void)
 70{
 71	unsigned long flags;
 72	unsigned long old_ctx;
 73	int entry;
 74
 75	ENTER_CRITICAL(flags);
 76	/* Save old context and create impossible VPN2 value */
 77	old_ctx = read_c0_entryhi();
 78	write_c0_entrylo0(0);
 79	write_c0_entrylo1(0);
 80
 81	entry = read_c0_wired();
 82
 83	/* Blast 'em all away. */
 84	while (entry < current_cpu_data.tlbsize) {
 85		/* Make sure all entries differ. */
 86		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 87		write_c0_index(entry);
 88		mtc0_tlbw_hazard();
 89		tlb_write_indexed();
 90		entry++;
 91	}
 92	tlbw_use_hazard();
 93	write_c0_entryhi(old_ctx);
 94	FLUSH_ITLB;
 95	EXIT_CRITICAL(flags);
 96}
 97
 98/* All entries common to a mm share an asid.  To effectively flush
 99   these entries, we just bump the asid. */
100void local_flush_tlb_mm(struct mm_struct *mm)
101{
102	int cpu;
103
104	preempt_disable();
105
106	cpu = smp_processor_id();
107
108	if (cpu_context(cpu, mm) != 0) {
109		drop_mmu_context(mm, cpu);
110	}
111
112	preempt_enable();
113}
114
115void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
116	unsigned long end)
117{
118	struct mm_struct *mm = vma->vm_mm;
119	int cpu = smp_processor_id();
120
121	if (cpu_context(cpu, mm) != 0) {
122		unsigned long size, flags;
123
124		ENTER_CRITICAL(flags);
125		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
126		size = (size + 1) >> 1;
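   		/*
   		 * Note (added): each TLB entry maps an even/odd pair of pages,
   		 * so "size" is now a count of TLB entries to probe; if that is
   		 * more than half the TLB, it is cheaper to give this mm a fresh
   		 * ASID via drop_mmu_context() than to probe every address.
   		 */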
127		if (size <= current_cpu_data.tlbsize/2) {
128			int oldpid = read_c0_entryhi();
129			int newpid = cpu_asid(cpu, mm);
130
131			start &= (PAGE_MASK << 1);
132			end += ((PAGE_SIZE << 1) - 1);
133			end &= (PAGE_MASK << 1);
134			while (start < end) {
135				int idx;
136
137				write_c0_entryhi(start | newpid);
138				start += (PAGE_SIZE << 1);
139				mtc0_tlbw_hazard();
140				tlb_probe();
141				tlb_probe_hazard();
142				idx = read_c0_index();
143				write_c0_entrylo0(0);
144				write_c0_entrylo1(0);
145				if (idx < 0)
146					continue;
147				/* Make sure all entries differ. */
148				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
149				mtc0_tlbw_hazard();
150				tlb_write_indexed();
151			}
152			tlbw_use_hazard();
153			write_c0_entryhi(oldpid);
154		} else {
155			drop_mmu_context(mm, cpu);
156		}
157		FLUSH_ITLB;
158		EXIT_CRITICAL(flags);
159	}
160}
161
162void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
163{
164	unsigned long size, flags;
165
166	ENTER_CRITICAL(flags);
167	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
168	size = (size + 1) >> 1;
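   	/*
   	 * Note (added): same heuristic as local_flush_tlb_range(); probing
   	 * more than half the TLB costs more than flushing it completely.
   	 */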
169	if (size <= current_cpu_data.tlbsize / 2) {
170		int pid = read_c0_entryhi();
171
172		start &= (PAGE_MASK << 1);
173		end += ((PAGE_SIZE << 1) - 1);
174		end &= (PAGE_MASK << 1);
175
176		while (start < end) {
177			int idx;
178
179			write_c0_entryhi(start);
180			start += (PAGE_SIZE << 1);
181			mtc0_tlbw_hazard();
182			tlb_probe();
183			tlb_probe_hazard();
184			idx = read_c0_index();
185			write_c0_entrylo0(0);
186			write_c0_entrylo1(0);
187			if (idx < 0)
188				continue;
189			/* Make sure all entries differ. */
190			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
191			mtc0_tlbw_hazard();
192			tlb_write_indexed();
193		}
194		tlbw_use_hazard();
195		write_c0_entryhi(pid);
196	} else {
197		local_flush_tlb_all();
198	}
199	FLUSH_ITLB;
200	EXIT_CRITICAL(flags);
201}
202
203void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
204{
205	int cpu = smp_processor_id();
206
207	if (cpu_context(cpu, vma->vm_mm) != 0) {
208		unsigned long flags;
209		int oldpid, newpid, idx;
210
211		newpid = cpu_asid(cpu, vma->vm_mm);
212		page &= (PAGE_MASK << 1);
213		ENTER_CRITICAL(flags);
214		oldpid = read_c0_entryhi();
215		write_c0_entryhi(page | newpid);
216		mtc0_tlbw_hazard();
217		tlb_probe();
218		tlb_probe_hazard();
219		idx = read_c0_index();
220		write_c0_entrylo0(0);
221		write_c0_entrylo1(0);
222		if (idx < 0)
223			goto finish;
224		/* Make sure all entries differ. */
225		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
226		mtc0_tlbw_hazard();
227		tlb_write_indexed();
228		tlbw_use_hazard();
229
230	finish:
231		write_c0_entryhi(oldpid);
232		FLUSH_ITLB_VM(vma);
233		EXIT_CRITICAL(flags);
234	}
235}
236
237/*
238 * This one is only used for pages with the global bit set so we don't care
239 * much about the ASID.
240 */
241void local_flush_tlb_one(unsigned long page)
242{
243	unsigned long flags;
244	int oldpid, idx;
245
246	ENTER_CRITICAL(flags);
247	oldpid = read_c0_entryhi();
248	page &= (PAGE_MASK << 1);
249	write_c0_entryhi(page);
250	mtc0_tlbw_hazard();
251	tlb_probe();
252	tlb_probe_hazard();
253	idx = read_c0_index();
254	write_c0_entrylo0(0);
255	write_c0_entrylo1(0);
256	if (idx >= 0) {
257		/* Make sure all entries differ. */
258		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
259		mtc0_tlbw_hazard();
260		tlb_write_indexed();
261		tlbw_use_hazard();
262	}
263	write_c0_entryhi(oldpid);
264	FLUSH_ITLB;
265	EXIT_CRITICAL(flags);
266}
267
268/*
269 * We will need multiple versions of update_mmu_cache(), one that just
270 * updates the TLB with the new pte(s), and another which also checks
271 * for the R4k "end of page" hardware bug and does what is needed.
272 */
273void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
274{
275	unsigned long flags;
276	pgd_t *pgdp;
277	pud_t *pudp;
278	pmd_t *pmdp;
279	pte_t *ptep;
280	int idx, pid;
281
282	/*
283	 * Handle the debugger faulting in pages for the debuggee.
284	 */
285	if (current->active_mm != vma->vm_mm)
286		return;
287
288	ENTER_CRITICAL(flags);
289
290	pid = read_c0_entryhi() & ASID_MASK;
291	address &= (PAGE_MASK << 1);
292	write_c0_entryhi(address | pid);
293	pgdp = pgd_offset(vma->vm_mm, address);
294	mtc0_tlbw_hazard();
295	tlb_probe();
296	tlb_probe_hazard();
297	pudp = pud_offset(pgdp, address);
298	pmdp = pmd_offset(pudp, address);
299	idx = read_c0_index();
300#ifdef CONFIG_HUGETLB_PAGE
301	/* this could be a huge page  */
302	if (pmd_huge(*pmdp)) {
303		unsigned long lo;
304		write_c0_pagemask(PM_HUGE_MASK);
305		ptep = (pte_t *)pmdp;
306		lo = pte_to_entrylo(pte_val(*ptep));
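   		/*
   		 * Note (added): the single huge-page PTE covers both halves of
   		 * the EntryLo pair: EntryLo0 maps the first half, EntryLo1 the
   		 * second.  Assuming the usual EntryLo layout (PFN in 4 KB units
   		 * starting at bit 6), half a huge page is an offset of
   		 * HPAGE_SIZE >> 7.
   		 */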
307		write_c0_entrylo0(lo);
308		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
309
310		mtc0_tlbw_hazard();
311		if (idx < 0)
312			tlb_write_random();
313		else
314			tlb_write_indexed();
315		write_c0_pagemask(PM_DEFAULT_MASK);
316	} else
317#endif
318	{
319		ptep = pte_offset_map(pmdp, address);
320
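   		/*
   		 * Note (added): "address" was masked down to the even page of
   		 * the pair above (PAGE_MASK << 1), so ptep and ptep + 1 are the
   		 * two PTEs that back this TLB entry.
   		 */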
321#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
322		write_c0_entrylo0(ptep->pte_high);
323		ptep++;
324		write_c0_entrylo1(ptep->pte_high);
325#else
326		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
327		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
328#endif
329		mtc0_tlbw_hazard();
330		if (idx < 0)
331			tlb_write_random();
332		else
333			tlb_write_indexed();
334	}
335	tlbw_use_hazard();
336	FLUSH_ITLB_VM(vma);
337	EXIT_CRITICAL(flags);
338}
339
340void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
341	unsigned long entryhi, unsigned long pagemask)
342{
343	unsigned long flags;
344	unsigned long wired;
345	unsigned long old_pagemask;
346	unsigned long old_ctx;
347
348	ENTER_CRITICAL(flags);
349	/* Save old context and create impossible VPN2 value */
350	old_ctx = read_c0_entryhi();
351	old_pagemask = read_c0_pagemask();
352	wired = read_c0_wired();
353	write_c0_wired(wired + 1);
354	write_c0_index(wired);
355	tlbw_use_hazard();	/* What is the hazard here? */
356	write_c0_pagemask(pagemask);
357	write_c0_entryhi(entryhi);
358	write_c0_entrylo0(entrylo0);
359	write_c0_entrylo1(entrylo1);
360	mtc0_tlbw_hazard();
361	tlb_write_indexed();
362	tlbw_use_hazard();
363
364	write_c0_entryhi(old_ctx);
365	tlbw_use_hazard();	/* What is the hazard here? */
366	write_c0_pagemask(old_pagemask);
367	local_flush_tlb_all();
368	EXIT_CRITICAL(flags);
369}
370
371/*
372 * Used for loading TLB entries before trap_init() has started, when we
373 * don't actually want to add a wired entry which remains throughout the
374 * lifetime of the system
375 */
376
377static int temp_tlb_entry __cpuinitdata;
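   /*
    * Note (added): temp_tlb_entry is set to the top TLB index in tlb_init();
    * temporary entries are handed out from the top of the TLB downwards until
    * they would collide with the wired entries at the bottom.
    */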
378
379__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
380			       unsigned long entryhi, unsigned long pagemask)
381{
382	int ret = 0;
383	unsigned long flags;
384	unsigned long wired;
385	unsigned long old_pagemask;
386	unsigned long old_ctx;
387
388	ENTER_CRITICAL(flags);
389	/* Save old context and create impossible VPN2 value */
390	old_ctx = read_c0_entryhi();
391	old_pagemask = read_c0_pagemask();
392	wired = read_c0_wired();
393	if (--temp_tlb_entry < wired) {
394		printk(KERN_WARNING
395		       "No TLB space left for add_temporary_entry\n");
396		ret = -ENOSPC;
397		goto out;
398	}
399
400	write_c0_index(temp_tlb_entry);
401	write_c0_pagemask(pagemask);
402	write_c0_entryhi(entryhi);
403	write_c0_entrylo0(entrylo0);
404	write_c0_entrylo1(entrylo1);
405	mtc0_tlbw_hazard();
406	tlb_write_indexed();
407	tlbw_use_hazard();
408
409	write_c0_entryhi(old_ctx);
410	write_c0_pagemask(old_pagemask);
411out:
412	EXIT_CRITICAL(flags);
413	return ret;
414}
415
416static int __cpuinitdata ntlb;
417static int __init set_ntlb(char *str)
418{
419	get_option(&str, &ntlb);
420	return 1;
421}
422
423__setup("ntlb=", set_ntlb);
424
425void __cpuinit tlb_init(void)
426{
427	/*
428	 * You should never change this register:
429	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
430	 *     the value in the c0_pagemask register.
431	 *   - The entire mm handling assumes the c0_pagemask register to
432	 *     be set to fixed-size pages.
433	 */
434	write_c0_pagemask(PM_DEFAULT_MASK);
435	write_c0_wired(0);
436	if (current_cpu_type() == CPU_R10000 ||
437	    current_cpu_type() == CPU_R12000 ||
438	    current_cpu_type() == CPU_R14000)
439		write_c0_framemask(0);
440
441	if (kernel_uses_smartmips_rixi) {
442		/*
443		 * Enable the no read, no exec bits, and enable large virtual
444		 * address.
445		 */
446		u32 pg = PG_RIE | PG_XIE;
447#ifdef CONFIG_64BIT
448		pg |= PG_ELPA;
449#endif
450		write_c0_pagegrain(pg);
451	}
452
453	temp_tlb_entry = current_cpu_data.tlbsize - 1;
454
455        /* From this point on the ARC firmware is dead.  */
456	local_flush_tlb_all();
457
458	/* Did I tell you that ARC SUCKS?  */
459
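	/*
	 * Note (added): "ntlb=" restricts random refills to the last ntlb
	 * entries; marking the first (tlbsize - ntlb) entries as wired stops
	 * tlbwr from ever replacing them.
	 */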
460	if (ntlb) {
461		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
462			int wired = current_cpu_data.tlbsize - ntlb;
463			write_c0_wired(wired);
464			write_c0_index(wired-1);
465			printk("Restricting TLB to %d entries\n", ntlb);
466		} else
467			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
468	}
469
470	build_tlb_refill_handler();
471}
v3.5.6
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/init.h>
 12#include <linux/sched.h>
 13#include <linux/smp.h>
 14#include <linux/mm.h>
 15#include <linux/hugetlb.h>
 16
 17#include <asm/cpu.h>
 18#include <asm/bootinfo.h>
 19#include <asm/mmu_context.h>
 20#include <asm/pgtable.h>
 21#include <asm/tlbmisc.h>
 22
 23extern void build_tlb_refill_handler(void);
 24
 25/*
 26 * Make sure all entries differ.  If they're not different
 27 * MIPS32 will take revenge ...
 28 */
 29#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
 30
 31/* Atomicity and interruptibility */
 32#ifdef CONFIG_MIPS_MT_SMTC
 33
 34#include <asm/smtc.h>
 35#include <asm/mipsmtregs.h>
 36
 37#define ENTER_CRITICAL(flags) \
 38	{ \
 39	unsigned int mvpflags; \
 40	local_irq_save(flags);\
 41	mvpflags = dvpe()
 42#define EXIT_CRITICAL(flags) \
 43	evpe(mvpflags); \
 44	local_irq_restore(flags); \
 45	}
 46#else
 47
 48#define ENTER_CRITICAL(flags) local_irq_save(flags)
 49#define EXIT_CRITICAL(flags) local_irq_restore(flags)
 50
 51#endif /* CONFIG_MIPS_MT_SMTC */
 52
 53#if defined(CONFIG_CPU_LOONGSON2)
 54/*
 55 * LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 56 * unfortunately, the ITLB is not totally transparent to software.
 57 */
 58#define FLUSH_ITLB write_c0_diag(4);
 59
 60#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
 61
 62#else
 63
 64#define FLUSH_ITLB
 65#define FLUSH_ITLB_VM(vma)
 66
 67#endif
 68
 69void local_flush_tlb_all(void)
 70{
 71	unsigned long flags;
 72	unsigned long old_ctx;
 73	int entry;
 74
 75	ENTER_CRITICAL(flags);
 76	/* Save old context and create impossible VPN2 value */
 77	old_ctx = read_c0_entryhi();
 78	write_c0_entrylo0(0);
 79	write_c0_entrylo1(0);
 80
 81	entry = read_c0_wired();
 82
 83	/* Blast 'em all away. */
 84	while (entry < current_cpu_data.tlbsize) {
 85		/* Make sure all entries differ. */
 86		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 87		write_c0_index(entry);
 88		mtc0_tlbw_hazard();
 89		tlb_write_indexed();
 90		entry++;
 91	}
 92	tlbw_use_hazard();
 93	write_c0_entryhi(old_ctx);
 94	FLUSH_ITLB;
 95	EXIT_CRITICAL(flags);
 96}
 97
 98/* All entries common to a mm share an asid.  To effectively flush
 99   these entries, we just bump the asid. */
100void local_flush_tlb_mm(struct mm_struct *mm)
101{
102	int cpu;
103
104	preempt_disable();
105
106	cpu = smp_processor_id();
107
108	if (cpu_context(cpu, mm) != 0) {
109		drop_mmu_context(mm, cpu);
110	}
111
112	preempt_enable();
113}
114
115void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
116	unsigned long end)
117{
118	struct mm_struct *mm = vma->vm_mm;
119	int cpu = smp_processor_id();
120
121	if (cpu_context(cpu, mm) != 0) {
122		unsigned long size, flags;
123		int huge = is_vm_hugetlb_page(vma);
124
125		ENTER_CRITICAL(flags);
126		if (huge) {
127			start = round_down(start, HPAGE_SIZE);
128			end = round_up(end, HPAGE_SIZE);
129			size = (end - start) >> HPAGE_SHIFT;
130		} else {
131			start = round_down(start, PAGE_SIZE << 1);
132			end = round_up(end, PAGE_SIZE << 1);
133			size = (end - start) >> (PAGE_SHIFT + 1);
134		}
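		/*
		 * Note (added): "size" now counts TLB entries, one per huge
		 * page or one per even/odd pair of base pages.  Probing more
		 * than half the TLB is slower than simply giving the mm a
		 * fresh ASID.
		 */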
135		if (size <= current_cpu_data.tlbsize/2) {
136			int oldpid = read_c0_entryhi();
137			int newpid = cpu_asid(cpu, mm);
138
139			while (start < end) {
140				int idx;
141
142				write_c0_entryhi(start | newpid);
143				if (huge)
144					start += HPAGE_SIZE;
145				else
146					start += (PAGE_SIZE << 1);
147				mtc0_tlbw_hazard();
148				tlb_probe();
149				tlb_probe_hazard();
150				idx = read_c0_index();
151				write_c0_entrylo0(0);
152				write_c0_entrylo1(0);
153				if (idx < 0)
154					continue;
155				/* Make sure all entries differ. */
156				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
157				mtc0_tlbw_hazard();
158				tlb_write_indexed();
159			}
160			tlbw_use_hazard();
161			write_c0_entryhi(oldpid);
162		} else {
163			drop_mmu_context(mm, cpu);
164		}
165		FLUSH_ITLB;
166		EXIT_CRITICAL(flags);
167	}
168}
169
170void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
171{
172	unsigned long size, flags;
173
174	ENTER_CRITICAL(flags);
175	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
176	size = (size + 1) >> 1;
177	if (size <= current_cpu_data.tlbsize / 2) {
178		int pid = read_c0_entryhi();
179
180		start &= (PAGE_MASK << 1);
181		end += ((PAGE_SIZE << 1) - 1);
182		end &= (PAGE_MASK << 1);
183
184		while (start < end) {
185			int idx;
186
187			write_c0_entryhi(start);
188			start += (PAGE_SIZE << 1);
189			mtc0_tlbw_hazard();
190			tlb_probe();
191			tlb_probe_hazard();
192			idx = read_c0_index();
193			write_c0_entrylo0(0);
194			write_c0_entrylo1(0);
195			if (idx < 0)
196				continue;
197			/* Make sure all entries differ. */
198			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
199			mtc0_tlbw_hazard();
200			tlb_write_indexed();
201		}
202		tlbw_use_hazard();
203		write_c0_entryhi(pid);
204	} else {
205		local_flush_tlb_all();
206	}
207	FLUSH_ITLB;
208	EXIT_CRITICAL(flags);
209}
210
211void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
212{
213	int cpu = smp_processor_id();
214
215	if (cpu_context(cpu, vma->vm_mm) != 0) {
216		unsigned long flags;
217		int oldpid, newpid, idx;
218
219		newpid = cpu_asid(cpu, vma->vm_mm);
220		page &= (PAGE_MASK << 1);
221		ENTER_CRITICAL(flags);
222		oldpid = read_c0_entryhi();
223		write_c0_entryhi(page | newpid);
224		mtc0_tlbw_hazard();
225		tlb_probe();
226		tlb_probe_hazard();
227		idx = read_c0_index();
228		write_c0_entrylo0(0);
229		write_c0_entrylo1(0);
230		if (idx < 0)
231			goto finish;
232		/* Make sure all entries differ. */
233		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
234		mtc0_tlbw_hazard();
235		tlb_write_indexed();
236		tlbw_use_hazard();
237
238	finish:
239		write_c0_entryhi(oldpid);
240		FLUSH_ITLB_VM(vma);
241		EXIT_CRITICAL(flags);
242	}
243}
244
245/*
246 * This one is only used for pages with the global bit set so we don't care
247 * much about the ASID.
248 */
249void local_flush_tlb_one(unsigned long page)
250{
251	unsigned long flags;
252	int oldpid, idx;
253
254	ENTER_CRITICAL(flags);
255	oldpid = read_c0_entryhi();
256	page &= (PAGE_MASK << 1);
257	write_c0_entryhi(page);
258	mtc0_tlbw_hazard();
259	tlb_probe();
260	tlb_probe_hazard();
261	idx = read_c0_index();
262	write_c0_entrylo0(0);
263	write_c0_entrylo1(0);
264	if (idx >= 0) {
265		/* Make sure all entries differ. */
266		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
267		mtc0_tlbw_hazard();
268		tlb_write_indexed();
269		tlbw_use_hazard();
270	}
271	write_c0_entryhi(oldpid);
272	FLUSH_ITLB;
273	EXIT_CRITICAL(flags);
274}
275
276/*
277 * We will need multiple versions of update_mmu_cache(), one that just
278 * updates the TLB with the new pte(s), and another which also checks
279 * for the R4k "end of page" hardware bug and does what is needed.
280 */
281void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
282{
283	unsigned long flags;
284	pgd_t *pgdp;
285	pud_t *pudp;
286	pmd_t *pmdp;
287	pte_t *ptep;
288	int idx, pid;
289
290	/*
291	 * Handle the debugger faulting in pages for the debuggee.
292	 */
293	if (current->active_mm != vma->vm_mm)
294		return;
295
296	ENTER_CRITICAL(flags);
297
298	pid = read_c0_entryhi() & ASID_MASK;
299	address &= (PAGE_MASK << 1);
300	write_c0_entryhi(address | pid);
301	pgdp = pgd_offset(vma->vm_mm, address);
302	mtc0_tlbw_hazard();
303	tlb_probe();
304	tlb_probe_hazard();
305	pudp = pud_offset(pgdp, address);
306	pmdp = pmd_offset(pudp, address);
307	idx = read_c0_index();
308#ifdef CONFIG_HUGETLB_PAGE
309	/* this could be a huge page  */
310	if (pmd_huge(*pmdp)) {
311		unsigned long lo;
312		write_c0_pagemask(PM_HUGE_MASK);
313		ptep = (pte_t *)pmdp;
314		lo = pte_to_entrylo(pte_val(*ptep));
315		write_c0_entrylo0(lo);
316		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
317
318		mtc0_tlbw_hazard();
319		if (idx < 0)
320			tlb_write_random();
321		else
322			tlb_write_indexed();
323		write_c0_pagemask(PM_DEFAULT_MASK);
324	} else
325#endif
326	{
327		ptep = pte_offset_map(pmdp, address);
328
329#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
330		write_c0_entrylo0(ptep->pte_high);
331		ptep++;
332		write_c0_entrylo1(ptep->pte_high);
333#else
334		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
335		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
336#endif
337		mtc0_tlbw_hazard();
338		if (idx < 0)
339			tlb_write_random();
340		else
341			tlb_write_indexed();
342	}
343	tlbw_use_hazard();
344	FLUSH_ITLB_VM(vma);
345	EXIT_CRITICAL(flags);
346}
347
348void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
349		     unsigned long entryhi, unsigned long pagemask)
350{
351	unsigned long flags;
352	unsigned long wired;
353	unsigned long old_pagemask;
354	unsigned long old_ctx;
355
356	ENTER_CRITICAL(flags);
357	/* Save old context and create impossible VPN2 value */
358	old_ctx = read_c0_entryhi();
359	old_pagemask = read_c0_pagemask();
360	wired = read_c0_wired();
361	write_c0_wired(wired + 1);
362	write_c0_index(wired);
363	tlbw_use_hazard();	/* What is the hazard here? */
364	write_c0_pagemask(pagemask);
365	write_c0_entryhi(entryhi);
366	write_c0_entrylo0(entrylo0);
367	write_c0_entrylo1(entrylo1);
368	mtc0_tlbw_hazard();
369	tlb_write_indexed();
370	tlbw_use_hazard();
371
372	write_c0_entryhi(old_ctx);
373	tlbw_use_hazard();	/* What is the hazard here? */
374	write_c0_pagemask(old_pagemask);
375	local_flush_tlb_all();
376	EXIT_CRITICAL(flags);
377}
378
378
379static int __cpuinitdata ntlb;
380static int __init set_ntlb(char *str)
381{
382	get_option(&str, &ntlb);
383	return 1;
384}
385
386__setup("ntlb=", set_ntlb);
387
388void __cpuinit tlb_init(void)
389{
390	/*
391	 * You should never change this register:
392	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
393	 *     the value in the c0_pagemask register.
394	 *   - The entire mm handling assumes the c0_pagemask register to
395	 *     be set to fixed-size pages.
396	 */
397	write_c0_pagemask(PM_DEFAULT_MASK);
398	write_c0_wired(0);
399	if (current_cpu_type() == CPU_R10000 ||
400	    current_cpu_type() == CPU_R12000 ||
401	    current_cpu_type() == CPU_R14000)
402		write_c0_framemask(0);
403
404	if (kernel_uses_smartmips_rixi) {
405		/*
406		 * Enable the no read, no exec bits, and enable large virtual
407		 * address.
408		 */
409		u32 pg = PG_RIE | PG_XIE;
410#ifdef CONFIG_64BIT
411		pg |= PG_ELPA;
412#endif
413		write_c0_pagegrain(pg);
414	}
415
416        /* From this point on the ARC firmware is dead.  */
417	local_flush_tlb_all();
418
419	/* Did I tell you that ARC SUCKS?  */
420
421	if (ntlb) {
422		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
423			int wired = current_cpu_data.tlbsize - ntlb;
424			write_c0_wired(wired);
425			write_c0_index(wired-1);
426			printk("Restricting TLB to %d entries\n", ntlb);
427		} else
428			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
429	}
430
431	build_tlb_refill_handler();
432}