v6.2
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/cpu_pm.h>
 12#include <linux/init.h>
 13#include <linux/sched.h>
 14#include <linux/smp.h>
 15#include <linux/mm.h>
 16#include <linux/hugetlb.h>
 17#include <linux/export.h>
 18
 19#include <asm/cpu.h>
 20#include <asm/cpu-type.h>
 21#include <asm/bootinfo.h>
 22#include <asm/hazards.h>
 23#include <asm/mmu_context.h>
 24#include <asm/tlb.h>
 25#include <asm/tlbmisc.h>
 26
 27extern void build_tlb_refill_handler(void);
 28
 29/*
  30 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, while LOONGSON-3 has
 31 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 32 * itlb/dtlb are not totally transparent to software.
 33 */
 34static inline void flush_micro_tlb(void)
 35{
 36	switch (current_cpu_type()) {
 37	case CPU_LOONGSON2EF:
 38		write_c0_diag(LOONGSON_DIAG_ITLB);
 39		break;
 40	case CPU_LOONGSON64:
 41		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
 42		break;
 43	default:
 44		break;
 45	}
 46}
 47
 48static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 49{
 50	if (vma->vm_flags & VM_EXEC)
 51		flush_micro_tlb();
 52}
 53
 54void local_flush_tlb_all(void)
 55{
 56	unsigned long flags;
 57	unsigned long old_ctx;
 58	int entry, ftlbhighset;
 59
 60	local_irq_save(flags);
 61	/* Save old context and create impossible VPN2 value */
 62	old_ctx = read_c0_entryhi();
 63	htw_stop();
 64	write_c0_entrylo0(0);
 65	write_c0_entrylo1(0);
 66
 67	entry = num_wired_entries();
 68
 69	/*
 70	 * Blast 'em all away.
 71	 * If there are any wired entries, fall back to iterating
 72	 */
 73	if (cpu_has_tlbinv && !entry) {
 74		if (current_cpu_data.tlbsizevtlb) {
 75			write_c0_index(0);
 76			mtc0_tlbw_hazard();
 77			tlbinvf();  /* invalidate VTLB */
 78		}
 79		ftlbhighset = current_cpu_data.tlbsizevtlb +
 80			current_cpu_data.tlbsizeftlbsets;
 81		for (entry = current_cpu_data.tlbsizevtlb;
 82		     entry < ftlbhighset;
 83		     entry++) {
 84			write_c0_index(entry);
 85			mtc0_tlbw_hazard();
 86			tlbinvf();  /* invalidate one FTLB set */
 87		}
 88	} else {
 89		while (entry < current_cpu_data.tlbsize) {
 90			/* Make sure all entries differ. */
 91			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 92			write_c0_index(entry);
 93			mtc0_tlbw_hazard();
 94			tlb_write_indexed();
 95			entry++;
 96		}
 97	}
 98	tlbw_use_hazard();
 99	write_c0_entryhi(old_ctx);
100	htw_start();
101	flush_micro_tlb();
102	local_irq_restore(flags);
103}
104EXPORT_SYMBOL(local_flush_tlb_all);
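/*
 * Editor's note, not part of the original file: a worked example of the
 * tlbinv path above, using assumed sizes.  On a core reporting 64 VTLB
 * entries (tlbsizevtlb) and 128 FTLB sets (tlbsizeftlbsets), the single
 * tlbinvf issued at index 0 wipes the whole VTLB, and the loop then issues
 * one tlbinvf per FTLB set at indices 64..191 (ftlbhighset == 192).  When
 * tlbinv is unavailable, or wired entries exist, every one of the tlbsize
 * entries is instead overwritten individually with a unique dummy VPN2.
 */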
105
106void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
107	unsigned long end)
108{
109	struct mm_struct *mm = vma->vm_mm;
110	int cpu = smp_processor_id();
111
112	if (cpu_context(cpu, mm) != 0) {
113		unsigned long size, flags;
114
115		local_irq_save(flags);
116		start = round_down(start, PAGE_SIZE << 1);
117		end = round_up(end, PAGE_SIZE << 1);
118		size = (end - start) >> (PAGE_SHIFT + 1);
119		if (size <= (current_cpu_data.tlbsizeftlbsets ?
120			     current_cpu_data.tlbsize / 8 :
121			     current_cpu_data.tlbsize / 2)) {
122			unsigned long old_entryhi, old_mmid;
123			int newpid = cpu_asid(cpu, mm);
124
125			old_entryhi = read_c0_entryhi();
126			if (cpu_has_mmid) {
127				old_mmid = read_c0_memorymapid();
128				write_c0_memorymapid(newpid);
129			}
130
131			htw_stop();
132			while (start < end) {
133				int idx;
134
135				if (cpu_has_mmid)
136					write_c0_entryhi(start);
137				else
138					write_c0_entryhi(start | newpid);
139				start += (PAGE_SIZE << 1);
140				mtc0_tlbw_hazard();
141				tlb_probe();
142				tlb_probe_hazard();
143				idx = read_c0_index();
144				write_c0_entrylo0(0);
145				write_c0_entrylo1(0);
146				if (idx < 0)
147					continue;
148				/* Make sure all entries differ. */
149				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
150				mtc0_tlbw_hazard();
151				tlb_write_indexed();
152			}
153			tlbw_use_hazard();
154			write_c0_entryhi(old_entryhi);
155			if (cpu_has_mmid)
156				write_c0_memorymapid(old_mmid);
157			htw_start();
158		} else {
159			drop_mmu_context(mm);
160		}
161		flush_micro_tlb();
162		local_irq_restore(flags);
163	}
164}
165
166void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
167{
168	unsigned long size, flags;
169
170	local_irq_save(flags);
171	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
172	size = (size + 1) >> 1;
173	if (size <= (current_cpu_data.tlbsizeftlbsets ?
174		     current_cpu_data.tlbsize / 8 :
175		     current_cpu_data.tlbsize / 2)) {
176		int pid = read_c0_entryhi();
177
178		start &= (PAGE_MASK << 1);
179		end += ((PAGE_SIZE << 1) - 1);
180		end &= (PAGE_MASK << 1);
181		htw_stop();
182
183		while (start < end) {
184			int idx;
185
186			write_c0_entryhi(start);
187			start += (PAGE_SIZE << 1);
188			mtc0_tlbw_hazard();
189			tlb_probe();
190			tlb_probe_hazard();
191			idx = read_c0_index();
192			write_c0_entrylo0(0);
193			write_c0_entrylo1(0);
194			if (idx < 0)
195				continue;
196			/* Make sure all entries differ. */
197			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
198			mtc0_tlbw_hazard();
199			tlb_write_indexed();
200		}
201		tlbw_use_hazard();
202		write_c0_entryhi(pid);
203		htw_start();
204	} else {
205		local_flush_tlb_all();
206	}
207	flush_micro_tlb();
208	local_irq_restore(flags);
209}
210
211void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
212{
213	int cpu = smp_processor_id();
214
215	if (cpu_context(cpu, vma->vm_mm) != 0) {
216		unsigned long old_mmid;
217		unsigned long flags, old_entryhi;
218		int idx;
219
220		page &= (PAGE_MASK << 1);
221		local_irq_save(flags);
222		old_entryhi = read_c0_entryhi();
223		htw_stop();
224		if (cpu_has_mmid) {
225			old_mmid = read_c0_memorymapid();
226			write_c0_entryhi(page);
227			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
228		} else {
229			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
230		}
231		mtc0_tlbw_hazard();
232		tlb_probe();
233		tlb_probe_hazard();
234		idx = read_c0_index();
235		write_c0_entrylo0(0);
236		write_c0_entrylo1(0);
237		if (idx < 0)
238			goto finish;
239		/* Make sure all entries differ. */
240		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
241		mtc0_tlbw_hazard();
242		tlb_write_indexed();
243		tlbw_use_hazard();
244
245	finish:
246		write_c0_entryhi(old_entryhi);
247		if (cpu_has_mmid)
248			write_c0_memorymapid(old_mmid);
249		htw_start();
250		flush_micro_tlb_vm(vma);
251		local_irq_restore(flags);
252	}
253}
254
255/*
256 * This one is only used for pages with the global bit set so we don't care
257 * much about the ASID.
258 */
259void local_flush_tlb_one(unsigned long page)
260{
261	unsigned long flags;
262	int oldpid, idx;
263
264	local_irq_save(flags);
265	oldpid = read_c0_entryhi();
266	htw_stop();
267	page &= (PAGE_MASK << 1);
268	write_c0_entryhi(page);
269	mtc0_tlbw_hazard();
270	tlb_probe();
271	tlb_probe_hazard();
272	idx = read_c0_index();
273	write_c0_entrylo0(0);
274	write_c0_entrylo1(0);
275	if (idx >= 0) {
276		/* Make sure all entries differ. */
277		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
278		mtc0_tlbw_hazard();
279		tlb_write_indexed();
280		tlbw_use_hazard();
281	}
282	write_c0_entryhi(oldpid);
283	htw_start();
284	flush_micro_tlb();
285	local_irq_restore(flags);
286}
287
288/*
289 * We will need multiple versions of update_mmu_cache(), one that just
290 * updates the TLB with the new pte(s), and another which also checks
 291 * for the R4k "end of page" hardware bug and works around it.
292 */
293void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
294{
295	unsigned long flags;
296	pgd_t *pgdp;
297	p4d_t *p4dp;
298	pud_t *pudp;
299	pmd_t *pmdp;
300	pte_t *ptep;
301	int idx, pid;
302
303	/*
 304	 * Handle debugger faulting in for debuggee.
305	 */
306	if (current->active_mm != vma->vm_mm)
307		return;
308
309	local_irq_save(flags);
310
311	htw_stop();
312	address &= (PAGE_MASK << 1);
313	if (cpu_has_mmid) {
314		write_c0_entryhi(address);
315	} else {
316		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
317		write_c0_entryhi(address | pid);
318	}
319	pgdp = pgd_offset(vma->vm_mm, address);
320	mtc0_tlbw_hazard();
321	tlb_probe();
322	tlb_probe_hazard();
323	p4dp = p4d_offset(pgdp, address);
324	pudp = pud_offset(p4dp, address);
325	pmdp = pmd_offset(pudp, address);
326	idx = read_c0_index();
327#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
328	/* this could be a huge page  */
329	if (pmd_huge(*pmdp)) {
330		unsigned long lo;
331		write_c0_pagemask(PM_HUGE_MASK);
332		ptep = (pte_t *)pmdp;
333		lo = pte_to_entrylo(pte_val(*ptep));
334		write_c0_entrylo0(lo);
335		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
336
337		mtc0_tlbw_hazard();
338		if (idx < 0)
339			tlb_write_random();
340		else
341			tlb_write_indexed();
342		tlbw_use_hazard();
343		write_c0_pagemask(PM_DEFAULT_MASK);
344	} else
345#endif
346	{
347		ptep = pte_offset_map(pmdp, address);
348
349#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
350#ifdef CONFIG_XPA
351		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
352		if (cpu_has_xpa)
353			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
354		ptep++;
355		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
356		if (cpu_has_xpa)
357			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
358#else
359		write_c0_entrylo0(ptep->pte_high);
360		ptep++;
361		write_c0_entrylo1(ptep->pte_high);
362#endif
363#else
364		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
365		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
366#endif
367		mtc0_tlbw_hazard();
368		if (idx < 0)
369			tlb_write_random();
370		else
371			tlb_write_indexed();
372	}
373	tlbw_use_hazard();
374	htw_start();
375	flush_micro_tlb_vm(vma);
376	local_irq_restore(flags);
377}
378
379void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
380		     unsigned long entryhi, unsigned long pagemask)
381{
382#ifdef CONFIG_XPA
383	panic("Broken for XPA kernels");
384#else
385	unsigned int old_mmid;
386	unsigned long flags;
387	unsigned long wired;
388	unsigned long old_pagemask;
389	unsigned long old_ctx;
390
391	local_irq_save(flags);
392	if (cpu_has_mmid) {
393		old_mmid = read_c0_memorymapid();
394		write_c0_memorymapid(MMID_KERNEL_WIRED);
395	}
396	/* Save old context and create impossible VPN2 value */
397	old_ctx = read_c0_entryhi();
398	htw_stop();
399	old_pagemask = read_c0_pagemask();
400	wired = num_wired_entries();
401	write_c0_wired(wired + 1);
402	write_c0_index(wired);
403	tlbw_use_hazard();	/* What is the hazard here? */
404	write_c0_pagemask(pagemask);
405	write_c0_entryhi(entryhi);
406	write_c0_entrylo0(entrylo0);
407	write_c0_entrylo1(entrylo1);
408	mtc0_tlbw_hazard();
409	tlb_write_indexed();
410	tlbw_use_hazard();
411
412	write_c0_entryhi(old_ctx);
413	if (cpu_has_mmid)
414		write_c0_memorymapid(old_mmid);
415	tlbw_use_hazard();	/* What is the hazard here? */
416	htw_start();
417	write_c0_pagemask(old_pagemask);
418	local_flush_tlb_all();
419	local_irq_restore(flags);
420#endif
421}
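/*
 * Editor's sketch, not part of the original file: one way a platform setup
 * routine might use add_wired_entry() above to pin an uncached MMIO window.
 * The physical and virtual addresses are invented for the illustration and
 * 4 KB base pages are assumed; the EntryLo layout used here (PFN from bit 6,
 * cache attribute in bits 5:3, then D, V, G) is the architected MIPS32/64
 * format, and PM_DEFAULT_MASK keeps the normal even/odd PAGE_SIZE pair.
 */
static void __maybe_unused example_pin_mmio_window(void)
{
	unsigned long pa = 0x1f000000UL;	/* assumed device base (physical) */
	unsigned long va = 0xc0000000UL;	/* assumed mapped virtual address */
	unsigned long attrs = (2 << 3) |	/* CCA 2: uncached                */
			      (1 << 2) |	/* D: writable                    */
			      (1 << 1) |	/* V: valid                       */
			      (1 << 0);		/* G: global, ASID ignored        */
	unsigned long lo0 = ((pa >> 12) << 6) | attrs;
	unsigned long lo1 = (((pa + PAGE_SIZE) >> 12) << 6) | attrs;

	/* one wired entry now maps va .. va + 2 * PAGE_SIZE - 1 */
	add_wired_entry(lo0, lo1, va, PM_DEFAULT_MASK);
}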
422
423#ifdef CONFIG_TRANSPARENT_HUGEPAGE
424
425int has_transparent_hugepage(void)
426{
427	static unsigned int mask = -1;
428
429	if (mask == -1) {	/* first call comes during __init */
430		unsigned long flags;
431
432		local_irq_save(flags);
433		write_c0_pagemask(PM_HUGE_MASK);
434		back_to_back_c0_hazard();
435		mask = read_c0_pagemask();
436		write_c0_pagemask(PM_DEFAULT_MASK);
437		local_irq_restore(flags);
438	}
439	return mask == PM_HUGE_MASK;
440}
441EXPORT_SYMBOL(has_transparent_hugepage);
442
443#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
444
445/*
446 * Used for loading TLB entries before trap_init() has started, when we
447 * don't actually want to add a wired entry which remains throughout the
448 * lifetime of the system
449 */
450
451int temp_tlb_entry;
452
453__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
454			       unsigned long entryhi, unsigned long pagemask)
455{
456	int ret = 0;
457	unsigned long flags;
458	unsigned long wired;
459	unsigned long old_pagemask;
460	unsigned long old_ctx;
461
462	local_irq_save(flags);
463	/* Save old context and create impossible VPN2 value */
464	htw_stop();
465	old_ctx = read_c0_entryhi();
466	old_pagemask = read_c0_pagemask();
467	wired = num_wired_entries();
468	if (--temp_tlb_entry < wired) {
469		printk(KERN_WARNING
470		       "No TLB space left for add_temporary_entry\n");
471		ret = -ENOSPC;
472		goto out;
473	}
474
475	write_c0_index(temp_tlb_entry);
476	write_c0_pagemask(pagemask);
477	write_c0_entryhi(entryhi);
478	write_c0_entrylo0(entrylo0);
479	write_c0_entrylo1(entrylo1);
480	mtc0_tlbw_hazard();
481	tlb_write_indexed();
482	tlbw_use_hazard();
483
484	write_c0_entryhi(old_ctx);
485	write_c0_pagemask(old_pagemask);
486	htw_start();
487out:
488	local_irq_restore(flags);
489	return ret;
490}
491
492static int ntlb;
493static int __init set_ntlb(char *str)
494{
495	get_option(&str, &ntlb);
496	return 1;
497}
498
499__setup("ntlb=", set_ntlb);
500
501/*
502 * Configure TLB (for init or after a CPU has been powered off).
503 */
504static void r4k_tlb_configure(void)
505{
506	/*
507	 * You should never change this register:
508	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
509	 *     the value in the c0_pagemask register.
510	 *   - The entire mm handling assumes the c0_pagemask register to
511	 *     be set to fixed-size pages.
512	 */
513	write_c0_pagemask(PM_DEFAULT_MASK);
514	back_to_back_c0_hazard();
515	if (read_c0_pagemask() != PM_DEFAULT_MASK)
516		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
517
518	write_c0_wired(0);
519	if (current_cpu_type() == CPU_R10000 ||
520	    current_cpu_type() == CPU_R12000 ||
521	    current_cpu_type() == CPU_R14000 ||
522	    current_cpu_type() == CPU_R16000)
523		write_c0_framemask(0);
524
525	if (cpu_has_rixi) {
526		/*
527		 * Enable the no read, no exec bits, and enable large physical
528		 * address.
529		 */
530#ifdef CONFIG_64BIT
531		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
532#else
533		set_c0_pagegrain(PG_RIE | PG_XIE);
534#endif
535	}
536
537	temp_tlb_entry = current_cpu_data.tlbsize - 1;
538
539	/* From this point on the ARC firmware is dead.	 */
540	local_flush_tlb_all();
541
542	/* Did I tell you that ARC SUCKS?  */
543}
544
545void tlb_init(void)
546{
547	r4k_tlb_configure();
548
549	if (ntlb) {
550		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
551			int wired = current_cpu_data.tlbsize - ntlb;
552			write_c0_wired(wired);
553			write_c0_index(wired-1);
554			printk("Restricting TLB to %d entries\n", ntlb);
555		} else
556			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
557	}
558
559	build_tlb_refill_handler();
560}
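/*
 * Editor's note, not part of the original file: worked example of the
 * "ntlb=" handling above, assuming a 64-entry TLB.  Booting with ntlb=16
 * sets c0_wired to 64 - 16 = 48, so entries 0..47 are never selected by
 * tlbwr and only the last 16 entries receive random refills, matching the
 * "Restricting TLB to 16 entries" message.
 */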
561
562static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
563			       void *v)
564{
565	switch (cmd) {
566	case CPU_PM_ENTER_FAILED:
567	case CPU_PM_EXIT:
568		r4k_tlb_configure();
569		break;
570	}
571
572	return NOTIFY_OK;
573}
574
575static struct notifier_block r4k_tlb_pm_notifier_block = {
576	.notifier_call = r4k_tlb_pm_notifier,
577};
578
579static int __init r4k_tlb_init_pm(void)
580{
581	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
582}
583arch_initcall(r4k_tlb_init_pm);
v3.5.6
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/init.h>
 12#include <linux/sched.h>
 13#include <linux/smp.h>
 14#include <linux/mm.h>
 15#include <linux/hugetlb.h>
 16
 17#include <asm/cpu.h>
 18#include <asm/bootinfo.h>
 19#include <asm/mmu_context.h>
 20#include <asm/pgtable.h>
 21#include <asm/tlbmisc.h>
 22
 23extern void build_tlb_refill_handler(void);
 24
 25/*
 26 * Make sure all entries differ.  If they're not different
 27 * MIPS32 will take revenge ...
 28 */
 29#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
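/*
 * Editor's note, not part of the original file: worked example, assuming
 * the usual 4 KB base pages (PAGE_SHIFT == 12) and the 32-bit CKSEG0 base
 * of 0x80000000: UNIQUE_ENTRYHI(5) == 0x80000000 + (5 << 13) == 0x8000a000.
 * Every index thus gets its own VPN2 in the unmapped kseg0 segment, so the
 * dummy entries can neither alias each other nor hit on a real lookup.
 */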
 30
  31/* Atomicity and interruptibility */
 32#ifdef CONFIG_MIPS_MT_SMTC
 33
 34#include <asm/smtc.h>
 35#include <asm/mipsmtregs.h>
 36
 37#define ENTER_CRITICAL(flags) \
 38	{ \
 39	unsigned int mvpflags; \
 40	local_irq_save(flags);\
 41	mvpflags = dvpe()
 42#define EXIT_CRITICAL(flags) \
 43	evpe(mvpflags); \
 44	local_irq_restore(flags); \
 45	}
 46#else
 47
 48#define ENTER_CRITICAL(flags) local_irq_save(flags)
 49#define EXIT_CRITICAL(flags) local_irq_restore(flags)
 50
 51#endif /* CONFIG_MIPS_MT_SMTC */
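/*
 * Editor's note, not part of the original file: on an SMTC core several
 * hardware thread contexts share one TLB, and local_irq_save() only masks
 * interrupts for the current thread.  ENTER_CRITICAL() therefore also
 * issues dvpe() to suspend the other virtual processing elements while the
 * shared TLB is rewritten, and EXIT_CRITICAL() restarts them with
 * evpe(mvpflags) before interrupts are restored.
 */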
 52
 53#if defined(CONFIG_CPU_LOONGSON2)
 54/*
 55 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
  56 * unfortunately, itlb is not totally transparent to software.
 57 */
 58#define FLUSH_ITLB write_c0_diag(4);
 59
 60#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
 61
 62#else
 63
 64#define FLUSH_ITLB
 65#define FLUSH_ITLB_VM(vma)
 66
 67#endif
 68
 69void local_flush_tlb_all(void)
 70{
 71	unsigned long flags;
 72	unsigned long old_ctx;
 73	int entry;
 74
 75	ENTER_CRITICAL(flags);
 76	/* Save old context and create impossible VPN2 value */
 77	old_ctx = read_c0_entryhi();
 78	write_c0_entrylo0(0);
 79	write_c0_entrylo1(0);
 80
 81	entry = read_c0_wired();
 82
 83	/* Blast 'em all away. */
 84	while (entry < current_cpu_data.tlbsize) {
 85		/* Make sure all entries differ. */
 86		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 87		write_c0_index(entry);
 88		mtc0_tlbw_hazard();
 89		tlb_write_indexed();
 90		entry++;
 91	}
 92	tlbw_use_hazard();
 93	write_c0_entryhi(old_ctx);
 94	FLUSH_ITLB;
 95	EXIT_CRITICAL(flags);
 96}
 97
 98/* All entries common to a mm share an asid.  To effectively flush
 99   these entries, we just bump the asid. */
100void local_flush_tlb_mm(struct mm_struct *mm)
101{
102	int cpu;
103
104	preempt_disable();
105
106	cpu = smp_processor_id();
107
108	if (cpu_context(cpu, mm) != 0) {
109		drop_mmu_context(mm, cpu);
110	}
111
112	preempt_enable();
113}
114
115void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
116	unsigned long end)
117{
118	struct mm_struct *mm = vma->vm_mm;
119	int cpu = smp_processor_id();
120
121	if (cpu_context(cpu, mm) != 0) {
122		unsigned long size, flags;
123		int huge = is_vm_hugetlb_page(vma);
124
125		ENTER_CRITICAL(flags);
126		if (huge) {
127			start = round_down(start, HPAGE_SIZE);
128			end = round_up(end, HPAGE_SIZE);
129			size = (end - start) >> HPAGE_SHIFT;
130		} else {
131			start = round_down(start, PAGE_SIZE << 1);
132			end = round_up(end, PAGE_SIZE << 1);
133			size = (end - start) >> (PAGE_SHIFT + 1);
134		}
135		if (size <= current_cpu_data.tlbsize/2) {
136			int oldpid = read_c0_entryhi();
137			int newpid = cpu_asid(cpu, mm);
138
139			while (start < end) {
140				int idx;
141
142				write_c0_entryhi(start | newpid);
143				if (huge)
144					start += HPAGE_SIZE;
145				else
146					start += (PAGE_SIZE << 1);
147				mtc0_tlbw_hazard();
148				tlb_probe();
149				tlb_probe_hazard();
150				idx = read_c0_index();
151				write_c0_entrylo0(0);
152				write_c0_entrylo1(0);
153				if (idx < 0)
154					continue;
155				/* Make sure all entries differ. */
156				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
157				mtc0_tlbw_hazard();
158				tlb_write_indexed();
159			}
160			tlbw_use_hazard();
161			write_c0_entryhi(oldpid);
162		} else {
163			drop_mmu_context(mm, cpu);
164		}
165		FLUSH_ITLB;
166		EXIT_CRITICAL(flags);
167	}
168}
169
170void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
171{
172	unsigned long size, flags;
173
174	ENTER_CRITICAL(flags);
175	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
176	size = (size + 1) >> 1;
177	if (size <= current_cpu_data.tlbsize / 2) {
178		int pid = read_c0_entryhi();
179
180		start &= (PAGE_MASK << 1);
181		end += ((PAGE_SIZE << 1) - 1);
182		end &= (PAGE_MASK << 1);
183
184		while (start < end) {
185			int idx;
186
187			write_c0_entryhi(start);
188			start += (PAGE_SIZE << 1);
189			mtc0_tlbw_hazard();
190			tlb_probe();
191			tlb_probe_hazard();
192			idx = read_c0_index();
193			write_c0_entrylo0(0);
194			write_c0_entrylo1(0);
195			if (idx < 0)
196				continue;
197			/* Make sure all entries differ. */
198			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
199			mtc0_tlbw_hazard();
200			tlb_write_indexed();
201		}
202		tlbw_use_hazard();
203		write_c0_entryhi(pid);
204	} else {
205		local_flush_tlb_all();
206	}
207	FLUSH_ITLB;
208	EXIT_CRITICAL(flags);
209}
210
211void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
212{
213	int cpu = smp_processor_id();
214
215	if (cpu_context(cpu, vma->vm_mm) != 0) {
216		unsigned long flags;
217		int oldpid, newpid, idx;
218
219		newpid = cpu_asid(cpu, vma->vm_mm);
220		page &= (PAGE_MASK << 1);
221		ENTER_CRITICAL(flags);
222		oldpid = read_c0_entryhi();
223		write_c0_entryhi(page | newpid);
224		mtc0_tlbw_hazard();
225		tlb_probe();
226		tlb_probe_hazard();
227		idx = read_c0_index();
228		write_c0_entrylo0(0);
229		write_c0_entrylo1(0);
230		if (idx < 0)
231			goto finish;
232		/* Make sure all entries differ. */
233		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
234		mtc0_tlbw_hazard();
235		tlb_write_indexed();
236		tlbw_use_hazard();
237
238	finish:
239		write_c0_entryhi(oldpid);
240		FLUSH_ITLB_VM(vma);
241		EXIT_CRITICAL(flags);
242	}
243}
244
245/*
246 * This one is only used for pages with the global bit set so we don't care
247 * much about the ASID.
248 */
249void local_flush_tlb_one(unsigned long page)
250{
251	unsigned long flags;
252	int oldpid, idx;
253
254	ENTER_CRITICAL(flags);
255	oldpid = read_c0_entryhi();
256	page &= (PAGE_MASK << 1);
257	write_c0_entryhi(page);
258	mtc0_tlbw_hazard();
259	tlb_probe();
260	tlb_probe_hazard();
261	idx = read_c0_index();
262	write_c0_entrylo0(0);
263	write_c0_entrylo1(0);
264	if (idx >= 0) {
265		/* Make sure all entries differ. */
266		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
267		mtc0_tlbw_hazard();
268		tlb_write_indexed();
269		tlbw_use_hazard();
270	}
271	write_c0_entryhi(oldpid);
272	FLUSH_ITLB;
273	EXIT_CRITICAL(flags);
274}
275
276/*
277 * We will need multiple versions of update_mmu_cache(), one that just
278 * updates the TLB with the new pte(s), and another which also checks
 279 * for the R4k "end of page" hardware bug and works around it.
280 */
281void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
282{
283	unsigned long flags;
284	pgd_t *pgdp;
285	pud_t *pudp;
286	pmd_t *pmdp;
287	pte_t *ptep;
288	int idx, pid;
289
290	/*
 291	 * Handle debugger faulting in for debuggee.
292	 */
293	if (current->active_mm != vma->vm_mm)
294		return;
295
296	ENTER_CRITICAL(flags);
297
298	pid = read_c0_entryhi() & ASID_MASK;
299	address &= (PAGE_MASK << 1);
300	write_c0_entryhi(address | pid);
301	pgdp = pgd_offset(vma->vm_mm, address);
302	mtc0_tlbw_hazard();
303	tlb_probe();
304	tlb_probe_hazard();
305	pudp = pud_offset(pgdp, address);
306	pmdp = pmd_offset(pudp, address);
307	idx = read_c0_index();
308#ifdef CONFIG_HUGETLB_PAGE
309	/* this could be a huge page  */
310	if (pmd_huge(*pmdp)) {
311		unsigned long lo;
312		write_c0_pagemask(PM_HUGE_MASK);
313		ptep = (pte_t *)pmdp;
314		lo = pte_to_entrylo(pte_val(*ptep));
315		write_c0_entrylo0(lo);
316		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
317
318		mtc0_tlbw_hazard();
319		if (idx < 0)
320			tlb_write_random();
321		else
322			tlb_write_indexed();
323		write_c0_pagemask(PM_DEFAULT_MASK);
324	} else
325#endif
326	{
327		ptep = pte_offset_map(pmdp, address);
328
329#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
330		write_c0_entrylo0(ptep->pte_high);
331		ptep++;
332		write_c0_entrylo1(ptep->pte_high);
333#else
334		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
335		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
336#endif
337		mtc0_tlbw_hazard();
338		if (idx < 0)
339			tlb_write_random();
340		else
341			tlb_write_indexed();
342	}
343	tlbw_use_hazard();
344	FLUSH_ITLB_VM(vma);
345	EXIT_CRITICAL(flags);
346}
347
348void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
349		     unsigned long entryhi, unsigned long pagemask)
350{
351	unsigned long flags;
352	unsigned long wired;
353	unsigned long old_pagemask;
354	unsigned long old_ctx;
355
356	ENTER_CRITICAL(flags);
357	/* Save old context and create impossible VPN2 value */
358	old_ctx = read_c0_entryhi();
359	old_pagemask = read_c0_pagemask();
360	wired = read_c0_wired();
361	write_c0_wired(wired + 1);
362	write_c0_index(wired);
363	tlbw_use_hazard();	/* What is the hazard here? */
364	write_c0_pagemask(pagemask);
365	write_c0_entryhi(entryhi);
366	write_c0_entrylo0(entrylo0);
367	write_c0_entrylo1(entrylo1);
368	mtc0_tlbw_hazard();
369	tlb_write_indexed();
370	tlbw_use_hazard();
371
372	write_c0_entryhi(old_ctx);
373	tlbw_use_hazard();	/* What is the hazard here? */
374	write_c0_pagemask(old_pagemask);
375	local_flush_tlb_all();
376	EXIT_CRITICAL(flags);
377}
378
379static int __cpuinitdata ntlb;
380static int __init set_ntlb(char *str)
381{
382	get_option(&str, &ntlb);
383	return 1;
384}
385
386__setup("ntlb=", set_ntlb);
387
388void __cpuinit tlb_init(void)
389{
390	/*
391	 * You should never change this register:
392	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
393	 *     the value in the c0_pagemask register.
394	 *   - The entire mm handling assumes the c0_pagemask register to
395	 *     be set to fixed-size pages.
396	 */
397	write_c0_pagemask(PM_DEFAULT_MASK);
398	write_c0_wired(0);
399	if (current_cpu_type() == CPU_R10000 ||
400	    current_cpu_type() == CPU_R12000 ||
401	    current_cpu_type() == CPU_R14000)
402		write_c0_framemask(0);
403
404	if (kernel_uses_smartmips_rixi) {
405		/*
 406		 * Enable the no read, no exec bits, and enable large physical
407		 * address.
408		 */
409		u32 pg = PG_RIE | PG_XIE;
410#ifdef CONFIG_64BIT
411		pg |= PG_ELPA;
412#endif
413		write_c0_pagegrain(pg);
414	}
415
416        /* From this point on the ARC firmware is dead.  */
417	local_flush_tlb_all();
418
419	/* Did I tell you that ARC SUCKS?  */
420
421	if (ntlb) {
422		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
423			int wired = current_cpu_data.tlbsize - ntlb;
424			write_c0_wired(wired);
425			write_c0_index(wired-1);
426			printk("Restricting TLB to %d entries\n", ntlb);
427		} else
428			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
429	}
430
431	build_tlb_refill_handler();
432}