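arch/mips/mm/tlb-r4k.c, the MIPS R4000-style TLB management code, shown at two kernel versions: v5.4 followed by v5.9.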
v5.4
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/cpu_pm.h>
 12#include <linux/init.h>
 13#include <linux/sched.h>
 14#include <linux/smp.h>
 15#include <linux/mm.h>
 16#include <linux/hugetlb.h>
 17#include <linux/export.h>
 18
 19#include <asm/cpu.h>
 20#include <asm/cpu-type.h>
 21#include <asm/bootinfo.h>
 22#include <asm/hazards.h>
 23#include <asm/mmu_context.h>
 24#include <asm/pgtable.h>
 25#include <asm/tlb.h>
 26#include <asm/tlbmisc.h>
 27
 28extern void build_tlb_refill_handler(void);
 29
 30/*
 31 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 32 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 33 * itlb/dtlb are not totally transparent to software.
 34 */
 35static inline void flush_micro_tlb(void)
 36{
 37	switch (current_cpu_type()) {
 38	case CPU_LOONGSON2:
 39		write_c0_diag(LOONGSON_DIAG_ITLB);
 40		break;
 41	case CPU_LOONGSON3:
 42		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
 43		break;
 44	default:
 45		break;
 46	}
 47}
 48
 49static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 50{
 51	if (vma->vm_flags & VM_EXEC)
 52		flush_micro_tlb();
 53}
 54
 55void local_flush_tlb_all(void)
 56{
 57	unsigned long flags;
 58	unsigned long old_ctx;
 59	int entry, ftlbhighset;
 60
 61	local_irq_save(flags);
 62	/* Save old context and create impossible VPN2 value */
 63	old_ctx = read_c0_entryhi();
 64	htw_stop();
 65	write_c0_entrylo0(0);
 66	write_c0_entrylo1(0);
 67
 68	entry = num_wired_entries();
 69
 70	/*
 71	 * Blast 'em all away.
 72	 * If there are any wired entries, fall back to iterating
 73	 */
 74	if (cpu_has_tlbinv && !entry) {
 75		if (current_cpu_data.tlbsizevtlb) {
 76			write_c0_index(0);
 77			mtc0_tlbw_hazard();
 78			tlbinvf();  /* invalidate VTLB */
 79		}
 80		ftlbhighset = current_cpu_data.tlbsizevtlb +
 81			current_cpu_data.tlbsizeftlbsets;
 82		for (entry = current_cpu_data.tlbsizevtlb;
 83		     entry < ftlbhighset;
 84		     entry++) {
 85			write_c0_index(entry);
 86			mtc0_tlbw_hazard();
 87			tlbinvf();  /* invalidate one FTLB set */
 88		}
 89	} else {
 90		while (entry < current_cpu_data.tlbsize) {
 91			/* Make sure all entries differ. */
 92			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 93			write_c0_index(entry);
 94			mtc0_tlbw_hazard();
 95			tlb_write_indexed();
 96			entry++;
 97		}
 98	}
 99	tlbw_use_hazard();
100	write_c0_entryhi(old_ctx);
101	htw_start();
102	flush_micro_tlb();
103	local_irq_restore(flags);
104}
105EXPORT_SYMBOL(local_flush_tlb_all);
106
107void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
108	unsigned long end)
109{
110	struct mm_struct *mm = vma->vm_mm;
111	int cpu = smp_processor_id();
112
113	if (cpu_context(cpu, mm) != 0) {
114		unsigned long size, flags;
115
116		local_irq_save(flags);
117		start = round_down(start, PAGE_SIZE << 1);
118		end = round_up(end, PAGE_SIZE << 1);
119		size = (end - start) >> (PAGE_SHIFT + 1);
120		if (size <= (current_cpu_data.tlbsizeftlbsets ?
121			     current_cpu_data.tlbsize / 8 :
122			     current_cpu_data.tlbsize / 2)) {
123			unsigned long old_entryhi, uninitialized_var(old_mmid);
124			int newpid = cpu_asid(cpu, mm);
125
126			old_entryhi = read_c0_entryhi();
127			if (cpu_has_mmid) {
128				old_mmid = read_c0_memorymapid();
129				write_c0_memorymapid(newpid);
130			}
131
132			htw_stop();
133			while (start < end) {
134				int idx;
135
136				if (cpu_has_mmid)
137					write_c0_entryhi(start);
138				else
139					write_c0_entryhi(start | newpid);
140				start += (PAGE_SIZE << 1);
141				mtc0_tlbw_hazard();
142				tlb_probe();
143				tlb_probe_hazard();
144				idx = read_c0_index();
145				write_c0_entrylo0(0);
146				write_c0_entrylo1(0);
147				if (idx < 0)
148					continue;
149				/* Make sure all entries differ. */
150				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
151				mtc0_tlbw_hazard();
152				tlb_write_indexed();
153			}
154			tlbw_use_hazard();
155			write_c0_entryhi(old_entryhi);
156			if (cpu_has_mmid)
157				write_c0_memorymapid(old_mmid);
158			htw_start();
159		} else {
160			drop_mmu_context(mm);
161		}
162		flush_micro_tlb();
163		local_irq_restore(flags);
164	}
165}
166
167void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
168{
169	unsigned long size, flags;
170
171	local_irq_save(flags);
172	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
173	size = (size + 1) >> 1;
174	if (size <= (current_cpu_data.tlbsizeftlbsets ?
175		     current_cpu_data.tlbsize / 8 :
176		     current_cpu_data.tlbsize / 2)) {
177		int pid = read_c0_entryhi();
178
179		start &= (PAGE_MASK << 1);
180		end += ((PAGE_SIZE << 1) - 1);
181		end &= (PAGE_MASK << 1);
182		htw_stop();
183
184		while (start < end) {
185			int idx;
186
187			write_c0_entryhi(start);
188			start += (PAGE_SIZE << 1);
189			mtc0_tlbw_hazard();
190			tlb_probe();
191			tlb_probe_hazard();
192			idx = read_c0_index();
193			write_c0_entrylo0(0);
194			write_c0_entrylo1(0);
195			if (idx < 0)
196				continue;
197			/* Make sure all entries differ. */
198			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
199			mtc0_tlbw_hazard();
200			tlb_write_indexed();
201		}
202		tlbw_use_hazard();
203		write_c0_entryhi(pid);
204		htw_start();
205	} else {
206		local_flush_tlb_all();
207	}
208	flush_micro_tlb();
209	local_irq_restore(flags);
210}
211
212void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
213{
214	int cpu = smp_processor_id();
215
216	if (cpu_context(cpu, vma->vm_mm) != 0) {
217		unsigned long uninitialized_var(old_mmid);
218		unsigned long flags, old_entryhi;
219		int idx;
220
221		page &= (PAGE_MASK << 1);
222		local_irq_save(flags);
223		old_entryhi = read_c0_entryhi();
224		htw_stop();
225		if (cpu_has_mmid) {
226			old_mmid = read_c0_memorymapid();
227			write_c0_entryhi(page);
228			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
229		} else {
230			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
231		}
232		mtc0_tlbw_hazard();
233		tlb_probe();
234		tlb_probe_hazard();
235		idx = read_c0_index();
236		write_c0_entrylo0(0);
237		write_c0_entrylo1(0);
238		if (idx < 0)
239			goto finish;
240		/* Make sure all entries differ. */
241		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
242		mtc0_tlbw_hazard();
243		tlb_write_indexed();
244		tlbw_use_hazard();
245
246	finish:
247		write_c0_entryhi(old_entryhi);
248		if (cpu_has_mmid)
249			write_c0_memorymapid(old_mmid);
250		htw_start();
251		flush_micro_tlb_vm(vma);
252		local_irq_restore(flags);
253	}
254}
255
256/*
257 * This one is only used for pages with the global bit set so we don't care
258 * much about the ASID.
259 */
260void local_flush_tlb_one(unsigned long page)
261{
262	unsigned long flags;
263	int oldpid, idx;
264
265	local_irq_save(flags);
266	oldpid = read_c0_entryhi();
267	htw_stop();
268	page &= (PAGE_MASK << 1);
269	write_c0_entryhi(page);
270	mtc0_tlbw_hazard();
271	tlb_probe();
272	tlb_probe_hazard();
273	idx = read_c0_index();
274	write_c0_entrylo0(0);
275	write_c0_entrylo1(0);
276	if (idx >= 0) {
277		/* Make sure all entries differ. */
278		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
279		mtc0_tlbw_hazard();
280		tlb_write_indexed();
281		tlbw_use_hazard();
282	}
283	write_c0_entryhi(oldpid);
284	htw_start();
285	flush_micro_tlb();
286	local_irq_restore(flags);
287}
288
289/*
290 * We will need multiple versions of update_mmu_cache(), one that just
291 * updates the TLB with the new pte(s), and another which also checks
292 * for the R4k "end of page" hardware bug and does what is needed.
293 */
294void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
295{
296	unsigned long flags;
297	pgd_t *pgdp;
298	pud_t *pudp;
299	pmd_t *pmdp;
300	pte_t *ptep;
301	int idx, pid;
302
303	/*
304	 * Handle debugger faulting in for debuggee.
305	 */
306	if (current->active_mm != vma->vm_mm)
307		return;
308
309	local_irq_save(flags);
310
311	htw_stop();
312	address &= (PAGE_MASK << 1);
313	if (cpu_has_mmid) {
314		write_c0_entryhi(address);
315	} else {
316		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
317		write_c0_entryhi(address | pid);
318	}
319	pgdp = pgd_offset(vma->vm_mm, address);
320	mtc0_tlbw_hazard();
321	tlb_probe();
322	tlb_probe_hazard();
323	pudp = pud_offset(pgdp, address);
324	pmdp = pmd_offset(pudp, address);
325	idx = read_c0_index();
326#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
327	/* this could be a huge page  */
328	if (pmd_huge(*pmdp)) {
329		unsigned long lo;
330		write_c0_pagemask(PM_HUGE_MASK);
331		ptep = (pte_t *)pmdp;
332		lo = pte_to_entrylo(pte_val(*ptep));
333		write_c0_entrylo0(lo);
334		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
335
336		mtc0_tlbw_hazard();
337		if (idx < 0)
338			tlb_write_random();
339		else
340			tlb_write_indexed();
341		tlbw_use_hazard();
342		write_c0_pagemask(PM_DEFAULT_MASK);
343	} else
344#endif
345	{
346		ptep = pte_offset_map(pmdp, address);
347
348#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
349#ifdef CONFIG_XPA
350		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
351		if (cpu_has_xpa)
352			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
353		ptep++;
354		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
355		if (cpu_has_xpa)
356			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
357#else
358		write_c0_entrylo0(ptep->pte_high);
359		ptep++;
360		write_c0_entrylo1(ptep->pte_high);
361#endif
362#else
363		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
364		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
365#endif
366		mtc0_tlbw_hazard();
367		if (idx < 0)
368			tlb_write_random();
369		else
370			tlb_write_indexed();
371	}
372	tlbw_use_hazard();
373	htw_start();
374	flush_micro_tlb_vm(vma);
375	local_irq_restore(flags);
376}
377
378void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
379		     unsigned long entryhi, unsigned long pagemask)
380{
381#ifdef CONFIG_XPA
382	panic("Broken for XPA kernels");
383#else
384	unsigned int uninitialized_var(old_mmid);
385	unsigned long flags;
386	unsigned long wired;
387	unsigned long old_pagemask;
388	unsigned long old_ctx;
389
390	local_irq_save(flags);
391	if (cpu_has_mmid) {
392		old_mmid = read_c0_memorymapid();
393		write_c0_memorymapid(MMID_KERNEL_WIRED);
394	}
395	/* Save old context and create impossible VPN2 value */
396	old_ctx = read_c0_entryhi();
397	htw_stop();
398	old_pagemask = read_c0_pagemask();
399	wired = num_wired_entries();
400	write_c0_wired(wired + 1);
401	write_c0_index(wired);
402	tlbw_use_hazard();	/* What is the hazard here? */
403	write_c0_pagemask(pagemask);
404	write_c0_entryhi(entryhi);
405	write_c0_entrylo0(entrylo0);
406	write_c0_entrylo1(entrylo1);
407	mtc0_tlbw_hazard();
408	tlb_write_indexed();
409	tlbw_use_hazard();
410
411	write_c0_entryhi(old_ctx);
412	if (cpu_has_mmid)
413		write_c0_memorymapid(old_mmid);
414	tlbw_use_hazard();	/* What is the hazard here? */
415	htw_start();
416	write_c0_pagemask(old_pagemask);
417	local_flush_tlb_all();
418	local_irq_restore(flags);
419#endif
420}
421
422#ifdef CONFIG_TRANSPARENT_HUGEPAGE
423
424int has_transparent_hugepage(void)
425{
426	static unsigned int mask = -1;
427
428	if (mask == -1) {	/* first call comes during __init */
429		unsigned long flags;
430
431		local_irq_save(flags);
432		write_c0_pagemask(PM_HUGE_MASK);
433		back_to_back_c0_hazard();
434		mask = read_c0_pagemask();
435		write_c0_pagemask(PM_DEFAULT_MASK);
436		local_irq_restore(flags);
437	}
438	return mask == PM_HUGE_MASK;
439}
440
441#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
442
443/*
444 * Used for loading TLB entries before trap_init() has started, when we
445 * don't actually want to add a wired entry which remains throughout the
446 * lifetime of the system
447 */
448
449int temp_tlb_entry;
450
451__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
452			       unsigned long entryhi, unsigned long pagemask)
453{
454	int ret = 0;
455	unsigned long flags;
456	unsigned long wired;
457	unsigned long old_pagemask;
458	unsigned long old_ctx;
459
460	local_irq_save(flags);
461	/* Save old context and create impossible VPN2 value */
462	htw_stop();
463	old_ctx = read_c0_entryhi();
464	old_pagemask = read_c0_pagemask();
465	wired = num_wired_entries();
466	if (--temp_tlb_entry < wired) {
467		printk(KERN_WARNING
468		       "No TLB space left for add_temporary_entry\n");
469		ret = -ENOSPC;
470		goto out;
471	}
472
473	write_c0_index(temp_tlb_entry);
474	write_c0_pagemask(pagemask);
475	write_c0_entryhi(entryhi);
476	write_c0_entrylo0(entrylo0);
477	write_c0_entrylo1(entrylo1);
478	mtc0_tlbw_hazard();
479	tlb_write_indexed();
480	tlbw_use_hazard();
481
482	write_c0_entryhi(old_ctx);
483	write_c0_pagemask(old_pagemask);
484	htw_start();
485out:
486	local_irq_restore(flags);
487	return ret;
488}
489
490static int ntlb;
491static int __init set_ntlb(char *str)
492{
493	get_option(&str, &ntlb);
494	return 1;
495}
496
497__setup("ntlb=", set_ntlb);
498
499/*
500 * Configure TLB (for init or after a CPU has been powered off).
501 */
502static void r4k_tlb_configure(void)
503{
504	/*
505	 * You should never change this register:
506	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
507	 *     the value in the c0_pagemask register.
508	 *   - The entire mm handling assumes the c0_pagemask register to
509	 *     be set to fixed-size pages.
510	 */
511	write_c0_pagemask(PM_DEFAULT_MASK);
512	back_to_back_c0_hazard();
513	if (read_c0_pagemask() != PM_DEFAULT_MASK)
514		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
515
516	write_c0_wired(0);
517	if (current_cpu_type() == CPU_R10000 ||
518	    current_cpu_type() == CPU_R12000 ||
519	    current_cpu_type() == CPU_R14000 ||
520	    current_cpu_type() == CPU_R16000)
521		write_c0_framemask(0);
522
523	if (cpu_has_rixi) {
524		/*
525		 * Enable the no read, no exec bits, and enable large physical
526		 * address.
527		 */
528#ifdef CONFIG_64BIT
529		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
530#else
531		set_c0_pagegrain(PG_RIE | PG_XIE);
532#endif
533	}
534
535	temp_tlb_entry = current_cpu_data.tlbsize - 1;
536
537	/* From this point on the ARC firmware is dead.	 */
538	local_flush_tlb_all();
539
540	/* Did I tell you that ARC SUCKS?  */
541}
542
543void tlb_init(void)
544{
545	r4k_tlb_configure();
546
547	if (ntlb) {
548		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
549			int wired = current_cpu_data.tlbsize - ntlb;
550			write_c0_wired(wired);
551			write_c0_index(wired-1);
552			printk("Restricting TLB to %d entries\n", ntlb);
553		} else
554			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
555	}
556
557	build_tlb_refill_handler();
558}
559
560static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
561			       void *v)
562{
563	switch (cmd) {
564	case CPU_PM_ENTER_FAILED:
565	case CPU_PM_EXIT:
566		r4k_tlb_configure();
567		break;
568	}
569
570	return NOTIFY_OK;
571}
572
573static struct notifier_block r4k_tlb_pm_notifier_block = {
574	.notifier_call = r4k_tlb_pm_notifier,
575};
576
577static int __init r4k_tlb_init_pm(void)
578{
579	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
580}
581arch_initcall(r4k_tlb_init_pm);
v5.9
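Relative to the v5.4 copy above, this v5.9 version of the same file drops the <asm/pgtable.h> include, renames the Loongson CPU type constants (CPU_LOONGSON2 becomes CPU_LOONGSON2EF, CPU_LOONGSON3 becomes CPU_LOONGSON64), walks the new p4d_t page-table level in __update_tlb(), and no longer wraps the saved MMID values in uninitialized_var().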
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  7 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
  8 * Carsten Langgaard, carstenl@mips.com
  9 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 10 */
 11#include <linux/cpu_pm.h>
 12#include <linux/init.h>
 13#include <linux/sched.h>
 14#include <linux/smp.h>
 15#include <linux/mm.h>
 16#include <linux/hugetlb.h>
 17#include <linux/export.h>
 18
 19#include <asm/cpu.h>
 20#include <asm/cpu-type.h>
 21#include <asm/bootinfo.h>
 22#include <asm/hazards.h>
 23#include <asm/mmu_context.h>
 24#include <asm/tlb.h>
 25#include <asm/tlbmisc.h>
 26
 27extern void build_tlb_refill_handler(void);
 28
 29/*
 30 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 31 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 32 * itlb/dtlb are not totally transparent to software.
 33 */
 34static inline void flush_micro_tlb(void)
 35{
 36	switch (current_cpu_type()) {
 37	case CPU_LOONGSON2EF:
 38		write_c0_diag(LOONGSON_DIAG_ITLB);
 39		break;
 40	case CPU_LOONGSON64:
 41		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
 42		break;
 43	default:
 44		break;
 45	}
 46}
 47
 48static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
 49{
 50	if (vma->vm_flags & VM_EXEC)
 51		flush_micro_tlb();
 52}
 53
 54void local_flush_tlb_all(void)
 55{
 56	unsigned long flags;
 57	unsigned long old_ctx;
 58	int entry, ftlbhighset;
 59
 60	local_irq_save(flags);
 61	/* Save old context and create impossible VPN2 value */
 62	old_ctx = read_c0_entryhi();
 63	htw_stop();
 64	write_c0_entrylo0(0);
 65	write_c0_entrylo1(0);
 66
 67	entry = num_wired_entries();
 68
 69	/*
 70	 * Blast 'em all away.
 71	 * If there are any wired entries, fall back to iterating
 72	 */
 73	if (cpu_has_tlbinv && !entry) {
 74		if (current_cpu_data.tlbsizevtlb) {
 75			write_c0_index(0);
 76			mtc0_tlbw_hazard();
 77			tlbinvf();  /* invalidate VTLB */
 78		}
 79		ftlbhighset = current_cpu_data.tlbsizevtlb +
 80			current_cpu_data.tlbsizeftlbsets;
 81		for (entry = current_cpu_data.tlbsizevtlb;
 82		     entry < ftlbhighset;
 83		     entry++) {
 84			write_c0_index(entry);
 85			mtc0_tlbw_hazard();
 86			tlbinvf();  /* invalidate one FTLB set */
 87		}
 88	} else {
 89		while (entry < current_cpu_data.tlbsize) {
 90			/* Make sure all entries differ. */
 91			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 92			write_c0_index(entry);
 93			mtc0_tlbw_hazard();
 94			tlb_write_indexed();
 95			entry++;
 96		}
 97	}
 98	tlbw_use_hazard();
 99	write_c0_entryhi(old_ctx);
100	htw_start();
101	flush_micro_tlb();
102	local_irq_restore(flags);
103}
104EXPORT_SYMBOL(local_flush_tlb_all);
105
106void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
107	unsigned long end)
108{
109	struct mm_struct *mm = vma->vm_mm;
110	int cpu = smp_processor_id();
111
112	if (cpu_context(cpu, mm) != 0) {
113		unsigned long size, flags;
114
115		local_irq_save(flags);
116		start = round_down(start, PAGE_SIZE << 1);
117		end = round_up(end, PAGE_SIZE << 1);
118		size = (end - start) >> (PAGE_SHIFT + 1);
119		if (size <= (current_cpu_data.tlbsizeftlbsets ?
120			     current_cpu_data.tlbsize / 8 :
121			     current_cpu_data.tlbsize / 2)) {
122			unsigned long old_entryhi, old_mmid;
123			int newpid = cpu_asid(cpu, mm);
124
125			old_entryhi = read_c0_entryhi();
126			if (cpu_has_mmid) {
127				old_mmid = read_c0_memorymapid();
128				write_c0_memorymapid(newpid);
129			}
130
131			htw_stop();
132			while (start < end) {
133				int idx;
134
135				if (cpu_has_mmid)
136					write_c0_entryhi(start);
137				else
138					write_c0_entryhi(start | newpid);
139				start += (PAGE_SIZE << 1);
140				mtc0_tlbw_hazard();
141				tlb_probe();
142				tlb_probe_hazard();
143				idx = read_c0_index();
144				write_c0_entrylo0(0);
145				write_c0_entrylo1(0);
146				if (idx < 0)
147					continue;
148				/* Make sure all entries differ. */
149				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
150				mtc0_tlbw_hazard();
151				tlb_write_indexed();
152			}
153			tlbw_use_hazard();
154			write_c0_entryhi(old_entryhi);
155			if (cpu_has_mmid)
156				write_c0_memorymapid(old_mmid);
157			htw_start();
158		} else {
159			drop_mmu_context(mm);
160		}
161		flush_micro_tlb();
162		local_irq_restore(flags);
163	}
164}
165
166void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
167{
168	unsigned long size, flags;
169
170	local_irq_save(flags);
171	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
172	size = (size + 1) >> 1;
173	if (size <= (current_cpu_data.tlbsizeftlbsets ?
174		     current_cpu_data.tlbsize / 8 :
175		     current_cpu_data.tlbsize / 2)) {
176		int pid = read_c0_entryhi();
177
178		start &= (PAGE_MASK << 1);
179		end += ((PAGE_SIZE << 1) - 1);
180		end &= (PAGE_MASK << 1);
181		htw_stop();
182
183		while (start < end) {
184			int idx;
185
186			write_c0_entryhi(start);
187			start += (PAGE_SIZE << 1);
188			mtc0_tlbw_hazard();
189			tlb_probe();
190			tlb_probe_hazard();
191			idx = read_c0_index();
192			write_c0_entrylo0(0);
193			write_c0_entrylo1(0);
194			if (idx < 0)
195				continue;
196			/* Make sure all entries differ. */
197			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
198			mtc0_tlbw_hazard();
199			tlb_write_indexed();
200		}
201		tlbw_use_hazard();
202		write_c0_entryhi(pid);
203		htw_start();
204	} else {
205		local_flush_tlb_all();
206	}
207	flush_micro_tlb();
208	local_irq_restore(flags);
209}
210
211void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
212{
213	int cpu = smp_processor_id();
214
215	if (cpu_context(cpu, vma->vm_mm) != 0) {
216		unsigned long old_mmid;
217		unsigned long flags, old_entryhi;
218		int idx;
219
220		page &= (PAGE_MASK << 1);
221		local_irq_save(flags);
222		old_entryhi = read_c0_entryhi();
223		htw_stop();
224		if (cpu_has_mmid) {
225			old_mmid = read_c0_memorymapid();
226			write_c0_entryhi(page);
227			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
228		} else {
229			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
230		}
231		mtc0_tlbw_hazard();
232		tlb_probe();
233		tlb_probe_hazard();
234		idx = read_c0_index();
235		write_c0_entrylo0(0);
236		write_c0_entrylo1(0);
237		if (idx < 0)
238			goto finish;
239		/* Make sure all entries differ. */
240		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
241		mtc0_tlbw_hazard();
242		tlb_write_indexed();
243		tlbw_use_hazard();
244
245	finish:
246		write_c0_entryhi(old_entryhi);
247		if (cpu_has_mmid)
248			write_c0_memorymapid(old_mmid);
249		htw_start();
250		flush_micro_tlb_vm(vma);
251		local_irq_restore(flags);
252	}
253}
254
255/*
256 * This one is only used for pages with the global bit set so we don't care
257 * much about the ASID.
258 */
259void local_flush_tlb_one(unsigned long page)
260{
261	unsigned long flags;
262	int oldpid, idx;
263
264	local_irq_save(flags);
265	oldpid = read_c0_entryhi();
266	htw_stop();
267	page &= (PAGE_MASK << 1);
268	write_c0_entryhi(page);
269	mtc0_tlbw_hazard();
270	tlb_probe();
271	tlb_probe_hazard();
272	idx = read_c0_index();
273	write_c0_entrylo0(0);
274	write_c0_entrylo1(0);
275	if (idx >= 0) {
276		/* Make sure all entries differ. */
277		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
278		mtc0_tlbw_hazard();
279		tlb_write_indexed();
280		tlbw_use_hazard();
281	}
282	write_c0_entryhi(oldpid);
283	htw_start();
284	flush_micro_tlb();
285	local_irq_restore(flags);
286}
287
288/*
289 * We will need multiple versions of update_mmu_cache(), one that just
290 * updates the TLB with the new pte(s), and another which also checks
291 * for the R4k "end of page" hardware bug and does what is needed.
292 */
293void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
294{
295	unsigned long flags;
296	pgd_t *pgdp;
297	p4d_t *p4dp;
298	pud_t *pudp;
299	pmd_t *pmdp;
300	pte_t *ptep;
301	int idx, pid;
302
303	/*
304	 * Handle debugger faulting in for debuggee.
305	 */
306	if (current->active_mm != vma->vm_mm)
307		return;
308
309	local_irq_save(flags);
310
311	htw_stop();
312	address &= (PAGE_MASK << 1);
313	if (cpu_has_mmid) {
314		write_c0_entryhi(address);
315	} else {
316		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
317		write_c0_entryhi(address | pid);
318	}
319	pgdp = pgd_offset(vma->vm_mm, address);
320	mtc0_tlbw_hazard();
321	tlb_probe();
322	tlb_probe_hazard();
323	p4dp = p4d_offset(pgdp, address);
324	pudp = pud_offset(p4dp, address);
325	pmdp = pmd_offset(pudp, address);
326	idx = read_c0_index();
327#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
328	/* this could be a huge page  */
329	if (pmd_huge(*pmdp)) {
330		unsigned long lo;
331		write_c0_pagemask(PM_HUGE_MASK);
332		ptep = (pte_t *)pmdp;
333		lo = pte_to_entrylo(pte_val(*ptep));
334		write_c0_entrylo0(lo);
335		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
336
337		mtc0_tlbw_hazard();
338		if (idx < 0)
339			tlb_write_random();
340		else
341			tlb_write_indexed();
342		tlbw_use_hazard();
343		write_c0_pagemask(PM_DEFAULT_MASK);
344	} else
345#endif
346	{
347		ptep = pte_offset_map(pmdp, address);
348
349#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
350#ifdef CONFIG_XPA
351		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
352		if (cpu_has_xpa)
353			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
354		ptep++;
355		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
356		if (cpu_has_xpa)
357			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
358#else
359		write_c0_entrylo0(ptep->pte_high);
360		ptep++;
361		write_c0_entrylo1(ptep->pte_high);
362#endif
363#else
364		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
365		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
366#endif
367		mtc0_tlbw_hazard();
368		if (idx < 0)
369			tlb_write_random();
370		else
371			tlb_write_indexed();
372	}
373	tlbw_use_hazard();
374	htw_start();
375	flush_micro_tlb_vm(vma);
376	local_irq_restore(flags);
377}
378
379void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
380		     unsigned long entryhi, unsigned long pagemask)
381{
382#ifdef CONFIG_XPA
383	panic("Broken for XPA kernels");
384#else
385	unsigned int old_mmid;
386	unsigned long flags;
387	unsigned long wired;
388	unsigned long old_pagemask;
389	unsigned long old_ctx;
390
391	local_irq_save(flags);
392	if (cpu_has_mmid) {
393		old_mmid = read_c0_memorymapid();
394		write_c0_memorymapid(MMID_KERNEL_WIRED);
395	}
396	/* Save old context and create impossible VPN2 value */
397	old_ctx = read_c0_entryhi();
398	htw_stop();
399	old_pagemask = read_c0_pagemask();
400	wired = num_wired_entries();
401	write_c0_wired(wired + 1);
402	write_c0_index(wired);
403	tlbw_use_hazard();	/* What is the hazard here? */
404	write_c0_pagemask(pagemask);
405	write_c0_entryhi(entryhi);
406	write_c0_entrylo0(entrylo0);
407	write_c0_entrylo1(entrylo1);
408	mtc0_tlbw_hazard();
409	tlb_write_indexed();
410	tlbw_use_hazard();
411
412	write_c0_entryhi(old_ctx);
413	if (cpu_has_mmid)
414		write_c0_memorymapid(old_mmid);
415	tlbw_use_hazard();	/* What is the hazard here? */
416	htw_start();
417	write_c0_pagemask(old_pagemask);
418	local_flush_tlb_all();
419	local_irq_restore(flags);
420#endif
421}
422
423#ifdef CONFIG_TRANSPARENT_HUGEPAGE
424
425int has_transparent_hugepage(void)
426{
427	static unsigned int mask = -1;
428
429	if (mask == -1) {	/* first call comes during __init */
430		unsigned long flags;
431
432		local_irq_save(flags);
433		write_c0_pagemask(PM_HUGE_MASK);
434		back_to_back_c0_hazard();
435		mask = read_c0_pagemask();
436		write_c0_pagemask(PM_DEFAULT_MASK);
437		local_irq_restore(flags);
438	}
439	return mask == PM_HUGE_MASK;
440}
441
442#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
443
444/*
445 * Used for loading TLB entries before trap_init() has started, when we
446 * don't actually want to add a wired entry which remains throughout the
447 * lifetime of the system
448 */
449
450int temp_tlb_entry;
451
452__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
453			       unsigned long entryhi, unsigned long pagemask)
454{
455	int ret = 0;
456	unsigned long flags;
457	unsigned long wired;
458	unsigned long old_pagemask;
459	unsigned long old_ctx;
460
461	local_irq_save(flags);
462	/* Save old context and create impossible VPN2 value */
463	htw_stop();
464	old_ctx = read_c0_entryhi();
465	old_pagemask = read_c0_pagemask();
466	wired = num_wired_entries();
467	if (--temp_tlb_entry < wired) {
468		printk(KERN_WARNING
469		       "No TLB space left for add_temporary_entry\n");
470		ret = -ENOSPC;
471		goto out;
472	}
473
474	write_c0_index(temp_tlb_entry);
475	write_c0_pagemask(pagemask);
476	write_c0_entryhi(entryhi);
477	write_c0_entrylo0(entrylo0);
478	write_c0_entrylo1(entrylo1);
479	mtc0_tlbw_hazard();
480	tlb_write_indexed();
481	tlbw_use_hazard();
482
483	write_c0_entryhi(old_ctx);
484	write_c0_pagemask(old_pagemask);
485	htw_start();
486out:
487	local_irq_restore(flags);
488	return ret;
489}
490
491static int ntlb;
492static int __init set_ntlb(char *str)
493{
494	get_option(&str, &ntlb);
495	return 1;
496}
497
498__setup("ntlb=", set_ntlb);
499
500/*
501 * Configure TLB (for init or after a CPU has been powered off).
502 */
503static void r4k_tlb_configure(void)
504{
505	/*
506	 * You should never change this register:
507	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
508	 *     the value in the c0_pagemask register.
509	 *   - The entire mm handling assumes the c0_pagemask register to
510	 *     be set to fixed-size pages.
511	 */
512	write_c0_pagemask(PM_DEFAULT_MASK);
513	back_to_back_c0_hazard();
514	if (read_c0_pagemask() != PM_DEFAULT_MASK)
515		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
516
517	write_c0_wired(0);
518	if (current_cpu_type() == CPU_R10000 ||
519	    current_cpu_type() == CPU_R12000 ||
520	    current_cpu_type() == CPU_R14000 ||
521	    current_cpu_type() == CPU_R16000)
522		write_c0_framemask(0);
523
524	if (cpu_has_rixi) {
525		/*
526		 * Enable the no read, no exec bits, and enable large physical
527		 * address.
528		 */
529#ifdef CONFIG_64BIT
530		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
531#else
532		set_c0_pagegrain(PG_RIE | PG_XIE);
533#endif
534	}
535
536	temp_tlb_entry = current_cpu_data.tlbsize - 1;
537
538	/* From this point on the ARC firmware is dead.	 */
539	local_flush_tlb_all();
540
541	/* Did I tell you that ARC SUCKS?  */
542}
543
544void tlb_init(void)
545{
546	r4k_tlb_configure();
547
548	if (ntlb) {
549		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
550			int wired = current_cpu_data.tlbsize - ntlb;
551			write_c0_wired(wired);
552			write_c0_index(wired-1);
553			printk("Restricting TLB to %d entries\n", ntlb);
554		} else
555			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
556	}
557
558	build_tlb_refill_handler();
559}
560
561static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
562			       void *v)
563{
564	switch (cmd) {
565	case CPU_PM_ENTER_FAILED:
566	case CPU_PM_EXIT:
567		r4k_tlb_configure();
568		break;
569	}
570
571	return NOTIFY_OK;
572}
573
574static struct notifier_block r4k_tlb_pm_notifier_block = {
575	.notifier_call = r4k_tlb_pm_notifier,
576};
577
578static int __init r4k_tlb_init_pm(void)
579{
580	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
581}
582arch_initcall(r4k_tlb_init_pm);
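
A recurring idiom in both listings is writing UNIQUE_ENTRYHI(idx) into EntryHi before an indexed TLB write, so that every invalidated slot holds a distinct VPN2 that can never match a real translation ("Make sure all entries differ" / "create impossible VPN2 value"). The macro is defined in asm/tlb.h, not in this file; as a rough, simplified sketch it is assumed to look like the following (the real definition additionally sets the EHINV bit when the CPU supports the TLB invalidate instructions):

#include <asm/addrspace.h>	/* CKSEG0: unmapped kernel segment, never TLB-mapped */
#include <asm/page.h>		/* PAGE_SHIFT */

/*
 * Map TLB index idx to a unique virtual address in CKSEG0.  Each TLB entry
 * covers an even/odd pair of pages (PAGE_SIZE << 1), hence the "+ 1" in the
 * shift, so consecutive indices land on distinct VPN2 values.
 */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))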