#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <asm/cpu-features.h>
#include <asm/mipsregs.h>

/*
 * MIPS doesn't need any special per-pte or per-vma handling, except
 * that we need to flush the cache for the area being unmapped.
 */
#define tlb_start_vma(tlb, vma)						\
	do {								\
		if (!tlb->fullmm)					\
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

/*
 * .. because we flush the whole mm when it fills up.
 */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

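/*
 * For illustration only: a rough sketch of how the generic mmu_gather
 * code in mm/memory.c drives the hooks above (the exact call sequence
 * and signatures are an assumption based on the generic code of this
 * era, not part of this header):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);	// flush_cache_range() unless fullmm
 *	...				// zap the ptes in the range
 *	tlb_end_vma(&tlb, vma);		// no-op on MIPS
 *	tlb_finish_mmu(&tlb, start, end); // ends up calling tlb_flush()
 */
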
/*
 * UNIQUE_ENTRYHI(idx) yields an EntryHi value that is distinct for each
 * TLB index: an address in the unmapped CKSEG0 region, spaced two pages
 * apart since each TLB entry maps an even/odd page pair. On CPUs with
 * the TLB invalidate feature, the EHINV bit is also set to mark the
 * entry explicitly invalid.
 */
#define UNIQUE_ENTRYHI(idx)						\
		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) |		\
		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))

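/*
 * For illustration only: a sketch of how a TLB entry is typically
 * invalidated by index using UNIQUE_ENTRYHI, modelled on the flush
 * routines in arch/mips/mm/tlb-r4k.c (treat the exact sequence as an
 * assumption, not something this header mandates):
 *
 *	write_c0_entryhi(UNIQUE_ENTRYHI(idx));	// cannot match any VA
 *	write_c0_index(idx);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();			// overwrite entry 'idx'
 */
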
/*
 * Return the number of wired (fixed, non-replaceable) TLB entries. On
 * MIPS r6 the upper half of the cp0 Wired register holds a separate
 * Limit field, so mask it off to leave only the wired entry count.
 */
static inline unsigned int num_wired_entries(void)
{
	unsigned int wired = read_c0_wired();

	if (cpu_has_mips_r6)
		wired &= MIPSR6_WIRED_WIRED;

	return wired;
}

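/*
 * For illustration only: flush loops conventionally start at the first
 * non-wired index so that wired (permanently mapped) entries survive
 * the flush. A minimal sketch, assuming the tlbsize field of struct
 * cpuinfo_mips:
 *
 *	unsigned int entry = num_wired_entries();
 *
 *	while (entry < current_cpu_data.tlbsize) {
 *		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
 *		write_c0_index(entry);
 *		mtc0_tlbw_hazard();
 *		tlb_write_indexed();
 *		entry++;
 *	}
 */
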
#include <asm-generic/tlb.h>

#endif /* __ASM_TLB_H */