1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_TLB_H
3#define __ASM_TLB_H
4
5#include <asm/cpu-features.h>
6#include <asm/mipsregs.h>
7
/*
 * Build a unique, non-matching EntryHi value for dummy TLB entry @idx.
 *
 * Each index is offset from @base by 2*PAGE_SIZE (a MIPS TLB entry maps
 * an even/odd page pair, hence PAGE_SHIFT + 1) so no two dummy entries
 * can ever alias each other.  On cores with hardware TLB invalidation
 * the EHINV bit is also set, marking the entry explicitly invalid.
 */
#define _UNIQUE_ENTRYHI(base, idx) \
		(((base) + ((idx) << (PAGE_SHIFT + 1))) | \
		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
/* Host entries live in CKSEG0, guest entries in CKSEG1. */
#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)
13
14static inline unsigned int num_wired_entries(void)
15{
16 unsigned int wired = read_c0_wired();
17
18 if (cpu_has_mips_r6)
19 wired &= MIPSR6_WIRED_WIRED;
20
21 return wired;
22}
23
24#include <asm-generic/tlb.h>
25
26#endif /* __ASM_TLB_H */
1#ifndef __ASM_TLB_H
2#define __ASM_TLB_H
3
4#include <asm/cpu-features.h>
5#include <asm/mipsregs.h>
6
/*
 * MIPS doesn't need any special per-pte or per-vma handling, except
 * we need to flush the cache for the area about to be unmapped.
 *
 * Note: arguments are fully parenthesized so the macros stay safe if
 * callers pass compound expressions.
 */
#define tlb_start_vma(tlb, vma) \
	do { \
		if (!(tlb)->fullmm) \
			flush_cache_range((vma), (vma)->vm_start, (vma)->vm_end); \
	}  while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

/*
 * .. because we flush the whole mm when it fills up.
 */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
23
/*
 * Build a unique, non-matching EntryHi value for dummy TLB entry @idx.
 *
 * Each index is offset within CKSEG0 by 2*PAGE_SIZE (a MIPS TLB entry
 * maps an even/odd page pair, hence PAGE_SHIFT + 1) so no two dummy
 * entries can alias each other.  On cores with hardware TLB
 * invalidation the EHINV bit is also set, marking the entry invalid.
 */
#define UNIQUE_ENTRYHI(idx) \
		((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
		 (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
27
28static inline unsigned int num_wired_entries(void)
29{
30 unsigned int wired = read_c0_wired();
31
32 if (cpu_has_mips_r6)
33 wired &= MIPSR6_WIRED_WIRED;
34
35 return wired;
36}
37
38#include <asm-generic/tlb.h>
39
40#endif /* __ASM_TLB_H */