/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC64_TLBFLUSH_H
#define _SPARC64_TLBFLUSH_H

#include <asm/mmu_context.h>

/* TSB flush operations. */

#define TLB_BATCH_NR 192

struct tlb_batch {
	unsigned int hugepage_shift;		/* page shift shared by all entries in this batch */
	struct mm_struct *mm;			/* address space the pending flushes belong to */
	unsigned long tlb_nr;			/* number of virtual addresses queued */
	unsigned long active;			/* nonzero while lazy MMU batching is active */
	unsigned long vaddrs[TLB_BATCH_NR];	/* virtual addresses pending flush */
};
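/*
 * A batch only ever holds entries of one page size for one mm; when
 * tlb_nr reaches TLB_BATCH_NR the queued addresses are drained (see
 * flush_tlb_pending() below).
 */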

void flush_tsb_kernel_range(unsigned long start, unsigned long end);
void flush_tsb_user(struct tlb_batch *tb);
void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
			 unsigned int hugepage_shift);

/* TLB flush operations. */

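/*
 * The mm/page/range hooks below are intentionally empty on sparc64:
 * user TLB entries are flushed from the page table update paths via
 * the tlb_batch machinery above, so there is nothing left to do by
 * the time generic code invokes these.
 */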
static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

void flush_tlb_pending(void);
void arch_enter_lazy_mmu_mode(void);
void arch_leave_lazy_mmu_mode(void);
#define arch_flush_lazy_mmu_mode() do {} while (0)
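/*
 * Illustrative sketch of how the generic lazy MMU pattern drives these
 * hooks (an assumption about caller behavior, not a new API):
 *
 *	arch_enter_lazy_mmu_mode();	// begin batching PTE flushes
 *	...PTE updates queue vaddrs into the per-cpu tlb_batch...
 *	arch_leave_lazy_mmu_mode();	// drains the batch via flush_tlb_pending()
 */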

/* Local cpu only. */
void __flush_tlb_all(void);
void __flush_tlb_page(unsigned long context, unsigned long vaddr);
void __flush_tlb_kernel_range(unsigned long start, unsigned long end);

#ifndef CONFIG_SMP

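/*
 * UP: a local flush is sufficient.  CTX_HWBITS() masks mm->context down
 * to the hardware context number, so only that address space is hit.
 */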
static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	__flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
}

#else /* CONFIG_SMP */

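/*
 * SMP: the flush must reach every cpu that may cache the mapping;
 * smp_flush_tlb_page() cross-calls the other cpus in addition to
 * flushing locally.
 */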
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);

#define global_flush_tlb_page(mm, vaddr) \
	smp_flush_tlb_page(mm, vaddr)

#endif /* ! CONFIG_SMP */

#endif /* _SPARC64_TLBFLUSH_H */