#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int need_flush; /* Really unmapped some ptes? */
	unsigned long start;
	unsigned long end;
	unsigned int fullmm; /* non-zero means full mm flush */
};
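
/*
 * Illustrative sketch (the pte walk and the "ptep"/"addr" names below are
 * placeholders, not part of this header): the mm core of this era drives
 * the gather roughly like this for a partial unmap of [start, end):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	...for each present pte in the range...
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, pte_page(*ptep));
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() then flushes whatever window the recorded entries
 * cover via flush_tlb_mm_range().
 */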

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}
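
/*
 * Note: the flush range starts out "inverted" (start == TASK_SIZE,
 * end == 0), i.e. empty, so the first __tlb_remove_tlb_entry() call
 * narrows it to exactly the pages that were touched.  A full-mm gather
 * skips that tracking and always covers [0, TASK_SIZE).
 */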

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
{
	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	init_tlb_gather(tlb);
}

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
	init_tlb_gather(tlb);
}

/* tlb_finish_mmu
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}

/* tlb_remove_page
 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 * while handling the additional races in SMP caused by other CPUs
 * caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return 1; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_TLB_H
#define __UM_TLB_H

#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int need_flush; /* Really unmapped some ptes? */
	unsigned long start;
	unsigned long end;
	unsigned int fullmm; /* non-zero means full mm flush */
};

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
					  unsigned long address)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + PAGE_SIZE)
		tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
	tlb->need_flush = 0;

	tlb->start = TASK_SIZE;
	tlb->end = 0;

	if (tlb->fullmm) {
		tlb->start = 0;
		tlb->end = TASK_SIZE;
	}
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
	tlb->fullmm = !(start | (end+1));

	init_tlb_gather(tlb);
}
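
/*
 * Note: !(start | (end+1)) is non-zero only when start == 0 and
 * end == ~0UL, which is how the generic code requests a full-mm
 * teardown (e.g. tlb_gather_mmu(&tlb, mm, 0, -1) in exit_mmap()).
 */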

extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			       unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
	if (!tlb->need_flush)
		return;

	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 * Called at the end of the shootdown operation to free up any resources
 * that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
		tlb->need_flush = 1;
	}
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();
}
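
/*
 * Note: "force" tells this helper to flush the supplied [start, end)
 * window even if no entries were recorded; otherwise tlb_flush_mmu()
 * is a no-op whenever need_flush stayed 0.
 */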

/* tlb_remove_page
 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 * while handling the additional races in SMP caused by other CPUs
 * caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	__tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}
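
/*
 * Note: the page_size argument is ignored above; the *_size wrappers only
 * exist because the core mm code calls tlb_remove_page_size(), while UML
 * frees the page immediately and tracks the flush range in PAGE_SIZE units.
 */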

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that ptes were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)

#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)

#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)

#define tlb_migrate_finish(mm) do {} while (0)

#endif