/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *     (this is a no-op on ia64).
 * (2) Clear the relevant portions of the page-table
 * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 * (4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
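/*
 * Note: on ia64 the per-VMA hooks in the template above are no-ops
 * (tlb_start_vma()/tlb_end_vma() are defined as empty further down).  The
 * actual flush is driven by the [start_addr, end_addr) range accumulated by
 * __tlb_remove_tlb_entry() and handed to ia64_tlb_flush_mmu_tlbonly() by the
 * tlb_flush_mmu()/tlb_finish_mmu() paths.
 */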
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};
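/*
 * Gathering works in two tiers: pages initially points at the small on-stack
 * local[] array (IA64_GATHER_BUNDLE entries); when the first page is queued,
 * __tlb_alloc_page() below tries to upgrade to a whole page of pointers and
 * raises max accordingly.  nr counts the pages queued so far and is compared
 * against max to decide when a flush must be forced.
 */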

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record of an inserted translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
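/*
 * Rough usage sketch (not taken from this file): ia64_itr_entry() pins a
 * translation in the instruction and/or data translation registers selected by
 * target_mask, and ia64_ptr_entry() purges it again by slot; ia64_idtrs[] keeps
 * a per-CPU record of what has been pinned.  Something along the lines of:
 *
 *	slot = ia64_itr_entry(mask, va, pte_val(pfn_pte(pfn, PAGE_KERNEL)), PAGE_SHIFT);
 *	if (slot >= 0)
 *		...use the pinned mapping...
 *	ia64_ptr_entry(mask, slot);
 *
 * The mask encoding and the slot return-value convention are assumptions for
 * illustration; the real definitions live in the ia64 TLB code, not in this header.
 */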

/* region register macros */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
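/*
 * Worked example (values made up for illustration): a region register with
 * ve = 1, a 16KB preferred page size (ps = 14) and rid = 0x1234 would be
 * assembled and decomposed as
 *
 *	rr = RR_VE(1) | RR_PS(14) | (0x1234UL << 8);	// 0x1 | 0x38 | 0x123400 = 0x123439
 *	RR_TO_VE(rr)  == 1
 *	RR_TO_PS(rr)  == 14
 *	RR_TO_RID(rr) == 0x1234
 */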

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a terabyte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the freed pages
 * that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}
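/*
 * The fullmm test relies on the convention that a full-mm teardown is requested
 * with start == 0 and end == ~0UL (as exit_mmap() does): then (start | (end + 1))
 * is 0 and fullmm becomes 1.  Any bounded range, e.g. start = 0x2000000000000000
 * and end = 0x2000000000010000, leaves fullmm 0 and later takes the ranged-flush
 * path in ia64_tlb_flush_mmu_tlbonly().  The example addresses are illustrative only.
 */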

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}
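/*
 * A true return means the batch is full and the caller must flush before queueing
 * more pages; tlb_remove_page() below does exactly that.  After the flush,
 * ia64_tlb_flush_mmu_free() resets nr to 0 so gathering can continue.
 */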

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
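/*
 * start_addr is ~0UL whenever nothing has been gathered since the last flush, so
 * the first removed PTE re-arms the range and every later one only extends
 * end_addr.  For instance, removing PTEs at addresses A and A + 2*PAGE_SIZE
 * leaves [A, A + 3*PAGE_SIZE) as the range handed to the ranged flush; A here is
 * just a placeholder address.
 */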

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}
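/*
 * The page-size-change hook is deliberately empty: the flush here is driven purely
 * by the gathered virtual range, so switching between normal and huge PTEs does
 * not require draining the batch on ia64.
 */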

#define pte_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pte_free_tlb(tlb, ptep, address);	\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pmd_free_tlb(tlb, ptep, address);	\
} while (0)

#define pud_free_tlb(tlb, pudp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pud_free_tlb(tlb, pudp, address);	\
} while (0)
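/*
 * need_flush is forced in the *_free_tlb() wrappers above because releasing a
 * page-table page invalidates translations that CPUs may still have cached, so a
 * TLB flush must happen before tlb_finish_mmu() returns even if no normal pages
 * were gathered (step (3) of the procedure described at the top of this file).
 */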

#endif /* _ASM_IA64_TLB_H */