1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/* include/asm-generic/tlb.h
3 *
4 * Generic TLB shootdown code
5 *
6 * Copyright 2001 Red Hat, Inc.
7 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
8 *
9 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
10 */
11#ifndef _ASM_GENERIC__TLB_H
12#define _ASM_GENERIC__TLB_H
13
14#include <linux/mmu_notifier.h>
15#include <linux/swap.h>
16#include <asm/pgalloc.h>
17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h>
19
20/*
21 * Blindly accessing user memory from NMI context can be dangerous
22 * if we're in the middle of switching the current user task or switching
23 * the loaded mm.
24 */
25#ifndef nmi_uaccess_okay
26# define nmi_uaccess_okay() true
27#endif
28
29#ifdef CONFIG_MMU
30
31/*
32 * Generic MMU-gather implementation.
33 *
34 * The mmu_gather data structure is used by the mm code to implement the
35 * correct and efficient ordering of freeing pages and TLB invalidations.
36 *
37 * This correct ordering is:
38 *
39 * 1) unhook page
40 * 2) TLB invalidate page
41 * 3) free page
42 *
43 * That is, we must never free a page before we have ensured there are no live
44 * translations left to it. Otherwise it might be possible to observe (or
45 * worse, change) the page content after it has been reused.
46 *
47 * The mmu_gather API consists of:
48 *
49 * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
50 *
51 * Finish in particular will issue a (final) TLB invalidate and free
52 * all (remaining) queued pages.
53 *
54 * - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA
55 *
56 * Defaults to flushing at tlb_end_vma() to reset the range; helps when
57 * there are large holes between the VMAs.
58 *
59 * - tlb_remove_page() / __tlb_remove_page()
60 * - tlb_remove_page_size() / __tlb_remove_page_size()
61 *
62 * __tlb_remove_page_size() is the basic primitive that queues a page for
63 * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
64 * boolean indicating if the queue is (now) full and a call to
65 * tlb_flush_mmu() is required.
66 *
67 * tlb_remove_page() and tlb_remove_page_size() imply the call to
68 * tlb_flush_mmu() when required and have no return value.
69 *
70 * - tlb_change_page_size()
71 *
72 * call before __tlb_remove_page*() to set the current page-size; implies a
73 * possible tlb_flush_mmu() call.
74 *
75 * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
76 *
77 * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
78 * related state, like the range)
79 *
80 * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
81 * whatever pages are still batched.
82 *
83 * - mmu_gather::fullmm
84 *
85 * A flag set by tlb_gather_mmu() to indicate we're going to free
86 * the entire mm; this allows a number of optimizations.
87 *
88 * - We can ignore tlb_{start,end}_vma(); because we don't
89 * care about ranges. Everything will be shot down.
90 *
91 * - (RISC) architectures that use ASIDs can cycle to a new ASID
92 * and delay the invalidation until ASID space runs out.
93 *
94 * - mmu_gather::need_flush_all
95 *
96 * A flag that can be set by the arch code if it wants to force
97 * flush the entire TLB irrespective of the range. For instance
98 * x86-PAE needs this when changing top-level entries.
99 *
100 * The mmu_gather API also allows the architecture to provide and implement tlb_flush():
101 *
102 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
103 * use of:
104 *
105 * - mmu_gather::start / mmu_gather::end
106 *
107 * which provides the range that needs to be flushed to cover the pages to
108 * be freed.
109 *
110 * - mmu_gather::freed_tables
111 *
112 * set when we freed page table pages
113 *
114 * - tlb_get_unmap_shift() / tlb_get_unmap_size()
115 *
116 * returns the smallest TLB entry size unmapped in this range.
117 *
118 * If an architecture does not provide tlb_flush() a default implementation
119 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
120 * specified, in which case we'll default to flush_tlb_mm().
121 *
122 * Additionally there are a few opt-in features:
123 *
124 * HAVE_MMU_GATHER_PAGE_SIZE
125 *
126 * This ensures we call tlb_flush() every time tlb_change_page_size() actually
127 * changes the size and provides mmu_gather::page_size to tlb_flush().
128 *
129 * HAVE_RCU_TABLE_FREE
130 *
131 * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
132 * for page directories (__p*_free_tlb()). This provides separate freeing of
133 * the page-table pages themselves in a semi-RCU fashion (see comment below).
134 * Useful if your architecture doesn't use IPIs for remote TLB invalidates
135 * and therefore doesn't naturally serialize with software page-table walkers.
136 *
137 * When used, an architecture is expected to provide __tlb_remove_table()
138 * which does the actual freeing of these pages.
139 *
140 * HAVE_RCU_TABLE_NO_INVALIDATE
141 *
142 * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
143 * freeing the page-table pages. Skipping that invalidate is only safe if
144 * you use HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
145 * page-tables natively.
146 *
147 * MMU_GATHER_NO_RANGE
148 *
149 * Use this if your architecture lacks an efficient flush_tlb_range().
150 */
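
/*
 * A rough usage sketch (illustrative only -- the real callers live in
 * mm/memory.c and mm/mmap.c, and locking, huge pages and error handling
 * are omitted):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for_each_present_pte(...) {			   // pseudo-code loop
 *		pte = ptep_get_and_clear(mm, addr, ptep);  // 1) unhook page
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	   // record the range
 *		tlb_remove_page(&tlb, pte_page(pte));	   // queue page, may flush
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);	// 2) TLB invalidate, 3) free pages
 */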
151
152#ifdef CONFIG_HAVE_RCU_TABLE_FREE
153/*
154 * Semi RCU freeing of the page directories.
155 *
156 * This is needed by some architectures to implement software pagetable walkers.
157 *
158 * gup_fast() and other software pagetable walkers do a lockless page-table
159 * walk and therefore need some synchronization with the freeing of the page
160 * directories. The chosen means to accomplish that is by disabling IRQs over
161 * the walk.
162 *
163 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
164 * since we unlink the page, flush TLBs, free the page. Since the disabling of
165 * IRQs delays the completion of the TLB flush we can never observe an already
166 * freed page.
167 *
168 * Architectures that do not have this (PPC) need to delay the freeing by some
169 * other means; this is that means.
170 *
171 * What we do is batch the freed directory pages (tables) and RCU free them.
172 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
173 * holds off grace periods.
174 *
175 * However, in order to batch these pages we need to allocate storage, this
176 * allocation is deep inside the MM code and can thus easily fail on memory
177 * pressure. To guarantee progress we fall back to single table freeing, see
178 * the implementation of tlb_remove_table_one().
179 *
180 */
181struct mmu_table_batch {
182 struct rcu_head rcu;
183 unsigned int nr;
184 void *tables[0];
185};
186
187#define MAX_TABLE_BATCH \
188 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
189
190extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
191
192#endif
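
/*
 * Illustrative sketch only (hypothetical arch code, not part of this file):
 * an architecture selecting HAVE_RCU_TABLE_FREE typically routes its
 * __p*_free_tlb() helpers through tlb_remove_table() and supplies
 * __tlb_remove_table() for the eventual free, e.g.:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_table((tlb), (pte))
 *
 *	void __tlb_remove_table(void *table)
 *	{
 *		struct page *page = table;
 *
 *		pgtable_page_dtor(page);
 *		__free_page(page);
 *	}
 */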
193
194#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
195/*
196 * If we can't allocate a page to make a big batch of page pointers
197 * to work on, then just handle a few from the on-stack structure.
198 */
199#define MMU_GATHER_BUNDLE 8
200
201struct mmu_gather_batch {
202 struct mmu_gather_batch *next;
203 unsigned int nr;
204 unsigned int max;
205 struct page *pages[0];
206};
207
208#define MAX_GATHER_BATCH \
209 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
210
211/*
212 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
213 * lockups for non-preemptible kernels on huge machines when a lot of memory
214 * is zapped during unmapping.
215 * 10K pages freed at once should be safe even without a preemption point.
216 */
217#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
218
219extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
220 int page_size);
221#endif
222
223/*
224 * struct mmu_gather is an opaque type used by the mm code for passing around
225 * any data needed by arch specific code for tlb_remove_page.
226 */
227struct mmu_gather {
228 struct mm_struct *mm;
229
230#ifdef CONFIG_HAVE_RCU_TABLE_FREE
231 struct mmu_table_batch *batch;
232#endif
233
234 unsigned long start;
235 unsigned long end;
236 /*
237 * we are in the middle of an operation to clear
238 * a full mm and can make some optimizations
239 */
240 unsigned int fullmm : 1;
241
242 /*
243 * we have performed an operation which
244 * requires a complete flush of the tlb
245 */
246 unsigned int need_flush_all : 1;
247
248 /*
249 * we have removed page directories
250 */
251 unsigned int freed_tables : 1;
252
253 /*
254 * at which levels have we cleared entries?
255 */
256 unsigned int cleared_ptes : 1;
257 unsigned int cleared_pmds : 1;
258 unsigned int cleared_puds : 1;
259 unsigned int cleared_p4ds : 1;
260
261 /*
262 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
263 */
264 unsigned int vma_exec : 1;
265 unsigned int vma_huge : 1;
266
267 unsigned int batch_count;
268
269#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
270 struct mmu_gather_batch *active;
271 struct mmu_gather_batch local;
272 struct page *__pages[MMU_GATHER_BUNDLE];
273
274#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
275 unsigned int page_size;
276#endif
277#endif
278};
279
280void arch_tlb_gather_mmu(struct mmu_gather *tlb,
281 struct mm_struct *mm, unsigned long start, unsigned long end);
282void tlb_flush_mmu(struct mmu_gather *tlb);
283void arch_tlb_finish_mmu(struct mmu_gather *tlb,
284 unsigned long start, unsigned long end, bool force);
285
286static inline void __tlb_adjust_range(struct mmu_gather *tlb,
287 unsigned long address,
288 unsigned int range_size)
289{
290 tlb->start = min(tlb->start, address);
291 tlb->end = max(tlb->end, address + range_size);
292}
293
294static inline void __tlb_reset_range(struct mmu_gather *tlb)
295{
296 if (tlb->fullmm) {
297 tlb->start = tlb->end = ~0;
298 } else {
299 tlb->start = TASK_SIZE;
300 tlb->end = 0;
301 }
302 tlb->freed_tables = 0;
303 tlb->cleared_ptes = 0;
304 tlb->cleared_pmds = 0;
305 tlb->cleared_puds = 0;
306 tlb->cleared_p4ds = 0;
307 /*
308 * Do not reset mmu_gather::vma_* fields here, we do not
309 * call into tlb_start_vma() again to set them if there is an
310 * intermediate flush.
311 */
312}
313
314#ifdef CONFIG_MMU_GATHER_NO_RANGE
315
316#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
317#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
318#endif
319
320/*
321 * When an architecture does not have efficient means of range flushing TLBs
322 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
323 * range small. We equally don't have to worry about page granularity or other
324 * things.
325 *
326 * All we need to do is issue a full flush for any !0 range.
327 */
328static inline void tlb_flush(struct mmu_gather *tlb)
329{
330 if (tlb->end)
331 flush_tlb_mm(tlb->mm);
332}
333
334static inline void
335tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
336
337#define tlb_end_vma tlb_end_vma
338static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
339
340#else /* CONFIG_MMU_GATHER_NO_RANGE */
341
342#ifndef tlb_flush
343
344#if defined(tlb_start_vma) || defined(tlb_end_vma)
345#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
346#endif
347
348/*
349 * When an architecture does not provide its own tlb_flush() implementation
350 * but does have a reasonably efficient flush_tlb_range() implementation,
351 * use that.
352 */
353static inline void tlb_flush(struct mmu_gather *tlb)
354{
355 if (tlb->fullmm || tlb->need_flush_all) {
356 flush_tlb_mm(tlb->mm);
357 } else if (tlb->end) {
358 struct vm_area_struct vma = {
359 .vm_mm = tlb->mm,
360 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
361 (tlb->vma_huge ? VM_HUGETLB : 0),
362 };
363
364 flush_tlb_range(&vma, tlb->start, tlb->end);
365 }
366}
367
368static inline void
369tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
370{
371 /*
372 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
373 * mips-4k) flush only large pages.
374 *
375 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
376 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
377 * range.
378 *
379 * We rely on tlb_end_vma() to issue a flush, such that when we reset
380 * these values the batch is empty.
381 */
382 tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
383 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
384}
385
386#else
387
388static inline void
389tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
390
391#endif
392
393#endif /* CONFIG_MMU_GATHER_NO_RANGE */
394
395static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
396{
397 if (!tlb->end)
398 return;
399
400 tlb_flush(tlb);
401 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
402 __tlb_reset_range(tlb);
403}
404
405static inline void tlb_remove_page_size(struct mmu_gather *tlb,
406 struct page *page, int page_size)
407{
408 if (__tlb_remove_page_size(tlb, page, page_size))
409 tlb_flush_mmu(tlb);
410}
411
412static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
413{
414 return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
415}
416
417/* tlb_remove_page
418 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
419 * required.
420 */
421static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
422{
423 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
424}
425
426static inline void tlb_change_page_size(struct mmu_gather *tlb,
427 unsigned int page_size)
428{
429#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
430 if (tlb->page_size && tlb->page_size != page_size) {
431 if (!tlb->fullmm)
432 tlb_flush_mmu(tlb);
433 }
434
435 tlb->page_size = page_size;
436#endif
437}
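
/*
 * Sketch (illustrative): a caller about to zap PMD-sized mappings announces
 * the page size first, so HAVE_MMU_GATHER_PAGE_SIZE configurations can flush
 * any differently-sized pages that are already batched:
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	... clear the huge pmd ...
 *	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
 *	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 */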
438
439static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
440{
441 if (tlb->cleared_ptes)
442 return PAGE_SHIFT;
443 if (tlb->cleared_pmds)
444 return PMD_SHIFT;
445 if (tlb->cleared_puds)
446 return PUD_SHIFT;
447 if (tlb->cleared_p4ds)
448 return P4D_SHIFT;
449
450 return PAGE_SHIFT;
451}
452
453static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
454{
455 return 1UL << tlb_get_unmap_shift(tlb);
456}
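
/*
 * Purely illustrative (hypothetical arch code): an architecture-provided
 * tlb_flush() might combine the mmu_gather fields above, flushing by the
 * smallest unmapped granule and falling back to a full flush when page
 * tables were freed:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables) {
 *			flush_tlb_mm(tlb->mm);
 *			return;
 *		}
 *		my_arch_flush_range(tlb->mm, tlb->start, tlb->end,
 *				    tlb_get_unmap_shift(tlb));
 *	}
 *
 * where my_arch_flush_range() is a made-up helper standing in for whatever
 * ranged invalidate the hardware provides.
 */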
457
458/*
459 * In the case of tlb vma handling, we can optimise these away in the
460 * case where we're doing a full MM flush. When we're doing a munmap,
461 * the vmas are adjusted to only cover the region to be torn down.
462 */
463#ifndef tlb_start_vma
464static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
465{
466 if (tlb->fullmm)
467 return;
468
469 tlb_update_vma_flags(tlb, vma);
470 flush_cache_range(vma, vma->vm_start, vma->vm_end);
471}
472#endif
473
474#ifndef tlb_end_vma
475static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
476{
477 if (tlb->fullmm)
478 return;
479
480 /*
481 * Do a TLB flush and reset the range at VMA boundaries; this avoids
482 * the ranges growing with the unused space between consecutive VMAs,
483 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
484 * this.
485 */
486 tlb_flush_mmu_tlbonly(tlb);
487}
488#endif
489
490#ifndef __tlb_remove_tlb_entry
491#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
492#endif
493
494/**
495 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
496 *
497 * Record the fact that ptes were really unmapped by updating the range,
498 * so we can later optimise away the tlb invalidate. This helps when
499 * userspace is unmapping already-unmapped pages, which happens quite a lot.
500 */
501#define tlb_remove_tlb_entry(tlb, ptep, address) \
502 do { \
503 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
504 tlb->cleared_ptes = 1; \
505 __tlb_remove_tlb_entry(tlb, ptep, address); \
506 } while (0)
507
508#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
509 do { \
510 unsigned long _sz = huge_page_size(h); \
511 __tlb_adjust_range(tlb, address, _sz); \
512 if (_sz == PMD_SIZE) \
513 tlb->cleared_pmds = 1; \
514 else if (_sz == PUD_SIZE) \
515 tlb->cleared_puds = 1; \
516 __tlb_remove_tlb_entry(tlb, ptep, address); \
517 } while (0)
518
519/**
520 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
521 * This is a nop so far, because only x86 needs it.
522 */
523#ifndef __tlb_remove_pmd_tlb_entry
524#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
525#endif
526
527#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
528 do { \
529 __tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
530 tlb->cleared_pmds = 1; \
531 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
532 } while (0)
533
534/**
535 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
536 * invalidation. This is a nop so far, because only x86 needs it.
537 */
538#ifndef __tlb_remove_pud_tlb_entry
539#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
540#endif
541
542#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
543 do { \
544 __tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
545 tlb->cleared_puds = 1; \
546 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
547 } while (0)
548
549/*
550 * For things like page table caches (i.e. caching addresses "inside" the
551 * page tables, like x86 does), for legacy reasons, flushing an
552 * individual page had better flush the page table caches behind it. This
553 * is definitely how x86 works, for example. And if you have an
554 * architected non-legacy page table cache (which I'm not aware of
555 * anybody actually doing), you're going to have some architecturally
556 * explicit flushing for that, likely *separate* from a regular TLB entry
557 * flush, and thus you'd need more than just some range expansion..
558 *
559 * So if we ever find an architecture
560 * that would want something that odd, I think it is up to that
561 * architecture to do its own odd thing, not cause pain for others
562 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
563 *
564 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
565 */
566
567#ifndef pte_free_tlb
568#define pte_free_tlb(tlb, ptep, address) \
569 do { \
570 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
571 tlb->freed_tables = 1; \
572 tlb->cleared_pmds = 1; \
573 __pte_free_tlb(tlb, ptep, address); \
574 } while (0)
575#endif
576
577#ifndef pmd_free_tlb
578#define pmd_free_tlb(tlb, pmdp, address) \
579 do { \
580 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
581 tlb->freed_tables = 1; \
582 tlb->cleared_puds = 1; \
583 __pmd_free_tlb(tlb, pmdp, address); \
584 } while (0)
585#endif
586
587#ifndef __ARCH_HAS_4LEVEL_HACK
588#ifndef pud_free_tlb
589#define pud_free_tlb(tlb, pudp, address) \
590 do { \
591 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
592 tlb->freed_tables = 1; \
593 tlb->cleared_p4ds = 1; \
594 __pud_free_tlb(tlb, pudp, address); \
595 } while (0)
596#endif
597#endif
598
599#ifndef __ARCH_HAS_5LEVEL_HACK
600#ifndef p4d_free_tlb
601#define p4d_free_tlb(tlb, pudp, address) \
602 do { \
603 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
604 tlb->freed_tables = 1; \
605 __p4d_free_tlb(tlb, pudp, address); \
606 } while (0)
607#endif
608#endif
609
610#endif /* CONFIG_MMU */
611
612#endif /* _ASM_GENERIC__TLB_H */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/* include/asm-generic/tlb.h
3 *
4 * Generic TLB shootdown code
5 *
6 * Copyright 2001 Red Hat, Inc.
7 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
8 *
9 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
10 */
11#ifndef _ASM_GENERIC__TLB_H
12#define _ASM_GENERIC__TLB_H
13
14#include <linux/mmu_notifier.h>
15#include <linux/swap.h>
16#include <linux/hugetlb_inline.h>
17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h>
19
20/*
21 * Blindly accessing user memory from NMI context can be dangerous
22 * if we're in the middle of switching the current user task or switching
23 * the loaded mm.
24 */
25#ifndef nmi_uaccess_okay
26# define nmi_uaccess_okay() true
27#endif
28
29#ifdef CONFIG_MMU
30
31/*
32 * Generic MMU-gather implementation.
33 *
34 * The mmu_gather data structure is used by the mm code to implement the
35 * correct and efficient ordering of freeing pages and TLB invalidations.
36 *
37 * This correct ordering is:
38 *
39 * 1) unhook page
40 * 2) TLB invalidate page
41 * 3) free page
42 *
43 * That is, we must never free a page before we have ensured there are no live
44 * translations left to it. Otherwise it might be possible to observe (or
45 * worse, change) the page content after it has been reused.
46 *
47 * The mmu_gather API consists of:
48 *
49 * - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
50 *
51 * start and finish a mmu_gather
52 *
53 * Finish in particular will issue a (final) TLB invalidate and free
54 * all (remaining) queued pages.
55 *
56 * - tlb_start_vma() / tlb_end_vma(); mark the start / end of a VMA
57 *
58 * Defaults to flushing at tlb_end_vma() to reset the range; helps when
59 * there are large holes between the VMAs.
60 *
61 * - tlb_remove_table()
62 *
63 * tlb_remove_table() is the basic primitive to free page-table directories
64 * (__p*_free_tlb()). In its most primitive form it is an alias for
65 * tlb_remove_page() below, for when page directories are pages and have no
66 * additional constraints.
67 *
68 * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
69 *
70 * - tlb_remove_page() / __tlb_remove_page()
71 * - tlb_remove_page_size() / __tlb_remove_page_size()
72 * - __tlb_remove_folio_pages()
73 *
74 * __tlb_remove_page_size() is the basic primitive that queues a page for
75 * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
76 * boolean indicating if the queue is (now) full and a call to
77 * tlb_flush_mmu() is required.
78 *
79 * tlb_remove_page() and tlb_remove_page_size() imply the call to
80 * tlb_flush_mmu() when required and have no return value.
81 *
82 * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however,
83 * instead of removing a single page, remove the given number of consecutive
84 * pages that are all part of the same (large) folio: just like calling
85 * __tlb_remove_page() on each page individually.
86 *
87 * - tlb_change_page_size()
88 *
89 * call before __tlb_remove_page*() to set the current page-size; implies a
90 * possible tlb_flush_mmu() call.
91 *
92 * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
93 *
94 * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
95 * related state, like the range)
96 *
97 * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
98 * whatever pages are still batched.
99 *
100 * - mmu_gather::fullmm
101 *
102 * A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
103 * the entire mm; this allows a number of optimizations.
104 *
105 * - We can ignore tlb_{start,end}_vma(); because we don't
106 * care about ranges. Everything will be shot down.
107 *
108 * - (RISC) architectures that use ASIDs can cycle to a new ASID
109 * and delay the invalidation until ASID space runs out.
110 *
111 * - mmu_gather::need_flush_all
112 *
113 * A flag that can be set by the arch code if it wants to force
114 * flush the entire TLB irrespective of the range. For instance
115 * x86-PAE needs this when changing top-level entries.
116 *
117 * The mmu_gather API also allows the architecture to provide and implement tlb_flush():
118 *
119 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
120 * use of:
121 *
122 * - mmu_gather::start / mmu_gather::end
123 *
124 * which provides the range that needs to be flushed to cover the pages to
125 * be freed.
126 *
127 * - mmu_gather::freed_tables
128 *
129 * set when we freed page table pages
130 *
131 * - tlb_get_unmap_shift() / tlb_get_unmap_size()
132 *
133 * returns the smallest TLB entry size unmapped in this range.
134 *
135 * If an architecture does not provide tlb_flush() a default implementation
136 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
137 * specified, in which case we'll default to flush_tlb_mm().
138 *
139 * Additionally there are a few opt-in features:
140 *
141 * MMU_GATHER_PAGE_SIZE
142 *
143 * This ensures we call tlb_flush() every time tlb_change_page_size() actually
144 * changes the size and provides mmu_gather::page_size to tlb_flush().
145 *
146 * This might be useful if your architecture has size specific TLB
147 * invalidation instructions.
148 *
149 * MMU_GATHER_TABLE_FREE
150 *
151 * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
152 * for page directories (__p*_free_tlb()).
153 *
154 * Useful if your architecture has non-page page directories.
155 *
156 * When used, an architecture is expected to provide __tlb_remove_table()
157 * which does the actual freeing of these pages.
158 *
159 * MMU_GATHER_RCU_TABLE_FREE
160 *
161 * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
162 * comment below).
163 *
164 * Useful if your architecture doesn't use IPIs for remote TLB invalidates
165 * and therefore doesn't naturally serialize with software page-table walkers.
166 *
167 * MMU_GATHER_NO_FLUSH_CACHE
168 *
169 * Indicates the architecture has flush_cache_range() but it need *NOT* be called
170 * before unmapping a VMA.
171 *
172 * NOTE: strictly speaking we shouldn't have this knob and instead rely on
173 * flush_cache_range() being a NOP, except Sparc64 seems to be
174 * different here.
175 *
176 * MMU_GATHER_MERGE_VMAS
177 *
178 * Indicates the architecture wants to merge ranges over VMAs; typical when
179 * multiple range invalidates are more expensive than a full invalidate.
180 *
181 * MMU_GATHER_NO_RANGE
182 *
183 * Use this if your architecture lacks an efficient flush_tlb_range(). This
184 * option implies MMU_GATHER_MERGE_VMAS above.
185 *
186 * MMU_GATHER_NO_GATHER
187 *
188 * If the option is set the mmu_gather will not track individual pages for
189 * delayed page free anymore. A platform that enables the option needs to
190 * provide its own implementation of the __tlb_remove_page_size() function to
191 * free pages.
192 *
193 * This is useful if your architecture already flushes TLB entries in the
194 * various ptep_get_and_clear() functions.
195 */
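
/*
 * A rough usage sketch (illustrative only -- the real callers live in
 * mm/memory.c and mm/mmap.c, and locking, large folios and error handling
 * are omitted):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	tlb_start_vma(&tlb, vma);
 *	for_each_present_pte(...) {			   // pseudo-code loop
 *		pte = ptep_get_and_clear(mm, addr, ptep);  // 1) unhook page
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	   // record the range
 *		tlb_remove_page(&tlb, pte_page(pte));	   // queue page, may flush
 *	}
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);		// final TLB invalidate, then free pages
 */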
196
197#ifdef CONFIG_MMU_GATHER_TABLE_FREE
198
199struct mmu_table_batch {
200#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
201 struct rcu_head rcu;
202#endif
203 unsigned int nr;
204 void *tables[];
205};
206
207#define MAX_TABLE_BATCH \
208 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
209
210extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
211
212#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
213
214/*
215 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
216 * page directories and we can use the normal page batching to free them.
217 */
218#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
219
220#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
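
/*
 * Illustrative sketch only (hypothetical arch code, not part of this file):
 * with MMU_GATHER_TABLE_FREE an architecture routes its __p*_free_tlb()
 * helpers through tlb_remove_table() and supplies __tlb_remove_table() to do
 * the final free once that is safe:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)	tlb_remove_table((tlb), (pte))
 *
 *	void __tlb_remove_table(void *table)
 *	{
 *		my_arch_free_pgtable(table);	// made-up helper
 *	}
 */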
221
222#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
223/*
224 * This allows an architecture that does not use the linux page-tables for
225 * hardware to skip the TLBI when freeing page tables.
226 */
227#ifndef tlb_needs_table_invalidate
228#define tlb_needs_table_invalidate() (true)
229#endif
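
/*
 * Hypothetical example (not taken from any particular architecture): an arch
 * whose hardware only walks the Linux page tables in some modes could put
 * something like this in its asm/tlb.h:
 *
 *	#define tlb_needs_table_invalidate()	my_arch_hw_walks_linux_pgtables()
 *
 * where my_arch_hw_walks_linux_pgtables() is a made-up predicate.
 */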
230
231void tlb_remove_table_sync_one(void);
232
233#else
234
235#ifdef tlb_needs_table_invalidate
236#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
237#endif
238
239static inline void tlb_remove_table_sync_one(void) { }
240
241#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
242
243
244#ifndef CONFIG_MMU_GATHER_NO_GATHER
245/*
246 * If we can't allocate a page to make a big batch of page pointers
247 * to work on, then just handle a few from the on-stack structure.
248 */
249#define MMU_GATHER_BUNDLE 8
250
251struct mmu_gather_batch {
252 struct mmu_gather_batch *next;
253 unsigned int nr;
254 unsigned int max;
255 struct encoded_page *encoded_pages[];
256};
257
258#define MAX_GATHER_BATCH \
259 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
260
261/*
262 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
263 * lockups for non-preemptible kernels on huge machines when a lot of memory
264 * is zapped during unmapping.
265 * 10K pages freed at once should be safe even without a preemption point.
266 */
267#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
268
269extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
270 bool delay_rmap, int page_size);
271bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
272 unsigned int nr_pages, bool delay_rmap);
273
274#ifdef CONFIG_SMP
275/*
276 * This both sets 'delayed_rmap' and returns true. It would be an inline
277 * function, except we define it before the 'struct mmu_gather'.
278 */
279#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
280extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
281#endif
282
283#endif
284
285/*
286 * We have a no-op version of the rmap removal that doesn't
287 * delay anything. That is used on S390, which flushes remote
288 * TLBs synchronously, and on UP, which doesn't have any
289 * remote TLBs to flush and is not preemptible due to this
290 * all happening under the page table lock.
291 */
292#ifndef tlb_delay_rmap
293#define tlb_delay_rmap(tlb) (false)
294static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
295#endif
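
/*
 * Loose sketch of the intended pattern (the real code lives in mm/memory.c
 * and differs in detail): file-backed pages may have their rmap removal
 * delayed until after the TLB flush, but it must still happen under the page
 * table lock and before the pages are actually freed:
 *
 *	if (!folio_test_anon(folio))
 *		delay_rmap = tlb_delay_rmap(tlb);
 *	... queue the pages with the delay_rmap flag ...
 *
 *	if (tlb->delayed_rmap) {
 *		tlb_flush_mmu_tlbonly(tlb);
 *		tlb_flush_rmaps(tlb, vma);	// still under the ptl
 *	}
 *	pte_unmap_unlock(start_pte, ptl);
 */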
296
297/*
298 * struct mmu_gather is an opaque type used by the mm code for passing around
299 * any data needed by arch specific code for tlb_remove_page.
300 */
301struct mmu_gather {
302 struct mm_struct *mm;
303
304#ifdef CONFIG_MMU_GATHER_TABLE_FREE
305 struct mmu_table_batch *batch;
306#endif
307
308 unsigned long start;
309 unsigned long end;
310 /*
311 * we are in the middle of an operation to clear
312 * a full mm and can make some optimizations
313 */
314 unsigned int fullmm : 1;
315
316 /*
317 * we have performed an operation which
318 * requires a complete flush of the tlb
319 */
320 unsigned int need_flush_all : 1;
321
322 /*
323 * we have removed page directories
324 */
325 unsigned int freed_tables : 1;
326
327 /*
328 * Do we have pending delayed rmap removals?
329 */
330 unsigned int delayed_rmap : 1;
331
332 /*
333 * at which levels have we cleared entries?
334 */
335 unsigned int cleared_ptes : 1;
336 unsigned int cleared_pmds : 1;
337 unsigned int cleared_puds : 1;
338 unsigned int cleared_p4ds : 1;
339
340 /*
341 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
342 */
343 unsigned int vma_exec : 1;
344 unsigned int vma_huge : 1;
345 unsigned int vma_pfn : 1;
346
347 unsigned int batch_count;
348
349#ifndef CONFIG_MMU_GATHER_NO_GATHER
350 struct mmu_gather_batch *active;
351 struct mmu_gather_batch local;
352 struct page *__pages[MMU_GATHER_BUNDLE];
353
354#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
355 unsigned int page_size;
356#endif
357#endif
358};
359
360void tlb_flush_mmu(struct mmu_gather *tlb);
361
362static inline void __tlb_adjust_range(struct mmu_gather *tlb,
363 unsigned long address,
364 unsigned int range_size)
365{
366 tlb->start = min(tlb->start, address);
367 tlb->end = max(tlb->end, address + range_size);
368}
369
370static inline void __tlb_reset_range(struct mmu_gather *tlb)
371{
372 if (tlb->fullmm) {
373 tlb->start = tlb->end = ~0;
374 } else {
375 tlb->start = TASK_SIZE;
376 tlb->end = 0;
377 }
378 tlb->freed_tables = 0;
379 tlb->cleared_ptes = 0;
380 tlb->cleared_pmds = 0;
381 tlb->cleared_puds = 0;
382 tlb->cleared_p4ds = 0;
383 /*
384 * Do not reset mmu_gather::vma_* fields here, we do not
385 * call into tlb_start_vma() again to set them if there is an
386 * intermediate flush.
387 */
388}
389
390#ifdef CONFIG_MMU_GATHER_NO_RANGE
391
392#if defined(tlb_flush)
393#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
394#endif
395
396/*
397 * When an architecture does not have efficient means of range flushing TLBs
398 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
399 * range small. We equally don't have to worry about page granularity or other
400 * things.
401 *
402 * All we need to do is issue a full flush for any !0 range.
403 */
404static inline void tlb_flush(struct mmu_gather *tlb)
405{
406 if (tlb->end)
407 flush_tlb_mm(tlb->mm);
408}
409
410#else /* CONFIG_MMU_GATHER_NO_RANGE */
411
412#ifndef tlb_flush
413/*
414 * When an architecture does not provide its own tlb_flush() implementation
415 * but does have a reasonably efficient flush_tlb_range() implementation,
416 * use that.
417 */
418static inline void tlb_flush(struct mmu_gather *tlb)
419{
420 if (tlb->fullmm || tlb->need_flush_all) {
421 flush_tlb_mm(tlb->mm);
422 } else if (tlb->end) {
423 struct vm_area_struct vma = {
424 .vm_mm = tlb->mm,
425 .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
426 (tlb->vma_huge ? VM_HUGETLB : 0),
427 };
428
429 flush_tlb_range(&vma, tlb->start, tlb->end);
430 }
431}
432#endif
433
434#endif /* CONFIG_MMU_GATHER_NO_RANGE */
435
436static inline void
437tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
438{
439 /*
440 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
441 * mips-4k) flush only large pages.
442 *
443 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
444 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
445 * range.
446 *
447 * We rely on tlb_end_vma() to issue a flush, such that when we reset
448 * these values the batch is empty.
449 */
450 tlb->vma_huge = is_vm_hugetlb_page(vma);
451 tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
452 tlb->vma_pfn = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
453}
454
455static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
456{
457 /*
458 * Anything calling __tlb_adjust_range() also sets at least one of
459 * these bits.
460 */
461 if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
462 tlb->cleared_puds || tlb->cleared_p4ds))
463 return;
464
465 tlb_flush(tlb);
466 __tlb_reset_range(tlb);
467}
468
469static inline void tlb_remove_page_size(struct mmu_gather *tlb,
470 struct page *page, int page_size)
471{
472 if (__tlb_remove_page_size(tlb, page, false, page_size))
473 tlb_flush_mmu(tlb);
474}
475
476static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb,
477 struct page *page, bool delay_rmap)
478{
479 return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE);
480}
481
482/* tlb_remove_page
483 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
484 * required.
485 */
486static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
487{
488 return tlb_remove_page_size(tlb, page, PAGE_SIZE);
489}
490
491static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
492{
493 tlb_remove_table(tlb, pt);
494}
495
496/* Like tlb_remove_ptdesc, but for page-like page directories. */
497static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
498{
499 tlb_remove_page(tlb, ptdesc_page(pt));
500}
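
/*
 * Illustrative only (hypothetical arch code): a page-table-backed
 * architecture might free its PTE pages through the ptdesc helpers, e.g.:
 *
 *	#define __pte_free_tlb(tlb, pte, addr)				\
 *	do {								\
 *		pagetable_pte_dtor(page_ptdesc(pte));			\
 *		tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));	\
 *	} while (0)
 */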
501
502static inline void tlb_change_page_size(struct mmu_gather *tlb,
503 unsigned int page_size)
504{
505#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
506 if (tlb->page_size && tlb->page_size != page_size) {
507 if (!tlb->fullmm && !tlb->need_flush_all)
508 tlb_flush_mmu(tlb);
509 }
510
511 tlb->page_size = page_size;
512#endif
513}
514
515static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
516{
517 if (tlb->cleared_ptes)
518 return PAGE_SHIFT;
519 if (tlb->cleared_pmds)
520 return PMD_SHIFT;
521 if (tlb->cleared_puds)
522 return PUD_SHIFT;
523 if (tlb->cleared_p4ds)
524 return P4D_SHIFT;
525
526 return PAGE_SHIFT;
527}
528
529static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
530{
531 return 1UL << tlb_get_unmap_shift(tlb);
532}
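
/*
 * Hypothetical arch-provided tlb_flush() (illustrative, not part of this
 * file), showing how the shift helper above can pick the invalidation
 * stride:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			my_arch_flush_range(tlb->mm, tlb->start, tlb->end,
 *					    tlb_get_unmap_shift(tlb));
 *	}
 *
 * my_arch_flush_range() is a made-up stand-in for the ranged invalidate.
 */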
533
534/*
535 * In the case of tlb vma handling, we can optimise these away in the
536 * case where we're doing a full MM flush. When we're doing a munmap,
537 * the vmas are adjusted to only cover the region to be torn down.
538 */
539static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
540{
541 if (tlb->fullmm)
542 return;
543
544 tlb_update_vma_flags(tlb, vma);
545#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
546 flush_cache_range(vma, vma->vm_start, vma->vm_end);
547#endif
548}
549
550static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
551{
552 if (tlb->fullmm)
553 return;
554
555 /*
556 * VM_PFNMAP is more fragile because the core mm will not track the
557 * page mapcount -- there might not be page-frames for these PFNs after
558 * all. Force flush TLBs for such ranges to avoid munmap() vs
559 * unmap_mapping_range() races.
560 */
561 if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
562 /*
563 * Do a TLB flush and reset the range at VMA boundaries; this avoids
564 * the ranges growing with the unused space between consecutive VMAs.
565 */
566 tlb_flush_mmu_tlbonly(tlb);
567 }
568}
569
570/*
571 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
572 * and set corresponding cleared_*.
573 */
574static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
575 unsigned long address, unsigned long size)
576{
577 __tlb_adjust_range(tlb, address, size);
578 tlb->cleared_ptes = 1;
579}
580
581static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
582 unsigned long address, unsigned long size)
583{
584 __tlb_adjust_range(tlb, address, size);
585 tlb->cleared_pmds = 1;
586}
587
588static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
589 unsigned long address, unsigned long size)
590{
591 __tlb_adjust_range(tlb, address, size);
592 tlb->cleared_puds = 1;
593}
594
595static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
596 unsigned long address, unsigned long size)
597{
598 __tlb_adjust_range(tlb, address, size);
599 tlb->cleared_p4ds = 1;
600}
601
602#ifndef __tlb_remove_tlb_entry
603static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
604{
605}
606#endif
607
608/**
609 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
610 *
611 * Record the fact that ptes were really unmapped by updating the range,
612 * so we can later optimise away the tlb invalidate. This helps when
613 * userspace is unmapping already-unmapped pages, which happens quite a lot.
614 */
615#define tlb_remove_tlb_entry(tlb, ptep, address) \
616 do { \
617 tlb_flush_pte_range(tlb, address, PAGE_SIZE); \
618 __tlb_remove_tlb_entry(tlb, ptep, address); \
619 } while (0)
620
621/**
622 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
623 * later tlb invalidation.
624 *
625 * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
626 * consecutive ptes instead of only a single one.
627 */
628static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
629 pte_t *ptep, unsigned int nr, unsigned long address)
630{
631 tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
632 for (;;) {
633 __tlb_remove_tlb_entry(tlb, ptep, address);
634 if (--nr == 0)
635 break;
636 ptep++;
637 address += PAGE_SIZE;
638 }
639}
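
/*
 * Sketch (illustrative only): when zapping 'nr' consecutive ptes that all map
 * the same large folio, the batched helpers pair up like this:
 *
 *	... get-and-clear the nr ptes ...
 *	tlb_remove_tlb_entries(tlb, ptep, nr, addr);
 *	if (__tlb_remove_folio_pages(tlb, page, nr, false))
 *		tlb_flush_mmu(tlb);	// batch full, flush before queueing more
 */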
640
641#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
642 do { \
643 unsigned long _sz = huge_page_size(h); \
644 if (_sz >= P4D_SIZE) \
645 tlb_flush_p4d_range(tlb, address, _sz); \
646 else if (_sz >= PUD_SIZE) \
647 tlb_flush_pud_range(tlb, address, _sz); \
648 else if (_sz >= PMD_SIZE) \
649 tlb_flush_pmd_range(tlb, address, _sz); \
650 else \
651 tlb_flush_pte_range(tlb, address, _sz); \
652 __tlb_remove_tlb_entry(tlb, ptep, address); \
653 } while (0)
654
655/**
656 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
657 * This is a nop so far, because only x86 needs it.
658 */
659#ifndef __tlb_remove_pmd_tlb_entry
660#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
661#endif
662
663#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
664 do { \
665 tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE); \
666 __tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
667 } while (0)
668
669/**
670 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
671 * invalidation. This is a nop so far, because only x86 needs it.
672 */
673#ifndef __tlb_remove_pud_tlb_entry
674#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
675#endif
676
677#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
678 do { \
679 tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE); \
680 __tlb_remove_pud_tlb_entry(tlb, pudp, address); \
681 } while (0)
682
683/*
684 * For things like page table caches (i.e. caching addresses "inside" the
685 * page tables, like x86 does), for legacy reasons, flushing an
686 * individual page had better flush the page table caches behind it. This
687 * is definitely how x86 works, for example. And if you have an
688 * architected non-legacy page table cache (which I'm not aware of
689 * anybody actually doing), you're going to have some architecturally
690 * explicit flushing for that, likely *separate* from a regular TLB entry
691 * flush, and thus you'd need more than just some range expansion..
692 *
693 * So if we ever find an architecture
694 * that would want something that odd, I think it is up to that
695 * architecture to do its own odd thing, not cause pain for others
696 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
697 *
698 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
699 */
700
701#ifndef pte_free_tlb
702#define pte_free_tlb(tlb, ptep, address) \
703 do { \
704 tlb_flush_pmd_range(tlb, address, PAGE_SIZE); \
705 tlb->freed_tables = 1; \
706 __pte_free_tlb(tlb, ptep, address); \
707 } while (0)
708#endif
709
710#ifndef pmd_free_tlb
711#define pmd_free_tlb(tlb, pmdp, address) \
712 do { \
713 tlb_flush_pud_range(tlb, address, PAGE_SIZE); \
714 tlb->freed_tables = 1; \
715 __pmd_free_tlb(tlb, pmdp, address); \
716 } while (0)
717#endif
718
719#ifndef pud_free_tlb
720#define pud_free_tlb(tlb, pudp, address) \
721 do { \
722 tlb_flush_p4d_range(tlb, address, PAGE_SIZE); \
723 tlb->freed_tables = 1; \
724 __pud_free_tlb(tlb, pudp, address); \
725 } while (0)
726#endif
727
728#ifndef p4d_free_tlb
729#define p4d_free_tlb(tlb, pudp, address) \
730 do { \
731 __tlb_adjust_range(tlb, address, PAGE_SIZE); \
732 tlb->freed_tables = 1; \
733 __p4d_free_tlb(tlb, pudp, address); \
734 } while (0)
735#endif
736
737#ifndef pte_needs_flush
738static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
739{
740 return true;
741}
742#endif
743
744#ifndef huge_pmd_needs_flush
745static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
746{
747 return true;
748}
749#endif
750
751#endif /* CONFIG_MMU */
752
753#endif /* _ASM_GENERIC__TLB_H */