v4.10.11
 
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
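
To make the batching-and-fallback scheme above concrete, here is an illustrative sketch of the pattern; the real implementation is tlb_remove_table() in mm/memory.c, and this simplified copy (named example_tlb_remove_table) is not the authoritative code:

/*
 * Illustrative sketch only: allocate a batch page for table pointers,
 * and under memory pressure fall back to freeing one table at a time.
 */
static void example_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		/* Batch storage is allocated here, deep inside the MM code... */
		*batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			/* ...so on failure, free this single table now
			 * (after waiting out in-flight walkers). */
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}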

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
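
For a sense of scale, a worked example of these limits, assuming 4 KiB pages and 64-bit pointers (so sizeof(struct mmu_gather_batch) == 16):

/*
 * Worked example (assumptions as stated above):
 *
 *   MAX_GATHER_BATCH       = (4096 - 16) / 8 = 510 pages per batch
 *   MAX_GATHER_BATCH_COUNT = 10000 / 510    = 19 batches
 *
 * i.e. roughly 19 * 510 ~= 9700 pages can be queued before mmu_gather
 * stops allocating new batches, keeping a single flush under the
 * ~10K-page soft-lockup budget mentioned in the comment.
 */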

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
	int page_size;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}
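
A worked example of how the range grows, with illustrative addresses:

/*
 * Illustrative only: after __tlb_reset_range() (below), start == TASK_SIZE
 * and end == 0, so the first adjustment collapses the range onto one page:
 *
 *   __tlb_adjust_range(tlb, 0x401000, PAGE_SIZE);
 *     tlb->start = min(TASK_SIZE, 0x401000) = 0x401000
 *     tlb->end   = max(0,         0x402000) = 0x402000
 *
 * Later calls only ever widen [start, end).
 */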

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

#ifndef tlb_remove_check_page_size_change
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
	/*
	 * We don't care about page size change, just update
	 * mmu_gather page size here so that debug checks
	 * don't throw a false warning.
	 */
#ifdef CONFIG_DEBUG_VM
	tlb->page_size = page_size;
#endif
}
#endif

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
	do {							     \
		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
	} while (0)
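
The primitives above are driven by the unmap paths in mm/memory.c; the following condensed, hypothetical caller (example_zap_range, loosely modeled on the zap_pte_range() pattern, with locking and error handling omitted) shows the intended order of operations:

/* Hypothetical caller, condensed from the zap_pte_range() pattern. */
static void example_zap_range(struct mm_struct *mm,
			      unsigned long start, unsigned long end,
			      pte_t *ptep)
{
	struct mmu_gather tlb;
	unsigned long addr;

	tlb_gather_mmu(&tlb, mm, start, end);
	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);	/* 1) unhook page */
		struct page *page = pte_page(pte);

		tlb_remove_tlb_entry(&tlb, ptep, addr);	/* 2) record for TLB invalidate */
		if (__tlb_remove_page(&tlb, page))	/* 3) queue the free */
			tlb_flush_mmu(&tlb);		/* batch full: flush + free now */
	}
	tlb_finish_mmu(&tlb, start, end);	/* final invalidate + free */
}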

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */

v5.14.15
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed page freeing anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */

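A sketch of the lifecycle described above using the v5.14 entry points; example_unmap_vma is a hypothetical caller, and locking, cache-flushing details and error handling are omitted:

/* Hypothetical caller showing the unhook -> invalidate -> free ordering. */
static void example_unmap_vma(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end,
			      pte_t *ptep)
{
	struct mmu_gather tlb;
	unsigned long addr;

	tlb_gather_mmu(&tlb, vma->vm_mm);	/* start a gather (no range here) */
	tlb_start_vma(&tlb, vma);		/* record VMA flags, flush caches */

	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
		pte_t pte = ptep_get_and_clear(vma->vm_mm, addr, ptep); /* 1) unhook */

		tlb_remove_tlb_entry(&tlb, ptep, addr);	/* 2) record range to invalidate */
		tlb_remove_page(&tlb, pte_page(pte));	/* 3) queue free; flushes if full */
	}

	tlb_end_vma(&tlb, vma);		/* invalidate at the VMA boundary */
	tlb_finish_mmu(&tlb);		/* final invalidate + free queued pages */
}
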
#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce the risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
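
As an illustration of what tlb_get_unmap_shift() enables, an architecture with size-specific invalidate instructions might implement tlb_flush() roughly like this; example_arch_tlb_flush and example_invalidate_one are hypothetical names, not any real architecture's code:

/* Sketch: stride the range invalidate by the smallest unmapped entry size. */
static inline void example_arch_tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	/* One invalidate per entry of the smallest size seen in the range. */
	for (addr = tlb->start; addr < tlb->end; addr += stride)
		example_invalidate_one(tlb->mm, addr);	/* hypothetical insn wrapper */
}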

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif
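
To see why this boundary flush matters, consider two small VMAs at opposite ends of the address space (addresses illustrative):

/*
 * Illustrative: unmapping VMA A [0x00400000, 0x00401000) and then
 * VMA B [0x7ffffffff000, 0x800000000000) without flushing in between
 * would leave tlb->start = 0x00400000 and tlb->end = 0x800000000000,
 * turning the final invalidate into an almost-full address-space
 * flush. Flushing in tlb_end_vma() keeps each range tight.
 */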

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz == PMD_SIZE)				\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else if (_sz == PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)
602
603/*
604 * For things like page tables caches (ie caching addresses "inside" the
605 * page tables, like x86 does), for legacy reasons, flushing an
606 * individual page had better flush the page table caches behind it. This
607 * is definitely how x86 works, for example. And if you have an
608 * architected non-legacy page table cache (which I'm not aware of
609 * anybody actually doing), you're going to have some architecturally
610 * explicit flushing for that, likely *separate* from a regular TLB entry
611 * flush, and thus you'd need more than just some range expansion..
612 *
613 * So if we ever find an architecture
614 * that would want something that odd, I think it is up to that
615 * architecture to do its own odd thing, not cause pain for others
616 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
617 *
618 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
619 */
620
621#ifndef pte_free_tlb
622#define pte_free_tlb(tlb, ptep, address)			\
623	do {							\
624		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
625		tlb->freed_tables = 1;				\
626		__pte_free_tlb(tlb, ptep, address);		\
627	} while (0)
628#endif
629
630#ifndef pmd_free_tlb
631#define pmd_free_tlb(tlb, pmdp, address)			\
632	do {							\
633		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
634		tlb->freed_tables = 1;				\
635		__pmd_free_tlb(tlb, pmdp, address);		\
636	} while (0)
637#endif
638
639#ifndef pud_free_tlb
640#define pud_free_tlb(tlb, pudp, address)			\
641	do {							\
642		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
643		tlb->freed_tables = 1;				\
644		__pud_free_tlb(tlb, pudp, address);		\
645	} while (0)
646#endif
647
648#ifndef p4d_free_tlb
649#define p4d_free_tlb(tlb, pudp, address)			\
650	do {							\
651		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
652		tlb->freed_tables = 1;				\
653		__p4d_free_tlb(tlb, pudp, address);		\
654	} while (0)
655#endif
656
657#endif /* CONFIG_MMU */
658
659#endif /* _ASM_GENERIC__TLB_H */