v5.9
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

#endif /* MMU_GATHER_NO_GATHER */
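
/*
 * Illustrative sketch, not part of this file: the boolean returned by
 * __tlb_remove_page_size() above is what tells callers "the gather buffers
 * are full, flush now". Roughly how the consuming wrapper in
 * include/asm-generic/tlb.h looks around this version (paraphrased, kept
 * under #if 0 here because the real definition lives in that header):
 */
#if 0	/* sketch only */
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	/* A "true" return means no further batch could be allocated. */
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}
#endif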

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
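
/*
 * Illustrative sketch, not part of this file: the comment above relies on
 * lockless walkers bracketing their walk with IRQ disabling. A hypothetical
 * walker honouring that contract might look like the function below; the
 * function name and the commented-out walk_pgd_lockless() helper are made up
 * for illustration, only the local_irq_save()/restore() bracketing matters.
 */
static unsigned long hypothetical_lockless_walk(struct mm_struct *mm,
						unsigned long addr,
						unsigned long end)
{
	unsigned long flags, nr = 0;

	/*
	 * While IRQs are off, an IPI-based TLB flush cannot complete and an
	 * RCU-sched grace period cannot elapse, so page-table pages observed
	 * during the walk cannot be freed underneath us.
	 */
	local_irq_save(flags);
	/* nr = walk_pgd_lockless(mm, addr, end); */
	local_irq_restore(flags);

	return nr;
}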

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_sync_one(void) { }

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
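
/*
 * Illustrative sketch, not part of this file: architectures that select
 * CONFIG_MMU_GATHER_TABLE_FREE feed page-table pages into tlb_remove_table()
 * from their pXd_free_tlb() hooks and supply __tlb_remove_table() to do the
 * actual freeing. A hypothetical hook pair is sketched below; the exact form
 * and naming vary per architecture, so this is kept under #if 0.
 */
#if 0	/* sketch only, not compiled as part of this file */
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_pte_page_dtor(pte);
	tlb_remove_table(tlb, pte);	/* queue for deferred freeing */
}

static inline void __tlb_remove_table(void *table)
{
	free_page_and_swap_cache((struct page *)table);	/* final free */
}
#endif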

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries.  So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}
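
/*
 * Illustrative sketch, not part of this file: a condensed view of how a
 * caller typically drives the v5.9 API above when tearing down a mapping,
 * loosely modelled on unmap_region() in mm/mmap.c. unmap_vmas() and
 * free_pgtables() are real kernel functions; the wrapping function and its
 * arguments (floor/ceiling collapsed to start/end) are simplified here.
 */
static void example_teardown(struct mm_struct *mm, struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* start batching for this range */
	unmap_vmas(&tlb, vma, start, end);	/* gathers pages via __tlb_remove_page_size() */
	free_pgtables(&tlb, vma, start, end);	/* may queue tables via tlb_remove_table() */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free batched pages and tables */
}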
v5.4
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}

#endif /* HAVE_MMU_GATHER_NO_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
#ifndef CONFIG_HAVE_RCU_TABLE_NO_INVALIDATE
	/*
	 * Invalidate page-table caches used by hardware walkers. Then we still
	 * need to RCU-sched wait while freeing the pages because software
	 * walkers can still be in-flight.
	 */
	tlb_flush_mmu_tlbonly(tlb);
#endif
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm     = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries.  So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}