v3.15
 
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		start, end;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	unsigned int		max;
	struct page		**pages;
	struct page		*local[MMU_GATHER_BUNDLE];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There's three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif
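The block comment in this version ("There's three ways the TLB shootdown code is used") describes how the generic mm code is expected to drive this interface. The sketch below is an illustration only, not part of the header and not actual kernel code: example_unmap_range() is a hypothetical caller, and the page-table walk is elided (the NULL placeholders only keep the sketch compilable). Every tlb_* call matches an inline function defined above; the real callers are the unmap paths named in that comment, such as zap_page_range() and unmap_region() in mm/memory.c.

/*
 * Illustration only: hypothetical caller for case 1 above
 * (unmapping a range within a single vma, fullmm == 0).
 */
static void example_unmap_range(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	unsigned long addr;

	tlb_gather_mmu(&tlb, mm, start, end);	/* partial range, so fullmm == 0 */
	tlb_start_vma(&tlb, vma);		/* flush caches, record vma, reset range_start/range_end */

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		/* The real code walks the page tables here; the lookup is elided. */
		pte_t *ptep = NULL;		/* placeholder: pte mapping 'addr' */
		struct page *page = NULL;	/* placeholder: page that pte mapped */

		if (!ptep || !page)
			continue;

		tlb_remove_tlb_entry(&tlb, ptep, addr);	/* widen the range to be flushed */
		tlb_remove_page(&tlb, page);		/* batch the page; flushes when the batch fills */
	}

	tlb_end_vma(&tlb, vma);			/* flush_tlb_range() over the recorded range */
	tlb_finish_mmu(&tlb, start, end);	/* final flush and free of the batched pages */
}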
v5.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}

#include <asm-generic/tlb.h>

#ifndef CONFIG_HAVE_RCU_TABLE_FREE
#define tlb_remove_table(tlb, entry) tlb_remove_page(tlb, entry)
#endif

static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	pgtable_pte_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

	tlb_remove_table(tlb, pte);
}

static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	struct page *page = virt_to_page(pmdp);

	tlb_remove_table(tlb, page);
#endif
}

#endif /* CONFIG_MMU */
#endif
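By v5.4 the ARM-specific gather machinery (struct mmu_gather, tlb_add_flush(), the local page batch) has been replaced by the generic implementation pulled in from asm-generic/tlb.h; the architecture header now supplies only __tlb_remove_table(), which frees a batched page-table page, and the __pte_free_tlb()/__pmd_free_tlb() hooks. The classic-MMU case in __pte_free_tlb() still asks for the same two pages to be flushed, one in each 1MB half of the 2MB region the pte page covers; it is just expressed as a single range adjustment instead of two tlb_add_flush() calls. The arithmetic below is an illustration only; base and the v315_/v54_ locals are names invented for this note, not kernel identifiers.

/*
 * Illustration only: for a pte page whose region starts at
 * base = addr & PMD_MASK (classic MMU, !LPAE), both versions
 * cover the same two pages around the 1MB section boundary.
 */
unsigned long base = addr & PMD_MASK;			/* 2MB region covered by the pte page */

/* v3.15: two explicit tlb_add_flush() calls, one page in each 1MB half */
unsigned long v315_first  = base + SZ_1M - PAGE_SIZE;	/* last page of the first 1MB half */
unsigned long v315_second = base + SZ_1M;		/* first page of the second 1MB half */

/* v5.4: one __tlb_adjust_range() call starting one page below the 1MB boundary */
unsigned long v54_start = (base + SZ_1M) - PAGE_SIZE;	/* == v315_first */
unsigned long v54_size  = 2 * PAGE_SIZE;		/* ends just past v315_second */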