// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

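/*
 * Flush every virtual address currently queued in this CPU's TLB batch:
 * scrub the matching TSB entries, then shoot down the hardware TLB
 * (via cross call on SMP) if the mm still holds a valid hardware context.
 */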
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

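/*
 * Lazy MMU mode brackets a run of PTE updates so that the flushes they
 * generate are queued in the per-cpu batch and issued in one shot when
 * the run ends.  Roughly (simplified sketch of how the generic mm code
 * drives it):
 *
 *	arch_enter_lazy_mmu_mode();
 *	... set_pte_at()/pte_clear() ...   -> tlb_batch_add() queues vaddrs
 *	arch_leave_lazy_mmu_mode();        -> flush_tlb_pending()
 */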
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

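/*
 * Queue one (mm, vaddr) pair for a deferred TLB flush.  The low bit of
 * the stashed address records whether the mapping was executable so the
 * I-TLB can be flushed as well.  If batching is inactive the page is
 * flushed immediately; a change of mm or of page size drains the pending
 * batch first.
 */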
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

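/*
 * Arch hook called when a PTE is torn down.  On pre-hypervisor chips with
 * virtually indexed D-caches, a dirty file-backed page whose kernel and
 * user mappings differ in the 8K alias bit (bit 13) must have its D-cache
 * lines flushed before the translation goes away; after that the address
 * is handed to the TLB batch unless this is a full-mm teardown.
 */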
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;
		struct folio *folio;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		folio = page_folio(page);
		mapping = folio_flush_mapping(folio);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_folio_all(mm, folio);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
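/*
 * A non-leaf pmd being torn down still maps a huge-page-sized range of
 * base pages: walk the underlying PTE table and queue each valid entry
 * individually.
 */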
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	if (!pte)
		return;
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

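/*
 * Bookkeeping shared by set_pmd_at() and pmdp_establish(): keep the THP
 * and huge-zero-page counters in sync, and flush the old translation
 * (both 4MB halves of an 8MB THP, or the individual base pages) whenever
 * a previously valid pmd is replaced.
 */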
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

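/* Install a new pmd value and run the accounting/flush logic above. */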
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

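/*
 * Atomically swap in the new pmd value (cmpxchg loop, so a concurrent
 * update of *pmdp is never lost), run the usual accounting, and return
 * the old pmd to the caller.
 */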
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

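/*
 * Deposit/withdraw of the preallocated PTE table backing a THP pmd.
 * The spare page-table pages are chained through their own first two
 * words, treated as a list_head, under mm->page_table_lock.
 */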
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */