v3.1
 
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

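/* Flush every address queued in this CPU's batch: scrub the matching
 * TSB entries first, then shoot down the TLB entries (by cross-call
 * on SMP) and reset the batch.
 */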
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}

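/* Called for each PTE being invalidated.  Bit 0 of the queued address
 * records whether the old mapping was executable, so the flush
 * primitives know to demap the I-TLB as well.
 */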
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

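	/* The D-cache on pre-sun4v chips is virtually indexed, so a
	 * dirty file page whose user mapping has a different cache
	 * color (bit 13) than its kernel linear mapping must be
	 * flushed before the translation disappears.
	 */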
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (fullmm) {
		put_cpu_var(tlb_batch);
		return;
	}

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}
v4.17
// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

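/* As in v3.1 above, flush everything queued in this CPU's batch, but
 * with a fast path: a batch holding a single address uses the
 * one-page flush primitive instead of the vector flush.
 */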
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

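/* Generic mm code brackets batched page table updates with these
 * hooks; tb->active tells tlb_batch_add_one() whether deferring the
 * flush is currently safe.
 */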
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

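/* Queue one (mm, vaddr) pair.  A batch covers a single address space
 * and a single page size, so a change of mm or hugepage_shift drains
 * it first; outside a lazy-MMU section the flush happens immediately.
 */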
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec, unsigned int hugepage_shift)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr, hugepage_shift);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0) {
		tb->mm = mm;
		tb->hugepage_shift = hugepage_shift;
	}

	if (tb->hugepage_shift != hugepage_shift) {
		flush_tlb_pending();
		tb->hugepage_shift = hugepage_shift;
		nr = 0;
	}

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

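/* The D-cache alias check is unchanged from v3.1 apart from using
 * page_mapping_file(); the batching itself now lives in
 * tlb_batch_add_one() and is skipped when the whole mm is going away.
 */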
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping_file(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
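/* The old PMD mapped a page table of base pages: queue a flush for
 * every PTE under it that is still valid.
 */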
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID) {
			bool exec = pte_exec(*pte);

			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
		}
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

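/* Accounting for a PMD update: keep the THP and huge-zero-page
 * counters in sync when a mapping flips between huge and non-huge,
 * and queue TLB flushes for whatever the old PMD mapped.
 */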
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
			   pmd_t orig, pmd_t pmd)
{
	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		/*
		 * Note that this routine only sets pmds for THP pages.
		 * Hugetlb pages are handled elsewhere.  We need to check
		 * for huge zero page.  Huge zero pages are like hugetlb
		 * pages in that there is no RSS, but there is the need
		 * for TSB entries.  So, huge zero page counts go into
		 * hugetlb_pte_count.
		 */
		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
			if (is_huge_zero_page(pmd_page(pmd)))
				mm->context.hugetlb_pte_count++;
			else
				mm->context.thp_pte_count++;
		} else {
			if (is_huge_zero_page(pmd_page(orig)))
				mm->context.hugetlb_pte_count--;
			else
				mm->context.thp_pte_count--;
		}

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		addr &= HPAGE_MASK;
		if (pmd_trans_huge(orig)) {
			pte_t orig_pte = __pte(pmd_val(orig));
			bool exec = pte_exec(orig_pte);

			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
					  REAL_HPAGE_SHIFT);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig);
		}
	}
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;
	__set_pmd_acct(mm, addr, orig, pmd);
}

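/* Atomically swap in the new PMD with cmpxchg64() and account for the
 * transition exactly as set_pmd_at() would, returning the old value.
 */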
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old;

	do {
		old = *pmdp;
	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
	__set_pmd_acct(vma->vm_mm, address, old, pmd);

	return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	pmd_t old, entry;

	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
	old = pmdp_establish(vma, address, pmdp, entry);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	/*
	 * set_pmd_at() will not be called in a way to decrement
	 * thp_pte_count when splitting a THP, so do it now.
	 * Sanity check pmd before doing the actual decrement.
	 */
	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
	    !is_huge_zero_page(pmd_page(entry)))
		(vma->vm_mm)->context.thp_pte_count--;

	return old;
}

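/* Deposit/withdraw stash the preallocated page table for a THP so it
 * is available again when the huge page is split; the entries are
 * chained through a list_head overlaid on the pgtable itself.
 */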
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */