/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 *  Derived from arch/ppc64/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
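
/*
 * For reference, a rough sketch of the per-CPU batch structure this file
 * fills in. The authoritative definition lives in <asm/tlbflush.h>; the
 * exact field order and the value of PPC64_TLB_BATCH_NR vary between
 * kernel versions, so treat this as an approximation:
 *
 *	struct ppc64_tlb_batch {
 *		int			active;
 *		unsigned long		index;
 *		struct mm_struct	*mm;
 *		real_pte_t		pte[PPC64_TLB_BATCH_NR];
 *		unsigned long		vpn[PPC64_TLB_BATCH_NR];
 *		unsigned int		psize;
 *		int			ssize;
 *	};
 *
 * One batch exists per CPU: hpte_need_flush() appends (vpn, pte) pairs to
 * it and __flush_tlb_pending() drains it.
 */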

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size.  If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}


	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vpn, rpte, psize, ssize, 0);
		put_cpu_var(ppc64_tlb_batch);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vpn[i] = vpn;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
	put_cpu_var(ppc64_tlb_batch);
}
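
/*
 * Illustrative sketch, not part of this file: the usual path into
 * hpte_need_flush() is the pte_update() helper in
 * arch/powerpc/include/asm/pgtable-ppc64.h, which clears bits in a Linux
 * PTE and, if the old value had _PAGE_HASHPTE set, hands the old value to
 * us so the stale hash entry can be invalidated. Simplified (the real
 * helper updates the PTE with an ldarx/stdcx. loop and its argument list
 * differs between kernel versions):
 *
 *	static inline unsigned long pte_update(struct mm_struct *mm,
 *					       unsigned long addr,
 *					       pte_t *ptep, unsigned long clr,
 *					       int huge)
 *	{
 *		unsigned long old = pte_val(*ptep);
 *
 *		*ptep = __pte(old & ~clr);
 *		if (old & _PAGE_HASHPTE)
 *			hpte_need_flush(mm, addr, ptep, old, huge);
 *		return old;
 *	}
 */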

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
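
/*
 * Illustrative sketch, not part of this file: batch->active, tested in
 * hpte_need_flush() above, is driven by the lazy MMU hooks. On ppc64 hash
 * they live in <asm/tlbflush.h> and look roughly like this (per-CPU
 * accessor names differ between kernel versions):
 *
 *	static inline void arch_enter_lazy_mmu_mode(void)
 *	{
 *		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *		batch->active = 1;
 *	}
 *
 *	static inline void arch_leave_lazy_mmu_mode(void)
 *	{
 *		struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
 *
 *		if (batch->index)
 *			__flush_tlb_pending(batch);
 *		batch->active = 0;
 *	}
 *
 * A batch therefore only accumulates between enter/leave, and leaving
 * always drains whatever is still pending.
 */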

void tlb_flush(struct mmu_gather *tlb)
{
	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
	 * access a freed page because it has a stale TLB
	 */
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

	put_cpu_var(ppc64_tlb_batch);
}
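
/*
 * Illustrative sketch, not part of this file: tlb_flush() above is the
 * architecture hook of the generic mmu_gather machinery. On powerpc,
 * <asm/tlb.h> declares it before pulling in the generic code, roughly:
 *
 *	#define tlb_start_vma(tlb, vma)	do { } while (0)
 *	#define tlb_end_vma(tlb, vma)	do { } while (0)
 *
 *	extern void tlb_flush(struct mmu_gather *tlb);
 *
 *	#include <asm-generic/tlb.h>
 *
 * The generic tlb_flush_mmu()/tlb_finish_mmu() paths then call tlb_flush()
 * before the gathered pages are actually freed.
 */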

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm		: mm_struct of the target address space (generally init_mm)
 * @start	: starting address
 * @end         : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it is implemented for small size rather
 * than speed.
 */
void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)ptep)))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep);
		else
			hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
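
/*
 * Illustrative sketch, hypothetical caller not taken from the kernel: the
 * IO hotplug usage described above boils down to something like
 *
 *	__flush_hash_table_range(&init_mm, res->start, res->end + 1);
 *
 * for the resource window ("res" here is a hypothetical struct resource of
 * the bridge being removed): any HPTEs covering that ioremap'd range are
 * thrown away while the kernel page tables themselves are left alone.
 */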

void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & _PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}