v5.9
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
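/*
 * Illustrative sketch (not part of the original file): the template above,
 * spelled out as the C a caller would roughly execute.  The helper name
 * unmap_range_of_vma() is made up for illustration; real callers live in
 * mm/memory.c and pass additional arguments.
 *
 *	struct mmu_gather tlb;
 *	struct vm_area_struct *vma;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for (vma = find_vma(mm, start); vma && vma->vm_start < end; vma = vma->vm_next) {
 *		tlb_start_vma(&tlb, vma);
 *		unmap_range_of_vma(&tlb, vma, start, end);	// invokes the
 *			// tlb_remove_tlb_entry()/tlb_remove_page() hooks per PTE
 *		tlb_end_vma(&tlb, vma);
 *	}
 *	tlb_finish_mmu(&tlb, start, end);
 */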
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>

#include <asm-generic/tlb.h>

#endif /* _ASM_IA64_TLB_H */
v4.10.11
 
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define	IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
};	/* record for a translation register (TR) entry */

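/*
 * Helpers (defined in arch/ia64/mm/tlb.c) for pinning mappings in the
 * translation registers: ia64_itr_entry() inserts a mapping into an
 * instruction and/or data TR, as selected by target_mask, and returns the
 * slot used on success, while ia64_ptr_entry() purges the entry in that
 * slot again.
 */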
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/* region register macros */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
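/*
 * Worked example (illustrative, not from the original): for a region
 * register value rr = 0x0000000000123459UL,
 *
 *	RR_TO_VE(rr)  = 0x1     -> VHPT walker enabled for the region
 *	RR_TO_PS(rr)  = 0x16    -> preferred page size is 1 << 22 = 4MB
 *	RR_TO_RID(rr) = 0x1234  -> region ID
 */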

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely(end - start >= 1024*1024*1024*1024UL
			    || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a terabyte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and release the freed pages
 * that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

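/*
 * Try to upgrade from the on-stack IA64_GATHER_BUNDLE array to a full page
 * of page pointers.  The allocation may fail under memory pressure; in that
 * case tlb->pages keeps pointing at tlb->local and batches simply stay small.
 */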
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

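/*
 * Start a shootdown for address space MM: bind the gather structure to MM
 * and reset the batch.  A (start, end) pair of (0, -1) denotes a full-mm
 * flush (exit/execve); that is what the "!(start | (end+1))" test below
 * detects.
 */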
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

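/* Queue PAGE; if that fills the batch, flush and free it immediately. */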
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */