v5.4 — arch/mips/include/asm/pgtable-32.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

/*
 * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
 *
 *  We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
 * our 2-level table layout would normally have a PGD entry cover a contiguous
 * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
 * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
 * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
 * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
 *  To correct for this, when huge pages are enabled, we halve the number of
 * pointers a PTE page holds, making its last half go to waste. Correspondingly,
 * we double the number of PGD pages. Overall, page table memory overhead
 * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
 *
 * NOTE: We don't yet support huge pages if extended-addressing is enabled
 *       (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
 */

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *	starting at the top and working down. This is for populating the
 *	TLB before trap_init() puts the TLB miss handler in place. It
 *	should be used only for entries matching the actual page tables,
 *	to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);

/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout, folded).  Some day, when we have
 * proper page coloring support, we can have a 1% quicker TLB refill handling
 * mechanism; for now it is a bit slower, but it works even with the cache
 * aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
#else
# define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#endif

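/*
 * Editor's worked example (not part of the original header): with a 4 KiB
 * base page (PAGE_SHIFT == 12), PTE_ORDER == 0 and a 4-byte pte_t
 * (PTE_T_LOG2 == 2):
 *
 *	without huge pages: PGDIR_SHIFT == 2*12 + 0 - 2     == 22  -> 4 MiB per PGD entry
 *	with huge pages:    PGDIR_SHIFT == 2*12 + 0 - 2 - 1 == 21  -> 2 MiB per PGD entry
 *
 * The 2 MiB case is what lets a PGD-level huge mapping be expressed as an
 * EntryLo0/1 pair of two standard 1 MiB pages, as explained in the huge page
 * comment at the top of this file.
 */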
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
# define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif

#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	aieeee_attempt_to_allocate_pmd
#define PTE_ORDER	0

#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
#else
# define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#endif

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

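/*
 * Editor's worked example (not part of the original header), continuing the
 * 4 KiB-page case with 4-byte pgd_t/pte_t (PGD_T_LOG2 == PTE_T_LOG2 == 2):
 *
 *	without huge pages: __PGD_ORDER  == 32 - 36 + 2 + 2     == 0  -> one 4 KiB PGD page
 *	                    PTRS_PER_PTE == 4096 / 4            == 1024
 *	                    PTRS_PER_PGD == (2 GiB / 4 MiB) * 2 == 1024
 *	with huge pages:    __PGD_ORDER  == 32 - 36 + 2 + 2 + 1 == 1  -> two 4 KiB PGD pages
 *	                    PTRS_PER_PTE == 4096 / 4 / 2        == 512
 *	                    PTRS_PER_PGD == (2 GiB / 2 MiB) * 2 == 2048
 *
 * This is the "halve the PTE pointers, double the PGD pages" tradeoff from
 * the huge page comment near the top of the file.
 */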
#define VMALLOC_START	  MAP_BASE

#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

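/*
 * Editor's note (not part of the original header): the resulting layout of
 * this region, from low to high addresses, is roughly
 *
 *	VMALLOC_START (== MAP_BASE) ... VMALLOC_END
 *	<two-page guard gap>
 *	PKMAP_BASE ... PKMAP_END	(CONFIG_HIGHMEM only)
 *	FIXADDR_START ...		(fixmap entries)
 *
 * i.e. vmalloc space ends two pages below either the pkmap area or the
 * fixmap, depending on whether highmem is configured.
 */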
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PTRS_PER_PTE];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

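/*
 * Editor's illustration (not part of the original header): because an empty
 * entry holds the address of invalid_pte_table rather than zero, a pmd that
 * has just been pmd_clear()ed satisfies pmd_none() and fails pmd_present();
 * only after a real PTE page is installed does pmd_present() become true.
 */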
#if defined(CONFIG_XPA)

#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
				(pgprot_val(prot) & ~_PFNX_MASK);
	pte.pte_high = (pfn << _PFN_SHIFT) |
				(pgprot_val(prot) & ~_PFN_MASK);
	return pte;
}

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);

	return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

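/*
 * Editor's note (not part of the original header): in each variant above,
 * pte_pfn() undoes pfn_pte() for the PFN bits.  For instance, in the
 * CONFIG_PHYS_ADDR_T_64BIT && CONFIG_CPU_MIPS32 (non-XPA) case the PFN lives
 * in pte_high above bit 6, with the six low protection bits duplicated
 * beside it, so
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == ((pfn << 6) | (prot & 0x3f)) >> 6 == pfn
 *
 * for any pfn that fits in the field.
 */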
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

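/*
 * Editor's worked example (not part of the original header): a sketch of how
 * the index macros above split a virtual address, assuming 4 KiB pages and
 * huge pages disabled (PGDIR_SHIFT == 22, PTRS_PER_PGD == PTRS_PER_PTE ==
 * 1024).  For the address 0x12345678:
 *
 *	pgd_index(0x12345678)    == (0x12345678 >> 22) & 1023 == 0x48
 *	__pte_offset(0x12345678) == (0x12345678 >> 12) & 1023 == 0x345
 *
 * so a lookup reads mm->pgd[0x48] to find the PTE page and then takes entry
 * 0x345 within it; pte_offset_kernel() performs that final addition once the
 * folded pud/pmd levels have been traversed.
 */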
#if defined(CONFIG_CPU_R3K_TLB)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 4) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 9)
#define __swp_entry(type,offset)	((swp_entry_t)  { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 7)
#define __swp_entry(type, offset)	((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 13)
#define __swp_entry(type,offset)	((swp_entry_t)	{ ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

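/*
 * Editor's worked example (not part of the original header), for the plain
 * 32-bit-physical, non-R3K layout directly above: the swap type sits in bits
 * 8..12 and the offset starts at bit 13, which keeps bits 0, 4, 6 and 7 free
 * so that _PAGE_PRESENT, _PAGE_MODIFIED, _PAGE_GLOBAL and _PAGE_VALID all
 * remain clear in a swap PTE.  For instance:
 *
 *	__swp_entry(3, 0x42).val == (3 << 8) | (0x42 << 13) == 0x84300
 *	__swp_type(...)          == (0x84300 >> 8) & 0x1f   == 3
 *	__swp_offset(...)        == 0x84300 >> 13           == 0x42
 */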
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3K_TLB) */

#endif /* _ASM_PGTABLE_32_H */
v4.17 — arch/mips/include/asm/pgtable-32.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *	starting at the top and working down. This is for populating the
 *	TLB before trap_init() puts the TLB miss handler in place. It
 *	should be used only for entries matching the actual page tables,
 *	to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask);

/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout, folded).  Some day, when we have
 * proper page coloring support, we can have a 1% quicker TLB refill handling
 * mechanism; for now it is a bit slower, but it works even with the cache
 * aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER	(32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER	(__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER	aieeee_attempt_to_allocate_pud
#define PMD_ORDER	1
#define PTE_ORDER	0

#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START	  MAP_BASE

#define PKMAP_END	((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE	(PKMAP_END - PAGE_SIZE * LAST_PKMAP)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_XPA)

#define pte_pfn(x)		(((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
				(pgprot_val(prot) & ~_PFNX_MASK);
	pte.pte_high = (pfn << _PFN_SHIFT) |
				(pgprot_val(prot) & ~_PFN_MASK);
	return pte;
}

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x)		((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	pte_t pte;

	pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
	pte.pte_low = pgprot_val(prot);

	return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)			(((x).val >> 10) & 0x1f)
#define __swp_offset(x)			((x).val >> 15)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 4) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 9)
#define __swp_entry(type,offset)	((swp_entry_t)  { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)			(((x).val >> 2) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 7)
#define __swp_entry(type, offset)	((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x)			(((x).val >> 8) & 0x1f)
#define __swp_offset(x)			 ((x).val >> 13)
#define __swp_entry(type,offset)	((swp_entry_t)	{ ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */

#endif /* _ASM_PGTABLE_32_H */