/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

/*
 * - add_wired_entry() adds a fixed TLB entry and moves the wired register.
 */
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                            unsigned long entryhi, unsigned long pagemask);

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *   starting at the top and working down. This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place. It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask);


/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout with the middle level folded away).
 * Some day, when we have proper page coloring support, we can have a 1%
 * quicker TLB refill handling mechanism; for now it is a bit slower, but it
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
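
/*
 * Worked example (illustrative, assuming 4 KiB pages and 32-bit PTEs, i.e.
 * PAGE_SHIFT = 12, PTE_ORDER = 0, PTE_T_LOG2 = 2):
 *
 *   PGDIR_SHIFT = 2 * 12 + 0 - 2 = 22, so PGDIR_SIZE is 4 MiB: each PGD
 *   entry maps one page of PTEs (4096 / 4 = 1024 entries x 4 KiB = 4 MiB).
 *
 * With 64-bit PTEs (PTE_T_LOG2 = 3) the same formula gives PGDIR_SHIFT = 21,
 * i.e. 2 MiB per PGD entry.
 */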

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER     (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER       (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER       aieeee_attempt_to_allocate_pud
#define PMD_ORDER       1
#define PTE_ORDER       0

#define PTRS_PER_PGD    (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD       (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0
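
/*
 * Illustrative sizing, again assuming 4 KiB pages with 4-byte PGD and PTE
 * entries (PGD_T_LOG2 = PTE_T_LOG2 = 2):
 *
 *   __PGD_ORDER       = 32 - 36 + 2 + 2 = 0, so the PGD is a single page;
 *   PTRS_PER_PTE      = 4096 / 4 = 1024;
 *   USER_PTRS_PER_PGD = 0x80000000 / 0x400000 = 512 (user space is the
 *                       low 2 GiB);
 *   PTRS_PER_PGD      = 1024, covering the full 4 GiB at 4 MiB per entry.
 */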

#define VMALLOC_START   MAP_BASE

#define PKMAP_BASE      (0xfe000000UL)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
#endif
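
/*
 * Layout note (read off the macros above): the vmalloc region runs from
 * MAP_BASE up to VMALLOC_END, which is kept two pages below the pkmap window
 * (with HIGHMEM) or below the fixmap area, leaving a small guard gap. For
 * example, in the HIGHMEM case with PKMAP_BASE = 0xfe000000 and 4 KiB pages,
 * VMALLOC_END is 0xfdffe000.
 */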

#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)    (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

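/*
 * (Why a page full of invalid PTEs rather than a NULL pointer: the TLB
 * refill path can then walk the page tables unconditionally; a lookup
 * through an empty directory entry simply hits an invalid PTE and faults
 * the normal way, so no emptiness check is needed on the fast path.)
 */
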
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define pte_page(x)     pfn_to_page(pte_pfn(x))
#define pte_pfn(x)      ((unsigned long)((x).pte_high >> 6))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;
        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
        pte.pte_low = pgprot_val(prot);
        return pte;
}

#else

#define pte_page(x)     pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)      ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)      ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */

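/*
 * Layout sketch for the 64-bit physical address case above, derived from the
 * code rather than a hardware manual: pte_high keeps the PFN in bits 6..31
 * and the six low hardware bits in bits 0..5, while pte_low carries the full
 * pgprot including the software-only bits. With 4 KiB pages a 36-bit
 * physical address needs a 24-bit PFN, which fits in the 26 bits available.
 */
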
#define __pgd_offset(address)   pgd_index(address)
#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

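/*
 * Worked example of a two-level lookup (assuming 4 KiB pages and 32-bit
 * PTEs, so PGDIR_SHIFT = 22 and PTRS_PER_PTE = 1024): for the virtual
 * address 0x12345678,
 *
 *   pgd_index(addr)    = 0x12345678 >> 22            = 0x048
 *   __pte_offset(addr) = (0x12345678 >> 12) & 0x3ff  = 0x345
 *
 * so the PTE lives at slot 0x345 of the page table pointed to by PGD
 * entry 0x48.
 */
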
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)           (((x).val >> 10) & 0x1f)
#define __swp_offset(x)         ((x).val >> 15)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })

/*
 * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS       28

#define pte_to_pgoff(_pte)      ((((_pte).pte >> 1 ) & 0x07) | \
                                 (((_pte).pte >> 2 ) & 0x38) | \
                                 (((_pte).pte >> 10) <<  6 ))

#define pgoff_to_pte(off)       ((pte_t) { (((off) & 0x07) << 1 ) | \
                                           (((off) & 0x38) << 2 ) | \
                                           (((off) >>  6 ) << 10) | \
                                           _PAGE_FILE })

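/*
 * Packing sketch for the file-PTE macros above, read off the shifts and
 * masks: page-offset bits 0..2 are stored in PTE bits 1..3, offset bits
 * 3..5 in PTE bits 5..7, and offset bits 6..27 in PTE bits 10..31, so the
 * reserved PTE bits 0, 4, 8 and 9 are never touched.
 */
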
#else

/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x)           (((x).val >> 2) & 0x1f)
#define __swp_offset(x)         ((x).val >> 7)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x)           (((x).val >> 8) & 0x1f)
#define __swp_offset(x)         ((x).val >> 13)
#define __swp_entry(type, offset) \
        ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
 */
#define PTE_FILE_MAX_BITS       30

#define pte_to_pgoff(_pte)      ((_pte).pte_high >> 2)
#define pgoff_to_pte(off)       ((pte_t) { _PAGE_FILE, (off) << 2 })

#else
/*
 * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS       28

#define pte_to_pgoff(_pte)      ((((_pte).pte >> 1) & 0x7) | \
                                 (((_pte).pte >> 2) & 0x8) | \
                                 (((_pte).pte >> 8) <<  4))

#define pgoff_to_pte(off)       ((pte_t) { (((off) & 0x7) << 1) | \
                                           (((off) & 0x8) << 2) | \
                                           (((off) >> 4) << 8) | \
                                           _PAGE_FILE })
#endif

#endif

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)   ((pte_t) { 0, (x).val })
#else
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)   ((pte_t) { (x).val })
#endif

#endif /* _ASM_PGTABLE_32_H */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *   starting at the top and working down. This is for populating the
 *   TLB before trap_init() puts the TLB miss handler in place. It
 *   should be used only for entries matching the actual page tables,
 *   to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask);

/*
 * Basically we have the same two-level page tables as the i386 (the logical
 * three-level Linux page table layout with the middle level folded away).
 * Some day, when we have proper page coloring support, we can have a 1%
 * quicker TLB refill handling mechanism; for now it is a bit slower, but it
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT     (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER     (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER       (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER       aieeee_attempt_to_allocate_pud
#define PMD_ORDER       1
#define PTE_ORDER       0

#define PTRS_PER_PGD    (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD       (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0UL

#define VMALLOC_START   MAP_BASE

#define PKMAP_END       ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE      (PKMAP_END - PAGE_SIZE * LAST_PKMAP)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
#endif
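
/*
 * How the pkmap window is placed (read off the macros above): PKMAP_END is
 * FIXADDR_START rounded down to a (LAST_PKMAP << PAGE_SHIFT) boundary, and
 * PKMAP_BASE sits LAST_PKMAP pages below that. For example, assuming
 * LAST_PKMAP = 1024 and 4 KiB pages, the window is the highest 4 MiB-aligned
 * 4 MiB block that ends at or below FIXADDR_START.
 */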

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)    (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_XPA)

#define pte_pfn(x)      (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
                                (pgprot_val(prot) & ~_PFNX_MASK);
        pte.pte_high = (pfn << _PFN_SHIFT) |
                                (pgprot_val(prot) & ~_PFN_MASK);
        return pte;
}

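/*
 * Rough picture of the XPA encoding, as implied by the macros above: the
 * extended PFN is too wide to share one 32-bit word with the protection
 * bits, so pfn_pte() stores the PFN shifted by _PFN_SHIFT in pte_high and
 * the bits that do not fit there (the PFNX field covered by _PFNX_MASK) at
 * the bottom of pte_low; the pgprot is masked so it never overlaps either
 * PFN field, and pte_pfn() reassembles the two pieces.
 */
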
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x)      ((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
        pte.pte_low = pgprot_val(prot);

        return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)      ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)      __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)      ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)      __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define pte_page(x)     pfn_to_page(pte_pfn(x))

#define __pgd_offset(address)   pgd_index(address)
#define __pud_offset(address)   (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)   (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)   pgd_offset(&init_mm, address)

#define pgd_index(address)      (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)    ((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x)                   (((x).val >> 10) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 15)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)                   (((x).val >> 4) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 9)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x)                   (((x).val >> 2) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 7)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *   _PAGE_PRESENT at bit 0
 *   _PAGE_MODIFIED at bit 4
 *   _PAGE_GLOBAL at bit 6
 *   _PAGE_VALID at bit 7
 */
#define __swp_type(x)                   (((x).val >> 8) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 13)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
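
/*
 * With this packing the swap type occupies PTE bits 8..12 (up to 32 swap
 * types) and the swap offset PTE bits 13..31 (19 bits), while bits 0..7,
 * which contain _PAGE_PRESENT, _PAGE_MODIFIED, _PAGE_GLOBAL and _PAGE_VALID
 * as listed above, stay zero, so a swap entry can never look like a present
 * mapping.
 */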

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */

#endif /* _ASM_PGTABLE_32_H */