/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

/*
 * Basically we have the same two-level (which is the logical three-level
 * Linux page table layout folded) page tables as the i386. Some day,
 * when we have proper page coloring support, we can have a 1% quicker
 * TLB refill handling mechanism; for now it is a bit slower, but it
 * works even with the cache aliasing problem the R4k and above have.
 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
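
/*
 * Worked example (a sketch, assuming 4KB pages, i.e. PAGE_SHIFT == 12, and a
 * 32-bit pte_t, i.e. PTE_T_LOG2 == 2, with PTE_ORDER == 0):
 *
 *      PGDIR_SHIFT = 2 * 12 + 0 - 2 = 22
 *      PGDIR_SIZE  = 1UL << 22      = 4MB
 *
 * so each PGD entry covers a 4MB virtual region, exactly the range that one
 * 4KB page full of ptes can map.
 */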

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 1
#define PTE_ORDER 0

#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
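
/*
 * Continuing the 4KB-page sketch above (PGD_T_LOG2 == 2 for a 32-bit pgd_t,
 * 4-byte pte_t):
 *
 *      __PGD_ORDER       = 32 - 36 + 2 + 2  = 0  (the PGD fits in one page)
 *      PTRS_PER_PTE      = 4096 / 4         = 1024
 *      USER_PTRS_PER_PGD = 0x80000000 / 4MB = 512
 *      PTRS_PER_PGD      = 512 * 2          = 1024
 *
 * i.e. user space gets the first 512 PGD slots and the kernel half the rest.
 */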

#define VMALLOC_START MAP_BASE

#define PKMAP_BASE (0xfe000000UL)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
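
/*
 * The point of a dedicated invalid_pte_table (rather than a NULL pointer) is,
 * presumably, that the TLB refill fast path can dereference any pgd/pmd slot
 * without a validity check: a walk through an empty slot just yields an
 * invalid pte and falls through to the slow path.
 */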

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;
        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
        pte.pte_low = pgprot_val(prot);
        return pte;
}
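
/*
 * With 64-bit physical addresses on a 32-bit MIPS32 core the pte is split
 * across two 32-bit words: pte_high carries the PFN shifted up by 6 with the
 * low six protection bits mirrored beneath it, while pte_low keeps the full
 * set of protection/software bits.
 */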

#else

#define pte_page(x) pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */

#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))

/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))

#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
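
/*
 * Usage sketch (pud_offset/pmd_offset come from the folded asm-generic
 * layers, so they merely re-cast the pgd entry):
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_map(pmd, addr);
 *      ...
 *      pte_unmap(pte);
 */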

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x) (((x).val >> 10) & 0x1f)
#define __swp_offset(x) ((x).val >> 15)
#define __swp_entry(type,offset) \
        ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
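
/*
 * For example, __swp_entry(3, 0x1234) packs the swap type into bits 10-14 and
 * the offset from bit 15 upwards, leaving bit 0 (_PAGE_PRESENT) and the
 * hardware valid bit clear, so a swap pte can never look like a present
 * mapping.
 */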

/*
 * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS 28

#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
        (((_pte).pte >> 2 ) & 0x38) | \
        (((_pte).pte >> 10) << 6 ))

#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
        (((off) & 0x38) << 2 ) | \
        (((off) >> 6 ) << 10) | \
        _PAGE_FILE })

#else

/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type,offset) \
        ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x) (((x).val >> 8) & 0x1f)
#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) \
        ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
 * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
 */
#define PTE_FILE_MAX_BITS 30

#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })

#else
/*
 * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
 */
#define PTE_FILE_MAX_BITS 28

#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
        (((_pte).pte >> 2) & 0x8) | \
        (((_pte).pte >> 8) << 4))

#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
        (((off) & 0x8) << 2) | \
        (((off) >> 4) << 8) | \
        _PAGE_FILE })
#endif

#endif

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
#else
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#endif

#endif /* _ASM_PGTABLE_32_H */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_32_H
#define _ASM_PGTABLE_32_H

#include <asm/addrspace.h>
#include <asm/page.h>

#include <linux/linkage.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#include <asm-generic/pgtable-nopmd.h>

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>
#endif

/*
 * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
 *
 * We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
 * our 2-level table layout would normally have a PGD entry cover a contiguous
 * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
 * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
 * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
 * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
 * To correct for this, when huge pages are enabled, we halve the number of
 * pointers a PTE page holds, making its last half go to waste. Correspondingly,
 * we double the number of PGD pages. Overall, page table memory overhead
 * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
 *
 * NOTE: We don't yet support huge pages if extended-addressing is enabled
 *       (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
 */
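
/*
 * Worked example of the tradeoff (again assuming 4KB pages): without huge
 * pages PGDIR_SHIFT is 22, so a PTE page holds 1024 entries covering 4MB.
 * With CONFIG_MIPS_HUGE_TLB_SUPPORT the "- 1" below makes PGDIR_SHIFT 21, so
 * a PGD entry covers 2MB (mapped by the TLB as a pair of supported 1MB
 * EntryLo halves), PTRS_PER_PTE drops to 512, and __PGD_ORDER gains "+ 1" so
 * the doubled PGD still covers the whole address space.
 */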

extern int temp_tlb_entry;

/*
 * - add_temporary_entry() adds a temporary TLB entry. We use TLB entries
 *      starting at the top and working down. This is for populating the
 *      TLB before trap_init() puts the TLB miss handler in place. It
 *      should be used only for entries matching the actual page tables,
 *      to prevent inconsistencies.
 */
extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
                               unsigned long entryhi, unsigned long pagemask);
54
55/*
56 * Basically we have the same two-level (which is the logical three level
57 * Linux page table layout folded) page tables as the i386. Some day
58 * when we have proper page coloring support we can have a 1% quicker
59 * tlb refill handling mechanism, but for now it is a bit slower but
60 * works even with the cache aliasing problem the R4k and above have.
61 */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
#else
# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
#endif

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/*
 * Entries per page directory level: we use two-level, so
 * we don't really have any PUD/PMD directory physically.
 */
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
#else
# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
#endif

#define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER aieeee_attempt_to_allocate_pmd
#define PTE_ORDER 0

#define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
#else
# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#endif

#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL

#define VMALLOC_START MAP_BASE

#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
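
/*
 * E.g. with LAST_PKMAP == 1024 and 4KB pages (both configuration-dependent
 * assumptions), PKMAP_END is FIXADDR_START rounded down to a 4MB boundary and
 * PKMAP_BASE lies 4MB below that, so the kmap window occupies one naturally
 * aligned 4MB region just under the fixmap area.
 */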

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#else
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#endif
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void load_pgd(unsigned long pg_dir);

extern pte_t invalid_pte_table[PTRS_PER_PTE];

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
        return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

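
/*
 * pmd_bad() must tolerate huge mappings: a pmd with _PAGE_HUGE set is a leaf
 * entry carrying the mapping itself rather than a page-aligned pointer to a
 * PTE page, so non-zero bits below PAGE_MASK are legitimate there.
 */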
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* pmd_huge(pmd) but inline */
        if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
                return 0;
#endif

        if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
                return 1;

        return 0;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#if defined(CONFIG_XPA)

#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_low = (pfn >> _PAGE_PRESENT_SHIFT) |
                                (pgprot_val(prot) & ~_PFNX_MASK);
        pte.pte_high = (pfn << _PFN_SHIFT) |
                                (pgprot_val(prot) & ~_PFN_MASK);
        return pte;
}
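
/*
 * With XPA the PFN no longer fits in pte_high alone: its low bits sit in
 * pte_high above _PFN_SHIFT next to the hardware EntryLo bits, while the bits
 * that overflow are stashed in the _PFNX_MASK field at the bottom of pte_low,
 * below the software-only flags.
 */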

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        pte_t pte;

        pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
        pte.pte_low = pgprot_val(prot);

        return pte;
}

#else

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot) __pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#define pte_page(x) pfn_to_page(pte_pfn(x))

#if defined(CONFIG_CPU_R3K_TLB)

/* Swap entries must have VALID bit cleared. */
#define __swp_type(x) (((x).val >> 10) & 0x1f)
#define __swp_offset(x) ((x).val >> 15)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#else

#if defined(CONFIG_XPA)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 4) & 0x1f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 2) & 0x1f)
#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })

#else
/*
 * Constraints:
 *      _PAGE_PRESENT at bit 0
 *      _PAGE_MODIFIED at bit 4
 *      _PAGE_GLOBAL at bit 6
 *      _PAGE_VALID at bit 7
 */
#define __swp_type(x) (((x).val >> 8) & 0x1f)
#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
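
/*
 * This satisfies the constraints above by construction: the type occupies
 * bits 8-12 and the offset starts at bit 13, so bits 0-7 of a swap pte
 * (including _PAGE_PRESENT, _PAGE_MODIFIED, _PAGE_GLOBAL and _PAGE_VALID)
 * are always zero.
 */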

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#endif /* defined(CONFIG_CPU_R3K_TLB) */

#endif /* _ASM_PGTABLE_32_H */