v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#define __ARCH_USE_5LEVEL_HACK
#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#elif !(defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_MIPS_VA_BITS_48))
#include <asm-generic/pgtable-nopud.h>
#endif
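
/*
 * Editorial note, not in the original header -- a plain-English reading of
 * the #if/#elif above: 64KB pages without CONFIG_MIPS_VA_BITS_48 fold the
 * pmd level (2-level tables); 4KB pages with CONFIG_MIPS_VA_BITS_48 keep
 * pud, pmd and pte (4-level); every other combination folds the pud
 * (3-level).
 */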

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0.
 *
 * Kernel mappings: kernel mappings are held in the swapper_pg_table.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

# ifdef __PAGETABLE_PUD_FOLDED
# define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
# endif
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits us to map 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that
 * seemed rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  There is no need for a third level
 * at the moment, but one could be added.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
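
/*
 * Editorial worked example (not in the original header), assuming the
 * default CONFIG_PAGE_SIZE_4KB without CONFIG_MIPS_VA_BITS_48, i.e.
 * PAGE_SHIFT == 12, PTE_ORDER == PMD_ORDER == 0 and the pud folded:
 *
 *   PMD_SHIFT   = 12 + (12 + 0 - 3) = 21   each pmd entry maps 2 MB
 *   PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30   each pgd entry maps 1 GB
 *
 * With PGD_ORDER == 1 the pgd occupies two pages (1024 entries), so the
 * total is 1024 * 1 GB = 2^40 bytes -- the "40 bits" mentioned above.
 * With CONFIG_MIPS_VA_BITS_48 instead, PUD_SHIFT = 21 + 9 = 30 and
 * PGDIR_SHIFT = 30 + 9 = 39, and 512 pgd entries give 2^48 bytes.
 */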
#ifdef CONFIG_PAGE_SIZE_4KB
# ifdef CONFIG_MIPS_VA_BITS_48
#  define PGD_ORDER		0
#  define PUD_ORDER		0
# else
#  define PGD_ORDER		1
#  define PUD_ORDER		aieeee_attempt_to_allocate_pud
# endif
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#ifdef CONFIG_MIPS_VA_BITS_48
#define PGD_ORDER		1
#else
#define PGD_ORDER		0
#endif
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#ifdef CONFIG_MIPS_VA_BITS_48
#define PMD_ORDER		0
#else
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#endif
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PUD_FOLDED
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) / sizeof(pud_t))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
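
/*
 * Editorial example (not in the original header): for 4kB pages without
 * CONFIG_MIPS_VA_BITS_48, these evaluate to PTRS_PER_PGD == (4096 << 1)/8
 * == 1024 and PTRS_PER_PMD == PTRS_PER_PTE == 4096/8 == 512, matching the
 * "1024 / 512 / 512" figures in the layout comment at the top of the file.
 */
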
#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))
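
/*
 * Editorial example (not in the original header), assuming 4kB pages, a
 * folded pud (the generic headers define PTRS_PER_PUD as 1 in that case)
 * and a CPU reporting cpu_vmbits == 40: the product 1024 * 1 * 512 * 512
 * * 4096 == 2^40 matches 1UL << 40, so VMALLOC_END evaluates to
 * MAP_BASE + 2^40 - 2^32 (the 2^32 subtraction is in the original code;
 * only the arithmetic here is illustrative).
 */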

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * For 4-level pagetables we define these ourselves; for 3-level the
 * definitions are below, and for 2-level they are supplied by
 * <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd entries point to the invalid_pud_table.
 */
static inline int pgd_none(pgd_t pgd)
{
	return pgd_val(pgd) == (unsigned long)invalid_pud_table;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (unlikely(pgd_val(pgd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	return pgd_val(pgd) != (unsigned long)invalid_pud_table;
}

static inline void pgd_clear(pgd_t *pgdp)
{
	pgd_val(*pgdp) = (unsigned long)invalid_pud_table;
}

#define pud_index(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return pgd_val(pgd);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline void set_pgd(pgd_t *pgd, pgd_t pgdval)
{
	*pgd = pgdval;
}

#endif

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
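
/*
 * Editorial sketch (not in the original header) of how the macros above
 * combine into a software page-table walk for a kernel address.  The
 * helper name is hypothetical; with folded levels the pud/pmd steps
 * collapse into the level above via the asm-generic headers.
 */
static inline pte_t *example_walk_kernel_pte(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* index the init_mm pgd */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))			/* still invalid_pud_table? */
		return NULL;
	pud = pud_offset(pgd, address);		/* index the pud page */
	if (pud_none(*pud))			/* still invalid_pmd_table? */
		return NULL;
	pmd = pmd_offset(pud, address);		/* index the pmd page */
	if (pmd_none(*pmd))			/* still invalid_pte_table? */
		return NULL;
	return pte_offset_kernel(pmd, address);	/* pointer to the pte */
}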

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
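
/*
 * Editorial example (not in the original header): __swp_entry(3, 0x1234)
 * encodes to (3UL << 16) | (0x1234UL << 24); __swp_type() masks bits
 * 16-23 back out as 3 and __swp_offset() shifts bits 24-63 back down to
 * 0x1234.  The low 16 bits stay zero, so the flag bits of a present pte
 * are never set in a swap entry.
 */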

#endif /* _ASM_PGTABLE_64_H */
v3.1
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/linkage.h>

#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>

#ifdef CONFIG_PAGE_SIZE_64KB
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

/*
 * Each address space has 2 4K pages as its page directory, giving 1024
 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
 * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
 * tables. Each page table is also a single 4K page, giving 512 (==
 * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
 * invalid_pmd_table, each pmd entry is initialized to point to
 * invalid_pte_table, each pte is initialized to 0. When memory is low,
 * and a pmd table or a page table allocation fails, empty_bad_pmd_table
 * and empty_bad_page_table are returned to higher layer code, so
 * that the failure is recognized later on. Linux does not seem to
 * handle these failures very well, though. The empty_bad_page_table has
 * invalid pte entries in it, to force page faults.
 *
 * Kernel mappings: kernel mappings are held in the swapper_pg_table.
 * The layout is identical to userspace except it's indexed with the
 * fault address - VMALLOC_START.
 */


/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef __PAGETABLE_PMD_FOLDED
#define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
#else

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * For 4kB page size we use a 3 level page tree and an 8kB pgd, which
 * permits us to map 40 bits of virtual address space.
 *
 * We used to implement 41 bits by having an order 1 pmd level but that
 * seemed rather pointless.
 *
 * For 8kB page size we use a 3 level page tree which permits a total of
 * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
 * two levels would be easy to implement.
 *
 * For 16kB page size we use a 2 level page tree which permits a total of
 * 36 bits of virtual address space.  There is no need for a third level
 * at the moment, but one could be added.
 *
 * For 64kB page size we use a 2 level page table tree for a total of 42 bits
 * of virtual address space.
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PGD_ORDER		1
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		0
#define PTE_ORDER		0
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER		0
#define PUD_ORDER		aieeee_attempt_to_allocate_pud
#define PMD_ORDER		aieeee_attempt_to_allocate_pmd
#define PTE_ORDER		0
#endif

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#if PGDIR_SIZE >= TASK_SIZE64
#define USER_PTRS_PER_PGD	(1)
#else
#define USER_PTRS_PER_PGD	(TASK_SIZE64 / PGDIR_SIZE)
#endif
#define FIRST_USER_ADDRESS	0UL

/*
 * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
 * the first couple of pages so NULL pointer dereferences will still
 * reliably trap.
 */
#define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
#define VMALLOC_END	\
	(MAP_BASE + \
	 min(PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
	     (1UL << cpu_vmbits)) - (1UL << 32))

#if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
	VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START	CKSSEG
#define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
#endif

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * For 3-level pagetables we define these ourselves; for 2-level the
 * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
 */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
#endif

/*
 * Empty pgd/pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}

#define pmd_bad(pmd)		(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
#endif

#define pte_page(x)		pfn_to_page(pte_pfn(x))

#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x)		((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
#define pfn_pte(pfn, prot)	__pte(((pfn) << (PAGE_SHIFT + 2)) | pgprot_val(prot))
#else
#define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#endif

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	pmd_index(address)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return pud_val(pud);
}
#define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
#endif

/* Find an entry in the third-level page table.. */
#define __pte_offset(address)						\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_kernel(dir, address)					\
	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
281/*
282 * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
283 * make things easier, and only use the upper 56 bits for the page offset...
284 */
285#define PTE_FILE_MAX_BITS	56
286
287#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
288#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
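
/*
 * Editorial example (not in the original header): pgoff_to_pte(0x42)
 * produces (0x42UL << 8) | _PAGE_FILE, and pte_to_pgoff() shifts the
 * 56-bit file offset back down to 0x42.  Bits 1-3 and 5 stay clear, as
 * the comment above intends.
 */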

#endif /* _ASM_PGTABLE_64_H */