arch/alpha/include/asm/pgtable.h (v3.1)

#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/4level-fixup.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
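
/*
 * Worked example (illustrative, not part of the original header; assumes
 * the standard 8 KB Alpha page, i.e. PAGE_SHIFT == 13): each level then
 * holds 1UL << 10 == 1024 entries, and
 *
 *	PMD_SHIFT   = 13 + 10   = 23	->  PMD_SIZE   = 8 MB
 *	PGDIR_SHIFT = 13 + 2*10 = 33	->  PGDIR_SIZE = 8 GB
 *
 * so the three levels together cover 13 + 3*10 = 43 bits of virtual
 * address space (8 TB).
 */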

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)
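
/*
 * Worked example (illustrative, not part of the original header; assumes
 * 8 KB pages, so PGDIR_SIZE == 1UL << 33): the default window is
 *
 *	VMALLOC_START = -2*PGDIR_SIZE = 0xfffffffc00000000
 *	VMALLOC_END   =   -PGDIR_SIZE = 0xfffffffe00000000
 *
 * i.e. exactly one 8 GB pgd slot reserved for vmalloc mappings.
 */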

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000
#define _PAGE_FILE	0x80000	/* set:pagecache, unset:swap */

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */
#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
#define __P010	_PAGE_P(_PAGE_FOE)
#define __P011	_PAGE_P(_PAGE_FOE)
#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
#define __P101	_PAGE_P(_PAGE_FOW)
#define __P110	_PAGE_P(0)
#define __P111	_PAGE_P(0)

#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
#define __S010	_PAGE_S(_PAGE_FOE)
#define __S011	_PAGE_S(_PAGE_FOE)
#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
#define __S101	_PAGE_S(_PAGE_FOW)
#define __S110	_PAGE_S(0)
#define __S111	_PAGE_S(0)
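
/*
 * Annotation (not part of the original header): the "-w- -> rw-"
 * promotion can be read straight off these tables.  The shared "-w-"
 * entry is __S010 == _PAGE_S(_PAGE_FOE), i.e. VALID | ACCESS | FOE:
 * only execute faults, so reads are silently allowed even though only
 * write permission was requested.  The private flavour __P010 also gets
 * _PAGE_FOW forced on by _PAGE_P(), making the first write fault so the
 * page can be copied (copy-on-write) before it becomes "rw-".
 */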

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif
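
/*
 * Worked example (illustrative, not part of the original header):
 * KSEG_PFN covers address bits 43:42 of the pfn.  A pfn matching the
 * 0x40000000000 pattern has those bits equal to 01; XOR-ing with
 * KSEG_PFN flips them to 10, so
 *
 *	PHYS_TWIDDLE(0x40000000000UL >> PAGE_SHIFT)
 *		== 0x80000000000UL >> PAGE_SHIFT
 *
 * while any pfn outside that window passes through unchanged.
 */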

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#ifndef CONFIG_DISCONTIGMEM
#define page_to_pa(page)	(((page) - mem_map) << PAGE_SHIFT)

#define pte_pfn(pte)	(pte_val(pte) >> 32)
#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})
#endif
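
/*
 * Usage sketch (not part of the original header): a pte keeps the pfn in
 * bits 63:32 (_PFN_MASK) and the protection/PAL bits below, so building
 * an entry and taking it apart is a pure shift/mask round trip:
 *
 *	pte_t pte = mk_pte(page, PAGE_SHARED);
 *
 *	pte_pfn(pte)  == page_to_pfn(page);	// pfn back from bits 63:32
 *	pte_page(pte) == page;			// and back to the struct page
 */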

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ pgd_val(*pgdp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }


extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}
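
/*
 * Annotation (not part of the original header): pmd_set() turns the pte
 * table's kernel virtual address into a physical one (ptep - PAGE_OFFSET)
 * and shifts it by 32-PAGE_SHIFT, which is just its pfn << 32, the same
 * _PFN_MASK field a pte uses.  pmd_page_vaddr() inverts it:
 *
 *	(pfn << 32) >> (32-PAGE_SHIFT) == pfn << PAGE_SHIFT
 *
 * which is the physical address again; adding PAGE_OFFSET returns to the
 * kernel direct mapping.
 */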

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(mem_map + ((pmd_val(pmd) & _PFN_MASK) >> 32))
#define pgd_page(pgd)	(mem_map + ((pgd_val(pgd) & _PFN_MASK) >> 32))
#endif

extern inline unsigned long pgd_page_vaddr(pgd_t pgd)
{ return PAGE_OFFSET + ((pgd_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)); }

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pgd_none(pgd_t pgd)		{ return !pgd_val(pgd); }
extern inline int pgd_bad(pgd_t pgd)		{ return (pgd_val(pgd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pgd_present(pgd_t pgd)	{ return pgd_val(pgd) & _PAGE_VALID; }
extern inline void pgd_clear(pgd_t * pgdp)	{ pgd_val(*pgdp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
extern inline int pte_special(pte_t pte)	{ return 0; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }
extern inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
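
/*
 * Annotation (not part of the original header): this is the software
 * dirty bit at work.  pte_mkclean() clears _PAGE_DIRTY together with the
 * KWE/UWE write-enable bits, so the next store faults; the write-fault
 * path then applies pte_mkdirty(), which sets all of __DIRTY_BITS again,
 * marking the page dirty and re-enabling writes in one step.
 * pte_mkold()/pte_mkyoung() play the same trick with KRE/URE for the
 * accessed bit.
 */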

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

/* to find an entry in a page-table-directory. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address)	((mm)->pgd+pgd_index(address))

/*
 * The smp_read_barrier_depends() calls in the following functions are
 * required to order the load of *dir (the pointer in the top level page
 * table) with any subsequent load of the returned pmd_t *ret (ret is
 * data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
{
	pmd_t *ret = (pmd_t *) pgd_page_vaddr(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_read_barrier_depends(); /* see above */
	return ret;
}

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
#define pte_unmap(pte)			do { } while (0)
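
/*
 * Usage sketch (not part of the original header; the helper name is
 * hypothetical): a full software walk of the three-level tree for one
 * address, as a fault handler would do it, with locking omitted:
 */
static inline pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top level, no barrier needed */
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);		/* second level */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* third level */
}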

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

/*
 * Non-present pages:  high 24 bits are offset, next 8 bits type,
 * low 32 bits zero.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0xff)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
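
/*
 * Worked example (illustrative, not part of the original header): with
 * type at bits 39:32 and offset at bits 63:40,
 *
 *	__swp_entry(2, 0x1000).val == (2UL << 32) | (0x1000UL << 40)
 *				   == 0x0010000200000000UL
 *
 * and __swp_type()/__swp_offset() mask the fields back out (2 and
 * 0x1000).  _PAGE_VALID stays clear, so the entry is !pte_present(),
 * as a swap pte must be.
 */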

#define pte_to_pgoff(pte)	(pte_val(pte) >> 32)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 32) | _PAGE_FILE })

#define PTE_FILE_MAX_BITS	32

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif

#define io_remap_pfn_range(vma, start, pfn, size, prot)	\
		remap_pfn_range(vma, start, pfn, size, prot)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

#include <asm-generic/pgtable.h>

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */

arch/alpha/include/asm/pgtable.h (v6.13.7)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_PGTABLE_H
#define _ALPHA_PGTABLE_H

#include <asm-generic/pgtable-nopud.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the Alpha page table tree.
 *
 * This hopefully works with any standard Alpha page-size, as defined
 * in <asm/page.h> (currently 8192).
 */
#include <linux/mmzone.h>

#include <asm/page.h>
#include <asm/processor.h>	/* For TASK_SIZE */
#include <asm/machvec.h>
#include <asm/setup.h>

struct mm_struct;
struct vm_area_struct;

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * Entries per page directory level:  the Alpha is three-level, with
 * all levels having a one-page page table.
 */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD	(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Number of pointers that fit on a page:  this will go away. */
#define PTRS_PER_PAGE	(1UL << (PAGE_SHIFT-3))

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
#define VMALLOC_START		0xfffffe0000000000
#else
#define VMALLOC_START		(-2*PGDIR_SIZE)
#endif
#define VMALLOC_END		(-PGDIR_SIZE)

/*
 * OSF/1 PAL-code-imposed page table bits
 */
#define _PAGE_VALID	0x0001
#define _PAGE_FOR	0x0002	/* used for page protection (fault on read) */
#define _PAGE_FOW	0x0004	/* used for page protection (fault on write) */
#define _PAGE_FOE	0x0008	/* used for page protection (fault on exec) */
#define _PAGE_ASM	0x0010
#define _PAGE_KRE	0x0100	/* xxx - see below on the "accessed" bit */
#define _PAGE_URE	0x0200	/* xxx */
#define _PAGE_KWE	0x1000	/* used to do the dirty bit in software */
#define _PAGE_UWE	0x2000	/* used to do the dirty bit in software */

/* .. and these are ours ... */
#define _PAGE_DIRTY	0x20000
#define _PAGE_ACCESSED	0x40000

/* We borrow bit 39 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	0x8000000000UL

/*
 * NOTE! The "accessed" bit isn't necessarily exact:  it can be kept exactly
 * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it.
 * Under Linux/AXP, the "accessed" bit just means "read", and I'll just use
 * the KRE/URE bits to watch for it. That way we don't need to overload the
 * KWE/UWE bits with both handling dirty and accessed.
 *
 * Note that the kernel uses the accessed bit just to check whether to page
 * out a page or not, so it doesn't have to be exact anyway.
 */

#define __DIRTY_BITS	(_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE)
#define __ACCESS_BITS	(_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE)

#define _PFN_MASK	0xFFFFFFFF00000000UL

#define _PAGE_TABLE	(_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS)
#define _PAGE_CHG_MASK	(_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS)

/*
 * All the normal masks have the "page accessed" bits on, as any time they are used,
 * the page is accessed. They are cleared only by the page-out routines
 */
#define PAGE_NONE	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE)
#define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
#define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
#define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))

#define _PAGE_P(x) _PAGE_NORMAL((x) | (((x) & _PAGE_FOW)?0:_PAGE_FOW))
#define _PAGE_S(x) _PAGE_NORMAL(x)

/*
 * The hardware can handle write-only mappings, but as the Alpha
 * architecture does byte-wide writes with a read-modify-write
 * sequence, it's not practical to have write-without-read privs.
 * Thus the "-w- -> rw-" and "-wx -> rwx" mapping here (and in
 * arch/alpha/mm/fault.c)
 */
	/* xwr */

/*
 * pgprot_noncached() is only for infiniband pci support, and a real
 * implementation for RAM would be more complicated.
 */
#define pgprot_noncached(prot)	(prot)

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero:  used
 * for zero-mapped memory areas etc..
 */
extern pte_t __bad_page(void);
extern pmd_t * __bad_pagetable(void);

extern unsigned long __zero_page(void);

#define BAD_PAGETABLE	__bad_pagetable()
#define BAD_PAGE	__bad_page()
#define ZERO_PAGE(vaddr)	(virt_to_page(ZERO_PGE))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
#define SIZEOF_PTR_LOG2			3

/* to find an entry in a page-table */
#define PAGE_PTR(address)		\
  ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)

/*
 * On certain platforms whose physical address space can overlap KSEG,
 * namely EV6 and above, we must re-twiddle the physaddr to restore the
 * correct high-order bits.
 *
 * This is extremely confusing until you realize that this is actually
 * just working around a userspace bug.  The X server was intending to
 * provide the physical address but instead provided the KSEG address.
 * Or tried to, except it's not representable.
 *
 * On Tsunami there's nothing meaningful at 0x40000000000, so this is
 * a safe thing to do.  Come the first core logic that does put something
 * in this area -- memory or whathaveyou -- then this hack will have
 * to go away.  So be prepared!
 */

#if defined(CONFIG_ALPHA_GENERIC) && defined(USE_48_BIT_KSEG)
#error "EV6-only feature in a generic kernel"
#endif
#if defined(CONFIG_ALPHA_GENERIC) || \
    (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
#define KSEG_PFN	(0xc0000000000UL >> PAGE_SHIFT)
#define PHYS_TWIDDLE(pfn) \
  ((((pfn) & KSEG_PFN) == (0x40000000000UL >> PAGE_SHIFT)) \
  ? ((pfn) ^= KSEG_PFN) : (pfn))
#else
#define PHYS_TWIDDLE(pfn) (pfn)
#endif

/*
 * Conversion functions:  convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define page_to_pa(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define PFN_PTE_SHIFT		32
#define pte_pfn(pte)		(pte_val(pte) >> PFN_PTE_SHIFT)

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot)						\
({									\
	pte_t pte;							\
									\
	pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot);	\
	pte;								\
})

extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }

extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

extern inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
{ pmd_val(*pmdp) = _PAGE_TABLE | ((((unsigned long) ptep) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }

extern inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ pud_val(*pudp) = _PAGE_TABLE | ((((unsigned long) pmdp) - PAGE_OFFSET) << (32-PAGE_SHIFT)); }


extern inline unsigned long
pmd_page_vaddr(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
}

#define pmd_pfn(pmd)	(pmd_val(pmd) >> 32)
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> 32))
#define pud_page(pud)	(pfn_to_page(pud_val(pud) >> 32))

extern inline pmd_t *pud_pgtable(pud_t pgd)
{
	return (pmd_t *)(PAGE_OFFSET + ((pud_val(pgd) & _PFN_MASK) >> (32-PAGE_SHIFT)));
}

extern inline int pte_none(pte_t pte)		{ return !pte_val(pte); }
extern inline int pte_present(pte_t pte)	{ return pte_val(pte) & _PAGE_VALID; }
extern inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = 0;
}

extern inline int pmd_none(pmd_t pmd)		{ return !pmd_val(pmd); }
extern inline int pmd_bad(pmd_t pmd)		{ return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pmd_present(pmd_t pmd)	{ return pmd_val(pmd) & _PAGE_VALID; }
extern inline void pmd_clear(pmd_t * pmdp)	{ pmd_val(*pmdp) = 0; }

extern inline int pud_none(pud_t pud)		{ return !pud_val(pud); }
extern inline int pud_bad(pud_t pud)		{ return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; }
extern inline int pud_present(pud_t pud)	{ return pud_val(pud) & _PAGE_VALID; }
extern inline void pud_clear(pud_t * pudp)	{ pud_val(*pudp) = 0; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
extern inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_FOW); }
extern inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }

extern inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_FOW; return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~(__ACCESS_BITS); return pte; }
extern inline pte_t pte_mkwrite_novma(pte_t pte){ pte_val(pte) &= ~_PAGE_FOW; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= __DIRTY_BITS; return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= __ACCESS_BITS; return pte; }

/*
 * The smp_rmb() calls in the following functions are required to order the
 * load of *dir (the pointer in the top level page table) with any subsequent
 * load of the returned pmd_t *ret (ret is data dependent on *dir).
 *
 * If this ordering is not enforced, the CPU might load an older value of
 * *ret, which may be uninitialized data. See mm/memory.c:__pte_alloc for
 * more details.
 *
 * Note that we never change the mm->pgd pointer after the task is running, so
 * pgd_offset does not require such a barrier.
 */

/* Find an entry in the second-level page table.. */
extern inline pmd_t * pmd_offset(pud_t * dir, unsigned long address)
{
	pmd_t *ret = pud_pgtable(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pmd_offset pmd_offset

/* Find an entry in the third-level page table.. */
extern inline pte_t * pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	pte_t *ret = (pte_t *) pmd_page_vaddr(*dir)
		+ ((address >> PAGE_SHIFT) & (PTRS_PER_PAGE - 1));
	smp_rmb(); /* see above */
	return ret;
}
#define pte_offset_kernel pte_offset_kernel
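
/*
 * Usage sketch (not part of the original header; the helper name is
 * hypothetical): the generic five-level walk still applies, but
 * <asm-generic/pgtable-nopud.h> folds the p4d and pud steps into no-ops,
 * so only three levels do real work on Alpha (locking omitted):
 */
static inline pte_t *example_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded: same entry as *pgd */
	pud_t *pud = pud_offset(p4d, addr);	/* folded: same entry as *p4d */
	pmd_t *pmd;

	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}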

extern pgd_t swapper_pg_dir[1024];

/*
 * The Alpha doesn't have any external MMU info:  the kernel page
 * tables contain all the necessary information.
 */
extern inline void update_mmu_cache(struct vm_area_struct * vma,
	unsigned long address, pte_t *ptep)
{
}

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
}

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <------------------- offset ------------------> E <--- type -->
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------------------- zeroes -------------------------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 */
extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 32) | (offset << 40); return pte; }

#define __swp_type(x)		(((x).val >> 32) & 0x7f)
#define __swp_offset(x)		((x).val >> 40)
#define __swp_entry(type, off)	((swp_entry_t) { pte_val(mk_swap_pte((type), (off))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}
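
/*
 * Worked example (illustrative, not part of the original header): the
 * type field is now 7 bits (mask 0x7f), so it cannot collide with the
 * exclusive marker at bit 39 (_PAGE_SWP_EXCLUSIVE == 1UL << 39):
 *
 *	pte_t pte = __swp_entry_to_pte(__swp_entry(2, 0x1000));
 *	pte = pte_swp_mkexclusive(pte);		(sets bit 39 only)
 *
 * __swp_type() and __swp_offset() still read back 2 and 0x1000, and the
 * entry stays !pte_present() because _PAGE_VALID (bit 0) remains clear.
 */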

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern void paging_init(void);

/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT.  */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* _ALPHA_PGTABLE_H */