/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm/page.h>

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#elif CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#endif

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/processor.h>
#include <asm/cache.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory. For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET. This operation can be relatively expensive (e.g.,
 * require a hash- or multi-level tree lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error. Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* This is for the serialization of PxTLB broadcasts. At least on the N-class
 * systems, only one PxTLB inter-processor broadcast can be active at any one
 * time on the Merced bus. */
extern spinlock_t pa_tlb_flush_lock;
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
extern int pa_serialize_tlb_flushes;
#else
#define pa_serialize_tlb_flushes	(0)
#endif

#define purge_tlb_start(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_save(flags);	\
	} while (0)
#define purge_tlb_end(flags)	do { \
	if (pa_serialize_tlb_flushes)	\
		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
	else \
		local_irq_restore(flags); \
	} while (0)

/* Purge data and instruction TLB entries. The TLB purge instructions
 * are slow on SMP machines since the purge must be broadcast to all CPUs.
 */

static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
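
/*
 * Illustrative sketch (not an API defined here): a caller invalidating a
 * small user range would typically walk it page by page, e.g.
 *
 *	unsigned long va;
 *	for (va = start; va < end; va += PAGE_SIZE)
 *		purge_tlb_entries(mm, va);
 *
 * where 'start'/'end' are hypothetical page-aligned bounds.
 */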

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
		barrier();			\
	} while (0)

#define set_pte_at(mm, addr, pteptr, pteval)	\
	do {					\
		*(pteptr) = (pteval);		\
		purge_tlb_entries(mm, addr);	\
	} while (0)
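
/*
 * Note on ordering (descriptive, inferred from the code above): the new
 * PTE value is stored first and the stale translation is purged
 * afterwards, so a concurrent access on another CPU sees either the old
 * or the new mapping, never a purged entry pointing at an unwritten PTE.
 */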

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#if CONFIG_PGTABLE_LEVELS == 3
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#if defined(CONFIG_64BIT)
#define KERNEL_INITIAL_ORDER	26	/* 1<<26 = 64MB */
#else
#define KERNEL_INITIAL_ORDER	25	/* 1<<25 = 32MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_ORDER	1
#define PGD_ORDER	0
#else
#define PGD_ORDER	1
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean the shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
#else
#define BITS_PER_PMD	0
#endif

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
#if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
#define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
#else
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD
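
/*
 * Worked example (a sketch; the exact BITS_PER_*_ENTRY values come from
 * asm/page.h): on a 64-bit kernel with 4 kB pages, assuming 8-byte PTEs
 * (BITS_PER_PTE_ENTRY == 3) and 4-byte pmd/pgd entries
 * (BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY == 2), this yields
 * BITS_PER_PTE = 12 - 3 = 9 (512 PTEs per page-table page) and
 * BITS_PER_PMD = 12 + 1 - 2 = 11, so PGDIR_SHIFT = 12 + 9 + 11 = 32,
 * i.e. each pgd entry then maps 4 GB of virtual space.
 */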

#ifdef CONFIG_64BIT
#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS	(BITS_PER_LONG)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT	0
#endif

/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)	/* all initial PTEs fit into one page */
#endif

/*
 * pgd entries used up by user/kernel:
 */

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT		31	/* (0x001) read access allowed */
#define _PAGE_WRITE_BIT		30	/* (0x002) write access allowed */
#define _PAGE_EXEC_BIT		29	/* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT	28	/* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT		27	/* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT		26	/* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT	25	/* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT	24	/* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT	23	/* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT	22	/* (0x200) Software: translation valid */
#define _PAGE_HPAGE_BIT		21	/* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT		20	/* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - (x))
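
/*
 * Worked example: PA-RISC numbers bits with 0 as the most significant
 * bit, so xlate_pabit() converts the PA bit positions above into
 * ordinary shift counts. E.g. xlate_pabit(_PAGE_READ_BIT) = 31 - 31 = 0,
 * giving _PAGE_READ = 1 << 0 = 0x001, matching the (0x001) annotation.
 */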
195
/* This defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12

#define _PAGE_READ	(1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE	(1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC	(1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY	(1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB	(1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY	(1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP	(1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_HUGE	(1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO	(_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX	(_PAGE_KERNEL_EXEC | _PAGE_WRITE)
#define _PAGE_KERNEL	(_PAGE_KERNEL_RO | _PAGE_WRITE)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_VALID_BIT		30

#define PxD_FLAG_PRESENT	(1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_VALID		(1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK		(0xf)
#define PxD_FLAG_SHIFT		(4)
#define PxD_VALUE_SHIFT		(PFN_PTE_SHIFT - PxD_FLAG_SHIFT)
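
/*
 * Worked example: a pgd/pmd entry stores a page-aligned physical address
 * right-shifted by PxD_VALUE_SHIFT, with the PxD_FLAG_* bits in the low
 * nibble. With 4 kB pages (PFN_PTE_SHIFT == 12) PxD_VALUE_SHIFT is 8, so
 * even a 32-bit wide entry can address 32 + 8 = 40 bits of physical
 * space; see pmd_address()/pgd_address() below for the decode.
 */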

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY | _PAGE_READ)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX
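
/*
 * Example reading of the tables above: a shared PROT_READ|PROT_WRITE
 * mapping (xwr = 011) gets __S011 = PAGE_SHARED, while the equivalent
 * private mapping gets __P011 = __P001 = PAGE_READONLY, so the first
 * write faults and is resolved by copy-on-write.
 */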

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	(pte_val(x) == 0)
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
#define pud_address(x)	((unsigned long)(pud_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd)
{
	set_pmd(pmd, __pmd(0));
}

#if CONFIG_PGTABLE_LEVELS == 3
#define pud_pgtable(pud) ((pmd_t *) __va(pud_address(pud)))
#define pud_page(pud)	virt_to_page((void *)pud_pgtable(pud))

/* For 64 bit we have three level tables */

#define pud_none(x)	(!pud_val(x))
#define pud_bad(x)	(!(pud_flag(x) & PxD_FLAG_VALID))
#define pud_present(x)	(pud_flag(x) & PxD_FLAG_PRESENT)
static inline void pud_clear(pud_t *pud)
{
	set_pud(pud, __pud(0));
}
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
352
353/*
354 * Huge pte definitions.
355 */
356#ifdef CONFIG_HUGETLB_PAGE
357#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
358#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
359 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
360#else
361#define pte_huge(pte) (0)
362#define pte_mkhuge(pte) (pte)
363#endif
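
/*
 * Note (descriptive): when parisc_requires_coherency() is true,
 * pte_mkhuge() deliberately leaves _PAGE_HUGE clear, so such machines
 * fall back to base-page mappings even with CONFIG_HUGETLB_PAGE enabled.
 */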

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr,pgprot) \
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}
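
/*
 * Illustrative sketch only: building a kernel mapping for a physical
 * page could combine the helpers above, e.g.
 *
 *	pte_t pte = pfn_pte(PFN_DOWN(phys), PAGE_KERNEL);
 *	set_pte_at(&init_mm, vaddr, ptep, pte);
 *
 * where 'phys', 'vaddr' and 'ptep' are hypothetical values supplied by
 * the caller.
 */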

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)	(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) __va(pmd_address(pmd)));
}

#define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

/* Find an entry in the second-level page table.. */

extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
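
/*
 * Worked example of the swap layout above: the swap type lives in PTE
 * bits 0-4; the low three offset bits land in PTE bits 6-8 and the rest
 * are shifted up by 8, starting at bit 11. This leaves bit 9
 * (_PAGE_PRESENT, 0x200) and bit 10 (_PAGE_HUGE, 0x400) clear, so a
 * swap PTE never looks present.
 */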

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	if (!pte_young(*ptep))
		return 0;

	pte = *ptep;
	if (!pte_young(pte)) {
		return 0;
	}
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
}

struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;

	old_pte = *ptep;
	set_pte_at(mm, addr, ptep, __pte(0));

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
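
/*
 * Note (descriptive): each helper above funnels its update through
 * set_pte_at(), so the TLB purge in purge_tlb_entries() is never
 * skipped when a live PTE changes.
 */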

#define pte_same(A,B)	(pte_val(A) == pte_val(B))

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME

#endif /* _PARISC_PGTABLE_H */