#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte_low);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
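
/*
 * Illustrative sketch (assumes NPHYS == 32, so NPHYS_SIGN is bit 31 and
 * NPHYS_MASK covers bits [63:32]; see <asm/page.h> for the real values):
 *
 *	x   = 0x0000000080000400	bit 31 (the NPHYS sign bit) is set
 *	*xp = 0xffffffff80000400	stored sign-extended to 64 bits
 *
 * A value with bit 31 clear is stored unchanged.
 */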

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* Either the PMD is empty or present; it's never paged out. */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define __pte_offset(address)	pte_index(address)

#define pte_offset_kernel(dir, addr) \
		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)		do { } while (0)

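/*
 * Putting the levels together, a walk to the PTE for an address looks
 * roughly like this (a sketch; pmd_offset() is supplied by the generic
 * page table headers, not by this file):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd))
 *		pte = pte_offset_kernel(pmd, addr);
 */
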
#ifndef __ASSEMBLY__
#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
   swapped out, and it must be placed so that it doesn't overlap either the
   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
   into 2 pieces.  That is handled by __swp_entry() and __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page has been written to */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)

/*
 * We can use the sign-extended bits in the PTEL to get 32 bits of
 * software flags. This works for now because no implementation uses
 * anything above the PPN field.
 */
#define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL	_PAGE_EXT(0x002)
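
/*
 * For example (illustrative): _PAGE_WIRED expands to 0x0000000100000000ULL,
 * i.e. bit 32, safely above any bit the hardware interprets in the low
 * word of the PTEL.
 */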

#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_FILE | _PAGE_SHARED | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE	(0)
#endif
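
/*
 * Illustrative effect: with CONFIG_HUGETLB_PAGE_SIZE_1MB, pte_mkhuge()
 * (defined below) ORs in _PAGE_SIZE1, i.e. sets 0x010 in the SZ bits
 * to select the 1MB page size.
 */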

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)

/*
 * The hardware gives us the full set of permission bits
 * (Read/Write/Execute/Shared); _PAGE_COMMON carries the flags shared by
 * all of the protection modes below, with R/W/X ORed in per mode.
 */
#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_SHARED)
#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)

#define PAGE_KERNEL_NOCACHE \
			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE | _PAGE_ACCESSED | \
				 _PAGE_DIRTY | _PAGE_SHARED)

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map).  */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
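
/*
 * Typical use in a driver's mmap() method (sketch, illustrative only):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */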

/*
 * PTE level access routines.
 *
 * Note 1:
 * This is the leaf of the tree walk; the value stored here is a physical
 * address.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the page
 * is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
 * out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
 * left for us to select.  If we force bit[7]==0 when swapped out, we could use
 * the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
 * we force bit[7]==1 when swapped out, we can use all zeroes to indicate
 * empty.  This is convenient, because the page tables get cleared to zero
 * when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)
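
/*
 * State summary (illustrative; see the encode/decode macros at the end
 * of this file for the non-present encodings):
 *
 *	pte_val(x) == 0			pte_none()    - empty slot
 *	pte_val(x) & _PAGE_PRESENT	pte_present() - mapped page
 *	otherwise			a swap (or file) entry
 */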

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to. Get the absolute physical address,
 * make it a relative physical address and translate it to an index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))

/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following have defined behavior only if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)   { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
/* _PAGE_SPECIAL sits above bit 31; collapse to 0/1 so the int return doesn't truncate it away */
static inline int pte_special(pte_t pte){ return !!(pte_val(pte) & _PAGE_SPECIAL); }

static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }

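/*
 * These helpers operate on a pte_t by value; updating a live entry is a
 * copy-modify-write sequence (sketch, illustrative only):
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkdirty(pte_mkyoung(entry));
 *	set_pte_at(mm, addr, ptep, entry);
 */
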
/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)							\
({										\
	pte_t __pte;								\
										\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |		\
		__MEMORY_START | pgprot_val((pgprot))));			\
	__pte;									\
})

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte((physpage) | pgprot_val(pgprot))); __pte; })
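
/*
 * Usage sketch (illustrative only; the physical address is an arbitrary
 * example value): both forms build the same kind of entry; mk_pte()
 * starts from a struct page, mk_pte_phys() from a raw physical address.
 *
 *	pte_t a = mk_pte(page, PAGE_READONLY);
 *	pte_t b = mk_pte_phys(0x80001000UL, PAGE_KERNEL);
 */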

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { (((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
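
/*
 * Worked example (illustrative): type 5 (0b000101), offset 0x123.
 * The low two type bits land in bits [1:0], the upper type bits are
 * shifted up one so they skip bit 2 (_PAGE_PRESENT), and the offset
 * starts at bit 8:
 *
 *	__swp_entry(5, 0x123).val == (0x123 << 8) + (0x04 << 1) + 1
 *	                          == 0x12309	(bit 2 stays clear)
 *	__swp_type()   recovers 1 + 4 == 5
 *	__swp_offset() recovers 0x12309 >> 8 == 0x123
 */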

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS		29
#define pte_to_pgoff(pte)		(pte_val(pte))
#define pgoff_to_pte(off)		((pte_t) { (off) | _PAGE_FILE })

#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */