v6.2: arch/sparc/include/asm/pgtable_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE        	(1UL << PMD_SHIFT)
#define PMD_MASK        	(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT     	24
#define PGDIR_SIZE      	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK      	(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) 	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
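
/* Editor's note (not in the original header): with PGDIR_SHIFT == 24 and
 * PMD_SHIFT == 18, a 32-bit virtual address va splits as
 *
 *	pgd index:  va >> 24           (256 entries, bits 31..24)
 *	pmd index: (va >> 18) & 0x3f   (64 entries, bits 23..18)
 *	pte index: (va >> 12) & 0x3f   (64 entries, bits 17..12)
 *	offset:     va & 0xfff         (4 KB page, PAGE_SHIFT == 12)
 *
 * and PMD_ALIGN() rounds up to the next 256 KB boundary, e.g.
 * PMD_ALIGN(0x12345678) == 0x12380000.
 */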

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE    	64
#define PTRS_PER_PMD    	64
#define PTRS_PER_PGD    	256
#define USER_PTRS_PER_PGD	PAGE_OFFSET / PGDIR_SIZE
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
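
/* Illustrative sketch (not part of the header), assuming only the
 * definitions above: because srmmu_swap() is an atomic exchange, a
 * read-and-clear built on it cannot lose a ref/mod bit update that the
 * MMU performs concurrently.  The helper name is hypothetical.
 */
static inline pte_t example_pte_get_and_clear(pte_t *ptep)
{
	/* Atomically fetch the old pte while storing the invalid pte 0. */
	return __pte(srmmu_swap((unsigned long *)ptep, 0));
}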

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}
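
/* Worked example (editor's note, not in the original header): an SRMMU
 * descriptor stores a physical address shifted right by 4, letting a
 * 32-bit entry name a 36-bit physical address.  If
 *
 *	pmd_val(pmd) & SRMMU_PTD_PMASK == 0x0123450
 *
 * the table it points to sits at physical 0x0123450 << 4 == 0x01234500,
 * i.e. pfn 0x0123450 >> (PAGE_SHIFT-4) == 0x1234 with 4 KB pages.
 */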

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

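/* Illustrative sketch (not part of the header): generic mm code composes
 * these single-bit helpers; the hypothetical helper below shows the shape
 * of a write-fault fixup on a private mapping.
 */
static inline void example_make_writable(struct mm_struct *mm,
					 unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	/* Set SRMMU_REF, SRMMU_WRITE and SRMMU_DIRTY in one go. */
	pte = pte_mkdirty(pte_mkwrite(pte_mkyoung(pte)));
	set_pte_at(mm, address, ptep, pte);
}
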
#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just directly copy the
		 * PTE over.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
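
/* Worked example (editor's note, not in the original header): SRMMU
 * physical addresses are 36 bits, the top 4 selecting an I/O space.  A pte
 * stores pa >> 4, so the 36-bit address (space << 32) | page becomes
 * (space << 28) | (page >> 4), which is exactly what mk_pte_io() builds:
 * space 0x8 and page 0xF0000000 give pte bits 0x8F000000 before the
 * protection bits are or-ed in.
 */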

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* only used by the huge vmap code, should never be called */
#define pud_page(pud)			NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
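
/* Editor's note (not in the original header): the encoding round-trips as
 * long as type and offset fit their masks from asm/pgtsrmmu.h, e.g.
 *
 *	swp_entry_t e = __swp_entry(3, 0x42);
 *	(__swp_type(e) == 3 and __swp_offset(e) == 0x42)
 *
 * The field layout is chosen so the result never reads as pte_present().
 */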

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
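
/* Worked example (editor's note, not in the original header): on sparc32
 * BITS_PER_LONG is 32, so MK_IOSPACE_PFN(8, 0x1234) == 0x80001234, with the
 * iospace in bits 31..28 and the pfn in bits 27..0.  io_remap_pfn_range()
 * then rebuilds the 36-bit physical address as
 * (8ULL << 32) | (0x1234UL << PAGE_SHIFT) before converting it back to a
 * pfn for remap_pfn_range().
 */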

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd)	((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */
v4.17: arch/sparc/include/asm/pgtable_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE        	(1UL << PMD_SHIFT)
#define PMD_MASK        	(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) 	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT     	SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE      	SRMMU_PGDIR_SIZE
#define PGDIR_MASK      	SRMMU_PGDIR_MASK
#define PTRS_PER_PTE    	1024
#define PTRS_PER_PMD    	SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD    	SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
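
/* Editor's note (not in the original header): generic mm code of this era
 * picked one of these via protection_map[], indexed by the read/write/exec
 * bits of vm_flags plus shared vs. private.  Writable private mappings
 * (__P010, __P011, __P110, __P111) resolve to PAGE_COPY, so the first write
 * faults and the page can be copied (copy-on-write); the shared variants
 * map straight to PAGE_SHARED.
 */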

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}
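
/* Editor's note (not in the original header): in this version a Linux pte
 * table has 1024 entries but an SRMMU hardware table only
 * SRMMU_REAL_PTRS_PER_PTE (64, since the hardware level-3 index is 6 bits),
 * so one Linux pmd entry is backed by 1024/64 == 16 hardware descriptors in
 * pmdp->pmdv[], and pmd_clear() must wipe every one of them.
 */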

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)    (pte)

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just directly copy the
		 * PTE over.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)		pte_offset_kernel(d,a)
#define pte_unmap(pte)		do{}while(0)

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

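/* Editor's note (not in the original header): the bitmap holds one bit per
 * megabyte of physical address space (hence the >> 20), set at boot for
 * ranges that actually contain RAM; e.g. an address with
 * __pa(addr) == 0x02345678 tests bit 0x23.
 */
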
/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */