/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT 18
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT 24
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
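
/* Resulting geometry (a consistency check, assuming the usual sparc32
 * PAGE_SHIFT of 12, i.e. 4 KiB pages):
 *
 *   PMD_SIZE   = 1 << 18 = 256 KiB = PTRS_PER_PTE (64, below) * 4 KiB
 *   PGDIR_SIZE = 1 << 24 =  16 MiB = PTRS_PER_PMD (64, below) * 256 KiB
 *   PTRS_PER_PGD (256, below) * 16 MiB = the full 4 GiB virtual space
 */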

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()

#define PTRS_PER_PTE 64
#define PTRS_PER_PMD 64
#define PTRS_PER_PGD 256
#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
#define PTE_SIZE (PTRS_PER_PTE*4)

#define PAGE_NONE SRMMU_PAGE_NONE
#define PAGE_SHARED SRMMU_PAGE_SHARED
#define PAGE_COPY SRMMU_PAGE_COPY
#define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized). */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly, without a performance
 * hit, for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
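
/* A sketch of what these two variables enable; the actual macros live in
 * asm/page_32.h, and the formulas below are stated here as an assumption,
 * not copied from this file:
 *
 *   __pa(v) == (unsigned long)(v) - PAGE_OFFSET + phys_base
 *   __va(p) == (void *)((unsigned long)(p) - phys_base + PAGE_OFFSET)
 *
 * i.e. a single add/subtract per conversion, with no table walk.
 */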

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction. This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
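
/* srmmu_swap() behaves as an atomic exchange: the new value is stored and
 * the old contents of *addr come back in one indivisible operation, so a
 * hardware ref/dirty update racing with the store cannot be lost.
 * Hypothetical usage (illustrative, not code from this file):
 *
 *   unsigned long old = srmmu_swap(&word, 0);
 *   // word is now 0 and `old` holds whatever the MMU last wrote there.
 */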

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
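
/* An assumption worth stating: in the 36-bit SRMMU physical address space
 * the top nibble selects the I/O space, and in PTE/PTD format that nibble
 * lands in bits 31..28. So a PTE built by mk_pte_io() below with
 * space != 0 makes srmmu_device_memory() return non-zero, while an
 * ordinary RAM mapping from mk_pte() leaves those bits clear.
 */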

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}
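
/* The walks above all decode the same PTD encoding: the descriptor holds
 * the pointed-to table's physical address shifted right by 4, so
 *
 *   table_pa = (pud_val(pud) & SRMMU_PTD_PMASK) << 4;
 *
 * and, because page tables are allocated from the uncached pool, the
 * virtual address comes from __nocache_va() rather than __va(). The
 * (pmd_t *)~0 returned for device memory is a poison pointer that should
 * never be dereferenced.
 */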

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define PFN_PTE_SHIFT (PAGE_SHIFT - 4)
#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false. This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
}

#define pte_page(pte) pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
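
/* How the physical address is packed: a PTE stores paddr >> 4 above the
 * permission bits, so with 4 KiB pages the pfn starts at bit 8
 * (PFN_PTE_SHIFT above) and a full 36-bit SRMMU physical address fits in
 * a 32-bit entry. Worked example with illustrative values:
 *
 *   mk_pte_phys(0x12345000, prot) -> pte_val == 0x01234500 | prot bits
 *   pte_pfn() of that entry       -> 0x12345 (== 0x12345000 >> 12)
 */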

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* only used by the huge vmap code, should never be called */
#define pud_page(pud) NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <-------------- offset ---------------> < type -> E 0 0 0 0 0 0
 */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
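
/* Round-trip sketch with hypothetical values (assumes type and offset fit
 * in SRMMU_SWP_TYPE_MASK and SRMMU_SWP_OFF_MASK):
 *
 *   swp_entry_t e = __swp_entry(5, 0x123);
 *   __swp_type(e)   == 5
 *   __swp_offset(e) == 0x123
 *
 * Per the format diagram above, the low bits of a swap PTE stay zero, so
 * the ET field never looks like a valid translation and pte_present()
 * stays false.
 */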

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_SWP_EXCLUSIVE);
}

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)
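
/* On sparc32 BITS_PER_LONG is 32, so the iospace number occupies pfn bits
 * 31..28. Worked example with illustrative values:
 *
 *   MK_IOSPACE_PFN(2, 0x100) == 0x20000100
 *   GET_IOSPACE(0x20000100)  == 2
 *   GET_PFN(0x20000100)      == 0x100
 *
 * io_remap_pfn_range() below undoes the packing, rebuilding the 36-bit
 * physical base as (GET_PFN(pfn) << PAGE_SHIFT) | (space << 32) before
 * passing a plain page-frame number on to remap_pfn_range().
 */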

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte(__ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START _AC(0xfe600000,UL)
#define VMALLOC_END _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */