#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#include <asm/pgtsun4c.h>
#include <asm/pgtsrmmu.h>
#include <asm/vac-ops.h>
#include <asm/oplib.h>
#include <asm/btfixup.h>
#include <asm/system.h>


struct vm_area_struct;
struct page;

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)

BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)

#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()

BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)

#define PMD_SHIFT SUN4C_PMD_SHIFT
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
#define FIRST_USER_ADDRESS 0
#define PTE_SIZE (PTRS_PER_PTE*4)

#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
extern pgprot_t PAGE_SHARED;
#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))

extern unsigned long page_kernel;

#ifdef MODULE
#define PAGE_KERNEL page_kernel
#else
#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
#endif

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

extern void paging_init(void);

/* Page table for 0-4MB for everybody; on the Sparc this
 * holds the same as on the i386.
 */
extern pte_t pg0[1024];
extern pte_t pg1[1024];
extern pte_t pg2[1024];
extern pte_t pg3[1024];

extern unsigned long ptr_in_current_pgd;

/* Here is a trick: since mmap.c needs the initializer elements for
 * protection_map[] to be constant at compile time, I set the following
 * to all zeros. I set them to the real values after I link in the
 * appropriate MMU page table routines at boot time.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
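
/*
 * Editor's note (assumption, not from the original header): the zero slots
 * above are expected to be overwritten during MMU setup, once the btfixup'd
 * page_none/page_copy/page_readonly/page_kernel values for the running
 * sun4c or srmmu MMU are known, so protection_map[] holds real pgprot
 * values by the time userland starts.
 */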

extern int num_contexts;

/* First physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;
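
/*
 * Illustrative sketch (editor's note; the real definitions live in
 * <asm/page.h>): with a fixed phys_base the conversions reduce to a
 * constant offset, roughly:
 *
 *	#define __pa(x)	((unsigned long)(x) - PAGE_OFFSET + phys_base)
 *	#define __va(x)	((void *)((unsigned long)(x) - phys_base + PAGE_OFFSET))
 */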

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 */
BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)

#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)

BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)

BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)

BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)

#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
BTFIXUPDEF_HALF(pte_writei)
BTFIXUPDEF_HALF(pte_dirtyi)
BTFIXUPDEF_HALF(pte_youngi)

static int pte_write(pte_t pte) __attribute_const__;
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
}

static int pte_dirty(pte_t pte) __attribute_const__;
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
}

static int pte_young(pte_t pte) __attribute_const__;
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
}

/*
 * The following only work if pte_present() is not true.
 */
BTFIXUPDEF_HALF(pte_filei)

static int pte_file(pte_t pte) __attribute_const__;
static inline int pte_file(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

/*
 */
BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)

static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
}

static pte_t pte_mkclean(pte_t pte) __attribute_const__;
static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
}

static pte_t pte_mkold(pte_t pte) __attribute_const__;
static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
}

BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)

#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)

#define pte_mkspecial(pte) (pte)

#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)

BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)

BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)

#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)

#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)

BTFIXUPDEF_INT(pte_modify_mask)

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)

/* Find an entry in the third-level page table.. */
BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
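
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * a minimal walk from an mm down to a kernel-mapped pte using the helpers
 * above.  Assumes struct mm_struct is fully visible here and that the
 * address is lowmem; error handling is reduced to returning NULL.
 */
static inline pte_t *example_pte_lookup(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr);
}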

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static,
 * and sun4c is guaranteed to have no highmem anyway.
 */
#define pte_offset_map(d, a) pte_offset_kernel(d,a)
#define pte_unmap(pte) do{}while(0)

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */

BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)

#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

struct seq_file;
BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)

#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)

/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4

BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)

#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)

BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
		unsigned long, unsigned int)
BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)

extern int invalid_segment;

/* Encode and de-code a swap entry */
BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)

#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
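
/*
 * Editor's note: the layout of the type and offset fields inside a swap
 * pte differs between sun4c and srmmu, which is why the encode/decode
 * helpers above are btfixup calls patched at boot rather than plain macros.
 */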

/* file-offset-in-pte helpers */
BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);

#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)

/*
 * This is made a constant because mm/fremap.c required a constant.
 * Note that layout of these bits is different between sun4c.c and srmmu.c.
 */
#define PTE_FILE_MAX_BITS 24

/*
 */
struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
extern struct ctx_list ctx_free; /* Head of free list */
extern struct ctx_list ctx_used; /* Head of used contexts list */

#define NO_CONTEXT -1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
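
/*
 * Illustrative sketch (editor's example): ctx_free and ctx_used are
 * circular lists with sentinel heads, so handing a context to an mm is an
 * unlink from the free list followed by an insert on the used list.
 */
static inline void example_take_context(struct ctx_list *entry, struct mm_struct *mm)
{
	remove_from_ctx_list(entry);
	add_to_used_ctxlist(entry);
	entry->ctx_mm = mm;
}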

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4:
	case sun4c:
		return sun4c_get_pte (addr) << PAGE_SHIFT;
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4:
	case sun4c:
		return -1; /* Don't check iospace on sun4c */
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
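
/*
 * Each bit in sparc_valid_addr_bitmap covers 1 MB of physical address
 * space, hence the ">> 20" above.
 */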

extern int io_remap_pfn_range(struct vm_area_struct *vma,
			      unsigned long from, unsigned long pfn,
			      unsigned long size, pgprot_t prot);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	(sparc_cpu_model == sun4c) || __changed; \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START _AC(0xfe600000,UL)
/* XXX Alter this when I get around to fixing sun4c - Anton */
#define VMALLOC_END _AC(0xffc00000,UL)


/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h: Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT 18
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT 24
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()

#define PTRS_PER_PTE 64
#define PTRS_PER_PMD 64
#define PTRS_PER_PGD 256
#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
#define PTE_SIZE (PTRS_PER_PTE*4)

#define PAGE_NONE SRMMU_PAGE_NONE
#define PAGE_SHARED SRMMU_PAGE_SHARED
#define PAGE_COPY SRMMU_PAGE_COPY
#define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* First physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction. This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
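
/*
 * Editor's note: SRMMU table descriptors and ptes store a physical address
 * shifted right by 4 bits, so a 36-bit physical address fits in a 32-bit
 * entry; hence the "<< 4" and ">> (PAGE_SHIFT - 4)" conversions below.
 */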

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page(pmd_pfn(pmd));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return (pmd_t *)~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (pmd_t *)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define PFN_PTE_SHIFT (PAGE_SHIFT - 4)
#define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false. This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> PFN_PTE_SHIFT;
}

#define pte_page(pte) pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}
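
/*
 * Illustrative sketch (editor's example): building an uncached pte for a
 * device page from a bus address and an iospace number using the helpers
 * above.  The argument values are arbitrary.
 */
static inline pte_t example_device_pte(unsigned long bus_paddr, int space)
{
	return mk_pte_io(bus_paddr, pgprot_noncached(PAGE_KERNEL), space);
}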

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

/* only used by the huge vmap code, should never be called */
#define pud_page(pud) NULL

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <-------------- offset ---------------> < type -> E 0 0 0 0 0 0
 */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & SRMMU_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_SWP_EXCLUSIVE);
}
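
/*
 * Illustrative sketch (editor's example): encoding a (type, offset) pair as
 * a swap pte, marking it exclusive, and decoding it again.  The values are
 * arbitrary; the exclusive bit lives outside both fields, so the round trip
 * is unaffected.
 */
static inline void example_swap_pte_roundtrip(void)
{
	swp_entry_t ent = __swp_entry(1, 0x1234);
	pte_t pte = pte_swp_mkexclusive(__swp_entry_to_pte(ent));

	ent = __pte_to_swp_entry(pte);
	/* __swp_type(ent) is 1 and __swp_offset(ent) is 0x1234 again. */
	(void)ent;
}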

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
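
/*
 * Illustrative sketch (editor's example): a driver mmap() handler would
 * pack the iospace into the pfn before handing it to io_remap_pfn_range().
 * The helper and its arguments are assumptions, not part of this header.
 */
static inline int example_mmap_device(struct vm_area_struct *vma,
				      unsigned long bus_paddr, int space)
{
	unsigned long pfn = MK_IOSPACE_PFN(space, bus_paddr >> PAGE_SHIFT);

	return io_remap_pfn_range(vma, vma->vm_start, pfn,
				  vma->vm_end - vma->vm_start,
				  pgprot_noncached(vma->vm_page_prot));
}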

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte(__ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START _AC(0xfe600000,UL)
#define VMALLOC_END _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))

#endif /* !(_SPARC_PGTABLE_H) */