/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE		__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
					 _page_cachable_default)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
					 _page_cachable_default)
#define PAGE_COPY		__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
					 _page_cachable_default)
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | \
					 _page_cachable_default)
#define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO		__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
					 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED	__pgprot(_PAGE_PRESENT | __READABLE | \
					 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read. Also, write
 * permissions imply read permissions. This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
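
/*
 * How the colour selection works (a reading of the macro above, not an
 * authoritative comment): several aliased zero pages are kept starting
 * at empty_zero_page, and zero_page_mask selects the cache-colour bits
 * of the faulting virtual address, so ZERO_PAGE(vaddr) returns the copy
 * whose colour matches vaddr.  zero_page_mask is presumably set up by
 * the boot-time mm code once the cache geometry is known.
 */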

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop() \
do { \
	unsigned long flags; \
	\
	if (cpu_has_htw) { \
		local_irq_save(flags); \
		if(!raw_current_cpu_data.htw_seq++) { \
			write_c0_pwctl(read_c0_pwctl() & \
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
			back_to_back_c0_hazard(); \
		} \
		local_irq_restore(flags); \
	} \
} while(0)

#define htw_start() \
do { \
	unsigned long flags; \
	\
	if (cpu_has_htw) { \
		local_irq_save(flags); \
		if (!--raw_current_cpu_data.htw_seq) { \
			write_c0_pwctl(read_c0_pwctl() | \
				       (1 << MIPS_PWCTL_PWEN_SHIFT)); \
			back_to_back_c0_hazard(); \
		} \
		local_irq_restore(flags); \
	} \
} while(0)
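
/*
 * htw_stop() and htw_start() bracket software page table updates: they
 * clear and set the PWEn bit in the CP0 PWCtl register so the hardware
 * page table walker does not walk entries while they are being changed,
 * and the per-CPU htw_seq count lets the pair nest.  A minimal usage
 * sketch (not taken from this file):
 *
 *	htw_stop();
 *	set_pte_at(mm, addr, ptep, pteval);
 *	htw_start();
 */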

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
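
/*
 * Ordering note (a reading of the code above): with 64-bit PTEs on a
 * 32-bit CPU the two halves cannot be stored atomically, so pte_high is
 * written first and pte_low, the half carrying the present/valid bits,
 * last, separated by smp_wmb().  A racing TLB refill should therefore
 * never see a "present" low half paired with a stale high half.
 */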

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
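
/*
 * Note on the cache handling above (hedged, not an authoritative
 * comment): __update_cache() is only called when a present PTE is being
 * installed for a page frame that this slot did not already map, i.e.
 * when a stale icache/dcache alias could exist for the new mapping;
 * clearing a PTE or rewriting it with the same pfn skips the flush.
 */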

/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte.pte_low & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
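
/*
 * Background on the "silent" bits (explanatory note; the exact bit
 * layout lives in <asm/pgtable-bits.h>): MIPS TLBs have no hardware
 * accessed/modified tracking, so _PAGE_SILENT_READ and
 * _PAGE_SILENT_WRITE are understood to be the hardware valid and dirty
 * bits.  The helpers above only set them once the software
 * _PAGE_ACCESSED/_PAGE_MODIFIED bits are set, so the first read or
 * write of a page still faults and lets the kernel update its
 * accessed/dirty state.
 */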

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
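
/*
 * Usage sketch (hypothetical, not part of this header): a driver that
 * maps device registers into user space would normally strip the cache
 * attribute before remapping, e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * pgprot_writecombine() is the analogous helper for write-combined
 * mappings such as frame buffers.
 */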

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
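
/*
 * All three pte_modify() variants above follow the same pattern: keep
 * the bits covered by _PAGE_CHG_MASK (essentially the page frame number
 * plus the state that must survive a protection change) and take the
 * remaining protection bits from newprot.  Illustrative use, roughly
 * what the generic mprotect() path does:
 *
 *	set_pte_at(mm, addr, ptep, pte_modify(old_pte, newprot));
 */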

extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
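
/*
 * Fast GUP walks page tables without taking the mm's locks; it is
 * disabled here whenever the data caches can alias (cpu_has_dc_aliases),
 * presumably because the generic fast path performs none of the cache
 * maintenance that aliasing caches would require.
 */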

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
#endif

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */