arch/mips/include/asm/pgtable.h (v4.6)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2003 Ralf Baechle
  7 */
  8#ifndef _ASM_PGTABLE_H
  9#define _ASM_PGTABLE_H
 10
 11#include <linux/mm_types.h>
 12#include <linux/mmzone.h>
 13#ifdef CONFIG_32BIT
 14#include <asm/pgtable-32.h>
 15#endif
 16#ifdef CONFIG_64BIT
 17#include <asm/pgtable-64.h>
 18#endif
 19
 
 20#include <asm/io.h>
 21#include <asm/pgtable-bits.h>
 
 22
 23struct mm_struct;
 24struct vm_area_struct;
 25
 26#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
 27#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
 28				 _page_cachable_default)
 29#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
 30				 _page_cachable_default)
 31#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
 32				 _page_cachable_default)
 33#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 34				 _PAGE_GLOBAL | _page_cachable_default)
 35#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 36				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
 37#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
 38				 _page_cachable_default)
 39#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 40			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 41
 42/*
 43 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 44 * execute, and consider it to be the same as read. Also, write
 45 * permissions imply read permissions. This is the closest we can get
 46 * by reasonable means..
 47 */
 48
 49/*
 50 * Dummy values to fill the table in mmap.c
 51 * The real values will be generated at runtime
 52 */
 53#define __P000 __pgprot(0)
 54#define __P001 __pgprot(0)
 55#define __P010 __pgprot(0)
 56#define __P011 __pgprot(0)
 57#define __P100 __pgprot(0)
 58#define __P101 __pgprot(0)
 59#define __P110 __pgprot(0)
 60#define __P111 __pgprot(0)
 61
 62#define __S000 __pgprot(0)
 63#define __S001 __pgprot(0)
 64#define __S010 __pgprot(0)
 65#define __S011 __pgprot(0)
 66#define __S100 __pgprot(0)
 67#define __S101 __pgprot(0)
 68#define __S110 __pgprot(0)
 69#define __S111 __pgprot(0)
 70
 71extern unsigned long _page_cachable_default;
 
 72
 73/*
 74 * ZERO_PAGE is a global shared page that is always zero; used
 75 * for zero-mapped memory areas etc..
 76 */
 77
 78extern unsigned long empty_zero_page;
 79extern unsigned long zero_page_mask;
 80
 81#define ZERO_PAGE(vaddr) \
 82	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
 83#define __HAVE_COLOR_ZERO_PAGE
 84
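/*
 * Editor's note: MIPS zero pages are "coloured".  When the D-cache is
 * virtually indexed and can alias, empty_zero_page spans several zeroed
 * pages and zero_page_mask covers the alias bits, so ZERO_PAGE(vaddr)
 * returns the copy whose cache colour matches the faulting address.  A
 * sketch, assuming a 16 KB alias span and 4 KB pages (zero_page_mask would
 * then be 0x3000):
 *
 *	struct page *zp = virt_to_page((void *)(empty_zero_page +
 *						(0x2000UL & 0x3000UL)));
 *
 * On CPUs without aliasing, zero_page_mask is 0 and every address maps the
 * same single zero page.
 */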
 85extern void paging_init(void);
 86
 87/*
 88 * Conversion functions: convert a page and protection to a page entry,
 89 * and a page entry and page directory to the page they refer to.
 90 */
 91#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
 92
 93#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 94#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 95#define pmd_page(pmd)		__pmd_page(pmd)
 96#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
 97
 98#define pmd_page_vaddr(pmd)	pmd_val(pmd)
 99
100#define htw_stop()							\
101do {									\
102	unsigned long flags;						\
103									\
104	if (cpu_has_htw) {						\
105		local_irq_save(flags);					\
106		if(!raw_current_cpu_data.htw_seq++) {			\
107			write_c0_pwctl(read_c0_pwctl() &		\
108				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
109			back_to_back_c0_hazard();			\
110		}							\
111		local_irq_restore(flags);				\
112	}								\
113} while(0)
114
115#define htw_start()							\
116do {									\
117	unsigned long flags;						\
118									\
119	if (cpu_has_htw) {						\
120		local_irq_save(flags);					\
121		if (!--raw_current_cpu_data.htw_seq) {			\
122			write_c0_pwctl(read_c0_pwctl() |		\
123				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
124			back_to_back_c0_hazard();			\
125		}							\
126		local_irq_restore(flags);				\
127	}								\
128} while(0)
129
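/*
 * Editor's note: htw_stop()/htw_start() nest through the per-CPU htw_seq
 * counter, so only the outermost pair actually toggles the hardware page
 * table walker via PWCtl.PWEn; interrupts are masked around the counter
 * update.  Typical usage when editing a live page table (sketch):
 *
 *	htw_stop();
 *	set_pte(ptep, entry);	// walker never sees a half-written PTE
 *	htw_start();
 */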
130#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
131
132#define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
133#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
 
134
135static inline void set_pte(pte_t *ptep, pte_t pte)
136{
137	ptep->pte_high = pte.pte_high;
138	smp_wmb();
139	ptep->pte_low = pte.pte_low;
140
 
141	if (pte.pte_high & _PAGE_GLOBAL) {
142		pte_t *buddy = ptep_buddy(ptep);
143		/*
144		 * Make sure the buddy is global too (if it's !none,
145		 * it better already be global)
146		 */
147		if (pte_none(*buddy))
148			buddy->pte_high |= _PAGE_GLOBAL;
 
149	}
150}
151#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
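/*
 * Editor's note: a MIPS TLB entry maps an even/odd pair of pages and its
 * global bit is effectively the AND of the two EntryLo G bits, so a global
 * PTE only takes effect if its buddy is global as well.  ptep_buddy()
 * (defined in the pgtable-32.h/pgtable-64.h headers included above, roughly
 * the PTE pointer XOR'd with sizeof(pte_t)) returns the other PTE of the
 * pair, and set_pte() above propagates _PAGE_GLOBAL into an empty buddy so
 * the pair stays consistent.
 */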
152
153static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
154{
155	pte_t null = __pte(0);
156
157	htw_stop();
158	/* Preserve global status for the pair */
159	if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
160		null.pte_high = _PAGE_GLOBAL;
161
162	set_pte_at(mm, addr, ptep, null);
163	htw_start();
164}
165#else
166
167#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
168#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
 
169
170/*
171 * Certain architectures need to do special things when pte's
172 * within a page table are directly modified.  Thus, the following
173 * hook is made available.
174 */
175static inline void set_pte(pte_t *ptep, pte_t pteval)
176{
177	*ptep = pteval;
178#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
179	if (pte_val(pteval) & _PAGE_GLOBAL) {
180		pte_t *buddy = ptep_buddy(ptep);
181		/*
182		 * Make sure the buddy is global too (if it's !none,
183		 * it better already be global)
184		 */
185#ifdef CONFIG_SMP
186		/*
187		 * For SMP, multiple CPUs can race, so we need to do
188		 * this atomically.
189		 */
190#ifdef CONFIG_64BIT
191#define LL_INSN "lld"
192#define SC_INSN "scd"
193#else /* CONFIG_32BIT */
194#define LL_INSN "ll"
195#define SC_INSN "sc"
196#endif
197		unsigned long page_global = _PAGE_GLOBAL;
198		unsigned long tmp;
199
200		__asm__ __volatile__ (
201			"	.set	push\n"
202			"	.set	noreorder\n"
203			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
204			"	bnez	%[tmp], 2f\n"
205			"	 or	%[tmp], %[tmp], %[global]\n"
206			"	" SC_INSN "	%[tmp], %[buddy]\n"
207			"	beqz	%[tmp], 1b\n"
208			"	 nop\n"
209			"2:\n"
210			"	.set pop"
211			: [buddy] "+m" (buddy->pte),
212			  [tmp] "=&r" (tmp)
213			: [global] "r" (page_global));
214#else /* !CONFIG_SMP */
215		if (pte_none(*buddy))
216			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
217#endif /* CONFIG_SMP */
218	}
219#endif
220}
221#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
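/*
 * Editor's note: the LL/SC loop above only stores when the buddy PTE is
 * still zero, i.e. it behaves like a compare-and-swap of 0 -> _PAGE_GLOBAL
 * on the buddy (sketch):
 *
 *	cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
 *
 * which is exactly how the v5.14.15 copy of this file, further below,
 * spells it.
 */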
222
223static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
224{
225	htw_stop();
226#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
227	/* Preserve global status for the pair */
228	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
229		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
230	else
231#endif
232		set_pte_at(mm, addr, ptep, __pte(0));
233	htw_start();
234}
235#endif
236
237/*
238 * (pmds are folded into puds so this doesn't get actually called,
239 * but the define is needed for a generic inline function.)
240 */
241#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
242
243#ifndef __PAGETABLE_PMD_FOLDED
244/*
245 * (puds are folded into pgds so this doesn't get actually called,
246 * but the define is needed for a generic inline function.)
247 */
248#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
249#endif
250
251#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
252#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
253#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
254
255/*
256 * We used to declare this array with size but gcc 3.3 and older are not able
257 * to find that this expression is a constant, so the size is dropped.
258 */
259extern pgd_t swapper_pg_dir[];
260
261/*
262 * The following only work if pte_present() is true.
263 * Undefined behaviour if not..
264 */
265#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
266static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
267static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
268static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
269
270static inline pte_t pte_wrprotect(pte_t pte)
271{
272	pte.pte_low  &= ~_PAGE_WRITE;
273	pte.pte_high &= ~_PAGE_SILENT_WRITE;
274	return pte;
275}
276
277static inline pte_t pte_mkclean(pte_t pte)
278{
279	pte.pte_low  &= ~_PAGE_MODIFIED;
280	pte.pte_high &= ~_PAGE_SILENT_WRITE;
281	return pte;
282}
283
284static inline pte_t pte_mkold(pte_t pte)
285{
286	pte.pte_low  &= ~_PAGE_ACCESSED;
287	pte.pte_high &= ~_PAGE_SILENT_READ;
288	return pte;
289}
290
291static inline pte_t pte_mkwrite(pte_t pte)
292{
293	pte.pte_low |= _PAGE_WRITE;
294	if (pte.pte_low & _PAGE_MODIFIED)
295		pte.pte_high |= _PAGE_SILENT_WRITE;
 
296	return pte;
297}
298
299static inline pte_t pte_mkdirty(pte_t pte)
300{
301	pte.pte_low |= _PAGE_MODIFIED;
302	if (pte.pte_low & _PAGE_WRITE)
303		pte.pte_high |= _PAGE_SILENT_WRITE;
 
304	return pte;
305}
306
307static inline pte_t pte_mkyoung(pte_t pte)
308{
309	pte.pte_low |= _PAGE_ACCESSED;
310	if (pte.pte_low & _PAGE_READ)
311		pte.pte_high |= _PAGE_SILENT_READ;
 
312	return pte;
313}
314#else
315static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
316static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
317static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
318
319static inline pte_t pte_wrprotect(pte_t pte)
320{
321	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
322	return pte;
323}
324
325static inline pte_t pte_mkclean(pte_t pte)
326{
327	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
328	return pte;
329}
330
331static inline pte_t pte_mkold(pte_t pte)
332{
333	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
334	return pte;
335}
336
337static inline pte_t pte_mkwrite(pte_t pte)
338{
339	pte_val(pte) |= _PAGE_WRITE;
340	if (pte_val(pte) & _PAGE_MODIFIED)
341		pte_val(pte) |= _PAGE_SILENT_WRITE;
342	return pte;
343}
344
345static inline pte_t pte_mkdirty(pte_t pte)
346{
347	pte_val(pte) |= _PAGE_MODIFIED;
348	if (pte_val(pte) & _PAGE_WRITE)
349		pte_val(pte) |= _PAGE_SILENT_WRITE;
350	return pte;
351}
352
353static inline pte_t pte_mkyoung(pte_t pte)
354{
355	pte_val(pte) |= _PAGE_ACCESSED;
356#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
357	if (!(pte_val(pte) & _PAGE_NO_READ))
358		pte_val(pte) |= _PAGE_SILENT_READ;
359	else
360#endif
361	if (pte_val(pte) & _PAGE_READ)
362		pte_val(pte) |= _PAGE_SILENT_READ;
363	return pte;
364}
365
366#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
367static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
368
369static inline pte_t pte_mkhuge(pte_t pte)
370{
371	pte_val(pte) |= _PAGE_HUGE;
372	return pte;
373}
374#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
375#endif
376static inline int pte_special(pte_t pte)	{ return 0; }
377static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
378
379/*
 380 * Macro to mark a page protection value as "uncacheable".  Note
381 * that "protection" is really a misnomer here as the protection value
382 * contains the memory attribute bits, dirty bits, and various other
383 * bits as well.
384 */
385#define pgprot_noncached pgprot_noncached
386
387static inline pgprot_t pgprot_noncached(pgprot_t _prot)
388{
389	unsigned long prot = pgprot_val(_prot);
390
391	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
392
393	return __pgprot(prot);
394}
395
396#define pgprot_writecombine pgprot_writecombine
397
398static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
399{
400	unsigned long prot = pgprot_val(_prot);
401
402	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
403	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;
404
405	return __pgprot(prot);
406}
407
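/*
 * Editor's note: the usual consumer of pgprot_noncached()/
 * pgprot_writecombine() is a driver mmap() handler mapping device memory.
 * A minimal sketch, where the device and its physical base MYDEV_PHYS are
 * hypothetical:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return io_remap_pfn_range(vma, vma->vm_start,
 *					  MYDEV_PHYS >> PAGE_SHIFT,
 *					  size, vma->vm_page_prot);
 *	}
 */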
408/*
409 * Conversion functions: convert a page and protection to a page entry,
410 * and a page entry and page directory to the page they refer to.
411 */
412#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
413
414#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
415static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
416{
417	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
418	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
419	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
420	pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
421	return pte;
422}
423#else
424static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
425{
426	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
427}
428#endif
429
430
431extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
432	pte_t pte);
433extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
434	pte_t pte);
435
436static inline void update_mmu_cache(struct vm_area_struct *vma,
437	unsigned long address, pte_t *ptep)
438{
439	pte_t pte = *ptep;
440	__update_tlb(vma, address, pte);
441	__update_cache(vma, address, pte);
442}
443
444static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
445	unsigned long address, pmd_t *pmdp)
446{
447	pte_t pte = *(pte_t *)pmdp;
448
449	__update_tlb(vma, address, pte);
450}
451
452#define kern_addr_valid(addr)	(1)
453
454#ifdef CONFIG_PHYS_ADDR_T_64BIT
455extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);
456
457static inline int io_remap_pfn_range(struct vm_area_struct *vma,
458		unsigned long vaddr,
459		unsigned long pfn,
460		unsigned long size,
461		pgprot_t prot)
462{
463	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
464	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
465}
466#define io_remap_pfn_range io_remap_pfn_range
467#endif
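/*
 * Editor's note: with 64-bit physical addresses on a 32-bit kernel, the
 * wrapper above lets a platform-provided fixup_bigphys_addr() relocate a
 * peripheral's physical address (e.g. above 4 GB) before the otherwise
 * ordinary remap_pfn_range() call installs the mapping.
 */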
468
469#ifdef CONFIG_TRANSPARENT_HUGEPAGE
470
471extern int has_transparent_hugepage(void);
472
473static inline int pmd_trans_huge(pmd_t pmd)
474{
475	return !!(pmd_val(pmd) & _PAGE_HUGE);
476}
477
478static inline pmd_t pmd_mkhuge(pmd_t pmd)
479{
480	pmd_val(pmd) |= _PAGE_HUGE;
481
482	return pmd;
483}
484
485extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
486		       pmd_t *pmdp, pmd_t pmd);
487
488#define __HAVE_ARCH_PMD_WRITE
489static inline int pmd_write(pmd_t pmd)
490{
491	return !!(pmd_val(pmd) & _PAGE_WRITE);
492}
493
494static inline pmd_t pmd_wrprotect(pmd_t pmd)
495{
496	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
497	return pmd;
498}
499
500static inline pmd_t pmd_mkwrite(pmd_t pmd)
501{
502	pmd_val(pmd) |= _PAGE_WRITE;
503	if (pmd_val(pmd) & _PAGE_MODIFIED)
504		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
505
506	return pmd;
507}
508
509static inline int pmd_dirty(pmd_t pmd)
510{
511	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
512}
513
514static inline pmd_t pmd_mkclean(pmd_t pmd)
515{
516	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
517	return pmd;
518}
519
520static inline pmd_t pmd_mkdirty(pmd_t pmd)
521{
522	pmd_val(pmd) |= _PAGE_MODIFIED;
523	if (pmd_val(pmd) & _PAGE_WRITE)
524		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
525
526	return pmd;
527}
528
529static inline int pmd_young(pmd_t pmd)
530{
531	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
532}
533
534static inline pmd_t pmd_mkold(pmd_t pmd)
535{
536	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
537
538	return pmd;
539}
540
541static inline pmd_t pmd_mkyoung(pmd_t pmd)
542{
543	pmd_val(pmd) |= _PAGE_ACCESSED;
544
545#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
546	if (!(pmd_val(pmd) & _PAGE_NO_READ))
547		pmd_val(pmd) |= _PAGE_SILENT_READ;
548	else
549#endif
550	if (pmd_val(pmd) & _PAGE_READ)
551		pmd_val(pmd) |= _PAGE_SILENT_READ;
552
553	return pmd;
554}
555
556/* Extern to avoid header file madness */
557extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
558
559static inline unsigned long pmd_pfn(pmd_t pmd)
560{
561	return pmd_val(pmd) >> _PFN_SHIFT;
562}
563
564static inline struct page *pmd_page(pmd_t pmd)
565{
566	if (pmd_trans_huge(pmd))
567		return pfn_to_page(pmd_pfn(pmd));
568
569	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
570}
571
572static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
573{
574	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
 
575	return pmd;
576}
577
578static inline pmd_t pmd_mknotpresent(pmd_t pmd)
579{
580	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
581
582	return pmd;
583}
584
585/*
586 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
587 * different prototype.
588 */
589#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
590static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
591					    unsigned long address, pmd_t *pmdp)
592{
593	pmd_t old = *pmdp;
594
595	pmd_clear(pmdp);
596
597	return old;
598}
599
600#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
601
602#include <asm-generic/pgtable.h>
603
604/*
605 * uncached accelerated TLB map for video memory access
606 */
607#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
608#define __HAVE_PHYS_MEM_ACCESS_PROT
609
610struct file;
611pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
612		unsigned long size, pgprot_t vma_prot);
613int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
614		unsigned long size, pgprot_t *vma_prot);
615#endif
616
617/*
618 * We provide our own get_unmapped area to cope with the virtual aliasing
619 * constraints placed on us by the cache architecture.
620 */
621#define HAVE_ARCH_UNMAPPED_AREA
622#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
623
624/*
625 * No page table caches to initialise
626 */
627#define pgtable_cache_init()	do { } while (0)
628
629#endif /* _ASM_PGTABLE_H */
arch/mips/include/asm/pgtable.h (v5.14.15)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2003 Ralf Baechle
  7 */
  8#ifndef _ASM_PGTABLE_H
  9#define _ASM_PGTABLE_H
 10
 11#include <linux/mm_types.h>
 12#include <linux/mmzone.h>
 13#ifdef CONFIG_32BIT
 14#include <asm/pgtable-32.h>
 15#endif
 16#ifdef CONFIG_64BIT
 17#include <asm/pgtable-64.h>
 18#endif
 19
 20#include <asm/cmpxchg.h>
 21#include <asm/io.h>
 22#include <asm/pgtable-bits.h>
 23#include <asm/cpu-features.h>
 24
 25struct mm_struct;
 26struct vm_area_struct;
 27
 28#define PAGE_SHARED	vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED)
 29
 30#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 31				 _PAGE_GLOBAL | _page_cachable_default)
 32#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 33				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
 34#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
 35			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 36
 37/*
 38 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 39 * execute, and consider it to be the same as read. Also, write
 40 * permissions imply read permissions. This is the closest we can get
 41 * by reasonable means..
 42 */
 43
 44/*
 45 * Dummy values to fill the table in mmap.c
 46 * The real values will be generated at runtime
 47 */
 48#define __P000 __pgprot(0)
 49#define __P001 __pgprot(0)
 50#define __P010 __pgprot(0)
 51#define __P011 __pgprot(0)
 52#define __P100 __pgprot(0)
 53#define __P101 __pgprot(0)
 54#define __P110 __pgprot(0)
 55#define __P111 __pgprot(0)
 56
 57#define __S000 __pgprot(0)
 58#define __S001 __pgprot(0)
 59#define __S010 __pgprot(0)
 60#define __S011 __pgprot(0)
 61#define __S100 __pgprot(0)
 62#define __S101 __pgprot(0)
 63#define __S110 __pgprot(0)
 64#define __S111 __pgprot(0)
 65
 66extern unsigned long _page_cachable_default;
 67extern void __update_cache(unsigned long address, pte_t pte);
 68
 69/*
 70 * ZERO_PAGE is a global shared page that is always zero; used
 71 * for zero-mapped memory areas etc..
 72 */
 73
 74extern unsigned long empty_zero_page;
 75extern unsigned long zero_page_mask;
 76
 77#define ZERO_PAGE(vaddr) \
 78	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
 79#define __HAVE_COLOR_ZERO_PAGE
 80
 81extern void paging_init(void);
 82
 83/*
 84 * Conversion functions: convert a page and protection to a page entry,
 85 * and a page entry and page directory to the page they refer to.
 86 */
 87#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
 88
 89#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 90#ifndef CONFIG_TRANSPARENT_HUGEPAGE
 91#define pmd_page(pmd)		__pmd_page(pmd)
 92#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */
 93
 94#define pmd_page_vaddr(pmd)	pmd_val(pmd)
 95
 96#define htw_stop()							\
 97do {									\
 98	unsigned long __flags;						\
 99									\
100	if (cpu_has_htw) {						\
101		local_irq_save(__flags);				\
102		if(!raw_current_cpu_data.htw_seq++) {			\
103			write_c0_pwctl(read_c0_pwctl() &		\
104				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
105			back_to_back_c0_hazard();			\
106		}							\
107		local_irq_restore(__flags);				\
108	}								\
109} while(0)
110
111#define htw_start()							\
112do {									\
113	unsigned long __flags;						\
114									\
115	if (cpu_has_htw) {						\
116		local_irq_save(__flags);				\
117		if (!--raw_current_cpu_data.htw_seq) {			\
118			write_c0_pwctl(read_c0_pwctl() |		\
119				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
120			back_to_back_c0_hazard();			\
121		}							\
122		local_irq_restore(__flags);				\
123	}								\
124} while(0)
125
126static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
127			      pte_t *ptep, pte_t pteval);
128
129#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
130
131#ifdef CONFIG_XPA
132# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
133#else
134# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
135#endif
136
137#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
138#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
139
140static inline void set_pte(pte_t *ptep, pte_t pte)
141{
142	ptep->pte_high = pte.pte_high;
143	smp_wmb();
144	ptep->pte_low = pte.pte_low;
145
146#ifdef CONFIG_XPA
147	if (pte.pte_high & _PAGE_GLOBAL) {
148#else
149	if (pte.pte_low & _PAGE_GLOBAL) {
150#endif
151		pte_t *buddy = ptep_buddy(ptep);
152		/*
153		 * Make sure the buddy is global too (if it's !none,
154		 * it better already be global)
155		 */
156		if (pte_none(*buddy)) {
157			if (!IS_ENABLED(CONFIG_XPA))
158				buddy->pte_low |= _PAGE_GLOBAL;
159			buddy->pte_high |= _PAGE_GLOBAL;
160		}
161	}
162}
 
163
164static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
165{
166	pte_t null = __pte(0);
167
168	htw_stop();
169	/* Preserve global status for the pair */
170	if (IS_ENABLED(CONFIG_XPA)) {
171		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
172			null.pte_high = _PAGE_GLOBAL;
173	} else {
174		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
175			null.pte_low = null.pte_high = _PAGE_GLOBAL;
176	}
177
178	set_pte_at(mm, addr, ptep, null);
179	htw_start();
180}
181#else
182
183#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
184#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
185#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
186
187/*
188 * Certain architectures need to do special things when pte's
189 * within a page table are directly modified.  Thus, the following
190 * hook is made available.
191 */
192static inline void set_pte(pte_t *ptep, pte_t pteval)
193{
194	*ptep = pteval;
195#if !defined(CONFIG_CPU_R3K_TLB)
196	if (pte_val(pteval) & _PAGE_GLOBAL) {
197		pte_t *buddy = ptep_buddy(ptep);
198		/*
199		 * Make sure the buddy is global too (if it's !none,
200		 * it better already be global)
201		 */
202# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
203		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
204# else
205		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
206# endif
207	}
208#endif
209}
 
210
211static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
212{
213	htw_stop();
214#if !defined(CONFIG_CPU_R3K_TLB)
215	/* Preserve global status for the pair */
216	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
217		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
218	else
219#endif
220		set_pte_at(mm, addr, ptep, __pte(0));
221	htw_start();
222}
223#endif
224
225static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
226			      pte_t *ptep, pte_t pteval)
227{
228
229	if (!pte_present(pteval))
230		goto cache_sync_done;
231
232	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
233		goto cache_sync_done;
234
235	__update_cache(addr, pteval);
236cache_sync_done:
237	set_pte(ptep, pteval);
238}
239
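/*
 * Editor's note: compared with the v4.6 listing above, set_pte_at() is now
 * a real function.  Before a present PTE that maps a new pfn is installed,
 * __update_cache() writes back any dirty aliases in the virtually indexed
 * D-cache; the cache step is skipped when the new PTE is not present or
 * still points at the same page.
 */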
240/*
241 * (pmds are folded into puds so this doesn't get actually called,
242 * but the define is needed for a generic inline function.)
243 */
244#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
245
246#ifndef __PAGETABLE_PMD_FOLDED
247/*
248 * (puds are folded into pgds so this doesn't get actually called,
249 * but the define is needed for a generic inline function.)
250 */
251#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
252#endif
253
254#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
255#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
256#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
257
258/*
259 * We used to declare this array with size but gcc 3.3 and older are not able
260 * to find that this expression is a constant, so the size is dropped.
261 */
262extern pgd_t swapper_pg_dir[];
263
264/*
265 * Platform specific pte_special() and pte_mkspecial() definitions
266 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
267 */
268#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
269#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
270static inline int pte_special(pte_t pte)
271{
272	return pte.pte_low & _PAGE_SPECIAL;
273}
274
275static inline pte_t pte_mkspecial(pte_t pte)
276{
277	pte.pte_low |= _PAGE_SPECIAL;
278	return pte;
279}
280#else
281static inline int pte_special(pte_t pte)
282{
283	return pte_val(pte) & _PAGE_SPECIAL;
284}
285
286static inline pte_t pte_mkspecial(pte_t pte)
287{
288	pte_val(pte) |= _PAGE_SPECIAL;
289	return pte;
290}
291#endif
292#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
293
294/*
295 * The following only work if pte_present() is true.
296 * Undefined behaviour if not..
297 */
298#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
299static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
300static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
301static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
302
303static inline pte_t pte_wrprotect(pte_t pte)
304{
305	pte.pte_low  &= ~_PAGE_WRITE;
306	if (!IS_ENABLED(CONFIG_XPA))
307		pte.pte_low &= ~_PAGE_SILENT_WRITE;
308	pte.pte_high &= ~_PAGE_SILENT_WRITE;
309	return pte;
310}
311
312static inline pte_t pte_mkclean(pte_t pte)
313{
314	pte.pte_low  &= ~_PAGE_MODIFIED;
315	if (!IS_ENABLED(CONFIG_XPA))
316		pte.pte_low &= ~_PAGE_SILENT_WRITE;
317	pte.pte_high &= ~_PAGE_SILENT_WRITE;
318	return pte;
319}
320
321static inline pte_t pte_mkold(pte_t pte)
322{
323	pte.pte_low  &= ~_PAGE_ACCESSED;
324	if (!IS_ENABLED(CONFIG_XPA))
325		pte.pte_low &= ~_PAGE_SILENT_READ;
326	pte.pte_high &= ~_PAGE_SILENT_READ;
327	return pte;
328}
329
330static inline pte_t pte_mkwrite(pte_t pte)
331{
332	pte.pte_low |= _PAGE_WRITE;
333	if (pte.pte_low & _PAGE_MODIFIED) {
334		if (!IS_ENABLED(CONFIG_XPA))
335			pte.pte_low |= _PAGE_SILENT_WRITE;
336		pte.pte_high |= _PAGE_SILENT_WRITE;
337	}
338	return pte;
339}
340
341static inline pte_t pte_mkdirty(pte_t pte)
342{
343	pte.pte_low |= _PAGE_MODIFIED;
344	if (pte.pte_low & _PAGE_WRITE) {
345		if (!IS_ENABLED(CONFIG_XPA))
346			pte.pte_low |= _PAGE_SILENT_WRITE;
347		pte.pte_high |= _PAGE_SILENT_WRITE;
348	}
349	return pte;
350}
351
352static inline pte_t pte_mkyoung(pte_t pte)
353{
354	pte.pte_low |= _PAGE_ACCESSED;
355	if (!(pte.pte_low & _PAGE_NO_READ)) {
356		if (!IS_ENABLED(CONFIG_XPA))
357			pte.pte_low |= _PAGE_SILENT_READ;
358		pte.pte_high |= _PAGE_SILENT_READ;
359	}
360	return pte;
361}
362#else
363static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
364static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
365static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
366
367static inline pte_t pte_wrprotect(pte_t pte)
368{
369	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
370	return pte;
371}
372
373static inline pte_t pte_mkclean(pte_t pte)
374{
375	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
376	return pte;
377}
378
379static inline pte_t pte_mkold(pte_t pte)
380{
381	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
382	return pte;
383}
384
385static inline pte_t pte_mkwrite(pte_t pte)
386{
387	pte_val(pte) |= _PAGE_WRITE;
388	if (pte_val(pte) & _PAGE_MODIFIED)
389		pte_val(pte) |= _PAGE_SILENT_WRITE;
390	return pte;
391}
392
393static inline pte_t pte_mkdirty(pte_t pte)
394{
395	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
396	if (pte_val(pte) & _PAGE_WRITE)
397		pte_val(pte) |= _PAGE_SILENT_WRITE;
398	return pte;
399}
400
401static inline pte_t pte_mkyoung(pte_t pte)
402{
403	pte_val(pte) |= _PAGE_ACCESSED;
 
404	if (!(pte_val(pte) & _PAGE_NO_READ))
405		pte_val(pte) |= _PAGE_SILENT_READ;
406	return pte;
407}
408
409#define pte_sw_mkyoung	pte_mkyoung
410
411#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
412static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }
413
414static inline pte_t pte_mkhuge(pte_t pte)
415{
416	pte_val(pte) |= _PAGE_HUGE;
417	return pte;
418}
419#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
420
421#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
422static inline bool pte_soft_dirty(pte_t pte)
423{
424	return pte_val(pte) & _PAGE_SOFT_DIRTY;
425}
426#define pte_swp_soft_dirty pte_soft_dirty
427
428static inline pte_t pte_mksoft_dirty(pte_t pte)
429{
430	pte_val(pte) |= _PAGE_SOFT_DIRTY;
431	return pte;
432}
433#define pte_swp_mksoft_dirty pte_mksoft_dirty
434
435static inline pte_t pte_clear_soft_dirty(pte_t pte)
436{
437	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
438	return pte;
439}
440#define pte_swp_clear_soft_dirty pte_clear_soft_dirty
441
442#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
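/*
 * Editor's note: the soft-dirty bit is a software-only change tracker.
 * User space (typically checkpoint/restore tools) clears it through
 * /proc/PID/clear_refs and reads it back via pagemap to find pages written
 * since the last snapshot.
 */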
443
444#endif
445
446/*
 447 * Macro to mark a page protection value as "uncacheable".  Note
448 * that "protection" is really a misnomer here as the protection value
449 * contains the memory attribute bits, dirty bits, and various other
450 * bits as well.
451 */
452#define pgprot_noncached pgprot_noncached
453
454static inline pgprot_t pgprot_noncached(pgprot_t _prot)
455{
456	unsigned long prot = pgprot_val(_prot);
457
458	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
459
460	return __pgprot(prot);
461}
462
463#define pgprot_writecombine pgprot_writecombine
464
465static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
466{
467	unsigned long prot = pgprot_val(_prot);
468
469	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
470	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;
471
472	return __pgprot(prot);
473}
474
475static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
476						unsigned long address)
477{
478}
479
480#define __HAVE_ARCH_PTE_SAME
481static inline int pte_same(pte_t pte_a, pte_t pte_b)
482{
483	return pte_val(pte_a) == pte_val(pte_b);
484}
485
486#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
487static inline int ptep_set_access_flags(struct vm_area_struct *vma,
488					unsigned long address, pte_t *ptep,
489					pte_t entry, int dirty)
490{
491	if (!pte_same(*ptep, entry))
492		set_pte_at(vma->vm_mm, address, ptep, entry);
493	/*
494	 * update_mmu_cache will unconditionally execute, handling both
495	 * the case that the PTE changed and the spurious fault case.
496	 */
497	return true;
498}
499
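/*
 * Editor's note: the generic ptep_set_access_flags() returns true only when
 * the PTE actually changed.  Always returning true here makes the fault
 * path call update_mmu_cache() (and hence __update_tlb()) even for spurious
 * faults, which is presumably why flush_tlb_fix_spurious_fault() above can
 * be left empty.
 */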
500/*
501 * Conversion functions: convert a page and protection to a page entry,
502 * and a page entry and page directory to the page they refer to.
503 */
504#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
505
506#if defined(CONFIG_XPA)
507static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
508{
509	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
510	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
511	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
512	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
513	return pte;
514}
515#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
516static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
517{
518	pte.pte_low  &= _PAGE_CHG_MASK;
519	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
520	pte.pte_low  |= pgprot_val(newprot);
521	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
522	return pte;
523}
524#else
525static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
526{
527	pte_val(pte) &= _PAGE_CHG_MASK;
528	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
529	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
530		pte_val(pte) |= _PAGE_SILENT_READ;
531	return pte;
532}
533#endif
534
535
536extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
537	pte_t pte);
538
539static inline void update_mmu_cache(struct vm_area_struct *vma,
540	unsigned long address, pte_t *ptep)
541{
542	pte_t pte = *ptep;
543	__update_tlb(vma, address, pte);
 
544}
545
546#define	__HAVE_ARCH_UPDATE_MMU_TLB
547#define update_mmu_tlb	update_mmu_cache
548
549static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
550	unsigned long address, pmd_t *pmdp)
551{
552	pte_t pte = *(pte_t *)pmdp;
553
554	__update_tlb(vma, address, pte);
555}
556
557#define kern_addr_valid(addr)	(1)
558
559/*
560 * Allow physical addresses to be fixed up to help 36-bit peripherals.
561 */
562#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
563phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
564int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
565		unsigned long pfn, unsigned long size, pgprot_t prot);
566#define io_remap_pfn_range io_remap_pfn_range
567#else
568#define fixup_bigphys_addr(addr, size)	(addr)
569#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
570
571#ifdef CONFIG_TRANSPARENT_HUGEPAGE
572
573/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
574#define pmdp_establish generic_pmdp_establish
575
576#define has_transparent_hugepage has_transparent_hugepage
577extern int has_transparent_hugepage(void);
578
579static inline int pmd_trans_huge(pmd_t pmd)
580{
581	return !!(pmd_val(pmd) & _PAGE_HUGE);
582}
583
584static inline pmd_t pmd_mkhuge(pmd_t pmd)
585{
586	pmd_val(pmd) |= _PAGE_HUGE;
587
588	return pmd;
589}
590
591extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
592		       pmd_t *pmdp, pmd_t pmd);
593
594#define pmd_write pmd_write
595static inline int pmd_write(pmd_t pmd)
596{
597	return !!(pmd_val(pmd) & _PAGE_WRITE);
598}
599
600static inline pmd_t pmd_wrprotect(pmd_t pmd)
601{
602	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
603	return pmd;
604}
605
606static inline pmd_t pmd_mkwrite(pmd_t pmd)
607{
608	pmd_val(pmd) |= _PAGE_WRITE;
609	if (pmd_val(pmd) & _PAGE_MODIFIED)
610		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
611
612	return pmd;
613}
614
615static inline int pmd_dirty(pmd_t pmd)
616{
617	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
618}
619
620static inline pmd_t pmd_mkclean(pmd_t pmd)
621{
622	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
623	return pmd;
624}
625
626static inline pmd_t pmd_mkdirty(pmd_t pmd)
627{
628	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
629	if (pmd_val(pmd) & _PAGE_WRITE)
630		pmd_val(pmd) |= _PAGE_SILENT_WRITE;
631
632	return pmd;
633}
634
635static inline int pmd_young(pmd_t pmd)
636{
637	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
638}
639
640static inline pmd_t pmd_mkold(pmd_t pmd)
641{
642	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
643
644	return pmd;
645}
646
647static inline pmd_t pmd_mkyoung(pmd_t pmd)
648{
649	pmd_val(pmd) |= _PAGE_ACCESSED;
650
 
651	if (!(pmd_val(pmd) & _PAGE_NO_READ))
652		pmd_val(pmd) |= _PAGE_SILENT_READ;
653
654	return pmd;
655}
656
657#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
658static inline int pmd_soft_dirty(pmd_t pmd)
659{
660	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
661}
662
663static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
664{
665	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
666	return pmd;
667}
668
669static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
670{
671	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
672	return pmd;
673}
674
675#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
676
677/* Extern to avoid header file madness */
678extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
679
680static inline unsigned long pmd_pfn(pmd_t pmd)
681{
682	return pmd_val(pmd) >> _PFN_SHIFT;
683}
684
685static inline struct page *pmd_page(pmd_t pmd)
686{
687	if (pmd_trans_huge(pmd))
688		return pfn_to_page(pmd_pfn(pmd));
689
690	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
691}
692
693static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
694{
695	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
696		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
697	return pmd;
698}
699
700static inline pmd_t pmd_mkinvalid(pmd_t pmd)
701{
702	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
703
704	return pmd;
705}
706
707/*
708 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
709 * different prototype.
710 */
711#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
712static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
713					    unsigned long address, pmd_t *pmdp)
714{
715	pmd_t old = *pmdp;
716
717	pmd_clear(pmdp);
718
719	return old;
720}
721
722#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
723
724#ifdef _PAGE_HUGE
725#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
726#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
727#endif
728
729#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)
730
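/*
 * Editor's note: fast GUP walks page tables without taking mmap_lock, so it
 * is simply disallowed here whenever the D-cache can alias
 * (cpu_has_dc_aliases); those configurations fall back to the regular
 * get_user_pages() slow path, presumably so the normal fault path and its
 * cache handling are always involved.
 */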
731/*
732 * We provide our own get_unmapped area to cope with the virtual aliasing
733 * constraints placed on us by the cache architecture.
734 */
735#define HAVE_ARCH_UNMAPPED_AREA
736#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
737
738#endif /* _ASM_PGTABLE_H */