arch/powerpc/include/asm/pgtable.h (v6.9.4)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set it RO yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into detail on
 * some of the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, we can align things better
	 * with memory block sizes starting from 128MB. Hence align things
	 * with PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
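
The pte accessors in the header are easiest to see in combination. The snippet below is an illustrative sketch only, not part of the header: pte_roundtrip_example() is a made-up function, while mk_pte(), pte_pfn(), pte_pgprot(), page_to_pfn() and PAGE_KERNEL are the macros and helpers declared above or standard kernel symbols. mk_pte() packs a page's pfn together with a protection value into a pte, pte_pfn() recovers the pfn, and pte_pgprot() recovers every bit except the pfn.

#include <linux/mm.h>

/* Illustrative sketch, not kernel code: round-trip a page through the
 * accessors defined in the powerpc pgtable.h shown above. */
static void pte_roundtrip_example(struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_KERNEL);	/* pfn + protection bits */
	unsigned long pfn = pte_pfn(pte);	/* just the pfn again */
	pgprot_t prot = pte_pgprot(pte);	/* all bits except the pfn */

	WARN_ON(pfn != page_to_pfn(page));
	WARN_ON(pgprot_val(prot) != (pte_val(pte) & ~PTE_RPN_MASK));
}
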
arch/powerpc/include/asm/pgtable.h (v6.13.7)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set it RO yet. */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#define PFN_PTE_SHIFT		PTE_RPN_SHIFT

void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
#define pte_pgprot pte_pgprot
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return pte_pgprot(pte_exprotect(__pte(pgprot_val(prot))));
}
#define pgprot_nx pgprot_nx

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/* can we use this in kvm */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

#ifdef CONFIG_PPC32
void __init *early_alloc_pgtable(unsigned long size);
#endif
pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty);

pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot);

struct file;
static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
					    unsigned long size, pgprot_t vma_prot)
{
	return __phys_mem_access_prot(pfn, size, vma_prot);
}
#define __HAVE_PHYS_MEM_ACCESS_PROT

void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	if ((mmu_has_feature(MMU_FTR_HPTE_TABLE) && !radix_enabled()) ||
	    (IS_ENABLED(CONFIG_PPC_E500) && IS_ENABLED(CONFIG_HUGETLB_PAGE)))
		__update_mmu_cache(vma, address, ptep);
}

/*
 * When used, PTE_FRAG_NR is defined in subarch pgtable.h
 * so we are sure it is included when arriving here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory goes into detail on
 * some of the restrictions. We don't check for PMD_SIZE because our
 * vmemmap allocation code can fall back correctly. The pageblock
 * alignment requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and 2M PMD_SIZE, we can align things better
	 * with memory block sizes starting from 128MB. Hence align things
	 * with PMD_SIZE.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */
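
For context on phys_mem_access_prot(): it lets the architecture pick the final page protection for mappings of physical memory, in the style of /dev/mem. The sketch below is illustrative only and assumes a hypothetical character-device handler named example_mmap(); phys_mem_access_prot() is the inline defined in the header above, and remap_pfn_range() is the standard kernel API for mapping a physical range into userspace.

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative sketch, not kernel code: an mmap handler that lets the
 * architecture choose cacheability/guarding for the range before it is
 * remapped, the way /dev/mem style drivers do. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Let the architecture override the protection for this range. */
	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
						 vma->vm_page_prot);

	/* Map the physical pages into the caller's address space. */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
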