v6.9.4 (arch/powerpc/include/asm/hugetlb.h)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
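
/*
 * Pull in the MMU-family-specific hugetlb helpers: 64-bit book3s,
 * e500, or 8xx.  At most one of these headers applies to any build.
 */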
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
#elif defined(CONFIG_PPC_E500)
#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */

extern bool hugetlb_disabled;

void __init hugetlbpage_init_defaultsize(void);
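
/*
 * With the hash MMU on 64-bit book3s, the user address space is carved
 * into slices that each carry a fixed page size, so a range inside a
 * hugepage slice can only map hugepages.  Radix and the nohash
 * platforms have no slices, and the check below compiles away to 0.
 */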
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range

#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
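/*
 * Each __HAVE_ARCH_* define tells the generic hugetlb code that
 * powerpc supplies its own version of the hook, so the generic
 * fallback is not emitted; most of them are consumed by
 * <asm-generic/hugetlb.h>, included at the bottom of this header.
 */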
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
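/*
 * Atomically clear the hugepage PTE and hand back its old value:
 * pte_update() clears every bit (clr = ~0UL, set = 0), and the final
 * argument (1) tells it that this is a huge mapping.
 */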
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}
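
/*
 * Clear first, then flush: once the PTE is gone no CPU can load a
 * fresh copy of it, and the flush evicts any stale hugepage
 * translation still held in a TLB.
 */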
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}

static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

static inline void __init hugetlbpage_init_defaultsize(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */
v4.17 (arch/powerpc/include/asm/hugetlb.h)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

#else
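
/*
 * Without the book3s/64 format, the hugepd value carries the page
 * table address with the shift packed into its low bits: 8xx derives
 * the shift from the _PMD_PAGE_MASK bits, the other platforms keep it
 * in HUGEPD_SHIFT_MASK.
 */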
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK);
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're all
	 * identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
#ifdef CONFIG_PPC_8xx
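/*
 * 8xx has no dedicated hugepage flush primitive; an ordinary per-page
 * TLB flush is enough to drop the huge mapping.  The other platforms
 * provide a real flush_hugetlb_page() out of line.
 */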
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
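
/*
 * The 64-bit pte_update() takes the mm and address (plus a "huge"
 * flag) so it can flush the hash page table; the 32-bit variant only
 * takes the PTE pointer and the clear/set masks.  Both atomically
 * clear the entry and return its old value.
 */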
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */