#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

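/*
 * Slab cache backing the huge-PTE tables that hugepd entries point to;
 * it is set up by the hugetlb init code in arch/powerpc/mm/hugetlbpage.c.
 */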
extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd.pd & HUGEPD_ADDR_MASK);
}

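/*
 * The MMU page-size index lives in the low bits of the hugepd value,
 * shifted left by two; hugepd_mmu_psize() recovers it so hugepd_shift()
 * can turn it into a page-size shift via mmu_psize_to_shift().
 */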
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}

#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */
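
/*
 * To summarize the two encodings above: on 64-bit Book3S a hugepd holds
 * a physical address (HUGEPD_ADDR_MASK) plus a 4-bit MMU page-size
 * index, while the other subarchs store a kernel virtual pointer tagged
 * with PD_HUGE and keep the raw page-size shift in the
 * HUGEPD_SHIFT_MASK bits.
 */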
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical. So for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
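
/*
 * Illustrative numbers for hugepte_offset() above (not tied to any one
 * platform): with a directory spanning 2^24 bytes (pdshift = 24) holding
 * 4M huge pages (hugepd_shift() = 22), the low 24 bits of addr select
 * one of 2^(24 - 22) = 4 huge PTEs, so idx ranges over 0..3.
 */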
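
/*
 * Typical use of the helper below (an illustrative sketch, not a quote
 * from any caller): look up the huge PTE for an address and recover the
 * page-size shift in one call:
 *
 *	unsigned shift;
 *	pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *	if (ptep)
 *		size = 1UL << shift;
 */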
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize
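/*
 * Defining the macro to its own name is the usual kernel idiom for
 * claiming an override: the generic code compiles its fallback only
 * when the arch has not defined the macro.
 */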

/*
 * If the arch doesn't supply something else, assume that hugepage-size
 * aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
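
/*
 * Worked example (illustrative): with a 16M huge page size,
 * huge_page_mask(h) is ~0xffffff, so both addr and len must be
 * multiples of 16M or the checks above return -EINVAL.
 */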

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
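
/*
 * In both branches above, pte_update() atomically clears every PTE bit
 * (clear mask ~0UL, nothing set) and returns the old value; the extra
 * trailing argument in the 64-bit call flags the entry as huge so the
 * flush code can size the hash/TLB invalidation correctly.
 */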

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */