/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
#elif defined(CONFIG_PPC_E500)
#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */

extern bool hugetlb_disabled;

void __init hugetlbpage_init_defaultsize(void);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
				 unsigned long len);

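/*
 * On 64-bit Book3S with the hash MMU, the slice code decides which parts
 * of the address space are reserved for huge pages; radix and the other
 * platforms place no such restriction.  Defining the macro to its own name
 * tells the generic hugetlb code not to supply its fallback version.
 */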
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range

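/*
 * Tear down the page tables backing a huge page range.  The __HAVE_ARCH_
 * define makes generic code call this arch implementation instead of its
 * default page-table freeing path.
 */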
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

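/*
 * Clear a huge page PTE and return the old value.  pte_update() is called
 * with clr = ~0UL and set = 0 so every bit is cleared, and the final
 * argument marks the entry as a huge mapping.  The __HAVE_ARCH_* define
 * stops <asm-generic/hugetlb.h> from emitting its default implementation.
 */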
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}

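/*
 * Clear the PTE as above, then flush the stale huge page translation from
 * the TLB.  Callers in generic mm code use it roughly like this
 * (illustrative sketch only, not taken from this file):
 *
 *	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 *	old  = huge_ptep_clear_flush(vma, haddr, ptep);
 *
 * after which the old PTE can be examined or a new one installed.
 */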
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
	return pte;
}

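/*
 * Update the access/dirty bits of a huge page PTE.  Provided out of line by
 * the arch; returns non-zero when the PTE actually changed so the caller
 * knows a TLB flush is needed.
 */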
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

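/*
 * Early boot hook that reserves CMA areas for gigantic huge pages
 * (used together with the hugetlb_cma= command line handling).
 */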
void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
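/*
 * Stubs for the !CONFIG_HUGETLB_PAGE case so callers can be compiled without
 * sprinkling #ifdefs: flushing does nothing, there is no hugepd directory to
 * walk, and the init-time hooks are empty.
 */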
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}

static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

static inline void __init hugetlbpage_init_defaultsize(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */