1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_SPARC64_HUGETLB_H
3#define _ASM_SPARC64_HUGETLB_H
4
5#include <asm/page.h>
6
#ifdef CONFIG_HUGETLB_PAGE
/*
 * One entry of the PUD huge-page boot-patch table: the address of an
 * instruction to patch and the replacement instruction word.  Both
 * fields are 32-bit as declared here.
 */
struct pud_huge_patch_entry {
	unsigned int addr;
	unsigned int insn;
};
/*
 * Bounds of the patch-entry table.  NOTE(review): presumably section
 * markers provided by the linker script — confirm against vmlinux.lds.
 */
extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#endif
14
/* sparc64 provides its own out-of-line implementations of these two
 * hooks; the __HAVE_ARCH_* defines opt out of the asm-generic fallbacks
 * pulled in by <asm-generic/hugetlb.h> below. */
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);
22
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
/*
 * Return the current huge PTE at @ptep.  Unlike the generic version,
 * this neither clears the entry nor flushes the TLB.
 * NOTE(review): presumably the sparc64 TLB handling makes the generic
 * clear+flush unnecessary here — confirm against the arch TLB code.
 */
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}
29
30#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
31static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
32 unsigned long addr, pte_t *ptep)
33{
34 pte_t old_pte = *ptep;
35 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
36}
37
38#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
39static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
40 unsigned long addr, pte_t *ptep,
41 pte_t pte, int dirty)
42{
43 int changed = !pte_same(*ptep, pte);
44 if (changed) {
45 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
46 flush_tlb_page(vma, addr);
47 }
48 return changed;
49}
50
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
/* Arch-specific page-table teardown for hugetlb regions; implemented
 * out of line.  floor/ceiling bound how far the teardown may reach. */
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
55
56#include <asm-generic/hugetlb.h>
57
58#endif /* _ASM_SPARC64_HUGETLB_H */
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
/* Intentionally empty: sparc64 needs no arch work at hugetlb prefault. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}
17
/* sparc64 reserves no address ranges exclusively for huge pages, so
 * every range reports "not hugepage-only". */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}
23
24/*
25 * If the arch doesn't supply something else, assume that hugepage
26 * size aligned regions are ok without further preparation.
27 */
28static inline int prepare_hugepage_range(struct file *file,
29 unsigned long addr, unsigned long len)
30{
31 if (len & ~HPAGE_MASK)
32 return -EINVAL;
33 if (addr & ~HPAGE_MASK)
34 return -EINVAL;
35 return 0;
36}
37
/* Tear down page tables covering a hugetlb region by delegating
 * directly to the generic free_pgd_range(). */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
45
/*
 * No-op on sparc64.  NOTE(review): the entry is neither cleared nor
 * flushed here — presumably handled elsewhere by the arch TLB code;
 * confirm before relying on this.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}
50
/* Huge PTEs use the regular "none" test; defer to pte_none(). */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
55
/* Write-protecting a huge PTE is the same as a regular PTE; defer to
 * pte_wrprotect(). */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
60
61static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
62 unsigned long addr, pte_t *ptep)
63{
64 pte_t old_pte = *ptep;
65 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
66}
67
68static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
69 unsigned long addr, pte_t *ptep,
70 pte_t pte, int dirty)
71{
72 int changed = !pte_same(*ptep, pte);
73 if (changed) {
74 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
75 flush_tlb_page(vma, addr);
76 }
77 return changed;
78}
79
/* Read the huge PTE at @ptep; a plain dereference suffices on sparc64. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
84
/* No arch-specific preparation is needed for a huge page; always
 * succeeds. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}
89
/* No arch-specific teardown is needed when releasing a huge page. */
static inline void arch_release_hugepage(struct page *page)
{
}
93
/* No arch-private page flags are used for huge pages; nothing to clear. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
97
98#endif /* _ASM_SPARC64_HUGETLB_H */