1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_SPARC64_HUGETLB_H
3#define _ASM_SPARC64_HUGETLB_H
4
5#include <asm/page.h>
6
7#ifdef CONFIG_HUGETLB_PAGE
/*
 * One runtime code-patch record: the address of an instruction and the
 * replacement instruction to write there.  NOTE(review): presumably used
 * to patch in PUD-sized huge page support at boot when the CPU supports
 * it — confirm against the consumer of __pud_huge_patch.
 */
struct pud_huge_patch_entry {
	unsigned int addr;	/* address of the instruction to patch */
	unsigned int insn;	/* replacement instruction word */
};
12extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
13#endif
14
15#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
16void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
17 pte_t *ptep, pte_t pte);
18
19#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
20pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
21 pte_t *ptep);
22
23#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
24static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
25 unsigned long addr, pte_t *ptep)
26{
27 return *ptep;
28}
29
30#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
31static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
32 unsigned long addr, pte_t *ptep)
33{
34 pte_t old_pte = *ptep;
35 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
36}
37
38#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
39static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
40 unsigned long addr, pte_t *ptep,
41 pte_t pte, int dirty)
42{
43 int changed = !pte_same(*ptep, pte);
44 if (changed) {
45 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
46 flush_tlb_page(vma, addr);
47 }
48 return changed;
49}
50
51#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
52void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
53 unsigned long end, unsigned long floor,
54 unsigned long ceiling);
55
56#include <asm-generic/hugetlb.h>
57
58#endif /* _ASM_SPARC64_HUGETLB_H */
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
/*
 * sparc64 reserves no address ranges exclusively for huge pages, so any
 * range may hold a mix of normal and huge mappings.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
/*
 * Intentionally a no-op on sparc64: this older interface does not
 * require the entry to be cleared here.  NOTE(review): presumably the
 * TLB flush for huge mappings happens elsewhere (TSB/flush machinery) —
 * confirm before relying on this.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}
38
/* A huge PTE is "none" exactly when the underlying PTE is none. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
43
/* Return a copy of @pte with its write-permission bit cleared. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
48
49static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
50 unsigned long addr, pte_t *ptep)
51{
52 pte_t old_pte = *ptep;
53 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
54}
55
56static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
57 unsigned long addr, pte_t *ptep,
58 pte_t pte, int dirty)
59{
60 int changed = !pte_same(*ptep, pte);
61 if (changed) {
62 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
63 flush_tlb_page(vma, addr);
64 }
65 return changed;
66}
67
/* Read the current huge PTE value via a plain dereference. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
72
/* No arch-specific page flags need clearing for hugepages on sparc64. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
76
77void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
78 unsigned long end, unsigned long floor,
79 unsigned long ceiling);
80
81#endif /* _ASM_SPARC64_HUGETLB_H */