1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_SPARC64_HUGETLB_H
3#define _ASM_SPARC64_HUGETLB_H
4
5#include <asm/page.h>
6
7#ifdef CONFIG_HUGETLB_PAGE
/*
 * One entry in the PUD huge-page instruction patch table delimited by
 * __pud_huge_patch / __pud_huge_patch_end below.
 * NOTE(review): both fields are 32-bit on a 64-bit arch — addr is
 * presumably a truncated/relative address of the patch site and insn a
 * replacement instruction encoding; confirm against the patching code.
 */
struct pud_huge_patch_entry {
	unsigned int addr;	/* location to patch */
	unsigned int insn;	/* instruction word to install there */
};
12extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
13#endif
14
15#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
16void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
17 pte_t *ptep, pte_t pte);
18
19#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
20pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
21 pte_t *ptep);
22
23#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
24static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
25 unsigned long addr, pte_t *ptep)
26{
27 return *ptep;
28}
29
30#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
31static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
32 unsigned long addr, pte_t *ptep)
33{
34 pte_t old_pte = *ptep;
35 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
36}
37
38#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
39static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
40 unsigned long addr, pte_t *ptep,
41 pte_t pte, int dirty)
42{
43 int changed = !pte_same(*ptep, pte);
44 if (changed) {
45 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
46 flush_tlb_page(vma, addr);
47 }
48 return changed;
49}
50
51#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
52void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
53 unsigned long end, unsigned long floor,
54 unsigned long ceiling);
55
56#include <asm-generic/hugetlb.h>
57
58#endif /* _ASM_SPARC64_HUGETLB_H */
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
/*
 * sparc64 reports no address ranges that are restricted to huge pages,
 * so this always answers "no".
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
/*
 * Free page-table pages backing an unmapped hugetlb range.  sparc64
 * needs nothing special in this version, so it delegates directly to
 * the generic free_pgd_range() with unchanged arguments.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
41
/*
 * Deliberate no-op in this version: the huge PTE is left in place and
 * no TLB flush is issued.  NOTE(review): the generic code presumably
 * tolerates a stale entry here — confirm against the hugetlb core for
 * this kernel version.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}
46
/* A huge PTE is empty ("none") under the same test as a regular PTE. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
51
/* Write-protecting a huge PTE uses the regular pte_wrprotect() helper. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
56
57static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
58 unsigned long addr, pte_t *ptep)
59{
60 pte_t old_pte = *ptep;
61 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 int changed = !pte_same(*ptep, pte);
69 if (changed) {
70 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
71 flush_tlb_page(vma, addr);
72 }
73 return changed;
74}
75
76static inline pte_t huge_ptep_get(pte_t *ptep)
77{
78 return *ptep;
79}
80
/* No sparc64-specific page flags need clearing for huge pages. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
84
85#endif /* _ASM_SPARC64_HUGETLB_H */