Loading...
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5
6
7void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
8 pte_t *ptep, pte_t pte);
9
10pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
11 pte_t *ptep);
12
13void hugetlb_prefault_arch_hook(struct mm_struct *mm);
14
/*
 * sparc64 reserves no address range exclusively for huge pages,
 * so this always reports "no".
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
20
21/*
22 * If the arch doesn't supply something else, assume that hugepage
23 * size aligned regions are ok without further preparation.
24 */
25static inline int prepare_hugepage_range(struct file *file,
26 unsigned long addr, unsigned long len)
27{
28 if (len & ~HPAGE_MASK)
29 return -EINVAL;
30 if (addr & ~HPAGE_MASK)
31 return -EINVAL;
32 return 0;
33}
34
/* Tear down the page tables covering a hugetlb range via the generic helper. */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
42
43static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
44 unsigned long addr, pte_t *ptep)
45{
46}
47
48static inline int huge_pte_none(pte_t pte)
49{
50 return pte_none(pte);
51}
52
53static inline pte_t huge_pte_wrprotect(pte_t pte)
54{
55 return pte_wrprotect(pte);
56}
57
58static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
59 unsigned long addr, pte_t *ptep)
60{
61 ptep_set_wrprotect(mm, addr, ptep);
62}
63
64static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
65 unsigned long addr, pte_t *ptep,
66 pte_t pte, int dirty)
67{
68 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
69}
70
71static inline pte_t huge_ptep_get(pte_t *ptep)
72{
73 return *ptep;
74}
75
/* No per-page preparation is required on sparc64; always succeeds. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}
80
/* Nothing to release per page on sparc64; this hook is a no-op. */
static inline void arch_release_hugepage(struct page *page)
{
}
84
85#endif /* _ASM_SPARC64_HUGETLB_H */
1#ifndef _ASM_SPARC64_HUGETLB_H
2#define _ASM_SPARC64_HUGETLB_H
3
4#include <asm/page.h>
5#include <asm-generic/hugetlb.h>
6
7
8void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
9 pte_t *ptep, pte_t pte);
10
11pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
12 pte_t *ptep);
13
/*
 * sparc64 reserves no address range exclusively for huge pages,
 * so this always reports "no".
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
19
20/*
21 * If the arch doesn't supply something else, assume that hugepage
22 * size aligned regions are ok without further preparation.
23 */
24static inline int prepare_hugepage_range(struct file *file,
25 unsigned long addr, unsigned long len)
26{
27 if (len & ~HPAGE_MASK)
28 return -EINVAL;
29 if (addr & ~HPAGE_MASK)
30 return -EINVAL;
31 return 0;
32}
33
34static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
35 unsigned long addr, pte_t *ptep)
36{
37}
38
39static inline int huge_pte_none(pte_t pte)
40{
41 return pte_none(pte);
42}
43
44static inline pte_t huge_pte_wrprotect(pte_t pte)
45{
46 return pte_wrprotect(pte);
47}
48
49static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
50 unsigned long addr, pte_t *ptep)
51{
52 pte_t old_pte = *ptep;
53 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
54}
55
56static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
57 unsigned long addr, pte_t *ptep,
58 pte_t pte, int dirty)
59{
60 int changed = !pte_same(*ptep, pte);
61 if (changed) {
62 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
63 flush_tlb_page(vma, addr);
64 }
65 return changed;
66}
67
68static inline pte_t huge_ptep_get(pte_t *ptep)
69{
70 return *ptep;
71}
72
/* sparc64 keeps no extra per-hugepage flag state; nothing to clear. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
76
/* Free page-table pages backing a hugetlb range (defined in arch mm code). */
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
80
81#endif /* _ASM_SPARC64_HUGETLB_H */