Loading...
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */
8
#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>
13
14#define __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
15static inline int prepare_hugepage_range(struct file *file,
16 unsigned long addr,
17 unsigned long len)
18{
19 unsigned long task_size = STACK_TOP;
20 struct hstate *h = hstate_file(file);
21
22 if (len & ~huge_page_mask(h))
23 return -EINVAL;
24 if (addr & ~huge_page_mask(h))
25 return -EINVAL;
26 if (len > task_size)
27 return -ENOMEM;
28 if (task_size - len < addr)
29 return -EINVAL;
30 return 0;
31}
32
33#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
34static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
35 unsigned long addr, pte_t *ptep)
36{
37 pte_t clear;
38 pte_t pte = *ptep;
39
40 pte_val(clear) = (unsigned long)invalid_pte_table;
41 set_pte_at(mm, addr, ptep, clear);
42 return pte;
43}
44
45#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
46static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
47 unsigned long addr, pte_t *ptep)
48{
49 pte_t pte;
50
51 /*
52 * clear the huge pte entry firstly, so that the other smp threads will
53 * not get old pte entry after finishing flush_tlb_page and before
54 * setting new huge pte entry
55 */
56 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
57 flush_tlb_page(vma, addr);
58 return pte;
59}
60
61#define __HAVE_ARCH_HUGE_PTE_NONE
62static inline int huge_pte_none(pte_t pte)
63{
64 unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
65 return !val || (val == (unsigned long)invalid_pte_table);
66}
67
68#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
69static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
70 unsigned long addr,
71 pte_t *ptep, pte_t pte,
72 int dirty)
73{
74 int changed = !pte_same(*ptep, pte);
75
76 if (changed) {
77 set_pte_at(vma->vm_mm, addr, ptep, pte);
78 /*
79 * There could be some standard sized pages in there,
80 * get them all.
81 */
82 flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
83 }
84 return changed;
85}
86
#include <asm-generic/hugetlb.h>

#endif /* __ASM_HUGETLB_H */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>

/*
 * MIPS reserves no address-space region exclusively for huge pages,
 * so no range ever qualifies.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}

22static inline int prepare_hugepage_range(struct file *file,
23 unsigned long addr,
24 unsigned long len)
25{
26 unsigned long task_size = STACK_TOP;
27 struct hstate *h = hstate_file(file);
28
29 if (len & ~huge_page_mask(h))
30 return -EINVAL;
31 if (addr & ~huge_page_mask(h))
32 return -EINVAL;
33 if (len > task_size)
34 return -ENOMEM;
35 if (task_size - len < addr)
36 return -EINVAL;
37 return 0;
38}
39
/* No arch-specific prefault work is needed on MIPS. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

/*
 * Huge pages need no special page-table teardown on MIPS; defer
 * straight to the generic free_pgd_range().
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr,
					  unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

53static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
54 pte_t *ptep, pte_t pte)
55{
56 set_pte_at(mm, addr, ptep, pte);
57}
58
59static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
60 unsigned long addr, pte_t *ptep)
61{
62 pte_t clear;
63 pte_t pte = *ptep;
64
65 pte_val(clear) = (unsigned long)invalid_pte_table;
66 set_pte_at(mm, addr, ptep, clear);
67 return pte;
68}
69
70static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
71 unsigned long addr, pte_t *ptep)
72{
73 flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
74}
75
76static inline int huge_pte_none(pte_t pte)
77{
78 unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;
79 return !val || (val == (unsigned long)invalid_pte_table);
80}
81
82static inline pte_t huge_pte_wrprotect(pte_t pte)
83{
84 return pte_wrprotect(pte);
85}
86
87static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
88 unsigned long addr, pte_t *ptep)
89{
90 ptep_set_wrprotect(mm, addr, ptep);
91}
92
93static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
94 unsigned long addr,
95 pte_t *ptep, pte_t pte,
96 int dirty)
97{
98 return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
99}
100
101static inline pte_t huge_ptep_get(pte_t *ptep)
102{
103 return *ptep;
104}
105
/* Nothing to prepare for a new huge page; always succeeds. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

/* No arch-specific teardown when a huge page is released. */
static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* __ASM_HUGETLB_H */