// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	/* Virtual page frame number; the modulo below picks the subpage */
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

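/*
 * A sketch of the cases the check above distinguishes:
 *
 *   pmd_none                                 -> 0 (no entry at all)
 *   _PAGE_PRESENT and _PAGE_PSE both set     -> 1 (mapped huge page)
 *   _PAGE_PRESENT clear but entry not none   -> 1 (migration/hwpoison)
 *   _PAGE_PRESENT set, _PAGE_PSE clear       -> 0 (points to a page table)
 */
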
/*
 * pud_huge() returns 1 if @pud is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, it returns 0.
 */
int pud_huge(pud_t pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
#else
	return 0;
#endif
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

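	/*
	 * Worked example: for a 2 MiB hstate, ~huge_page_mask(h) is
	 * 0x1fffff, so align_mask = PAGE_MASK & 0x1fffff = 0x1ff000 and
	 * vm_unmapped_area() hands back a 2 MiB aligned address.
	 */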
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
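	/*
	 * Concretely: DEFAULT_MAP_WINDOW on x86-64 is the 47-bit
	 * (128 TiB) boundary, while TASK_SIZE_MAX reaches toward 56 bits
	 * with 5-level paging, so a high hint address opts this mapping
	 * into the larger address space.
	 */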

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

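/*
 * A userspace sketch of how the code above is reached: mmap() of a
 * hugetlbfs file, or an anonymous MAP_HUGETLB mapping, routes through
 * hugetlb_get_unmapped_area().  Error handling omitted:
 *
 *	size_t len = 2UL << 20;		// one 2 MiB huge page
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With a NULL hint, the bottom-up or top-down helper is chosen by the
 * mm's mmap layout (legacy vs. top-down).
 */
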
#ifdef CONFIG_X86_64
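/*
 * Used when validating requested huge page sizes (e.g. the hugepagesz=
 * boot parameter): on x86-64, PMD_SIZE (2 MiB) is always supported,
 * while PUD_SIZE (1 GiB) requires the gbpages CPU feature.
 */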
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
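/*
 * PUD_SHIFT - PAGE_SHIFT is 30 - 12 = 18 on x86-64, so the call below
 * registers an hstate of order 18: 2^18 base pages, i.e. 1 GiB.
 */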
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif