// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	/* Virtual page frame number of the requested address. */
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index from the head of the compound page to the target subpage. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0. A present leaf entry has both _PAGE_PRESENT and
 * _PAGE_PSE set, while a non-present hugetlb entry has _PAGE_PRESENT clear;
 * in either case the masked value differs from _PAGE_PRESENT alone.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	/* Constrain the search so the returned address is huge-page aligned. */
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

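/*
 * hugetlb_get_unmapped_area - arch hook used by hugetlbfs (and therefore by
 * MAP_HUGETLB mappings) to pick a huge-page-aligned region. It honours
 * MAP_FIXED and the caller's address hint before falling back to the mm's
 * bottom-up or top-down placement policy.
 */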
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
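/*
 * Example kernel command line: "hugepagesz=1G hugepages=4". This handler
 * registers the 1 GiB hstate (given X86_FEATURE_GBPAGES); the generic
 * hugetlb code then reserves the four pages at boot, before memory
 * fragments.
 */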

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
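
/*
 * Illustrative userspace sketch (not part of this file): an anonymous
 * MAP_HUGETLB mapping is backed by a hugetlbfs file and so reaches
 * hugetlb_get_unmapped_area() above, assuming huge pages have been
 * reserved (e.g. via hugepagesz=/hugepages= as shown earlier):
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	if (p != MAP_FAILED)
 *		((char *)p)[0] = 1;	// faults in one 2 MiB huge page
 */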