// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index the compound page by the base-page offset within the huge page */
	page = &pte_page(*pte)[(address >> PAGE_SHIFT) % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
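
/*
 * The test above works because a present huge mapping has both
 * _PAGE_PRESENT and _PAGE_PSE set, while a non-present migration or
 * hwpoison entry has _PAGE_PRESENT clear; only a present page-table
 * (non-leaf) entry matches _PAGE_PRESENT alone and is rejected.
 */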

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

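	/*
	 * Ask vm_unmapped_area() for a result aligned to the huge page
	 * size: the mask selects the offset bits between PAGE_SIZE and the
	 * huge page size that must be zero in the returned address.
	 */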
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
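	/*
	 * (DEFAULT_MAP_WINDOW is the legacy 47-bit boundary; with 5-level
	 * paging the address space reaches TASK_SIZE_MAX, but high addresses
	 * are handed out only when the caller explicitly hints above the
	 * window.)
	 */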
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
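		/*
		 * vm_unmapped_area() returns a negative errno on failure,
		 * which is never page-aligned, so a set low bit means the
		 * top-down search failed.
		 */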
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

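	/* The length must be a whole number of huge pages. */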
	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

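	/*
	 * Try the caller's hint first: round it down to a huge page
	 * boundary and use it only if it passes the address-hint check
	 * and does not collide with an existing mapping (vm_start_gap()
	 * also accounts for the stack guard gap).
	 */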
	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
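/*
 * Report which huge page sizes this CPU supports: PMD-level (2 MB) pages
 * always, PUD-level (1 GB) pages only when the CPU advertises gbpages
 * (pdpe1gb). The hugetlb core consults this when parsing boot parameters
 * such as "hugepagesz=1G".
 */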
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */