/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        /* We do not yet support multiple huge page sizes. */
        BUG_ON(sz != PMD_SIZE);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *)pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *)pmd;
}

#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        unsigned long vpfn = address / PAGE_SIZE;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#else

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}

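/* PMD sharing is not supported on TILE, so there is never anything to undo. */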
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
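/*
 * Bottom-up search: walk the VMA list from the cached free-area hint
 * (or TASK_UNMAPPED_BASE), looking for the first hole large enough to
 * hold a huge-page-aligned mapping of the requested length.
 */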
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}

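/*
 * Top-down search: start just below the cached free-area hint (capped at
 * mm->mmap_base) and walk downward, tracking the largest hole seen; on
 * failure, retry once from the base, then fall back to the bottom-up search.
 */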
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma_prev(mm, addr, &prev_vma);
                if (!vma)
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                    (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        mm->free_area_cache = addr;
                        return addr;
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);

        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                       ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
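/*
 * Usage sketch (assuming a build where PMD_SIZE is 16 MB): booting with
 * "hugepagesz=16M hugepages=64" on the kernel command line selects the
 * PMD-sized hstate registered above and preallocates 64 huge pages from it.
 */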

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};

#endif

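/*
 * Allocate the page-table path for a huge mapping and return a pointer
 * into the level that matches the requested size: the PUD for jumbo
 * pages, the PMD for classic huge pages, or an L2 PTE for the smaller
 * "super" pages built from groups of ordinary pages.
 */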
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *)pmd_alloc(mm, pud, addr);
#endif
}

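/*
 * Look up the PTE at the given index of a page-table level, falling back
 * to the enclosing "super" PTE when the entry itself is not present: if
 * this level has an extra shift configured, mask the index down to the
 * super-page boundary and use that entry instead when it is marked super.
 */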
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

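/* A page-table entry maps a huge page iff its _PAGE_HUGE_PAGE bit is set. */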
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
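/*
 * Validate a requested huge page size and wire it up: the size must be a
 * power of four no larger than 64 GB; its magnitude selects page-table
 * level 0, 1, or 2, and any non-default size at that level is registered
 * with the hypervisor via hv_set_pte_super_shift() before the hstate is
 * added.
 */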
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
                        ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
                        ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
                                ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}


static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);
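/*
 * Note that the first "hugepagesz=" argument clears the compiled-in
 * huge_shift defaults, so the command line fully determines which extra
 * sizes are enabled. For example, "hugepagesz=64K hugepagesz=256M" (sizes
 * shown for illustration only) would pass each size in turn to
 * __setup_hugepagesz() above.
 */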

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */