1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Generic hugetlb support.
4 * (C) Nadia Yvette Chambers, April 2004
5 */
6#include <linux/list.h>
7#include <linux/init.h>
8#include <linux/mm.h>
9#include <linux/seq_file.h>
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
12#include <linux/mmu_notifier.h>
13#include <linux/nodemask.h>
14#include <linux/pagemap.h>
15#include <linux/mempolicy.h>
16#include <linux/compiler.h>
17#include <linux/cpuset.h>
18#include <linux/mutex.h>
19#include <linux/memblock.h>
20#include <linux/sysfs.h>
21#include <linux/slab.h>
22#include <linux/mmdebug.h>
23#include <linux/sched/signal.h>
24#include <linux/rmap.h>
25#include <linux/string_helpers.h>
26#include <linux/swap.h>
27#include <linux/swapops.h>
28#include <linux/jhash.h>
29#include <linux/numa.h>
30
31#include <asm/page.h>
32#include <asm/pgtable.h>
33#include <asm/tlb.h>
34
35#include <linux/io.h>
36#include <linux/hugetlb.h>
37#include <linux/hugetlb_cgroup.h>
38#include <linux/node.h>
39#include <linux/userfaultfd_k.h>
40#include <linux/page_owner.h>
41#include "internal.h"
42
43int hugetlb_max_hstate __read_mostly;
44unsigned int default_hstate_idx;
45struct hstate hstates[HUGE_MAX_HSTATE];
46/*
47 * Minimum page order among possible hugepage sizes, set to a proper value
48 * at boot time.
49 */
50static unsigned int minimum_order __read_mostly = UINT_MAX;
51
52__initdata LIST_HEAD(huge_boot_pages);
53
54/* for command line parsing */
55static struct hstate * __initdata parsed_hstate;
56static unsigned long __initdata default_hstate_max_huge_pages;
57static unsigned long __initdata default_hstate_size;
58static bool __initdata parsed_valid_hugepagesz = true;
59
60/*
61 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
62 * free_huge_pages, and surplus_huge_pages.
63 */
64DEFINE_SPINLOCK(hugetlb_lock);
65
66/*
67 * Serializes faults on the same logical page. This is used to
68 * prevent spurious OOMs when the hugepage pool is fully utilized.
69 */
70static int num_fault_mutexes;
71struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
72
73/* Forward declaration */
74static int hugetlb_acct_memory(struct hstate *h, long delta);
75
76static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
77{
78 bool free = (spool->count == 0) && (spool->used_hpages == 0);
79
80 spin_unlock(&spool->lock);
81
82 /* If no pages are used, and no other handles to the subpool
83 * remain, give up any reservations based on minimum size and
84 * free the subpool */
85 if (free) {
86 if (spool->min_hpages != -1)
87 hugetlb_acct_memory(spool->hstate,
88 -spool->min_hpages);
89 kfree(spool);
90 }
91}
92
93struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
94 long min_hpages)
95{
96 struct hugepage_subpool *spool;
97
98 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
99 if (!spool)
100 return NULL;
101
102 spin_lock_init(&spool->lock);
103 spool->count = 1;
104 spool->max_hpages = max_hpages;
105 spool->hstate = h;
106 spool->min_hpages = min_hpages;
107
108 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
109 kfree(spool);
110 return NULL;
111 }
112 spool->rsv_hpages = min_hpages;
113
114 return spool;
115}
116
117void hugepage_put_subpool(struct hugepage_subpool *spool)
118{
119 spin_lock(&spool->lock);
120 BUG_ON(!spool->count);
121 spool->count--;
122 unlock_or_release_subpool(spool);
123}
124
125/*
126 * Subpool accounting for allocating and reserving pages.
127 * Return -ENOMEM if there are not enough resources to satisfy the
128 * request. Otherwise, return the number of pages by which the
129 * global pools must be adjusted (upward). The returned value may
130 * only be different than the passed value (delta) in the case where
131 * a subpool minimum size must be maintained.
132 */
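/*
 * Illustrative example (values chosen for this comment): assume the
 * maximum size check passes, min_hpages == 10 and rsv_hpages == 4.
 * A request for delta == 6 pages consumes the 4 remaining subpool
 * reserves and returns 2, the number of pages the global pool must
 * additionally provide.
 */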
133static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
134 long delta)
135{
136 long ret = delta;
137
138 if (!spool)
139 return ret;
140
141 spin_lock(&spool->lock);
142
143 if (spool->max_hpages != -1) { /* maximum size accounting */
144 if ((spool->used_hpages + delta) <= spool->max_hpages)
145 spool->used_hpages += delta;
146 else {
147 ret = -ENOMEM;
148 goto unlock_ret;
149 }
150 }
151
152 /* minimum size accounting */
153 if (spool->min_hpages != -1 && spool->rsv_hpages) {
154 if (delta > spool->rsv_hpages) {
155 /*
156 * Asking for more reserves than those already taken on
157 * behalf of subpool. Return difference.
158 */
159 ret = delta - spool->rsv_hpages;
160 spool->rsv_hpages = 0;
161 } else {
162 ret = 0; /* reserves already accounted for */
163 spool->rsv_hpages -= delta;
164 }
165 }
166
167unlock_ret:
168 spin_unlock(&spool->lock);
169 return ret;
170}
171
172/*
173 * Subpool accounting for freeing and unreserving pages.
174 * Return the number of global page reservations that must be dropped.
175 * The return value may only be different than the passed value (delta)
176 * in the case where a subpool minimum size must be maintained.
177 */
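/*
 * Illustrative example (values chosen for this comment): with
 * min_hpages == 10, rsv_hpages == 8 and used_hpages below the
 * minimum, putting back delta == 3 pages refills the subpool
 * reserve to its cap of 10 and returns 1, the one global
 * reservation that is no longer needed.
 */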
178static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
179 long delta)
180{
181 long ret = delta;
182
183 if (!spool)
184 return delta;
185
186 spin_lock(&spool->lock);
187
188 if (spool->max_hpages != -1) /* maximum size accounting */
189 spool->used_hpages -= delta;
190
191 /* minimum size accounting */
192 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
193 if (spool->rsv_hpages + delta <= spool->min_hpages)
194 ret = 0;
195 else
196 ret = spool->rsv_hpages + delta - spool->min_hpages;
197
198 spool->rsv_hpages += delta;
199 if (spool->rsv_hpages > spool->min_hpages)
200 spool->rsv_hpages = spool->min_hpages;
201 }
202
203 /*
204 * If hugetlbfs_put_super couldn't free spool due to an outstanding
205 * quota reference, free it now.
206 */
207 unlock_or_release_subpool(spool);
208
209 return ret;
210}
211
212static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
213{
214 return HUGETLBFS_SB(inode->i_sb)->spool;
215}
216
217static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
218{
219 return subpool_inode(file_inode(vma->vm_file));
220}
221
222/*
223 * Region tracking -- allows tracking of reservations and instantiated pages
224 * across the pages in a mapping.
225 *
226 * The region data structures are embedded into a resv_map and protected
227 * by a resv_map's lock. The set of regions within the resv_map represent
228 * reservations for huge pages, or huge pages that have already been
229 * instantiated within the map. The from and to elements are huge page
230 * indices into the associated mapping. from indicates the starting index
231 * of the region. to represents the first index past the end of the region.
232 *
233 * For example, a file region structure with from == 0 and to == 4 represents
234 * four huge pages in a mapping. It is important to note that the to element
235 * represents the first element past the end of the region. This is used in
236 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
237 *
238 * Interval notation of the form [from, to) will be used to indicate that
239 * the endpoint from is inclusive and to is exclusive.
240 */
241struct file_region {
242 struct list_head link;
243 long from;
244 long to;
245};
246
247/*
248 * Add the huge page range represented by [f, t) to the reserve
249 * map. In the normal case, existing regions will be expanded
250 * to accommodate the specified range. Sufficient regions should
251 * exist for expansion due to the previous call to region_chg
252 * with the same range. However, it is possible that region_del
253 * could have been called after region_chg and modified the map
254 * in such a way that no region exists to be expanded. In this
255 * case, pull a region descriptor from the cache associated with
256 * the map and use that for the new range.
257 *
258 * Return the number of new huge pages added to the map. This
259 * number is greater than or equal to zero.
260 */
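/*
 * Illustrative example: if the map already contains the region [2, 4)
 * and region_add() is called with [3, 6), the existing region is
 * expanded to [2, 6) and the return value is 2, the two newly added
 * huge pages (indices 4 and 5).
 */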
261static long region_add(struct resv_map *resv, long f, long t)
262{
263 struct list_head *head = &resv->regions;
264 struct file_region *rg, *nrg, *trg;
265 long add = 0;
266
267 spin_lock(&resv->lock);
268 /* Locate the region we are either in or before. */
269 list_for_each_entry(rg, head, link)
270 if (f <= rg->to)
271 break;
272
273 /*
274 * If no region exists which can be expanded to include the
275 * specified range, the list must have been modified by an
276 * interleaving call to region_del(). Pull a region descriptor
277 * from the cache and use it for this range.
278 */
279 if (&rg->link == head || t < rg->from) {
280 VM_BUG_ON(resv->region_cache_count <= 0);
281
282 resv->region_cache_count--;
283 nrg = list_first_entry(&resv->region_cache, struct file_region,
284 link);
285 list_del(&nrg->link);
286
287 nrg->from = f;
288 nrg->to = t;
289 list_add(&nrg->link, rg->link.prev);
290
291 add += t - f;
292 goto out_locked;
293 }
294
295 /* Round our left edge to the current segment if it encloses us. */
296 if (f > rg->from)
297 f = rg->from;
298
299 /* Check for and consume any regions we now overlap with. */
300 nrg = rg;
301 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
302 if (&rg->link == head)
303 break;
304 if (rg->from > t)
305 break;
306
307 /* If this area reaches higher, then extend our area to
308 * include it completely. If this is not the first area
309 * which we intend to reuse, free it. */
310 if (rg->to > t)
311 t = rg->to;
312 if (rg != nrg) {
313 /* Decrement return value by the deleted range.
314 * Another range will span this area so that by the
315 * end of the routine, add will be >= zero
316 */
317 add -= (rg->to - rg->from);
318 list_del(&rg->link);
319 kfree(rg);
320 }
321 }
322
323 add += (nrg->from - f); /* Added to beginning of region */
324 nrg->from = f;
325 add += t - nrg->to; /* Added to end of region */
326 nrg->to = t;
327
328out_locked:
329 resv->adds_in_progress--;
330 spin_unlock(&resv->lock);
331 VM_BUG_ON(add < 0);
332 return add;
333}
334
335/*
336 * Examine the existing reserve map and determine how many
337 * huge pages in the specified range [f, t) are NOT currently
338 * represented. This routine is called before a subsequent
339 * call to region_add that will actually modify the reserve
340 * map to add the specified range [f, t). region_chg does
341 * not change the number of huge pages represented by the
342 * map. However, if the existing regions in the map can not
343 * be expanded to represent the new range, a new file_region
344 * structure is added to the map as a placeholder. This is
345 * so that the subsequent region_add call will have all the
346 * regions it needs and will not fail.
347 *
348 * Upon entry, region_chg will also examine the cache of region descriptors
349 * associated with the map. If there are not enough descriptors cached, one
350 * will be allocated for the in progress add operation.
351 *
352 * Returns the number of huge pages that need to be added to the existing
353 * reservation map for the range [f, t). This number is greater than or equal to
354 * zero. -ENOMEM is returned if a new file_region structure or cache entry
355 * is needed and can not be allocated.
356 */
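/*
 * Illustrative example, continuing the one given for region_add()
 * above: with [2, 4) already in the map, region_chg() for [3, 6)
 * returns 2 because only indices 4 and 5 are not yet represented.
 * The map itself is not changed, apart from a possible placeholder
 * or cache entry added for the later region_add() call.
 */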
357static long region_chg(struct resv_map *resv, long f, long t)
358{
359 struct list_head *head = &resv->regions;
360 struct file_region *rg, *nrg = NULL;
361 long chg = 0;
362
363retry:
364 spin_lock(&resv->lock);
365retry_locked:
366 resv->adds_in_progress++;
367
368 /*
369 * Check for sufficient descriptors in the cache to accommodate
370 * the number of in progress add operations.
371 */
372 if (resv->adds_in_progress > resv->region_cache_count) {
373 struct file_region *trg;
374
375 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
376 /* Must drop lock to allocate a new descriptor. */
377 resv->adds_in_progress--;
378 spin_unlock(&resv->lock);
379
380 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
381 if (!trg) {
382 kfree(nrg);
383 return -ENOMEM;
384 }
385
386 spin_lock(&resv->lock);
387 list_add(&trg->link, &resv->region_cache);
388 resv->region_cache_count++;
389 goto retry_locked;
390 }
391
392 /* Locate the region we are before or in. */
393 list_for_each_entry(rg, head, link)
394 if (f <= rg->to)
395 break;
396
397 /* If we are below the current region then a new region is required.
398 * Subtle, allocate a new region at the position but make it zero
399 * size such that we can guarantee to record the reservation. */
400 if (&rg->link == head || t < rg->from) {
401 if (!nrg) {
402 resv->adds_in_progress--;
403 spin_unlock(&resv->lock);
404 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
405 if (!nrg)
406 return -ENOMEM;
407
408 nrg->from = f;
409 nrg->to = f;
410 INIT_LIST_HEAD(&nrg->link);
411 goto retry;
412 }
413
414 list_add(&nrg->link, rg->link.prev);
415 chg = t - f;
416 goto out_nrg;
417 }
418
419 /* Round our left edge to the current segment if it encloses us. */
420 if (f > rg->from)
421 f = rg->from;
422 chg = t - f;
423
424 /* Check for and consume any regions we now overlap with. */
425 list_for_each_entry(rg, rg->link.prev, link) {
426 if (&rg->link == head)
427 break;
428 if (rg->from > t)
429 goto out;
430
431 /* We overlap with this area; if it extends further than
432 * us then we must extend ourselves. Account for its
433 * existing reservation. */
434 if (rg->to > t) {
435 chg += rg->to - t;
436 t = rg->to;
437 }
438 chg -= rg->to - rg->from;
439 }
440
441out:
442 spin_unlock(&resv->lock);
443 /* We already know we raced and no longer need the new region */
444 kfree(nrg);
445 return chg;
446out_nrg:
447 spin_unlock(&resv->lock);
448 return chg;
449}
450
451/*
452 * Abort the in progress add operation. The adds_in_progress field
453 * of the resv_map keeps track of the operations in progress between
454 * calls to region_chg and region_add. Operations are sometimes
455 * aborted after the call to region_chg. In such cases, region_abort
456 * is called to decrement the adds_in_progress counter.
457 *
458 * NOTE: The range arguments [f, t) are not needed or used in this
459 * routine. They are kept to make reading the calling code easier as
460 * arguments will match the associated region_chg call.
461 */
462static void region_abort(struct resv_map *resv, long f, long t)
463{
464 spin_lock(&resv->lock);
465 VM_BUG_ON(!resv->region_cache_count);
466 resv->adds_in_progress--;
467 spin_unlock(&resv->lock);
468}
469
470/*
471 * Delete the specified range [f, t) from the reserve map. If the
472 * t parameter is LONG_MAX, this indicates that ALL regions after f
473 * should be deleted. Locate the regions which intersect [f, t)
474 * and either trim, delete or split the existing regions.
475 *
476 * Returns the number of huge pages deleted from the reserve map.
477 * In the normal case, the return value is zero or more. In the
478 * case where a region must be split, a new region descriptor must
479 * be allocated. If the allocation fails, -ENOMEM will be returned.
480 * NOTE: If the parameter t == LONG_MAX, then we will never split
481 * a region and possibly return -ENOMEM. Callers specifying
482 * t == LONG_MAX do not need to check for -ENOMEM error.
483 */
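/*
 * Illustrative example: deleting [3, 5) from a map containing [2, 8)
 * splits that region into [2, 3) and [5, 8) and returns 2, the number
 * of huge pages removed from the reserve map.
 */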
484static long region_del(struct resv_map *resv, long f, long t)
485{
486 struct list_head *head = &resv->regions;
487 struct file_region *rg, *trg;
488 struct file_region *nrg = NULL;
489 long del = 0;
490
491retry:
492 spin_lock(&resv->lock);
493 list_for_each_entry_safe(rg, trg, head, link) {
494 /*
495 * Skip regions before the range to be deleted. file_region
496 * ranges are normally of the form [from, to). However, there
497 * may be a "placeholder" entry in the map which is of the form
498 * (from, to) with from == to. Check for placeholder entries
499 * at the beginning of the range to be deleted.
500 */
501 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
502 continue;
503
504 if (rg->from >= t)
505 break;
506
507 if (f > rg->from && t < rg->to) { /* Must split region */
508 /*
509 * Check for an entry in the cache before dropping
510 * lock and attempting allocation.
511 */
512 if (!nrg &&
513 resv->region_cache_count > resv->adds_in_progress) {
514 nrg = list_first_entry(&resv->region_cache,
515 struct file_region,
516 link);
517 list_del(&nrg->link);
518 resv->region_cache_count--;
519 }
520
521 if (!nrg) {
522 spin_unlock(&resv->lock);
523 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
524 if (!nrg)
525 return -ENOMEM;
526 goto retry;
527 }
528
529 del += t - f;
530
531 /* New entry for end of split region */
532 nrg->from = t;
533 nrg->to = rg->to;
534 INIT_LIST_HEAD(&nrg->link);
535
536 /* Original entry is trimmed */
537 rg->to = f;
538
539 list_add(&nrg->link, &rg->link);
540 nrg = NULL;
541 break;
542 }
543
544 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
545 del += rg->to - rg->from;
546 list_del(&rg->link);
547 kfree(rg);
548 continue;
549 }
550
551 if (f <= rg->from) { /* Trim beginning of region */
552 del += t - rg->from;
553 rg->from = t;
554 } else { /* Trim end of region */
555 del += rg->to - f;
556 rg->to = f;
557 }
558 }
559
560 spin_unlock(&resv->lock);
561 kfree(nrg);
562 return del;
563}
564
565/*
566 * A rare out of memory error was encountered which prevented removal of
567 * the reserve map region for a page. The huge page itself was freed
568 * and removed from the page cache. This routine will adjust the subpool
569 * usage count, and the global reserve count if needed. By incrementing
570 * these counts, the reserve map entry which could not be deleted will
571 * appear as a "reserved" entry instead of simply dangling with incorrect
572 * counts.
573 */
574void hugetlb_fix_reserve_counts(struct inode *inode)
575{
576 struct hugepage_subpool *spool = subpool_inode(inode);
577 long rsv_adjust;
578
579 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
580 if (rsv_adjust) {
581 struct hstate *h = hstate_inode(inode);
582
583 hugetlb_acct_memory(h, 1);
584 }
585}
586
587/*
588 * Count and return the number of huge pages in the reserve map
589 * that intersect with the range [f, t).
590 */
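/*
 * Illustrative example: with regions [0, 4) and [6, 10) in the map,
 * region_count() over [2, 8) returns 4: indices 2 and 3 from the
 * first region plus indices 6 and 7 from the second.
 */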
591static long region_count(struct resv_map *resv, long f, long t)
592{
593 struct list_head *head = &resv->regions;
594 struct file_region *rg;
595 long chg = 0;
596
597 spin_lock(&resv->lock);
598 /* Locate each segment we overlap with, and count that overlap. */
599 list_for_each_entry(rg, head, link) {
600 long seg_from;
601 long seg_to;
602
603 if (rg->to <= f)
604 continue;
605 if (rg->from >= t)
606 break;
607
608 seg_from = max(rg->from, f);
609 seg_to = min(rg->to, t);
610
611 chg += seg_to - seg_from;
612 }
613 spin_unlock(&resv->lock);
614
615 return chg;
616}
617
618/*
619 * Convert the address within this vma to the page offset within
620 * the mapping, in pagecache page units; huge pages here.
621 */
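/*
 * Illustrative example, assuming 2MB huge pages on a 4KB base page
 * size (huge_page_shift == 21, huge_page_order == 9): an address 4MB
 * past vm_start contributes an offset of 2 huge pages, and vm_pgoff,
 * which is kept in base page units, is shifted right by 9 to convert
 * it to huge page units before being added.
 */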
622static pgoff_t vma_hugecache_offset(struct hstate *h,
623 struct vm_area_struct *vma, unsigned long address)
624{
625 return ((address - vma->vm_start) >> huge_page_shift(h)) +
626 (vma->vm_pgoff >> huge_page_order(h));
627}
628
629pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
630 unsigned long address)
631{
632 return vma_hugecache_offset(hstate_vma(vma), vma, address);
633}
634EXPORT_SYMBOL_GPL(linear_hugepage_index);
635
636/*
637 * Return the size of the pages allocated when backing a VMA. In the majority
638 * of cases this will be the same size as that used by the page table entries.
639 */
640unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
641{
642 if (vma->vm_ops && vma->vm_ops->pagesize)
643 return vma->vm_ops->pagesize(vma);
644 return PAGE_SIZE;
645}
646EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
648/*
649 * Return the page size being used by the MMU to back a VMA. In the majority
650 * of cases, the page size used by the kernel matches the MMU size. On
651 * architectures where it differs, an architecture-specific 'strong'
652 * version of this symbol is required.
653 */
654__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
655{
656 return vma_kernel_pagesize(vma);
657}
658
659/*
660 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
661 * bits of the reservation map pointer, which are always clear due to
662 * alignment.
663 */
664#define HPAGE_RESV_OWNER (1UL << 0)
665#define HPAGE_RESV_UNMAPPED (1UL << 1)
666#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
667
668/*
669 * These helpers are used to track how many pages are reserved for
670 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
671 * is guaranteed to have their future faults succeed.
672 *
673 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
674 * the reserve counters are updated with the hugetlb_lock held. It is safe
675 * to reset the VMA at fork() time as it is not in use yet and there is no
676 * chance of the global counters getting corrupted as a result of the values.
677 *
678 * The private mapping reservation is represented in a subtly different
679 * manner to a shared mapping. A shared mapping has a region map associated
680 * with the underlying file, this region map represents the backing file
681 * pages which have ever had a reservation assigned; this persists even
682 * after the page is instantiated. A private mapping has a region map
683 * associated with the original mmap which is attached to all VMAs which
684 * reference it, this region map represents those offsets which have consumed
685 * reservation, i.e. where pages have been instantiated.
686 */
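/*
 * Illustrative example of the difference described above: an entry
 * [0, 4) in a shared mapping's region map means offsets 0-3 have (or
 * once had) a reservation, while the same entry in a private
 * mapping's map means those offsets have already been faulted in and
 * their reservation consumed.
 */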
687static unsigned long get_vma_private_data(struct vm_area_struct *vma)
688{
689 return (unsigned long)vma->vm_private_data;
690}
691
692static void set_vma_private_data(struct vm_area_struct *vma,
693 unsigned long value)
694{
695 vma->vm_private_data = (void *)value;
696}
697
698struct resv_map *resv_map_alloc(void)
699{
700 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
701 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
702
703 if (!resv_map || !rg) {
704 kfree(resv_map);
705 kfree(rg);
706 return NULL;
707 }
708
709 kref_init(&resv_map->refs);
710 spin_lock_init(&resv_map->lock);
711 INIT_LIST_HEAD(&resv_map->regions);
712
713 resv_map->adds_in_progress = 0;
714
715 INIT_LIST_HEAD(&resv_map->region_cache);
716 list_add(&rg->link, &resv_map->region_cache);
717 resv_map->region_cache_count = 1;
718
719 return resv_map;
720}
721
722void resv_map_release(struct kref *ref)
723{
724 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
725 struct list_head *head = &resv_map->region_cache;
726 struct file_region *rg, *trg;
727
728 /* Clear out any active regions before we release the map. */
729 region_del(resv_map, 0, LONG_MAX);
730
731 /* ... and any entries left in the cache */
732 list_for_each_entry_safe(rg, trg, head, link) {
733 list_del(&rg->link);
734 kfree(rg);
735 }
736
737 VM_BUG_ON(resv_map->adds_in_progress);
738
739 kfree(resv_map);
740}
741
742static inline struct resv_map *inode_resv_map(struct inode *inode)
743{
744 /*
745 * At inode evict time, i_mapping may not point to the original
746 * address space within the inode. This original address space
747 * contains the pointer to the resv_map. So, always use the
748 * address space embedded within the inode.
749 * The VERY common case is inode->mapping == &inode->i_data, but
750 * this may not be true for device special inodes.
751 */
752 return (struct resv_map *)(&inode->i_data)->private_data;
753}
754
755static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
756{
757 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
758 if (vma->vm_flags & VM_MAYSHARE) {
759 struct address_space *mapping = vma->vm_file->f_mapping;
760 struct inode *inode = mapping->host;
761
762 return inode_resv_map(inode);
763
764 } else {
765 return (struct resv_map *)(get_vma_private_data(vma) &
766 ~HPAGE_RESV_MASK);
767 }
768}
769
770static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
771{
772 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775 set_vma_private_data(vma, (get_vma_private_data(vma) &
776 HPAGE_RESV_MASK) | (unsigned long)map);
777}
778
779static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
780{
781 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
782 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
783
784 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
785}
786
787static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
788{
789 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
790
791 return (get_vma_private_data(vma) & flag) != 0;
792}
793
794/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
795void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
796{
797 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
798 if (!(vma->vm_flags & VM_MAYSHARE))
799 vma->vm_private_data = (void *)0;
800}
801
802/* Returns true if the VMA has associated reserve pages */
803static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
804{
805 if (vma->vm_flags & VM_NORESERVE) {
806 /*
807 * This address is already reserved by another process (chg == 0),
808 * so we should decrement the reserved count. Without decrementing,
809 * the reserve count remains after releasing the inode, because this
810 * allocated page will go into the page cache and be regarded as
811 * coming from the reserved pool in the release step. Currently we
812 * don't have any other solution to deal with this situation
813 * properly, so add a work-around here.
814 */
815 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
816 return true;
817 else
818 return false;
819 }
820
821 /* Shared mappings always use reserves */
822 if (vma->vm_flags & VM_MAYSHARE) {
823 /*
824 * We know VM_NORESERVE is not set. Therefore, there SHOULD
825 * be a region map for all pages. The only situation where
826 * there is no region map is if a hole was punched via
827 * fallocate. In this case, there really are no reserves to
828 * use. This situation is indicated if chg != 0.
829 */
830 if (chg)
831 return false;
832 else
833 return true;
834 }
835
836 /*
837 * Only the process that called mmap() has reserves for
838 * private mappings.
839 */
840 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
841 /*
842 * Like the shared case above, a hole punch or truncate
843 * could have been performed on the private mapping.
844 * Examine the value of chg to determine if reserves
845 * actually exist or were previously consumed.
846 * Very Subtle - The value of chg comes from a previous
847 * call to vma_needs_reserves(). The reserve map for
848 * private mappings has different (opposite) semantics
849 * than that of shared mappings. vma_needs_reserves()
850 * has already taken this difference in semantics into
851 * account. Therefore, the meaning of chg is the same
852 * as in the shared case above. Code could easily be
853 * combined, but keeping it separate draws attention to
854 * subtle differences.
855 */
856 if (chg)
857 return false;
858 else
859 return true;
860 }
861
862 return false;
863}
864
865static void enqueue_huge_page(struct hstate *h, struct page *page)
866{
867 int nid = page_to_nid(page);
868 list_move(&page->lru, &h->hugepage_freelists[nid]);
869 h->free_huge_pages++;
870 h->free_huge_pages_node[nid]++;
871}
872
873static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
874{
875 struct page *page;
876
877 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
878 if (!PageHWPoison(page))
879 break;
880 /*
881 * if 'non-isolated free hugepage' not found on the list,
882 * the allocation fails.
883 */
884 if (&h->hugepage_freelists[nid] == &page->lru)
885 return NULL;
886 list_move(&page->lru, &h->hugepage_activelist);
887 set_page_refcounted(page);
888 h->free_huge_pages--;
889 h->free_huge_pages_node[nid]--;
890 return page;
891}
892
893static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
894 nodemask_t *nmask)
895{
896 unsigned int cpuset_mems_cookie;
897 struct zonelist *zonelist;
898 struct zone *zone;
899 struct zoneref *z;
900 int node = NUMA_NO_NODE;
901
902 zonelist = node_zonelist(nid, gfp_mask);
903
904retry_cpuset:
905 cpuset_mems_cookie = read_mems_allowed_begin();
906 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
907 struct page *page;
908
909 if (!cpuset_zone_allowed(zone, gfp_mask))
910 continue;
911 /*
912 * no need to ask again on the same node. Pool is node rather than
913 * zone aware
914 */
915 if (zone_to_nid(zone) == node)
916 continue;
917 node = zone_to_nid(zone);
918
919 page = dequeue_huge_page_node_exact(h, node);
920 if (page)
921 return page;
922 }
923 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
924 goto retry_cpuset;
925
926 return NULL;
927}
928
929/* Movability of hugepages depends on migration support. */
930static inline gfp_t htlb_alloc_mask(struct hstate *h)
931{
932 if (hugepage_movable_supported(h))
933 return GFP_HIGHUSER_MOVABLE;
934 else
935 return GFP_HIGHUSER;
936}
937
938static struct page *dequeue_huge_page_vma(struct hstate *h,
939 struct vm_area_struct *vma,
940 unsigned long address, int avoid_reserve,
941 long chg)
942{
943 struct page *page;
944 struct mempolicy *mpol;
945 gfp_t gfp_mask;
946 nodemask_t *nodemask;
947 int nid;
948
949 /*
950 * A child process with MAP_PRIVATE mappings created by its parent
951 * has no page reserves. This check ensures that reservations are
952 * not "stolen". The child may still get SIGKILLed.
953 */
954 if (!vma_has_reserves(vma, chg) &&
955 h->free_huge_pages - h->resv_huge_pages == 0)
956 goto err;
957
958 /* If reserves cannot be used, ensure enough pages are in the pool */
959 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
960 goto err;
961
962 gfp_mask = htlb_alloc_mask(h);
963 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
964 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
965 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
966 SetPagePrivate(page);
967 h->resv_huge_pages--;
968 }
969
970 mpol_cond_put(mpol);
971 return page;
972
973err:
974 return NULL;
975}
976
977/*
978 * common helper functions for hstate_next_node_to_{alloc|free}.
979 * We may have allocated or freed a huge page based on a different
980 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
981 * be outside of *nodes_allowed. Ensure that we use an allowed
982 * node for alloc or free.
983 */
984static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
985{
986 nid = next_node_in(nid, *nodes_allowed);
987 VM_BUG_ON(nid >= MAX_NUMNODES);
988
989 return nid;
990}
991
992static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
993{
994 if (!node_isset(nid, *nodes_allowed))
995 nid = next_node_allowed(nid, nodes_allowed);
996 return nid;
997}
998
999/*
1000 * returns the previously saved node ["this node"] from which to
1001 * allocate a persistent huge page for the pool and advance the
1002 * next node from which to allocate, handling wrap at end of node
1003 * mask.
1004 */
1005static int hstate_next_node_to_alloc(struct hstate *h,
1006 nodemask_t *nodes_allowed)
1007{
1008 int nid;
1009
1010 VM_BUG_ON(!nodes_allowed);
1011
1012 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1013 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1014
1015 return nid;
1016}
1017
1018/*
1019 * helper for free_pool_huge_page() - return the previously saved
1020 * node ["this node"] from which to free a huge page. Advance the
1021 * next node id whether or not we find a free huge page to free so
1022 * that the next attempt to free addresses the next node.
1023 */
1024static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1025{
1026 int nid;
1027
1028 VM_BUG_ON(!nodes_allowed);
1029
1030 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1031 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1032
1033 return nid;
1034}
1035
1036#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1037 for (nr_nodes = nodes_weight(*mask); \
1038 nr_nodes > 0 && \
1039 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1040 nr_nodes--)
1041
1042#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1043 for (nr_nodes = nodes_weight(*mask); \
1044 nr_nodes > 0 && \
1045 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1046 nr_nodes--)
1047
1048#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1049static void destroy_compound_gigantic_page(struct page *page,
1050 unsigned int order)
1051{
1052 int i;
1053 int nr_pages = 1 << order;
1054 struct page *p = page + 1;
1055
1056 atomic_set(compound_mapcount_ptr(page), 0);
1057 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1058 clear_compound_head(p);
1059 set_page_refcounted(p);
1060 }
1061
1062 set_compound_order(page, 0);
1063 __ClearPageHead(page);
1064}
1065
1066static void free_gigantic_page(struct page *page, unsigned int order)
1067{
1068 free_contig_range(page_to_pfn(page), 1 << order);
1069}
1070
1071#ifdef CONFIG_CONTIG_ALLOC
1072static int __alloc_gigantic_page(unsigned long start_pfn,
1073 unsigned long nr_pages, gfp_t gfp_mask)
1074{
1075 unsigned long end_pfn = start_pfn + nr_pages;
1076 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1077 gfp_mask);
1078}
1079
1080static bool pfn_range_valid_gigantic(struct zone *z,
1081 unsigned long start_pfn, unsigned long nr_pages)
1082{
1083 unsigned long i, end_pfn = start_pfn + nr_pages;
1084 struct page *page;
1085
1086 for (i = start_pfn; i < end_pfn; i++) {
1087 page = pfn_to_online_page(i);
1088 if (!page)
1089 return false;
1090
1091 if (page_zone(page) != z)
1092 return false;
1093
1094 if (PageReserved(page))
1095 return false;
1096
1097 if (page_count(page) > 0)
1098 return false;
1099
1100 if (PageHuge(page))
1101 return false;
1102 }
1103
1104 return true;
1105}
1106
1107static bool zone_spans_last_pfn(const struct zone *zone,
1108 unsigned long start_pfn, unsigned long nr_pages)
1109{
1110 unsigned long last_pfn = start_pfn + nr_pages - 1;
1111 return zone_spans_pfn(zone, last_pfn);
1112}
1113
1114static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1115 int nid, nodemask_t *nodemask)
1116{
1117 unsigned int order = huge_page_order(h);
1118 unsigned long nr_pages = 1 << order;
1119 unsigned long ret, pfn, flags;
1120 struct zonelist *zonelist;
1121 struct zone *zone;
1122 struct zoneref *z;
1123
1124 zonelist = node_zonelist(nid, gfp_mask);
1125 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1126 spin_lock_irqsave(&zone->lock, flags);
1127
1128 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1129 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1130 if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1131 /*
1132 * We release the zone lock here because
1133 * alloc_contig_range() will also lock the zone
1134 * at some point. If there's an allocation
1135 * spinning on this lock, it may win the race
1136 * and cause alloc_contig_range() to fail...
1137 */
1138 spin_unlock_irqrestore(&zone->lock, flags);
1139 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1140 if (!ret)
1141 return pfn_to_page(pfn);
1142 spin_lock_irqsave(&zone->lock, flags);
1143 }
1144 pfn += nr_pages;
1145 }
1146
1147 spin_unlock_irqrestore(&zone->lock, flags);
1148 }
1149
1150 return NULL;
1151}
1152
1153static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1154static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1155#else /* !CONFIG_CONTIG_ALLOC */
1156static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1157 int nid, nodemask_t *nodemask)
1158{
1159 return NULL;
1160}
1161#endif /* CONFIG_CONTIG_ALLOC */
1162
1163#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1164static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1165 int nid, nodemask_t *nodemask)
1166{
1167 return NULL;
1168}
1169static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1170static inline void destroy_compound_gigantic_page(struct page *page,
1171 unsigned int order) { }
1172#endif
1173
1174static void update_and_free_page(struct hstate *h, struct page *page)
1175{
1176 int i;
1177
1178 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1179 return;
1180
1181 h->nr_huge_pages--;
1182 h->nr_huge_pages_node[page_to_nid(page)]--;
1183 for (i = 0; i < pages_per_huge_page(h); i++) {
1184 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1185 1 << PG_referenced | 1 << PG_dirty |
1186 1 << PG_active | 1 << PG_private |
1187 1 << PG_writeback);
1188 }
1189 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1190 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1191 set_page_refcounted(page);
1192 if (hstate_is_gigantic(h)) {
1193 destroy_compound_gigantic_page(page, huge_page_order(h));
1194 free_gigantic_page(page, huge_page_order(h));
1195 } else {
1196 __free_pages(page, huge_page_order(h));
1197 }
1198}
1199
1200struct hstate *size_to_hstate(unsigned long size)
1201{
1202 struct hstate *h;
1203
1204 for_each_hstate(h) {
1205 if (huge_page_size(h) == size)
1206 return h;
1207 }
1208 return NULL;
1209}
1210
1211/*
1212 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1213 * to hstate->hugepage_activelist.)
1214 *
1215 * This function can be called for tail pages, but never returns true for them.
1216 */
1217bool page_huge_active(struct page *page)
1218{
1219 VM_BUG_ON_PAGE(!PageHuge(page), page);
1220 return PageHead(page) && PagePrivate(&page[1]);
1221}
1222
1223/* never called for tail page */
1224static void set_page_huge_active(struct page *page)
1225{
1226 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227 SetPagePrivate(&page[1]);
1228}
1229
1230static void clear_page_huge_active(struct page *page)
1231{
1232 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1233 ClearPagePrivate(&page[1]);
1234}
1235
1236/*
1237 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1238 * code
1239 */
1240static inline bool PageHugeTemporary(struct page *page)
1241{
1242 if (!PageHuge(page))
1243 return false;
1244
1245 return (unsigned long)page[2].mapping == -1U;
1246}
1247
1248static inline void SetPageHugeTemporary(struct page *page)
1249{
1250 page[2].mapping = (void *)-1U;
1251}
1252
1253static inline void ClearPageHugeTemporary(struct page *page)
1254{
1255 page[2].mapping = NULL;
1256}
1257
1258void free_huge_page(struct page *page)
1259{
1260 /*
1261 * Can't pass hstate in here because it is called from the
1262 * compound page destructor.
1263 */
1264 struct hstate *h = page_hstate(page);
1265 int nid = page_to_nid(page);
1266 struct hugepage_subpool *spool =
1267 (struct hugepage_subpool *)page_private(page);
1268 bool restore_reserve;
1269
1270 VM_BUG_ON_PAGE(page_count(page), page);
1271 VM_BUG_ON_PAGE(page_mapcount(page), page);
1272
1273 set_page_private(page, 0);
1274 page->mapping = NULL;
1275 restore_reserve = PagePrivate(page);
1276 ClearPagePrivate(page);
1277
1278 /*
1279 * If PagePrivate() was set on page, page allocation consumed a
1280 * reservation. If the page was associated with a subpool, there
1281 * would have been a page reserved in the subpool before allocation
1282 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1283 * reservation, do not call hugepage_subpool_put_pages() as this will
1284 * remove the reserved page from the subpool.
1285 */
1286 if (!restore_reserve) {
1287 /*
1288 * A return code of zero implies that the subpool will be
1289 * under its minimum size if the reservation is not restored
1290 * after page is free. Therefore, force restore_reserve
1291 * operation.
1292 */
1293 if (hugepage_subpool_put_pages(spool, 1) == 0)
1294 restore_reserve = true;
1295 }
1296
1297 spin_lock(&hugetlb_lock);
1298 clear_page_huge_active(page);
1299 hugetlb_cgroup_uncharge_page(hstate_index(h),
1300 pages_per_huge_page(h), page);
1301 if (restore_reserve)
1302 h->resv_huge_pages++;
1303
1304 if (PageHugeTemporary(page)) {
1305 list_del(&page->lru);
1306 ClearPageHugeTemporary(page);
1307 update_and_free_page(h, page);
1308 } else if (h->surplus_huge_pages_node[nid]) {
1309 /* remove the page from active list */
1310 list_del(&page->lru);
1311 update_and_free_page(h, page);
1312 h->surplus_huge_pages--;
1313 h->surplus_huge_pages_node[nid]--;
1314 } else {
1315 arch_clear_hugepage_flags(page);
1316 enqueue_huge_page(h, page);
1317 }
1318 spin_unlock(&hugetlb_lock);
1319}
1320
1321static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1322{
1323 INIT_LIST_HEAD(&page->lru);
1324 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1325 spin_lock(&hugetlb_lock);
1326 set_hugetlb_cgroup(page, NULL);
1327 h->nr_huge_pages++;
1328 h->nr_huge_pages_node[nid]++;
1329 spin_unlock(&hugetlb_lock);
1330}
1331
1332static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1333{
1334 int i;
1335 int nr_pages = 1 << order;
1336 struct page *p = page + 1;
1337
1338 /* we rely on prep_new_huge_page to set the destructor */
1339 set_compound_order(page, order);
1340 __ClearPageReserved(page);
1341 __SetPageHead(page);
1342 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1343 /*
1344 * For gigantic hugepages allocated through bootmem at
1345 * boot, it's safer to be consistent with the not-gigantic
1346 * hugepages and clear the PG_reserved bit from all tail pages
1347 * too. Otherwise drivers using get_user_pages() to access tail
1348 * pages may get the reference counting wrong if they see
1349 * PG_reserved set on a tail page (despite the head page not
1350 * having PG_reserved set). Enforcing this consistency between
1351 * head and tail pages allows drivers to optimize away a check
1352 * on the head page when they need to know if put_page() is needed
1353 * after get_user_pages().
1354 */
1355 __ClearPageReserved(p);
1356 set_page_count(p, 0);
1357 set_compound_head(p, page);
1358 }
1359 atomic_set(compound_mapcount_ptr(page), -1);
1360}
1361
1362/*
1363 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1364 * transparent huge pages. See the PageTransHuge() documentation for more
1365 * details.
1366 */
1367int PageHuge(struct page *page)
1368{
1369 if (!PageCompound(page))
1370 return 0;
1371
1372 page = compound_head(page);
1373 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1374}
1375EXPORT_SYMBOL_GPL(PageHuge);
1376
1377/*
1378 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1379 * normal or transparent huge pages.
1380 */
1381int PageHeadHuge(struct page *page_head)
1382{
1383 if (!PageHead(page_head))
1384 return 0;
1385
1386 return get_compound_page_dtor(page_head) == free_huge_page;
1387}
1388
1389pgoff_t __basepage_index(struct page *page)
1390{
1391 struct page *page_head = compound_head(page);
1392 pgoff_t index = page_index(page_head);
1393 unsigned long compound_idx;
1394
1395 if (!PageHuge(page_head))
1396 return page_index(page);
1397
1398 if (compound_order(page_head) >= MAX_ORDER)
1399 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1400 else
1401 compound_idx = page - page_head;
1402
1403 return (index << compound_order(page_head)) + compound_idx;
1404}
1405
1406static struct page *alloc_buddy_huge_page(struct hstate *h,
1407 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1408 nodemask_t *node_alloc_noretry)
1409{
1410 int order = huge_page_order(h);
1411 struct page *page;
1412 bool alloc_try_hard = true;
1413
1414 /*
1415 * By default we always try hard to allocate the page with
1416 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
1417 * a loop (to adjust global huge page counts) and previous allocation
1418 * failed, do not continue to try hard on the same node. Use the
1419 * node_alloc_noretry bitmap to manage this state information.
1420 */
1421 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1422 alloc_try_hard = false;
1423 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1424 if (alloc_try_hard)
1425 gfp_mask |= __GFP_RETRY_MAYFAIL;
1426 if (nid == NUMA_NO_NODE)
1427 nid = numa_mem_id();
1428 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1429 if (page)
1430 __count_vm_event(HTLB_BUDDY_PGALLOC);
1431 else
1432 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1433
1434 /*
1435 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
1436 * indicates an overall state change. Clear bit so that we resume
1437 * normal 'try hard' allocations.
1438 */
1439 if (node_alloc_noretry && page && !alloc_try_hard)
1440 node_clear(nid, *node_alloc_noretry);
1441
1442 /*
1443 * If we tried hard to get a page but failed, set bit so that
1444 * subsequent attempts will not try as hard until there is an
1445 * overall state change.
1446 */
1447 if (node_alloc_noretry && !page && alloc_try_hard)
1448 node_set(nid, *node_alloc_noretry);
1449
1450 return page;
1451}
1452
1453/*
1454 * Common helper to allocate a fresh hugetlb page. All specific allocators
1455 * should use this function to get new hugetlb pages
1456 */
1457static struct page *alloc_fresh_huge_page(struct hstate *h,
1458 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1459 nodemask_t *node_alloc_noretry)
1460{
1461 struct page *page;
1462
1463 if (hstate_is_gigantic(h))
1464 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1465 else
1466 page = alloc_buddy_huge_page(h, gfp_mask,
1467 nid, nmask, node_alloc_noretry);
1468 if (!page)
1469 return NULL;
1470
1471 if (hstate_is_gigantic(h))
1472 prep_compound_gigantic_page(page, huge_page_order(h));
1473 prep_new_huge_page(h, page, page_to_nid(page));
1474
1475 return page;
1476}
1477
1478/*
1479 * Allocates a fresh page to the hugetlb allocator pool in a node interleaved
1480 * manner.
1481 */
1482static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1483 nodemask_t *node_alloc_noretry)
1484{
1485 struct page *page;
1486 int nr_nodes, node;
1487 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1488
1489 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1490 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1491 node_alloc_noretry);
1492 if (page)
1493 break;
1494 }
1495
1496 if (!page)
1497 return 0;
1498
1499 put_page(page); /* free it into the hugepage allocator */
1500
1501 return 1;
1502}
1503
1504/*
1505 * Free huge page from pool from next node to free.
1506 * Attempt to keep persistent huge pages more or less
1507 * balanced over allowed nodes.
1508 * Called with hugetlb_lock locked.
1509 */
1510static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1511 bool acct_surplus)
1512{
1513 int nr_nodes, node;
1514 int ret = 0;
1515
1516 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1517 /*
1518 * If we're returning unused surplus pages, only examine
1519 * nodes with surplus pages.
1520 */
1521 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1522 !list_empty(&h->hugepage_freelists[node])) {
1523 struct page *page =
1524 list_entry(h->hugepage_freelists[node].next,
1525 struct page, lru);
1526 list_del(&page->lru);
1527 h->free_huge_pages--;
1528 h->free_huge_pages_node[node]--;
1529 if (acct_surplus) {
1530 h->surplus_huge_pages--;
1531 h->surplus_huge_pages_node[node]--;
1532 }
1533 update_and_free_page(h, page);
1534 ret = 1;
1535 break;
1536 }
1537 }
1538
1539 return ret;
1540}
1541
1542/*
1543 * Dissolve a given free hugepage into free buddy pages. This function does
1544 * nothing for in-use hugepages and non-hugepages.
1545 * This function returns values like below:
1546 *
1547 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
1548 * (allocated or reserved.)
1549 * 0: successfully dissolved free hugepages or the page is not a
1550 * hugepage (considered as already dissolved)
1551 */
1552int dissolve_free_huge_page(struct page *page)
1553{
1554 int rc = -EBUSY;
1555
1556 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1557 if (!PageHuge(page))
1558 return 0;
1559
1560 spin_lock(&hugetlb_lock);
1561 if (!PageHuge(page)) {
1562 rc = 0;
1563 goto out;
1564 }
1565
1566 if (!page_count(page)) {
1567 struct page *head = compound_head(page);
1568 struct hstate *h = page_hstate(head);
1569 int nid = page_to_nid(head);
1570 if (h->free_huge_pages - h->resv_huge_pages == 0)
1571 goto out;
1572 /*
1573 * Move PageHWPoison flag from head page to the raw error page,
1574 * which makes any subpages other than the error page reusable.
1575 */
1576 if (PageHWPoison(head) && page != head) {
1577 SetPageHWPoison(page);
1578 ClearPageHWPoison(head);
1579 }
1580 list_del(&head->lru);
1581 h->free_huge_pages--;
1582 h->free_huge_pages_node[nid]--;
1583 h->max_huge_pages--;
1584 update_and_free_page(h, head);
1585 rc = 0;
1586 }
1587out:
1588 spin_unlock(&hugetlb_lock);
1589 return rc;
1590}
1591
1592/*
1593 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1594 * make specified memory blocks removable from the system.
1595 * Note that this will dissolve a free gigantic hugepage completely, if any
1596 * part of it lies within the given range.
1597 * Also note that if dissolve_free_huge_page() returns with an error, all
1598 * free hugepages that were dissolved before that error are lost.
1599 */
1600int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1601{
1602 unsigned long pfn;
1603 struct page *page;
1604 int rc = 0;
1605
1606 if (!hugepages_supported())
1607 return rc;
1608
1609 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1610 page = pfn_to_page(pfn);
1611 rc = dissolve_free_huge_page(page);
1612 if (rc)
1613 break;
1614 }
1615
1616 return rc;
1617}
1618
1619/*
1620 * Allocates a fresh surplus page from the page allocator.
1621 */
1622static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1623 int nid, nodemask_t *nmask)
1624{
1625 struct page *page = NULL;
1626
1627 if (hstate_is_gigantic(h))
1628 return NULL;
1629
1630 spin_lock(&hugetlb_lock);
1631 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1632 goto out_unlock;
1633 spin_unlock(&hugetlb_lock);
1634
1635 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1636 if (!page)
1637 return NULL;
1638
1639 spin_lock(&hugetlb_lock);
1640 /*
1641 * We could have raced with the pool size change.
1642 * Double check that and simply deallocate the new page
1643 * if we would end up overcommitting the surpluses. Abuse a
1644 * temporary page to work around the nasty free_huge_page
1645 * codeflow
1646 */
1647 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1648 SetPageHugeTemporary(page);
1649 spin_unlock(&hugetlb_lock);
1650 put_page(page);
1651 return NULL;
1652 } else {
1653 h->surplus_huge_pages++;
1654 h->surplus_huge_pages_node[page_to_nid(page)]++;
1655 }
1656
1657out_unlock:
1658 spin_unlock(&hugetlb_lock);
1659
1660 return page;
1661}
1662
1663struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1664 int nid, nodemask_t *nmask)
1665{
1666 struct page *page;
1667
1668 if (hstate_is_gigantic(h))
1669 return NULL;
1670
1671 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1672 if (!page)
1673 return NULL;
1674
1675 /*
1676 * We do not account these pages as surplus because they are only
1677 * temporary and will be released properly on the last reference
1678 */
1679 SetPageHugeTemporary(page);
1680
1681 return page;
1682}
1683
1684/*
1685 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1686 */
1687static
1688struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1689 struct vm_area_struct *vma, unsigned long addr)
1690{
1691 struct page *page;
1692 struct mempolicy *mpol;
1693 gfp_t gfp_mask = htlb_alloc_mask(h);
1694 int nid;
1695 nodemask_t *nodemask;
1696
1697 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1698 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1699 mpol_cond_put(mpol);
1700
1701 return page;
1702}
1703
1704/* page migration callback function */
1705struct page *alloc_huge_page_node(struct hstate *h, int nid)
1706{
1707 gfp_t gfp_mask = htlb_alloc_mask(h);
1708 struct page *page = NULL;
1709
1710 if (nid != NUMA_NO_NODE)
1711 gfp_mask |= __GFP_THISNODE;
1712
1713 spin_lock(&hugetlb_lock);
1714 if (h->free_huge_pages - h->resv_huge_pages > 0)
1715 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1716 spin_unlock(&hugetlb_lock);
1717
1718 if (!page)
1719 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1720
1721 return page;
1722}
1723
1724/* page migration callback function */
1725struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1726 nodemask_t *nmask)
1727{
1728 gfp_t gfp_mask = htlb_alloc_mask(h);
1729
1730 spin_lock(&hugetlb_lock);
1731 if (h->free_huge_pages - h->resv_huge_pages > 0) {
1732 struct page *page;
1733
1734 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1735 if (page) {
1736 spin_unlock(&hugetlb_lock);
1737 return page;
1738 }
1739 }
1740 spin_unlock(&hugetlb_lock);
1741
1742 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1743}
1744
1745/* mempolicy aware migration callback */
1746struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1747 unsigned long address)
1748{
1749 struct mempolicy *mpol;
1750 nodemask_t *nodemask;
1751 struct page *page;
1752 gfp_t gfp_mask;
1753 int node;
1754
1755 gfp_mask = htlb_alloc_mask(h);
1756 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1757 page = alloc_huge_page_nodemask(h, node, nodemask);
1758 mpol_cond_put(mpol);
1759
1760 return page;
1761}
1762
1763/*
1764 * Increase the hugetlb pool such that it can accommodate a reservation
1765 * of size 'delta'.
1766 */
1767static int gather_surplus_pages(struct hstate *h, int delta)
1768{
1769 struct list_head surplus_list;
1770 struct page *page, *tmp;
1771 int ret, i;
1772 int needed, allocated;
1773 bool alloc_ok = true;
1774
1775 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1776 if (needed <= 0) {
1777 h->resv_huge_pages += delta;
1778 return 0;
1779 }
1780
1781 allocated = 0;
1782 INIT_LIST_HEAD(&surplus_list);
1783
1784 ret = -ENOMEM;
1785retry:
1786 spin_unlock(&hugetlb_lock);
1787 for (i = 0; i < needed; i++) {
1788 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1789 NUMA_NO_NODE, NULL);
1790 if (!page) {
1791 alloc_ok = false;
1792 break;
1793 }
1794 list_add(&page->lru, &surplus_list);
1795 cond_resched();
1796 }
1797 allocated += i;
1798
1799 /*
1800 * After retaking hugetlb_lock, we need to recalculate 'needed'
1801 * because either resv_huge_pages or free_huge_pages may have changed.
1802 */
1803 spin_lock(&hugetlb_lock);
1804 needed = (h->resv_huge_pages + delta) -
1805 (h->free_huge_pages + allocated);
1806 if (needed > 0) {
1807 if (alloc_ok)
1808 goto retry;
1809 /*
1810 * We were not able to allocate enough pages to
1811 * satisfy the entire reservation so we free what
1812 * we've allocated so far.
1813 */
1814 goto free;
1815 }
1816 /*
1817 * The surplus_list now contains _at_least_ the number of extra pages
1818 * needed to accommodate the reservation. Add the appropriate number
1819 * of pages to the hugetlb pool and free the extras back to the buddy
1820 * allocator. Commit the entire reservation here to prevent another
1821 * process from stealing the pages as they are added to the pool but
1822 * before they are reserved.
1823 */
1824 needed += allocated;
1825 h->resv_huge_pages += delta;
1826 ret = 0;
1827
1828 /* Free the needed pages to the hugetlb pool */
1829 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1830 if ((--needed) < 0)
1831 break;
1832 /*
1833 * This page is now managed by the hugetlb allocator and has
1834 * no users -- drop the buddy allocator's reference.
1835 */
1836 put_page_testzero(page);
1837 VM_BUG_ON_PAGE(page_count(page), page);
1838 enqueue_huge_page(h, page);
1839 }
1840free:
1841 spin_unlock(&hugetlb_lock);
1842
1843 /* Free unnecessary surplus pages to the buddy allocator */
1844 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1845 put_page(page);
1846 spin_lock(&hugetlb_lock);
1847
1848 return ret;
1849}
1850
1851/*
1852 * This routine has two main purposes:
1853 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1854 * in unused_resv_pages. This corresponds to the prior adjustments made
1855 * to the associated reservation map.
1856 * 2) Free any unused surplus pages that may have been allocated to satisfy
1857 * the reservation. As many as unused_resv_pages may be freed.
1858 *
1859 * Called with hugetlb_lock held. However, the lock could be dropped (and
1860 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1861 * we must make sure nobody else can claim pages we are in the process of
1862 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1863 * number of huge pages we plan to free when dropping the lock.
1864 */
1865static void return_unused_surplus_pages(struct hstate *h,
1866 unsigned long unused_resv_pages)
1867{
1868 unsigned long nr_pages;
1869
1870 /* Cannot return gigantic pages currently */
1871 if (hstate_is_gigantic(h))
1872 goto out;
1873
1874 /*
1875 * Part (or even all) of the reservation could have been backed
1876 * by pre-allocated pages. Only free surplus pages.
1877 */
1878 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1879
1880 /*
1881 * We want to release as many surplus pages as possible, spread
1882 * evenly across all nodes with memory. Iterate across these nodes
1883 * until we can no longer free unreserved surplus pages. This occurs
1884 * when the nodes with surplus pages have no free pages.
1885	 * free_pool_huge_page() will balance the freed pages across the
1886 * on-line nodes with memory and will handle the hstate accounting.
1887 *
1888 * Note that we decrement resv_huge_pages as we free the pages. If
1889 * we drop the lock, resv_huge_pages will still be sufficiently large
1890 * to cover subsequent pages we may free.
1891 */
1892 while (nr_pages--) {
1893 h->resv_huge_pages--;
1894 unused_resv_pages--;
1895 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1896 goto out;
1897 cond_resched_lock(&hugetlb_lock);
1898 }
1899
1900out:
1901 /* Fully uncommit the reservation */
1902 h->resv_huge_pages -= unused_resv_pages;
1903}
1904
1905
1906/*
1907 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1908 * are used by the huge page allocation routines to manage reservations.
1909 *
1910 * vma_needs_reservation is called to determine if the huge page at addr
1911 * within the vma has an associated reservation. If a reservation is
1912 * needed, the value 1 is returned. The caller is then responsible for
1913 * managing the global reservation and subpool usage counts. After
1914 * the huge page has been allocated, vma_commit_reservation is called
1915 * to add the page to the reservation map. If the page allocation fails,
1916 * the reservation must be ended instead of committed. vma_end_reservation
1917 * is called in such cases.
1918 *
1919 * In the normal case, vma_commit_reservation returns the same value
1920 * as the preceding vma_needs_reservation call. The only time this
1921 * is not the case is if a reserve map was changed between calls. It
1922 * is the responsibility of the caller to notice the difference and
1923 * take appropriate action.
1924 *
1925 * vma_add_reservation is used in error paths where a reservation must
1926 * be restored when a newly allocated huge page must be freed. It is
1927 * to be called after calling vma_needs_reservation to determine if a
1928 * reservation exists.
1929 */
1930enum vma_resv_mode {
1931 VMA_NEEDS_RESV,
1932 VMA_COMMIT_RESV,
1933 VMA_END_RESV,
1934 VMA_ADD_RESV,
1935};
1936static long __vma_reservation_common(struct hstate *h,
1937 struct vm_area_struct *vma, unsigned long addr,
1938 enum vma_resv_mode mode)
1939{
1940 struct resv_map *resv;
1941 pgoff_t idx;
1942 long ret;
1943
1944 resv = vma_resv_map(vma);
1945 if (!resv)
1946 return 1;
1947
1948 idx = vma_hugecache_offset(h, vma, addr);
1949 switch (mode) {
1950 case VMA_NEEDS_RESV:
1951 ret = region_chg(resv, idx, idx + 1);
1952 break;
1953 case VMA_COMMIT_RESV:
1954 ret = region_add(resv, idx, idx + 1);
1955 break;
1956 case VMA_END_RESV:
1957 region_abort(resv, idx, idx + 1);
1958 ret = 0;
1959 break;
1960 case VMA_ADD_RESV:
1961 if (vma->vm_flags & VM_MAYSHARE)
1962 ret = region_add(resv, idx, idx + 1);
1963 else {
1964 region_abort(resv, idx, idx + 1);
1965 ret = region_del(resv, idx, idx + 1);
1966 }
1967 break;
1968 default:
1969 BUG();
1970 }
1971
1972 if (vma->vm_flags & VM_MAYSHARE)
1973 return ret;
1974 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1975 /*
1976 * In most cases, reserves always exist for private mappings.
1977		 * However, the file associated with the mapping could have been
1978		 * hole punched or truncated after reserves were consumed. A
1979		 * subsequent fault on such a range will not use reserves.
1980 * Subtle - The reserve map for private mappings has the
1981 * opposite meaning than that of shared mappings. If NO
1982 * entry is in the reserve map, it means a reservation exists.
1983 * If an entry exists in the reserve map, it means the
1984 * reservation has already been consumed. As a result, the
1985 * return value of this routine is the opposite of the
1986 * value returned from reserve map manipulation routines above.
1987 */
1988 if (ret)
1989 return 0;
1990 else
1991 return 1;
1992 }
1993 else
1994 return ret < 0 ? ret : 0;
1995}
1996
1997static long vma_needs_reservation(struct hstate *h,
1998 struct vm_area_struct *vma, unsigned long addr)
1999{
2000 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2001}
2002
2003static long vma_commit_reservation(struct hstate *h,
2004 struct vm_area_struct *vma, unsigned long addr)
2005{
2006 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2007}
2008
2009static void vma_end_reservation(struct hstate *h,
2010 struct vm_area_struct *vma, unsigned long addr)
2011{
2012 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2013}
2014
2015static long vma_add_reservation(struct hstate *h,
2016 struct vm_area_struct *vma, unsigned long addr)
2017{
2018 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2019}
2020
2021/*
2022 * This routine is called to restore a reservation on error paths. In the
2023 * specific error paths, a huge page was allocated (via alloc_huge_page)
2024 * and is about to be freed. If a reservation for the page existed,
2025 * alloc_huge_page would have consumed the reservation and set PagePrivate
2026 * in the newly allocated page. When the page is freed via free_huge_page,
2027 * the global reservation count will be incremented if PagePrivate is set.
2028 * However, free_huge_page can not adjust the reserve map. Adjust the
2029 * reserve map here to be consistent with global reserve count adjustments
2030 * to be made by free_huge_page.
2031 */
2032static void restore_reserve_on_error(struct hstate *h,
2033 struct vm_area_struct *vma, unsigned long address,
2034 struct page *page)
2035{
2036 if (unlikely(PagePrivate(page))) {
2037 long rc = vma_needs_reservation(h, vma, address);
2038
2039 if (unlikely(rc < 0)) {
2040 /*
2041 * Rare out of memory condition in reserve map
2042 * manipulation. Clear PagePrivate so that
2043 * global reserve count will not be incremented
2044 * by free_huge_page. This will make it appear
2045 * as though the reservation for this page was
2046 * consumed. This may prevent the task from
2047 * faulting in the page at a later time. This
2048 * is better than inconsistent global huge page
2049 * accounting of reserve counts.
2050 */
2051 ClearPagePrivate(page);
2052 } else if (rc) {
2053 rc = vma_add_reservation(h, vma, address);
2054 if (unlikely(rc < 0))
2055 /*
2056 * See above comment about rare out of
2057 * memory condition.
2058 */
2059 ClearPagePrivate(page);
2060 } else
2061 vma_end_reservation(h, vma, address);
2062 }
2063}
2064
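/*
 * Allocate a huge page for the given vma/address. Reservation, subpool
 * and hugetlb cgroup accounting are all handled here. Returns the page
 * on success, or an ERR_PTR (-ENOMEM or -ENOSPC) on failure.
 */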
2065struct page *alloc_huge_page(struct vm_area_struct *vma,
2066 unsigned long addr, int avoid_reserve)
2067{
2068 struct hugepage_subpool *spool = subpool_vma(vma);
2069 struct hstate *h = hstate_vma(vma);
2070 struct page *page;
2071 long map_chg, map_commit;
2072 long gbl_chg;
2073 int ret, idx;
2074 struct hugetlb_cgroup *h_cg;
2075
2076 idx = hstate_index(h);
2077 /*
2078 * Examine the region/reserve map to determine if the process
2079 * has a reservation for the page to be allocated. A return
2080 * code of zero indicates a reservation exists (no change).
2081 */
2082 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2083 if (map_chg < 0)
2084 return ERR_PTR(-ENOMEM);
2085
2086 /*
2087 * Processes that did not create the mapping will have no
2088 * reserves as indicated by the region/reserve map. Check
2089 * that the allocation will not exceed the subpool limit.
2090 * Allocations for MAP_NORESERVE mappings also need to be
2091 * checked against any subpool limit.
2092 */
2093 if (map_chg || avoid_reserve) {
2094 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2095 if (gbl_chg < 0) {
2096 vma_end_reservation(h, vma, addr);
2097 return ERR_PTR(-ENOSPC);
2098 }
2099
2100 /*
2101 * Even though there was no reservation in the region/reserve
2102 * map, there could be reservations associated with the
2103 * subpool that can be used. This would be indicated if the
2104 * return value of hugepage_subpool_get_pages() is zero.
2105 * However, if avoid_reserve is specified we still avoid even
2106 * the subpool reservations.
2107 */
2108 if (avoid_reserve)
2109 gbl_chg = 1;
2110 }
2111
2112 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2113 if (ret)
2114 goto out_subpool_put;
2115
2116 spin_lock(&hugetlb_lock);
2117 /*
2118	 * gbl_chg is passed to indicate whether or not a page must be taken
2119 * from the global free pool (global change). gbl_chg == 0 indicates
2120 * a reservation exists for the allocation.
2121 */
2122 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2123 if (!page) {
2124 spin_unlock(&hugetlb_lock);
2125 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2126 if (!page)
2127 goto out_uncharge_cgroup;
2128 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2129 SetPagePrivate(page);
2130 h->resv_huge_pages--;
2131 }
2132 spin_lock(&hugetlb_lock);
2133 list_move(&page->lru, &h->hugepage_activelist);
2134 /* Fall through */
2135 }
2136 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2137 spin_unlock(&hugetlb_lock);
2138
2139 set_page_private(page, (unsigned long)spool);
2140
2141 map_commit = vma_commit_reservation(h, vma, addr);
2142 if (unlikely(map_chg > map_commit)) {
2143 /*
2144 * The page was added to the reservation map between
2145 * vma_needs_reservation and vma_commit_reservation.
2146 * This indicates a race with hugetlb_reserve_pages.
2147 * Adjust for the subpool count incremented above AND
2148 * in hugetlb_reserve_pages for the same page. Also,
2149 * the reservation count added in hugetlb_reserve_pages
2150 * no longer applies.
2151 */
2152 long rsv_adjust;
2153
2154 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2155 hugetlb_acct_memory(h, -rsv_adjust);
2156 }
2157 return page;
2158
2159out_uncharge_cgroup:
2160 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2161out_subpool_put:
2162 if (map_chg || avoid_reserve)
2163 hugepage_subpool_put_pages(spool, 1);
2164 vma_end_reservation(h, vma, addr);
2165 return ERR_PTR(-ENOSPC);
2166}
2167
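/*
 * Allocate one boot-time huge page from memblock. The page is recorded
 * on the huge_boot_pages list so that gather_bootmem_prealloc() can add
 * it to the hugetlb pool once mem_map is up. Returns 1 on success,
 * 0 on failure.
 */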
2168int alloc_bootmem_huge_page(struct hstate *h)
2169 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2170int __alloc_bootmem_huge_page(struct hstate *h)
2171{
2172 struct huge_bootmem_page *m;
2173 int nr_nodes, node;
2174
2175 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2176 void *addr;
2177
2178 addr = memblock_alloc_try_nid_raw(
2179 huge_page_size(h), huge_page_size(h),
2180 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2181 if (addr) {
2182 /*
2183 * Use the beginning of the huge page to store the
2184 * huge_bootmem_page struct (until gather_bootmem
2185 * puts them into the mem_map).
2186 */
2187 m = addr;
2188 goto found;
2189 }
2190 }
2191 return 0;
2192
2193found:
2194 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2195 /* Put them into a private list first because mem_map is not up yet */
2196 INIT_LIST_HEAD(&m->list);
2197 list_add(&m->list, &huge_boot_pages);
2198 m->hstate = h;
2199 return 1;
2200}
2201
2202static void __init prep_compound_huge_page(struct page *page,
2203 unsigned int order)
2204{
2205 if (unlikely(order > (MAX_ORDER - 1)))
2206 prep_compound_gigantic_page(page, order);
2207 else
2208 prep_compound_page(page, order);
2209}
2210
2211/* Put bootmem huge pages into the standard lists after mem_map is up */
2212static void __init gather_bootmem_prealloc(void)
2213{
2214 struct huge_bootmem_page *m;
2215
2216 list_for_each_entry(m, &huge_boot_pages, list) {
2217 struct page *page = virt_to_page(m);
2218 struct hstate *h = m->hstate;
2219
2220 WARN_ON(page_count(page) != 1);
2221 prep_compound_huge_page(page, h->order);
2222 WARN_ON(PageReserved(page));
2223 prep_new_huge_page(h, page, page_to_nid(page));
2224 put_page(page); /* free it into the hugepage allocator */
2225
2226 /*
2227 * If we had gigantic hugepages allocated at boot time, we need
2228 * to restore the 'stolen' pages to totalram_pages in order to
2229		 * fix confusing memory reports from free(1) and other
2230		 * side-effects, like CommitLimit going negative.
2231 */
2232 if (hstate_is_gigantic(h))
2233 adjust_managed_page_count(page, 1 << h->order);
2234 cond_resched();
2235 }
2236}
2237
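/*
 * Pre-allocate max_huge_pages pages for this hstate at boot time.
 * Gigantic pages come from the bootmem allocator; all other sizes use
 * alloc_pool_huge_page(). If the full request cannot be satisfied,
 * max_huge_pages is lowered to the number actually allocated.
 */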
2238static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2239{
2240 unsigned long i;
2241 nodemask_t *node_alloc_noretry;
2242
2243 if (!hstate_is_gigantic(h)) {
2244 /*
2245 * Bit mask controlling how hard we retry per-node allocations.
2246 * Ignore errors as lower level routines can deal with
2247 * node_alloc_noretry == NULL. If this kmalloc fails at boot
2248 * time, we are likely in bigger trouble.
2249 */
2250 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2251 GFP_KERNEL);
2252 } else {
2253 /* allocations done at boot time */
2254 node_alloc_noretry = NULL;
2255 }
2256
2257 /* bit mask controlling how hard we retry per-node allocations */
2258 if (node_alloc_noretry)
2259 nodes_clear(*node_alloc_noretry);
2260
2261 for (i = 0; i < h->max_huge_pages; ++i) {
2262 if (hstate_is_gigantic(h)) {
2263 if (!alloc_bootmem_huge_page(h))
2264 break;
2265 } else if (!alloc_pool_huge_page(h,
2266 &node_states[N_MEMORY],
2267 node_alloc_noretry))
2268 break;
2269 cond_resched();
2270 }
2271 if (i < h->max_huge_pages) {
2272 char buf[32];
2273
2274 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2275 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2276 h->max_huge_pages, buf, i);
2277 h->max_huge_pages = i;
2278 }
2279
2280 kfree(node_alloc_noretry);
2281}
2282
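/*
 * Record the smallest supported huge page order and populate the pools
 * for all non-gigantic hstates (gigantic pages were already allocated
 * from bootmem while parsing the command line).
 */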
2283static void __init hugetlb_init_hstates(void)
2284{
2285 struct hstate *h;
2286
2287 for_each_hstate(h) {
2288 if (minimum_order > huge_page_order(h))
2289 minimum_order = huge_page_order(h);
2290
2291 /* oversize hugepages were init'ed in early boot */
2292 if (!hstate_is_gigantic(h))
2293 hugetlb_hstate_alloc_pages(h);
2294 }
2295 VM_BUG_ON(minimum_order == UINT_MAX);
2296}
2297
2298static void __init report_hugepages(void)
2299{
2300 struct hstate *h;
2301
2302 for_each_hstate(h) {
2303 char buf[32];
2304
2305 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2306 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2307 buf, h->free_huge_pages);
2308 }
2309}
2310
2311#ifdef CONFIG_HIGHMEM
2312static void try_to_free_low(struct hstate *h, unsigned long count,
2313 nodemask_t *nodes_allowed)
2314{
2315 int i;
2316
2317 if (hstate_is_gigantic(h))
2318 return;
2319
2320 for_each_node_mask(i, *nodes_allowed) {
2321 struct page *page, *next;
2322 struct list_head *freel = &h->hugepage_freelists[i];
2323 list_for_each_entry_safe(page, next, freel, lru) {
2324 if (count >= h->nr_huge_pages)
2325 return;
2326 if (PageHighMem(page))
2327 continue;
2328 list_del(&page->lru);
2329 update_and_free_page(h, page);
2330 h->free_huge_pages--;
2331 h->free_huge_pages_node[page_to_nid(page)]--;
2332 }
2333 }
2334}
2335#else
2336static inline void try_to_free_low(struct hstate *h, unsigned long count,
2337 nodemask_t *nodes_allowed)
2338{
2339}
2340#endif
2341
2342/*
2343 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2344 * balanced by operating on them in a round-robin fashion.
2345 * Returns 1 if an adjustment was made.
2346 */
2347static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2348 int delta)
2349{
2350 int nr_nodes, node;
2351
2352 VM_BUG_ON(delta != -1 && delta != 1);
2353
2354 if (delta < 0) {
2355 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2356 if (h->surplus_huge_pages_node[node])
2357 goto found;
2358 }
2359 } else {
2360 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2361 if (h->surplus_huge_pages_node[node] <
2362 h->nr_huge_pages_node[node])
2363 goto found;
2364 }
2365 }
2366 return 0;
2367
2368found:
2369 h->surplus_huge_pages += delta;
2370 h->surplus_huge_pages_node[node] += delta;
2371 return 1;
2372}
2373
2374#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
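/*
 * Resize the persistent huge page pool of 'h' to 'count' pages, allocating
 * from or freeing to the nodes in 'nodes_allowed'. A node specific request
 * (nid != NUMA_NO_NODE) is first converted to an equivalent global count.
 * Takes hugetlb_lock internally; may drop and reacquire it while allocating.
 */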
2375static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2376 nodemask_t *nodes_allowed)
2377{
2378 unsigned long min_count, ret;
2379 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2380
2381 /*
2382 * Bit mask controlling how hard we retry per-node allocations.
2383 * If we can not allocate the bit mask, do not attempt to allocate
2384 * the requested huge pages.
2385 */
2386 if (node_alloc_noretry)
2387 nodes_clear(*node_alloc_noretry);
2388 else
2389 return -ENOMEM;
2390
2391 spin_lock(&hugetlb_lock);
2392
2393 /*
2394 * Check for a node specific request.
2395 * Changing node specific huge page count may require a corresponding
2396 * change to the global count. In any case, the passed node mask
2397 * (nodes_allowed) will restrict alloc/free to the specified node.
2398 */
2399 if (nid != NUMA_NO_NODE) {
2400 unsigned long old_count = count;
2401
2402 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2403 /*
2404 * User may have specified a large count value which caused the
2405 * above calculation to overflow. In this case, they wanted
2406 * to allocate as many huge pages as possible. Set count to
2407 * largest possible value to align with their intention.
2408 */
2409 if (count < old_count)
2410 count = ULONG_MAX;
2411 }
2412
2413 /*
2414	 * Runtime allocation of gigantic pages depends on the capability for
2415	 * large page range allocation.
2416	 * If the system does not provide this feature, return an error when
2417	 * the user tries to allocate gigantic pages, but let the user free the
2418	 * boot-time allocated gigantic pages.
2419 */
2420 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2421 if (count > persistent_huge_pages(h)) {
2422 spin_unlock(&hugetlb_lock);
2423 NODEMASK_FREE(node_alloc_noretry);
2424 return -EINVAL;
2425 }
2426 /* Fall through to decrease pool */
2427 }
2428
2429 /*
2430 * Increase the pool size
2431 * First take pages out of surplus state. Then make up the
2432 * remaining difference by allocating fresh huge pages.
2433 *
2434 * We might race with alloc_surplus_huge_page() here and be unable
2435 * to convert a surplus huge page to a normal huge page. That is
2436 * not critical, though, it just means the overall size of the
2437 * pool might be one hugepage larger than it needs to be, but
2438 * within all the constraints specified by the sysctls.
2439 */
2440 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2441 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2442 break;
2443 }
2444
2445 while (count > persistent_huge_pages(h)) {
2446 /*
2447 * If this allocation races such that we no longer need the
2448 * page, free_huge_page will handle it by freeing the page
2449 * and reducing the surplus.
2450 */
2451 spin_unlock(&hugetlb_lock);
2452
2453 /* yield cpu to avoid soft lockup */
2454 cond_resched();
2455
2456 ret = alloc_pool_huge_page(h, nodes_allowed,
2457 node_alloc_noretry);
2458 spin_lock(&hugetlb_lock);
2459 if (!ret)
2460 goto out;
2461
2462 /* Bail for signals. Probably ctrl-c from user */
2463 if (signal_pending(current))
2464 goto out;
2465 }
2466
2467 /*
2468 * Decrease the pool size
2469 * First return free pages to the buddy allocator (being careful
2470 * to keep enough around to satisfy reservations). Then place
2471 * pages into surplus state as needed so the pool will shrink
2472 * to the desired size as pages become free.
2473 *
2474 * By placing pages into the surplus state independent of the
2475 * overcommit value, we are allowing the surplus pool size to
2476 * exceed overcommit. There are few sane options here. Since
2477 * alloc_surplus_huge_page() is checking the global counter,
2478 * though, we'll note that we're not allowed to exceed surplus
2479	 * and won't grow the pool anywhere else. Not until one of the
2480	 * sysctls is changed, or the surplus pages go out of use.
2481 */
2482 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2483 min_count = max(count, min_count);
2484 try_to_free_low(h, min_count, nodes_allowed);
2485 while (min_count < persistent_huge_pages(h)) {
2486 if (!free_pool_huge_page(h, nodes_allowed, 0))
2487 break;
2488 cond_resched_lock(&hugetlb_lock);
2489 }
2490 while (count < persistent_huge_pages(h)) {
2491 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2492 break;
2493 }
2494out:
2495 h->max_huge_pages = persistent_huge_pages(h);
2496 spin_unlock(&hugetlb_lock);
2497
2498 NODEMASK_FREE(node_alloc_noretry);
2499
2500 return 0;
2501}
2502
2503#define HSTATE_ATTR_RO(_name) \
2504 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2505
2506#define HSTATE_ATTR(_name) \
2507 static struct kobj_attribute _name##_attr = \
2508 __ATTR(_name, 0644, _name##_show, _name##_store)
2509
2510static struct kobject *hugepages_kobj;
2511static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2512
2513static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2514
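/*
 * Map a sysfs kobject back to its hstate. For the global hstate
 * directories *nidp is set to NUMA_NO_NODE; otherwise the lookup is
 * delegated to kobj_to_node_hstate(), which also returns the node id.
 */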
2515static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2516{
2517 int i;
2518
2519 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2520 if (hstate_kobjs[i] == kobj) {
2521 if (nidp)
2522 *nidp = NUMA_NO_NODE;
2523 return &hstates[i];
2524 }
2525
2526 return kobj_to_node_hstate(kobj, nidp);
2527}
2528
2529static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2530 struct kobj_attribute *attr, char *buf)
2531{
2532 struct hstate *h;
2533 unsigned long nr_huge_pages;
2534 int nid;
2535
2536 h = kobj_to_hstate(kobj, &nid);
2537 if (nid == NUMA_NO_NODE)
2538 nr_huge_pages = h->nr_huge_pages;
2539 else
2540 nr_huge_pages = h->nr_huge_pages_node[nid];
2541
2542 return sprintf(buf, "%lu\n", nr_huge_pages);
2543}
2544
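/*
 * Common handler for the nr_hugepages sysfs and sysctl writers. Builds the
 * allowed nodemask (optionally from the task mempolicy, or from a single
 * node) and hands the request to set_max_huge_pages().
 */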
2545static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2546 struct hstate *h, int nid,
2547 unsigned long count, size_t len)
2548{
2549 int err;
2550 nodemask_t nodes_allowed, *n_mask;
2551
2552 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2553 return -EINVAL;
2554
2555 if (nid == NUMA_NO_NODE) {
2556 /*
2557 * global hstate attribute
2558 */
2559 if (!(obey_mempolicy &&
2560 init_nodemask_of_mempolicy(&nodes_allowed)))
2561 n_mask = &node_states[N_MEMORY];
2562 else
2563 n_mask = &nodes_allowed;
2564 } else {
2565 /*
2566 * Node specific request. count adjustment happens in
2567 * set_max_huge_pages() after acquiring hugetlb_lock.
2568 */
2569 init_nodemask_of_node(&nodes_allowed, nid);
2570 n_mask = &nodes_allowed;
2571 }
2572
2573 err = set_max_huge_pages(h, count, nid, n_mask);
2574
2575 return err ? err : len;
2576}
2577
2578static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2579 struct kobject *kobj, const char *buf,
2580 size_t len)
2581{
2582 struct hstate *h;
2583 unsigned long count;
2584 int nid;
2585 int err;
2586
2587 err = kstrtoul(buf, 10, &count);
2588 if (err)
2589 return err;
2590
2591 h = kobj_to_hstate(kobj, &nid);
2592 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2593}
2594
2595static ssize_t nr_hugepages_show(struct kobject *kobj,
2596 struct kobj_attribute *attr, char *buf)
2597{
2598 return nr_hugepages_show_common(kobj, attr, buf);
2599}
2600
2601static ssize_t nr_hugepages_store(struct kobject *kobj,
2602 struct kobj_attribute *attr, const char *buf, size_t len)
2603{
2604 return nr_hugepages_store_common(false, kobj, buf, len);
2605}
2606HSTATE_ATTR(nr_hugepages);
2607
2608#ifdef CONFIG_NUMA
2609
2610/*
2611 * hstate attribute for optionally mempolicy-based constraint on persistent
2612 * huge page alloc/free.
2613 */
2614static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2615 struct kobj_attribute *attr, char *buf)
2616{
2617 return nr_hugepages_show_common(kobj, attr, buf);
2618}
2619
2620static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2621 struct kobj_attribute *attr, const char *buf, size_t len)
2622{
2623 return nr_hugepages_store_common(true, kobj, buf, len);
2624}
2625HSTATE_ATTR(nr_hugepages_mempolicy);
2626#endif
2627
2628
2629static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2630 struct kobj_attribute *attr, char *buf)
2631{
2632 struct hstate *h = kobj_to_hstate(kobj, NULL);
2633 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2634}
2635
2636static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2637 struct kobj_attribute *attr, const char *buf, size_t count)
2638{
2639 int err;
2640 unsigned long input;
2641 struct hstate *h = kobj_to_hstate(kobj, NULL);
2642
2643 if (hstate_is_gigantic(h))
2644 return -EINVAL;
2645
2646 err = kstrtoul(buf, 10, &input);
2647 if (err)
2648 return err;
2649
2650 spin_lock(&hugetlb_lock);
2651 h->nr_overcommit_huge_pages = input;
2652 spin_unlock(&hugetlb_lock);
2653
2654 return count;
2655}
2656HSTATE_ATTR(nr_overcommit_hugepages);
2657
2658static ssize_t free_hugepages_show(struct kobject *kobj,
2659 struct kobj_attribute *attr, char *buf)
2660{
2661 struct hstate *h;
2662 unsigned long free_huge_pages;
2663 int nid;
2664
2665 h = kobj_to_hstate(kobj, &nid);
2666 if (nid == NUMA_NO_NODE)
2667 free_huge_pages = h->free_huge_pages;
2668 else
2669 free_huge_pages = h->free_huge_pages_node[nid];
2670
2671 return sprintf(buf, "%lu\n", free_huge_pages);
2672}
2673HSTATE_ATTR_RO(free_hugepages);
2674
2675static ssize_t resv_hugepages_show(struct kobject *kobj,
2676 struct kobj_attribute *attr, char *buf)
2677{
2678 struct hstate *h = kobj_to_hstate(kobj, NULL);
2679 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2680}
2681HSTATE_ATTR_RO(resv_hugepages);
2682
2683static ssize_t surplus_hugepages_show(struct kobject *kobj,
2684 struct kobj_attribute *attr, char *buf)
2685{
2686 struct hstate *h;
2687 unsigned long surplus_huge_pages;
2688 int nid;
2689
2690 h = kobj_to_hstate(kobj, &nid);
2691 if (nid == NUMA_NO_NODE)
2692 surplus_huge_pages = h->surplus_huge_pages;
2693 else
2694 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2695
2696 return sprintf(buf, "%lu\n", surplus_huge_pages);
2697}
2698HSTATE_ATTR_RO(surplus_hugepages);
2699
2700static struct attribute *hstate_attrs[] = {
2701 &nr_hugepages_attr.attr,
2702 &nr_overcommit_hugepages_attr.attr,
2703 &free_hugepages_attr.attr,
2704 &resv_hugepages_attr.attr,
2705 &surplus_hugepages_attr.attr,
2706#ifdef CONFIG_NUMA
2707 &nr_hugepages_mempolicy_attr.attr,
2708#endif
2709 NULL,
2710};
2711
2712static const struct attribute_group hstate_attr_group = {
2713 .attrs = hstate_attrs,
2714};
2715
2716static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2717 struct kobject **hstate_kobjs,
2718 const struct attribute_group *hstate_attr_group)
2719{
2720 int retval;
2721 int hi = hstate_index(h);
2722
2723 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2724 if (!hstate_kobjs[hi])
2725 return -ENOMEM;
2726
2727 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2728 if (retval)
2729 kobject_put(hstate_kobjs[hi]);
2730
2731 return retval;
2732}
2733
2734static void __init hugetlb_sysfs_init(void)
2735{
2736 struct hstate *h;
2737 int err;
2738
2739 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2740 if (!hugepages_kobj)
2741 return;
2742
2743 for_each_hstate(h) {
2744 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2745 hstate_kobjs, &hstate_attr_group);
2746 if (err)
2747 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2748 }
2749}
2750
2751#ifdef CONFIG_NUMA
2752
2753/*
2754 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2755 * with node devices in node_devices[] using a parallel array. The array
2756 * index of a node device or _hstate == node id.
2757 * This is here to avoid any static dependency of the node device driver, in
2758 * the base kernel, on the hugetlb module.
2759 */
2760struct node_hstate {
2761 struct kobject *hugepages_kobj;
2762 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2763};
2764static struct node_hstate node_hstates[MAX_NUMNODES];
2765
2766/*
2767 * A subset of global hstate attributes for node devices
2768 */
2769static struct attribute *per_node_hstate_attrs[] = {
2770 &nr_hugepages_attr.attr,
2771 &free_hugepages_attr.attr,
2772 &surplus_hugepages_attr.attr,
2773 NULL,
2774};
2775
2776static const struct attribute_group per_node_hstate_attr_group = {
2777 .attrs = per_node_hstate_attrs,
2778};
2779
2780/*
2781 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2782 * Returns node id via non-NULL nidp.
2783 */
2784static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2785{
2786 int nid;
2787
2788 for (nid = 0; nid < nr_node_ids; nid++) {
2789 struct node_hstate *nhs = &node_hstates[nid];
2790 int i;
2791 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2792 if (nhs->hstate_kobjs[i] == kobj) {
2793 if (nidp)
2794 *nidp = nid;
2795 return &hstates[i];
2796 }
2797 }
2798
2799 BUG();
2800 return NULL;
2801}
2802
2803/*
2804 * Unregister hstate attributes from a single node device.
2805 * No-op if no hstate attributes attached.
2806 */
2807static void hugetlb_unregister_node(struct node *node)
2808{
2809 struct hstate *h;
2810 struct node_hstate *nhs = &node_hstates[node->dev.id];
2811
2812 if (!nhs->hugepages_kobj)
2813 return; /* no hstate attributes */
2814
2815 for_each_hstate(h) {
2816 int idx = hstate_index(h);
2817 if (nhs->hstate_kobjs[idx]) {
2818 kobject_put(nhs->hstate_kobjs[idx]);
2819 nhs->hstate_kobjs[idx] = NULL;
2820 }
2821 }
2822
2823 kobject_put(nhs->hugepages_kobj);
2824 nhs->hugepages_kobj = NULL;
2825}
2826
2827
2828/*
2829 * Register hstate attributes for a single node device.
2830 * No-op if attributes already registered.
2831 */
2832static void hugetlb_register_node(struct node *node)
2833{
2834 struct hstate *h;
2835 struct node_hstate *nhs = &node_hstates[node->dev.id];
2836 int err;
2837
2838 if (nhs->hugepages_kobj)
2839 return; /* already allocated */
2840
2841 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2842 &node->dev.kobj);
2843 if (!nhs->hugepages_kobj)
2844 return;
2845
2846 for_each_hstate(h) {
2847 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2848 nhs->hstate_kobjs,
2849 &per_node_hstate_attr_group);
2850 if (err) {
2851 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2852 h->name, node->dev.id);
2853 hugetlb_unregister_node(node);
2854 break;
2855 }
2856 }
2857}
2858
2859/*
2860 * hugetlb init time: register hstate attributes for all registered node
2861 * devices of nodes that have memory. All on-line nodes should have
2862 * registered their associated device by this time.
2863 */
2864static void __init hugetlb_register_all_nodes(void)
2865{
2866 int nid;
2867
2868 for_each_node_state(nid, N_MEMORY) {
2869 struct node *node = node_devices[nid];
2870 if (node->dev.id == nid)
2871 hugetlb_register_node(node);
2872 }
2873
2874 /*
2875 * Let the node device driver know we're here so it can
2876 * [un]register hstate attributes on node hotplug.
2877 */
2878 register_hugetlbfs_with_node(hugetlb_register_node,
2879 hugetlb_unregister_node);
2880}
2881#else /* !CONFIG_NUMA */
2882
2883static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2884{
2885 BUG();
2886 if (nidp)
2887 *nidp = -1;
2888 return NULL;
2889}
2890
2891static void hugetlb_register_all_nodes(void) { }
2892
2893#endif
2894
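/*
 * Subsystem initialization: pick the default hstate, finish boot-time
 * pool allocation, register sysfs and per-node attributes and the hugetlb
 * cgroup files, and set up the fault mutex table.
 */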
2895static int __init hugetlb_init(void)
2896{
2897 int i;
2898
2899 if (!hugepages_supported())
2900 return 0;
2901
2902 if (!size_to_hstate(default_hstate_size)) {
2903 if (default_hstate_size != 0) {
2904 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2905 default_hstate_size, HPAGE_SIZE);
2906 }
2907
2908 default_hstate_size = HPAGE_SIZE;
2909 if (!size_to_hstate(default_hstate_size))
2910 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2911 }
2912 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2913 if (default_hstate_max_huge_pages) {
2914 if (!default_hstate.max_huge_pages)
2915 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2916 }
2917
2918 hugetlb_init_hstates();
2919 gather_bootmem_prealloc();
2920 report_hugepages();
2921
2922 hugetlb_sysfs_init();
2923 hugetlb_register_all_nodes();
2924 hugetlb_cgroup_file_init();
2925
2926#ifdef CONFIG_SMP
2927 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2928#else
2929 num_fault_mutexes = 1;
2930#endif
2931 hugetlb_fault_mutex_table =
2932 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2933 GFP_KERNEL);
2934 BUG_ON(!hugetlb_fault_mutex_table);
2935
2936 for (i = 0; i < num_fault_mutexes; i++)
2937 mutex_init(&hugetlb_fault_mutex_table[i]);
2938 return 0;
2939}
2940subsys_initcall(hugetlb_init);
2941
2942/* Should be called when an unsupported hugepagesz=... option is parsed */
2943void __init hugetlb_bad_size(void)
2944{
2945 parsed_valid_hugepagesz = false;
2946}
2947
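/*
 * Register a new hstate for a huge page size of PAGE_SIZE << order.
 * Called when a hugepagesz= option is parsed and for the architecture's
 * default huge page size.
 */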
2948void __init hugetlb_add_hstate(unsigned int order)
2949{
2950 struct hstate *h;
2951 unsigned long i;
2952
2953 if (size_to_hstate(PAGE_SIZE << order)) {
2954 pr_warn("hugepagesz= specified twice, ignoring\n");
2955 return;
2956 }
2957 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2958 BUG_ON(order == 0);
2959 h = &hstates[hugetlb_max_hstate++];
2960 h->order = order;
2961 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2962 h->nr_huge_pages = 0;
2963 h->free_huge_pages = 0;
2964 for (i = 0; i < MAX_NUMNODES; ++i)
2965 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2966 INIT_LIST_HEAD(&h->hugepage_activelist);
2967 h->next_nid_to_alloc = first_memory_node;
2968 h->next_nid_to_free = first_memory_node;
2969 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2970 huge_page_size(h)/1024);
2971
2972 parsed_hstate = h;
2973}
2974
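/*
 * hugepages= command line processing. The count applies to the most
 * recently parsed hugepagesz= hstate, or to the default hstate if no
 * size has been specified yet. Gigantic pages are allocated here, early,
 * so that the bootmem allocator can still be used.
 */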
2975static int __init hugetlb_nrpages_setup(char *s)
2976{
2977 unsigned long *mhp;
2978 static unsigned long *last_mhp;
2979
2980 if (!parsed_valid_hugepagesz) {
2981		pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
2982			s);
2983 parsed_valid_hugepagesz = true;
2984 return 1;
2985 }
2986 /*
2987 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2988 * so this hugepages= parameter goes to the "default hstate".
2989 */
2990 else if (!hugetlb_max_hstate)
2991 mhp = &default_hstate_max_huge_pages;
2992 else
2993 mhp = &parsed_hstate->max_huge_pages;
2994
2995 if (mhp == last_mhp) {
2996 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2997 return 1;
2998 }
2999
3000 if (sscanf(s, "%lu", mhp) <= 0)
3001 *mhp = 0;
3002
3003 /*
3004 * Global state is always initialized later in hugetlb_init.
3005 * But we need to allocate >= MAX_ORDER hstates here early to still
3006 * use the bootmem allocator.
3007 */
3008 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3009 hugetlb_hstate_alloc_pages(parsed_hstate);
3010
3011 last_mhp = mhp;
3012
3013 return 1;
3014}
3015__setup("hugepages=", hugetlb_nrpages_setup);
3016
3017static int __init hugetlb_default_setup(char *s)
3018{
3019 default_hstate_size = memparse(s, &s);
3020 return 1;
3021}
3022__setup("default_hugepagesz=", hugetlb_default_setup);
3023
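/* Sum a per-node counter array over the current task's cpuset nodes. */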
3024static unsigned int cpuset_mems_nr(unsigned int *array)
3025{
3026 int node;
3027 unsigned int nr = 0;
3028
3029 for_each_node_mask(node, cpuset_current_mems_allowed)
3030 nr += array[node];
3031
3032 return nr;
3033}
3034
3035#ifdef CONFIG_SYSCTL
3036static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3037 struct ctl_table *table, int write,
3038 void __user *buffer, size_t *length, loff_t *ppos)
3039{
3040 struct hstate *h = &default_hstate;
3041 unsigned long tmp = h->max_huge_pages;
3042 int ret;
3043
3044 if (!hugepages_supported())
3045 return -EOPNOTSUPP;
3046
3047 table->data = &tmp;
3048 table->maxlen = sizeof(unsigned long);
3049 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3050 if (ret)
3051 goto out;
3052
3053 if (write)
3054 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3055 NUMA_NO_NODE, tmp, *length);
3056out:
3057 return ret;
3058}
3059
3060int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3061 void __user *buffer, size_t *length, loff_t *ppos)
3062{
3063
3064 return hugetlb_sysctl_handler_common(false, table, write,
3065 buffer, length, ppos);
3066}
3067
3068#ifdef CONFIG_NUMA
3069int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3070 void __user *buffer, size_t *length, loff_t *ppos)
3071{
3072 return hugetlb_sysctl_handler_common(true, table, write,
3073 buffer, length, ppos);
3074}
3075#endif /* CONFIG_NUMA */
3076
3077int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3078 void __user *buffer,
3079 size_t *length, loff_t *ppos)
3080{
3081 struct hstate *h = &default_hstate;
3082 unsigned long tmp;
3083 int ret;
3084
3085 if (!hugepages_supported())
3086 return -EOPNOTSUPP;
3087
3088 tmp = h->nr_overcommit_huge_pages;
3089
3090 if (write && hstate_is_gigantic(h))
3091 return -EINVAL;
3092
3093 table->data = &tmp;
3094 table->maxlen = sizeof(unsigned long);
3095 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3096 if (ret)
3097 goto out;
3098
3099 if (write) {
3100 spin_lock(&hugetlb_lock);
3101 h->nr_overcommit_huge_pages = tmp;
3102 spin_unlock(&hugetlb_lock);
3103 }
3104out:
3105 return ret;
3106}
3107
3108#endif /* CONFIG_SYSCTL */
3109
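/*
 * Report hugetlb pool statistics in /proc/meminfo. The legacy HugePages_*
 * fields describe only the default hstate; the Hugetlb: total covers all
 * huge page sizes.
 */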
3110void hugetlb_report_meminfo(struct seq_file *m)
3111{
3112 struct hstate *h;
3113 unsigned long total = 0;
3114
3115 if (!hugepages_supported())
3116 return;
3117
3118 for_each_hstate(h) {
3119 unsigned long count = h->nr_huge_pages;
3120
3121 total += (PAGE_SIZE << huge_page_order(h)) * count;
3122
3123 if (h == &default_hstate)
3124 seq_printf(m,
3125 "HugePages_Total: %5lu\n"
3126 "HugePages_Free: %5lu\n"
3127 "HugePages_Rsvd: %5lu\n"
3128 "HugePages_Surp: %5lu\n"
3129 "Hugepagesize: %8lu kB\n",
3130 count,
3131 h->free_huge_pages,
3132 h->resv_huge_pages,
3133 h->surplus_huge_pages,
3134 (PAGE_SIZE << huge_page_order(h)) / 1024);
3135 }
3136
3137 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3138}
3139
3140int hugetlb_report_node_meminfo(int nid, char *buf)
3141{
3142 struct hstate *h = &default_hstate;
3143 if (!hugepages_supported())
3144 return 0;
3145 return sprintf(buf,
3146 "Node %d HugePages_Total: %5u\n"
3147 "Node %d HugePages_Free: %5u\n"
3148 "Node %d HugePages_Surp: %5u\n",
3149 nid, h->nr_huge_pages_node[nid],
3150 nid, h->free_huge_pages_node[nid],
3151 nid, h->surplus_huge_pages_node[nid]);
3152}
3153
3154void hugetlb_show_meminfo(void)
3155{
3156 struct hstate *h;
3157 int nid;
3158
3159 if (!hugepages_supported())
3160 return;
3161
3162 for_each_node_state(nid, N_MEMORY)
3163 for_each_hstate(h)
3164 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3165 nid,
3166 h->nr_huge_pages_node[nid],
3167 h->free_huge_pages_node[nid],
3168 h->surplus_huge_pages_node[nid],
3169 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3170}
3171
3172void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3173{
3174 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3175 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3176}
3177
3178/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3179unsigned long hugetlb_total_pages(void)
3180{
3181 struct hstate *h;
3182 unsigned long nr_total_pages = 0;
3183
3184 for_each_hstate(h)
3185 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3186 return nr_total_pages;
3187}
3188
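/*
 * Adjust the global reservation count by 'delta' pages, growing the pool
 * with surplus pages when needed (delta > 0) and returning unused surplus
 * pages when a reservation is released (delta < 0). Returns 0 on success
 * or -ENOMEM if the pool cannot be grown.
 */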
3189static int hugetlb_acct_memory(struct hstate *h, long delta)
3190{
3191 int ret = -ENOMEM;
3192
3193 spin_lock(&hugetlb_lock);
3194 /*
3195 * When cpuset is configured, it breaks the strict hugetlb page
3196 * reservation as the accounting is done on a global variable. Such
3197 * reservation is completely rubbish in the presence of cpuset because
3198 * the reservation is not checked against page availability for the
3199	 * current cpuset. The application can still potentially be OOM'ed by the
3200	 * kernel due to a lack of free huge pages in the cpuset the task is in.
3201	 * Attempting to enforce strict accounting with cpuset is almost
3202	 * impossible (or too ugly) because cpuset is so fluid that a
3203	 * task or memory node can be dynamically moved between cpusets.
3204 *
3205 * The change of semantics for shared hugetlb mapping with cpuset is
3206 * undesirable. However, in order to preserve some of the semantics,
3207 * we fall back to check against current free page availability as
3208 * a best attempt and hopefully to minimize the impact of changing
3209 * semantics that cpuset has.
3210 */
3211 if (delta > 0) {
3212 if (gather_surplus_pages(h, delta) < 0)
3213 goto out;
3214
3215 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3216 return_unused_surplus_pages(h, delta);
3217 goto out;
3218 }
3219 }
3220
3221 ret = 0;
3222 if (delta < 0)
3223 return_unused_surplus_pages(h, (unsigned long) -delta);
3224
3225out:
3226 spin_unlock(&hugetlb_lock);
3227 return ret;
3228}
3229
3230static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3231{
3232 struct resv_map *resv = vma_resv_map(vma);
3233
3234 /*
3235	 * This new VMA should share its sibling's reservation map if present.
3236 * The VMA will only ever have a valid reservation map pointer where
3237 * it is being copied for another still existing VMA. As that VMA
3238 * has a reference to the reservation map it cannot disappear until
3239 * after this open call completes. It is therefore safe to take a
3240 * new reference here without additional locking.
3241 */
3242 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3243 kref_get(&resv->refs);
3244}
3245
3246static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3247{
3248 struct hstate *h = hstate_vma(vma);
3249 struct resv_map *resv = vma_resv_map(vma);
3250 struct hugepage_subpool *spool = subpool_vma(vma);
3251 unsigned long reserve, start, end;
3252 long gbl_reserve;
3253
3254 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3255 return;
3256
3257 start = vma_hugecache_offset(h, vma, vma->vm_start);
3258 end = vma_hugecache_offset(h, vma, vma->vm_end);
3259
3260 reserve = (end - start) - region_count(resv, start, end);
3261
3262 kref_put(&resv->refs, resv_map_release);
3263
3264 if (reserve) {
3265 /*
3266 * Decrement reserve counts. The global reserve count may be
3267 * adjusted if the subpool has a minimum size.
3268 */
3269 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3270 hugetlb_acct_memory(h, -gbl_reserve);
3271 }
3272}
3273
3274static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3275{
3276 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3277 return -EINVAL;
3278 return 0;
3279}
3280
3281static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3282{
3283 struct hstate *hstate = hstate_vma(vma);
3284
3285 return 1UL << huge_page_shift(hstate);
3286}
3287
3288/*
3289 * We cannot handle pagefaults against hugetlb pages at all. They cause
3290 * handle_mm_fault() to try to instantiate regular-sized pages in the
3291 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3292 * this far.
3293 */
3294static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3295{
3296 BUG();
3297 return 0;
3298}
3299
3300/*
3301 * When a new function is introduced to vm_operations_struct and added
3302 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3303 * This is because under System V memory model, mappings created via
3304 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3305 * their original vm_ops are overwritten with shm_vm_ops.
3306 */
3307const struct vm_operations_struct hugetlb_vm_ops = {
3308 .fault = hugetlb_vm_op_fault,
3309 .open = hugetlb_vm_op_open,
3310 .close = hugetlb_vm_op_close,
3311 .split = hugetlb_vm_op_split,
3312 .pagesize = hugetlb_vm_op_pagesize,
3313};
3314
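/*
 * Construct a huge page table entry for 'page' with the vma's protection
 * bits, marked writable and dirty when 'writable' is set.
 */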
3315static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3316 int writable)
3317{
3318 pte_t entry;
3319
3320 if (writable) {
3321 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3322 vma->vm_page_prot)));
3323 } else {
3324 entry = huge_pte_wrprotect(mk_huge_pte(page,
3325 vma->vm_page_prot));
3326 }
3327 entry = pte_mkyoung(entry);
3328 entry = pte_mkhuge(entry);
3329 entry = arch_make_huge_pte(entry, vma, page, writable);
3330
3331 return entry;
3332}
3333
3334static void set_huge_ptep_writable(struct vm_area_struct *vma,
3335 unsigned long address, pte_t *ptep)
3336{
3337 pte_t entry;
3338
3339 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3340 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3341 update_mmu_cache(vma, address, ptep);
3342}
3343
3344bool is_hugetlb_entry_migration(pte_t pte)
3345{
3346 swp_entry_t swp;
3347
3348 if (huge_pte_none(pte) || pte_present(pte))
3349 return false;
3350 swp = pte_to_swp_entry(pte);
3351 if (non_swap_entry(swp) && is_migration_entry(swp))
3352 return true;
3353 else
3354 return false;
3355}
3356
3357static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3358{
3359 swp_entry_t swp;
3360
3361 if (huge_pte_none(pte) || pte_present(pte))
3362 return 0;
3363 swp = pte_to_swp_entry(pte);
3364 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3365 return 1;
3366 else
3367 return 0;
3368}
3369
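/*
 * Copy the hugetlb page table entries of 'vma' from the parent mm 'src'
 * to the child mm 'dst' at fork time. Shared page tables are skipped,
 * migration/hwpoison swap entries are propagated, and private writable
 * (COW) mappings are write protected in both parent and child.
 */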
3370int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3371 struct vm_area_struct *vma)
3372{
3373 pte_t *src_pte, *dst_pte, entry, dst_entry;
3374 struct page *ptepage;
3375 unsigned long addr;
3376 int cow;
3377 struct hstate *h = hstate_vma(vma);
3378 unsigned long sz = huge_page_size(h);
3379 struct mmu_notifier_range range;
3380 int ret = 0;
3381
3382 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3383
3384 if (cow) {
3385 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3386 vma->vm_start,
3387 vma->vm_end);
3388 mmu_notifier_invalidate_range_start(&range);
3389 }
3390
3391 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3392 spinlock_t *src_ptl, *dst_ptl;
3393 src_pte = huge_pte_offset(src, addr, sz);
3394 if (!src_pte)
3395 continue;
3396 dst_pte = huge_pte_alloc(dst, addr, sz);
3397 if (!dst_pte) {
3398 ret = -ENOMEM;
3399 break;
3400 }
3401
3402 /*
3403 * If the pagetables are shared don't copy or take references.
3404 * dst_pte == src_pte is the common case of src/dest sharing.
3405 *
3406 * However, src could have 'unshared' and dst shares with
3407 * another vma. If dst_pte !none, this implies sharing.
3408 * Check here before taking page table lock, and once again
3409 * after taking the lock below.
3410 */
3411 dst_entry = huge_ptep_get(dst_pte);
3412 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3413 continue;
3414
3415 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3416 src_ptl = huge_pte_lockptr(h, src, src_pte);
3417 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3418 entry = huge_ptep_get(src_pte);
3419 dst_entry = huge_ptep_get(dst_pte);
3420 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3421 /*
3422 * Skip if src entry none. Also, skip in the
3423 * unlikely case dst entry !none as this implies
3424 * sharing with another vma.
3425 */
3426 ;
3427 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3428 is_hugetlb_entry_hwpoisoned(entry))) {
3429 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3430
3431 if (is_write_migration_entry(swp_entry) && cow) {
3432 /*
3433 * COW mappings require pages in both
3434 * parent and child to be set to read.
3435 */
3436 make_migration_entry_read(&swp_entry);
3437 entry = swp_entry_to_pte(swp_entry);
3438 set_huge_swap_pte_at(src, addr, src_pte,
3439 entry, sz);
3440 }
3441 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3442 } else {
3443 if (cow) {
3444 /*
3445 * No need to notify as we are downgrading page
3446 * table protection not changing it to point
3447 * to a new page.
3448 *
3449 * See Documentation/vm/mmu_notifier.rst
3450 */
3451 huge_ptep_set_wrprotect(src, addr, src_pte);
3452 }
3453 entry = huge_ptep_get(src_pte);
3454 ptepage = pte_page(entry);
3455 get_page(ptepage);
3456 page_dup_rmap(ptepage, true);
3457 set_huge_pte_at(dst, addr, dst_pte, entry);
3458 hugetlb_count_add(pages_per_huge_page(h), dst);
3459 }
3460 spin_unlock(src_ptl);
3461 spin_unlock(dst_ptl);
3462 }
3463
3464 if (cow)
3465 mmu_notifier_invalidate_range_end(&range);
3466
3467 return ret;
3468}
3469
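/*
 * Unmap the hugetlb pages in [start, end) of 'vma', clearing the ptes and
 * dropping the page references via the mmu_gather. If 'ref_page' is given,
 * only that page is unmapped and the vma is marked HPAGE_RESV_UNMAPPED.
 */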
3470void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3471 unsigned long start, unsigned long end,
3472 struct page *ref_page)
3473{
3474 struct mm_struct *mm = vma->vm_mm;
3475 unsigned long address;
3476 pte_t *ptep;
3477 pte_t pte;
3478 spinlock_t *ptl;
3479 struct page *page;
3480 struct hstate *h = hstate_vma(vma);
3481 unsigned long sz = huge_page_size(h);
3482 struct mmu_notifier_range range;
3483
3484 WARN_ON(!is_vm_hugetlb_page(vma));
3485 BUG_ON(start & ~huge_page_mask(h));
3486 BUG_ON(end & ~huge_page_mask(h));
3487
3488 /*
3489	 * This is a hugetlb vma; all the pte entries should point
3490	 * to huge pages.
3491 */
3492 tlb_change_page_size(tlb, sz);
3493 tlb_start_vma(tlb, vma);
3494
3495 /*
3496 * If sharing possible, alert mmu notifiers of worst case.
3497 */
3498 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3499 end);
3500 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3501 mmu_notifier_invalidate_range_start(&range);
3502 address = start;
3503 for (; address < end; address += sz) {
3504 ptep = huge_pte_offset(mm, address, sz);
3505 if (!ptep)
3506 continue;
3507
3508 ptl = huge_pte_lock(h, mm, ptep);
3509 if (huge_pmd_unshare(mm, &address, ptep)) {
3510 spin_unlock(ptl);
3511 /*
3512 * We just unmapped a page of PMDs by clearing a PUD.
3513 * The caller's TLB flush range should cover this area.
3514 */
3515 continue;
3516 }
3517
3518 pte = huge_ptep_get(ptep);
3519 if (huge_pte_none(pte)) {
3520 spin_unlock(ptl);
3521 continue;
3522 }
3523
3524 /*
3525 * Migrating hugepage or HWPoisoned hugepage is already
3526 * unmapped and its refcount is dropped, so just clear pte here.
3527 */
3528 if (unlikely(!pte_present(pte))) {
3529 huge_pte_clear(mm, address, ptep, sz);
3530 spin_unlock(ptl);
3531 continue;
3532 }
3533
3534 page = pte_page(pte);
3535 /*
3536 * If a reference page is supplied, it is because a specific
3537 * page is being unmapped, not a range. Ensure the page we
3538 * are about to unmap is the actual page of interest.
3539 */
3540 if (ref_page) {
3541 if (page != ref_page) {
3542 spin_unlock(ptl);
3543 continue;
3544 }
3545 /*
3546 * Mark the VMA as having unmapped its page so that
3547 * future faults in this VMA will fail rather than
3548 * looking like data was lost
3549			 * looking like data was lost.
3550 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3551 }
3552
3553 pte = huge_ptep_get_and_clear(mm, address, ptep);
3554 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3555 if (huge_pte_dirty(pte))
3556 set_page_dirty(page);
3557
3558 hugetlb_count_sub(pages_per_huge_page(h), mm);
3559 page_remove_rmap(page, true);
3560
3561 spin_unlock(ptl);
3562 tlb_remove_page_size(tlb, page, huge_page_size(h));
3563 /*
3564 * Bail out after unmapping reference page if supplied
3565 */
3566 if (ref_page)
3567 break;
3568 }
3569 mmu_notifier_invalidate_range_end(&range);
3570 tlb_end_vma(tlb, vma);
3571}
3572
3573void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3574 struct vm_area_struct *vma, unsigned long start,
3575 unsigned long end, struct page *ref_page)
3576{
3577 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3578
3579 /*
3580 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3581 * test will fail on a vma being torn down, and not grab a page table
3582 * on its way out. We're lucky that the flag has such an appropriate
3583 * name, and can in fact be safely cleared here. We could clear it
3584 * before the __unmap_hugepage_range above, but all that's necessary
3585 * is to clear it before releasing the i_mmap_rwsem. This works
3586 * because in the context this is called, the VMA is about to be
3587 * destroyed and the i_mmap_rwsem is held.
3588 */
3589 vma->vm_flags &= ~VM_MAYSHARE;
3590}
3591
3592void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3593 unsigned long end, struct page *ref_page)
3594{
3595 struct mm_struct *mm;
3596 struct mmu_gather tlb;
3597 unsigned long tlb_start = start;
3598 unsigned long tlb_end = end;
3599
3600 /*
3601 * If shared PMDs were possibly used within this vma range, adjust
3602 * start/end for worst case tlb flushing.
3603 * Note that we can not be sure if PMDs are shared until we try to
3604 * unmap pages. However, we want to make sure TLB flushing covers
3605 * the largest possible range.
3606 */
3607 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3608
3609 mm = vma->vm_mm;
3610
3611 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3612 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3613 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3614}
3615
3616/*
3617 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3618 * mapping it owns the reserve page for. The intention is to unmap the page
3619 * from other VMAs and let the children be SIGKILLed if they are faulting the
3620 * same region.
3621 */
3622static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3623 struct page *page, unsigned long address)
3624{
3625 struct hstate *h = hstate_vma(vma);
3626 struct vm_area_struct *iter_vma;
3627 struct address_space *mapping;
3628 pgoff_t pgoff;
3629
3630 /*
3631 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3632 * from page cache lookup which is in HPAGE_SIZE units.
3633 */
3634 address = address & huge_page_mask(h);
3635 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3636 vma->vm_pgoff;
3637 mapping = vma->vm_file->f_mapping;
3638
3639 /*
3640 * Take the mapping lock for the duration of the table walk. As
3641 * this mapping should be shared between all the VMAs,
3642	 * __unmap_hugepage_range() is called as the lock is already held.
3643 */
3644 i_mmap_lock_write(mapping);
3645 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3646 /* Do not unmap the current VMA */
3647 if (iter_vma == vma)
3648 continue;
3649
3650 /*
3651 * Shared VMAs have their own reserves and do not affect
3652 * MAP_PRIVATE accounting but it is possible that a shared
3653 * VMA is using the same page so check and skip such VMAs.
3654 */
3655 if (iter_vma->vm_flags & VM_MAYSHARE)
3656 continue;
3657
3658 /*
3659 * Unmap the page from other VMAs without their own reserves.
3660 * They get marked to be SIGKILLed if they fault in these
3661 * areas. This is because a future no-page fault on this VMA
3662 * could insert a zeroed page instead of the data existing
3663 * from the time of fork. This would look like data corruption
3664		 * from the time of fork. This would look like data corruption.
3665 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3666 unmap_hugepage_range(iter_vma, address,
3667 address + huge_page_size(h), page);
3668 }
3669 i_mmap_unlock_write(mapping);
3670}
3671
3672/*
3673 * hugetlb_cow() should be called with the page lock of the original hugepage held.
3674 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3675 * cannot race with other handlers or page migration.
3676 * Keep the pte_same checks anyway to make transition from the mutex easier.
3677 */
3678static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3679 unsigned long address, pte_t *ptep,
3680 struct page *pagecache_page, spinlock_t *ptl)
3681{
3682 pte_t pte;
3683 struct hstate *h = hstate_vma(vma);
3684 struct page *old_page, *new_page;
3685 int outside_reserve = 0;
3686 vm_fault_t ret = 0;
3687 unsigned long haddr = address & huge_page_mask(h);
3688 struct mmu_notifier_range range;
3689
3690 pte = huge_ptep_get(ptep);
3691 old_page = pte_page(pte);
3692
3693retry_avoidcopy:
3694 /* If no-one else is actually using this page, avoid the copy
3695 * and just make the page writable */
3696 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3697 page_move_anon_rmap(old_page, vma);
3698 set_huge_ptep_writable(vma, haddr, ptep);
3699 return 0;
3700 }
3701
3702 /*
3703 * If the process that created a MAP_PRIVATE mapping is about to
3704 * perform a COW due to a shared page count, attempt to satisfy
3705 * the allocation without using the existing reserves. The pagecache
3706 * page is used to determine if the reserve at this address was
3707 * consumed or not. If reserves were used, a partial faulted mapping
3708 * at the time of fork() could consume its reserves on COW instead
3709 * of the full address range.
3710 */
3711 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3712 old_page != pagecache_page)
3713 outside_reserve = 1;
3714
3715 get_page(old_page);
3716
3717 /*
3718 * Drop page table lock as buddy allocator may be called. It will
3719 * be acquired again before returning to the caller, as expected.
3720 */
3721 spin_unlock(ptl);
3722 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3723
3724 if (IS_ERR(new_page)) {
3725 /*
3726 * If a process owning a MAP_PRIVATE mapping fails to COW,
3727 * it is due to references held by a child and an insufficient
3728		 * huge page pool. To guarantee the original mapper's
3729 * reliability, unmap the page from child processes. The child
3730 * may get SIGKILLed if it later faults.
3731 */
3732 if (outside_reserve) {
3733 put_page(old_page);
3734 BUG_ON(huge_pte_none(pte));
3735 unmap_ref_private(mm, vma, old_page, haddr);
3736 BUG_ON(huge_pte_none(pte));
3737 spin_lock(ptl);
3738 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3739 if (likely(ptep &&
3740 pte_same(huge_ptep_get(ptep), pte)))
3741 goto retry_avoidcopy;
3742 /*
3743			 * A race occurred while re-acquiring the page table
3744			 * lock; our job is done.
3745 */
3746 return 0;
3747 }
3748
3749 ret = vmf_error(PTR_ERR(new_page));
3750 goto out_release_old;
3751 }
3752
3753 /*
3754	 * When the original hugepage is a shared one, it does not have
3755 * anon_vma prepared.
3756 */
3757 if (unlikely(anon_vma_prepare(vma))) {
3758 ret = VM_FAULT_OOM;
3759 goto out_release_all;
3760 }
3761
3762 copy_user_huge_page(new_page, old_page, address, vma,
3763 pages_per_huge_page(h));
3764 __SetPageUptodate(new_page);
3765
3766 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3767 haddr + huge_page_size(h));
3768 mmu_notifier_invalidate_range_start(&range);
3769
3770 /*
3771 * Retake the page table lock to check for racing updates
3772 * before the page tables are altered
3773 */
3774 spin_lock(ptl);
3775 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3776 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3777 ClearPagePrivate(new_page);
3778
3779 /* Break COW */
3780 huge_ptep_clear_flush(vma, haddr, ptep);
3781 mmu_notifier_invalidate_range(mm, range.start, range.end);
3782 set_huge_pte_at(mm, haddr, ptep,
3783 make_huge_pte(vma, new_page, 1));
3784 page_remove_rmap(old_page, true);
3785 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3786 set_page_huge_active(new_page);
3787 /* Make the old page be freed below */
3788 new_page = old_page;
3789 }
3790 spin_unlock(ptl);
3791 mmu_notifier_invalidate_range_end(&range);
3792out_release_all:
3793 restore_reserve_on_error(h, vma, haddr, new_page);
3794 put_page(new_page);
3795out_release_old:
3796 put_page(old_page);
3797
3798 spin_lock(ptl); /* Caller expects lock to be held */
3799 return ret;
3800}
3801
3802/* Return the pagecache page at a given address within a VMA */
3803static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3804 struct vm_area_struct *vma, unsigned long address)
3805{
3806 struct address_space *mapping;
3807 pgoff_t idx;
3808
3809 mapping = vma->vm_file->f_mapping;
3810 idx = vma_hugecache_offset(h, vma, address);
3811
3812 return find_lock_page(mapping, idx);
3813}
3814
3815/*
3816 * Return whether there is a pagecache page to back given address within VMA.
3817 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3818 */
3819static bool hugetlbfs_pagecache_present(struct hstate *h,
3820 struct vm_area_struct *vma, unsigned long address)
3821{
3822 struct address_space *mapping;
3823 pgoff_t idx;
3824 struct page *page;
3825
3826 mapping = vma->vm_file->f_mapping;
3827 idx = vma_hugecache_offset(h, vma, address);
3828
3829 page = find_get_page(mapping, idx);
3830 if (page)
3831 put_page(page);
3832 return page != NULL;
3833}
3834
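/*
 * Add a huge page to the inode's page cache at index @idx: clear
 * PagePrivate, mark the page dirty so non-hugetlbfs code paths will not
 * drop it from the cache, and charge the inode's block count by one huge
 * page.  Returns 0 on success or the add_to_page_cache() error.
 */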
3835int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3836 pgoff_t idx)
3837{
3838 struct inode *inode = mapping->host;
3839 struct hstate *h = hstate_inode(inode);
3840 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3841
3842 if (err)
3843 return err;
3844 ClearPagePrivate(page);
3845
3846 /*
3847 * set page dirty so that it will not be removed from cache/file
3848 * by non-hugetlbfs specific code paths.
3849 */
3850 set_page_dirty(page);
3851
3852 spin_lock(&inode->i_lock);
3853 inode->i_blocks += blocks_per_huge_page(h);
3854 spin_unlock(&inode->i_lock);
3855 return 0;
3856}
3857
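/*
 * Handle a hugetlb fault for which no present pte exists at @address:
 * look the page up in the page cache, hand off to userfaultfd for
 * missing-page ranges, or allocate and zero a new huge page; then add it
 * to the page cache (shared mappings) or the anon rmap (private mappings)
 * and install the pte under the page table lock.  Called with the
 * per-page hugetlb fault mutex held by hugetlb_fault().
 */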
3858static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3859 struct vm_area_struct *vma,
3860 struct address_space *mapping, pgoff_t idx,
3861 unsigned long address, pte_t *ptep, unsigned int flags)
3862{
3863 struct hstate *h = hstate_vma(vma);
3864 vm_fault_t ret = VM_FAULT_SIGBUS;
3865 int anon_rmap = 0;
3866 unsigned long size;
3867 struct page *page;
3868 pte_t new_pte;
3869 spinlock_t *ptl;
3870 unsigned long haddr = address & huge_page_mask(h);
3871 bool new_page = false;
3872
3873 /*
3874 * Currently, we are forced to kill the process in the event the
3875 * original mapper has unmapped pages from the child due to a failed
3876	 * COW. Warn that such a situation has occurred as it may not be obvious.
3877 */
3878 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3879 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3880 current->pid);
3881 return ret;
3882 }
3883
3884 /*
3885 * Use page lock to guard against racing truncation
3886 * before we get page_table_lock.
3887 */
3888retry:
3889 page = find_lock_page(mapping, idx);
3890 if (!page) {
3891 size = i_size_read(mapping->host) >> huge_page_shift(h);
3892 if (idx >= size)
3893 goto out;
3894
3895 /*
3896 * Check for page in userfault range
3897 */
3898 if (userfaultfd_missing(vma)) {
3899 u32 hash;
3900 struct vm_fault vmf = {
3901 .vma = vma,
3902 .address = haddr,
3903 .flags = flags,
3904 /*
3905 * Hard to debug if it ends up being
3906 * used by a callee that assumes
3907 * something about the other
3908 * uninitialized fields... same as in
3909 * memory.c
3910 */
3911 };
3912
3913 /*
3914 * hugetlb_fault_mutex must be dropped before
3915 * handling userfault. Reacquire after handling
3916 * fault to make calling code simpler.
3917 */
3918 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3919 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3920 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3921 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3922 goto out;
3923 }
3924
3925 page = alloc_huge_page(vma, haddr, 0);
3926 if (IS_ERR(page)) {
3927 /*
3928 * Returning error will result in faulting task being
3929 * sent SIGBUS. The hugetlb fault mutex prevents two
3930			 * tasks from racing to fault in the same page, which
3931			 * could result in spurious "unable to allocate" errors.
3932 * Page migration does not take the fault mutex, but
3933 * does a clear then write of pte's under page table
3934 * lock. Page fault code could race with migration,
3935 * notice the clear pte and try to allocate a page
3936 * here. Before returning error, get ptl and make
3937 * sure there really is no pte entry.
3938 */
3939 ptl = huge_pte_lock(h, mm, ptep);
3940 if (!huge_pte_none(huge_ptep_get(ptep))) {
3941 ret = 0;
3942 spin_unlock(ptl);
3943 goto out;
3944 }
3945 spin_unlock(ptl);
3946 ret = vmf_error(PTR_ERR(page));
3947 goto out;
3948 }
3949 clear_huge_page(page, address, pages_per_huge_page(h));
3950 __SetPageUptodate(page);
3951 new_page = true;
3952
3953 if (vma->vm_flags & VM_MAYSHARE) {
3954 int err = huge_add_to_page_cache(page, mapping, idx);
3955 if (err) {
3956 put_page(page);
3957 if (err == -EEXIST)
3958 goto retry;
3959 goto out;
3960 }
3961 } else {
3962 lock_page(page);
3963 if (unlikely(anon_vma_prepare(vma))) {
3964 ret = VM_FAULT_OOM;
3965 goto backout_unlocked;
3966 }
3967 anon_rmap = 1;
3968 }
3969 } else {
3970 /*
3971		 * If a memory error occurs between mmap() and the fault, some processes
3972		 * don't have a hwpoisoned swap entry for the errored virtual address.
3973		 * So we need to block the hugepage fault with a PG_hwpoison bit check.
3974 */
3975 if (unlikely(PageHWPoison(page))) {
3976 ret = VM_FAULT_HWPOISON |
3977 VM_FAULT_SET_HINDEX(hstate_index(h));
3978 goto backout_unlocked;
3979 }
3980 }
3981
3982 /*
3983 * If we are going to COW a private mapping later, we examine the
3984 * pending reservations for this page now. This will ensure that
3985 * any allocations necessary to record that reservation occur outside
3986 * the spinlock.
3987 */
3988 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3989 if (vma_needs_reservation(h, vma, haddr) < 0) {
3990 ret = VM_FAULT_OOM;
3991 goto backout_unlocked;
3992 }
3993 /* Just decrements count, does not deallocate */
3994 vma_end_reservation(h, vma, haddr);
3995 }
3996
3997 ptl = huge_pte_lock(h, mm, ptep);
3998 size = i_size_read(mapping->host) >> huge_page_shift(h);
3999 if (idx >= size)
4000 goto backout;
4001
4002 ret = 0;
4003 if (!huge_pte_none(huge_ptep_get(ptep)))
4004 goto backout;
4005
4006 if (anon_rmap) {
4007 ClearPagePrivate(page);
4008 hugepage_add_new_anon_rmap(page, vma, haddr);
4009 } else
4010 page_dup_rmap(page, true);
4011 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4012 && (vma->vm_flags & VM_SHARED)));
4013 set_huge_pte_at(mm, haddr, ptep, new_pte);
4014
4015 hugetlb_count_add(pages_per_huge_page(h), mm);
4016 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4017 /* Optimization, do the COW without a second fault */
4018 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4019 }
4020
4021 spin_unlock(ptl);
4022
4023 /*
4024 * Only make newly allocated pages active. Existing pages found
4025 * in the pagecache could be !page_huge_active() if they have been
4026 * isolated for migration.
4027 */
4028 if (new_page)
4029 set_page_huge_active(page);
4030
4031 unlock_page(page);
4032out:
4033 return ret;
4034
4035backout:
4036 spin_unlock(ptl);
4037backout_unlocked:
4038 unlock_page(page);
4039 restore_reserve_on_error(h, vma, haddr, page);
4040 put_page(page);
4041 goto out;
4042}
4043
4044#ifdef CONFIG_SMP
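/*
 * Map a (mapping, index) pair to an index into hugetlb_fault_mutex_table.
 * The hash is jhash2() over the mapping pointer and the page index, masked
 * with num_fault_mutexes - 1 (the mask only works because the table size
 * is a power of two).  Typical usage, as in hugetlb_fault() below:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */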
4045u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4046 pgoff_t idx, unsigned long address)
4047{
4048 unsigned long key[2];
4049 u32 hash;
4050
4051 key[0] = (unsigned long) mapping;
4052 key[1] = idx;
4053
4054 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
4055
4056 return hash & (num_fault_mutexes - 1);
4057}
4058#else
4059/*
4060 * For uniprocessor systems we always use a single mutex, so just
4061 * return 0 and avoid the hashing overhead.
4062 */
4063u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4064 pgoff_t idx, unsigned long address)
4065{
4066 return 0;
4067}
4068#endif
4069
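/*
 * Top-level hugetlb page fault handler.  Waits on migration entries,
 * returns VM_FAULT_HWPOISON_LARGE for hwpoisoned entries, and otherwise
 * takes the per-page fault mutex before dispatching: hugetlb_no_page()
 * when no pte is present, hugetlb_cow() for a write fault on a read-only
 * pte, or simply marking the pte young (and dirty for writes) otherwise.
 */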
4070vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4071 unsigned long address, unsigned int flags)
4072{
4073 pte_t *ptep, entry;
4074 spinlock_t *ptl;
4075 vm_fault_t ret;
4076 u32 hash;
4077 pgoff_t idx;
4078 struct page *page = NULL;
4079 struct page *pagecache_page = NULL;
4080 struct hstate *h = hstate_vma(vma);
4081 struct address_space *mapping;
4082 int need_wait_lock = 0;
4083 unsigned long haddr = address & huge_page_mask(h);
4084
4085 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4086 if (ptep) {
4087 entry = huge_ptep_get(ptep);
4088 if (unlikely(is_hugetlb_entry_migration(entry))) {
4089 migration_entry_wait_huge(vma, mm, ptep);
4090 return 0;
4091 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4092 return VM_FAULT_HWPOISON_LARGE |
4093 VM_FAULT_SET_HINDEX(hstate_index(h));
4094 } else {
4095 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4096 if (!ptep)
4097 return VM_FAULT_OOM;
4098 }
4099
4100 mapping = vma->vm_file->f_mapping;
4101 idx = vma_hugecache_offset(h, vma, haddr);
4102
4103 /*
4104 * Serialize hugepage allocation and instantiation, so that we don't
4105 * get spurious allocation failures if two CPUs race to instantiate
4106 * the same page in the page cache.
4107 */
4108 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
4109 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4110
4111 entry = huge_ptep_get(ptep);
4112 if (huge_pte_none(entry)) {
4113 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4114 goto out_mutex;
4115 }
4116
4117 ret = 0;
4118
4119 /*
4120 * entry could be a migration/hwpoison entry at this point, so this
4121 * check prevents the kernel from going below assuming that we have
4122	 * an active hugepage in the pagecache. This goto expects the 2nd page fault,
4123	 * and the is_hugetlb_entry_(migration|hwpoisoned) check will properly
4124 * handle it.
4125 */
4126 if (!pte_present(entry))
4127 goto out_mutex;
4128
4129 /*
4130 * If we are going to COW the mapping later, we examine the pending
4131 * reservations for this page now. This will ensure that any
4132 * allocations necessary to record that reservation occur outside the
4133 * spinlock. For private mappings, we also lookup the pagecache
4134 * page now as it is used to determine if a reservation has been
4135 * consumed.
4136 */
4137 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4138 if (vma_needs_reservation(h, vma, haddr) < 0) {
4139 ret = VM_FAULT_OOM;
4140 goto out_mutex;
4141 }
4142 /* Just decrements count, does not deallocate */
4143 vma_end_reservation(h, vma, haddr);
4144
4145 if (!(vma->vm_flags & VM_MAYSHARE))
4146 pagecache_page = hugetlbfs_pagecache_page(h,
4147 vma, haddr);
4148 }
4149
4150 ptl = huge_pte_lock(h, mm, ptep);
4151
4152 /* Check for a racing update before calling hugetlb_cow */
4153 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4154 goto out_ptl;
4155
4156 /*
4157 * hugetlb_cow() requires page locks of pte_page(entry) and
4158 * pagecache_page, so here we need take the former one
4159 * when page != pagecache_page or !pagecache_page.
4160 */
4161 page = pte_page(entry);
4162 if (page != pagecache_page)
4163 if (!trylock_page(page)) {
4164 need_wait_lock = 1;
4165 goto out_ptl;
4166 }
4167
4168 get_page(page);
4169
4170 if (flags & FAULT_FLAG_WRITE) {
4171 if (!huge_pte_write(entry)) {
4172 ret = hugetlb_cow(mm, vma, address, ptep,
4173 pagecache_page, ptl);
4174 goto out_put_page;
4175 }
4176 entry = huge_pte_mkdirty(entry);
4177 }
4178 entry = pte_mkyoung(entry);
4179 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4180 flags & FAULT_FLAG_WRITE))
4181 update_mmu_cache(vma, haddr, ptep);
4182out_put_page:
4183 if (page != pagecache_page)
4184 unlock_page(page);
4185 put_page(page);
4186out_ptl:
4187 spin_unlock(ptl);
4188
4189 if (pagecache_page) {
4190 unlock_page(pagecache_page);
4191 put_page(pagecache_page);
4192 }
4193out_mutex:
4194 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4195 /*
4196	 * Generally it's safe to hold a refcount while waiting for the page lock. But
4197	 * here we just wait to defer the next page fault to avoid a busy loop, and
4198	 * the page is not used after it is unlocked before returning from the current
4199	 * page fault. So we are safe from accessing a freed page, even if we wait
4200	 * here without taking a refcount.
4201 */
4202 if (need_wait_lock)
4203 wait_on_page_locked(page);
4204 return ret;
4205}
4206
4207/*
4208 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4209 * modifications for huge pages.
4210 */
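/*
 * Note the two-step copy protocol: if copying the user buffer fails while
 * the caller holds mmap_sem, this returns -ENOENT with *pagep set to the
 * allocated page so the caller can retry the copy outside mmap_sem and
 * call back in with the filled page.
 */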
4211int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4212 pte_t *dst_pte,
4213 struct vm_area_struct *dst_vma,
4214 unsigned long dst_addr,
4215 unsigned long src_addr,
4216 struct page **pagep)
4217{
4218 struct address_space *mapping;
4219 pgoff_t idx;
4220 unsigned long size;
4221 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4222 struct hstate *h = hstate_vma(dst_vma);
4223 pte_t _dst_pte;
4224 spinlock_t *ptl;
4225 int ret;
4226 struct page *page;
4227
4228 if (!*pagep) {
4229 ret = -ENOMEM;
4230 page = alloc_huge_page(dst_vma, dst_addr, 0);
4231 if (IS_ERR(page))
4232 goto out;
4233
4234 ret = copy_huge_page_from_user(page,
4235 (const void __user *) src_addr,
4236 pages_per_huge_page(h), false);
4237
4238 /* fallback to copy_from_user outside mmap_sem */
4239 if (unlikely(ret)) {
4240 ret = -ENOENT;
4241 *pagep = page;
4242 /* don't free the page */
4243 goto out;
4244 }
4245 } else {
4246 page = *pagep;
4247 *pagep = NULL;
4248 }
4249
4250 /*
4251 * The memory barrier inside __SetPageUptodate makes sure that
4252 * preceding stores to the page contents become visible before
4253 * the set_pte_at() write.
4254 */
4255 __SetPageUptodate(page);
4256
4257 mapping = dst_vma->vm_file->f_mapping;
4258 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4259
4260 /*
4261 * If shared, add to page cache
4262 */
4263 if (vm_shared) {
4264 size = i_size_read(mapping->host) >> huge_page_shift(h);
4265 ret = -EFAULT;
4266 if (idx >= size)
4267 goto out_release_nounlock;
4268
4269 /*
4270 * Serialization between remove_inode_hugepages() and
4271 * huge_add_to_page_cache() below happens through the
4272		 * hugetlb_fault_mutex_table, which must be held here by
4273 * the caller.
4274 */
4275 ret = huge_add_to_page_cache(page, mapping, idx);
4276 if (ret)
4277 goto out_release_nounlock;
4278 }
4279
4280 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4281 spin_lock(ptl);
4282
4283 /*
4284 * Recheck the i_size after holding PT lock to make sure not
4285 * to leave any page mapped (as page_mapped()) beyond the end
4286 * of the i_size (remove_inode_hugepages() is strict about
4287 * enforcing that). If we bail out here, we'll also leave a
4288 * page in the radix tree in the vm_shared case beyond the end
4289 * of the i_size, but remove_inode_hugepages() will take care
4290 * of it as soon as we drop the hugetlb_fault_mutex_table.
4291 */
4292 size = i_size_read(mapping->host) >> huge_page_shift(h);
4293 ret = -EFAULT;
4294 if (idx >= size)
4295 goto out_release_unlock;
4296
4297 ret = -EEXIST;
4298 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4299 goto out_release_unlock;
4300
4301 if (vm_shared) {
4302 page_dup_rmap(page, true);
4303 } else {
4304 ClearPagePrivate(page);
4305 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4306 }
4307
4308 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4309 if (dst_vma->vm_flags & VM_WRITE)
4310 _dst_pte = huge_pte_mkdirty(_dst_pte);
4311 _dst_pte = pte_mkyoung(_dst_pte);
4312
4313 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4314
4315 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4316 dst_vma->vm_flags & VM_WRITE);
4317 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4318
4319 /* No need to invalidate - it was non-present before */
4320 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4321
4322 spin_unlock(ptl);
4323 set_page_huge_active(page);
4324 if (vm_shared)
4325 unlock_page(page);
4326 ret = 0;
4327out:
4328 return ret;
4329out_release_unlock:
4330 spin_unlock(ptl);
4331 if (vm_shared)
4332 unlock_page(page);
4333out_release_nounlock:
4334 put_page(page);
4335 goto out;
4336}
4337
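/*
 * GUP helper for hugetlb VMAs: walk the range starting at *position,
 * faulting pages in through hugetlb_fault() where needed, and fill
 * @pages/@vmas with up to *nr_pages entries.  On return, *position and
 * *nr_pages reflect the remaining work; the return value is the number of
 * pages processed, or an errno if none were.
 */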
4338long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4339 struct page **pages, struct vm_area_struct **vmas,
4340 unsigned long *position, unsigned long *nr_pages,
4341 long i, unsigned int flags, int *nonblocking)
4342{
4343 unsigned long pfn_offset;
4344 unsigned long vaddr = *position;
4345 unsigned long remainder = *nr_pages;
4346 struct hstate *h = hstate_vma(vma);
4347 int err = -EFAULT;
4348
4349 while (vaddr < vma->vm_end && remainder) {
4350 pte_t *pte;
4351 spinlock_t *ptl = NULL;
4352 int absent;
4353 struct page *page;
4354
4355 /*
4356 * If we have a pending SIGKILL, don't keep faulting pages and
4357 * potentially allocating memory.
4358 */
4359 if (fatal_signal_pending(current)) {
4360 remainder = 0;
4361 break;
4362 }
4363
4364 /*
4365 * Some archs (sparc64, sh*) have multiple pte_ts to
4366 * each hugepage. We have to make sure we get the
4367 * first, for the page indexing below to work.
4368 *
4369 * Note that page table lock is not held when pte is null.
4370 */
4371 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4372 huge_page_size(h));
4373 if (pte)
4374 ptl = huge_pte_lock(h, mm, pte);
4375 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4376
4377 /*
4378 * When coredumping, it suits get_dump_page if we just return
4379 * an error where there's an empty slot with no huge pagecache
4380 * to back it. This way, we avoid allocating a hugepage, and
4381 * the sparse dumpfile avoids allocating disk blocks, but its
4382 * huge holes still show up with zeroes where they need to be.
4383 */
4384 if (absent && (flags & FOLL_DUMP) &&
4385 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4386 if (pte)
4387 spin_unlock(ptl);
4388 remainder = 0;
4389 break;
4390 }
4391
4392 /*
4393		 * We need to call hugetlb_fault for both hugepages under migration
4394		 * (in which case hugetlb_fault waits for the migration) and
4395		 * hwpoisoned hugepages (in which case we need to prevent the
4396		 * caller from accessing them). To do this, we use is_swap_pte
4397		 * here instead of is_hugetlb_entry_migration and
4398		 * is_hugetlb_entry_hwpoisoned, because it covers
4399		 * both cases and because we can't follow correct pages
4400		 * directly from any kind of swap entry.
4401 */
4402 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4403 ((flags & FOLL_WRITE) &&
4404 !huge_pte_write(huge_ptep_get(pte)))) {
4405 vm_fault_t ret;
4406 unsigned int fault_flags = 0;
4407
4408 if (pte)
4409 spin_unlock(ptl);
4410 if (flags & FOLL_WRITE)
4411 fault_flags |= FAULT_FLAG_WRITE;
4412 if (nonblocking)
4413 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4414 if (flags & FOLL_NOWAIT)
4415 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4416 FAULT_FLAG_RETRY_NOWAIT;
4417 if (flags & FOLL_TRIED) {
4418 VM_WARN_ON_ONCE(fault_flags &
4419 FAULT_FLAG_ALLOW_RETRY);
4420 fault_flags |= FAULT_FLAG_TRIED;
4421 }
4422 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4423 if (ret & VM_FAULT_ERROR) {
4424 err = vm_fault_to_errno(ret, flags);
4425 remainder = 0;
4426 break;
4427 }
4428 if (ret & VM_FAULT_RETRY) {
4429 if (nonblocking &&
4430 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4431 *nonblocking = 0;
4432 *nr_pages = 0;
4433 /*
4434 * VM_FAULT_RETRY must not return an
4435 * error, it will return zero
4436 * instead.
4437 *
4438 * No need to update "position" as the
4439 * caller will not check it after
4440 * *nr_pages is set to 0.
4441 */
4442 return i;
4443 }
4444 continue;
4445 }
4446
4447 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4448 page = pte_page(huge_ptep_get(pte));
4449
4450 /*
4451 * Instead of doing 'try_get_page()' below in the same_page
4452 * loop, just check the count once here.
4453 */
4454 if (unlikely(page_count(page) <= 0)) {
4455 if (pages) {
4456 spin_unlock(ptl);
4457 remainder = 0;
4458 err = -ENOMEM;
4459 break;
4460 }
4461 }
4462same_page:
4463 if (pages) {
4464 pages[i] = mem_map_offset(page, pfn_offset);
4465 get_page(pages[i]);
4466 }
4467
4468 if (vmas)
4469 vmas[i] = vma;
4470
4471 vaddr += PAGE_SIZE;
4472 ++pfn_offset;
4473 --remainder;
4474 ++i;
4475 if (vaddr < vma->vm_end && remainder &&
4476 pfn_offset < pages_per_huge_page(h)) {
4477 /*
4478 * We use pfn_offset to avoid touching the pageframes
4479 * of this compound page.
4480 */
4481 goto same_page;
4482 }
4483 spin_unlock(ptl);
4484 }
4485 *nr_pages = remainder;
4486 /*
4487 * setting position is actually required only if remainder is
4488	 * not zero, but it's faster not to add an "if (remainder)"
4489 * branch.
4490 */
4491 *position = vaddr;
4492
4493 return i ? i : err;
4494}
4495
4496#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4497/*
4498 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4499 * implement this.
4500 */
4501#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4502#endif
4503
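/*
 * Apply @newprot to the huge ptes in [address, end).  Shared pmds are
 * unshared rather than modified, write-migration entries are downgraded
 * to read-only, and the TLB is flushed over the pud-aligned range when a
 * pmd was unshared.  Returns the number of ptes changed, expressed in
 * base (small) pages.
 */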
4504unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4505 unsigned long address, unsigned long end, pgprot_t newprot)
4506{
4507 struct mm_struct *mm = vma->vm_mm;
4508 unsigned long start = address;
4509 pte_t *ptep;
4510 pte_t pte;
4511 struct hstate *h = hstate_vma(vma);
4512 unsigned long pages = 0;
4513 bool shared_pmd = false;
4514 struct mmu_notifier_range range;
4515
4516 /*
4517 * In the case of shared PMDs, the area to flush could be beyond
4518 * start/end. Set range.start/range.end to cover the maximum possible
4519 * range if PMD sharing is possible.
4520 */
4521 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4522 0, vma, mm, start, end);
4523 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4524
4525 BUG_ON(address >= end);
4526 flush_cache_range(vma, range.start, range.end);
4527
4528 mmu_notifier_invalidate_range_start(&range);
4529 i_mmap_lock_write(vma->vm_file->f_mapping);
4530 for (; address < end; address += huge_page_size(h)) {
4531 spinlock_t *ptl;
4532 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4533 if (!ptep)
4534 continue;
4535 ptl = huge_pte_lock(h, mm, ptep);
4536 if (huge_pmd_unshare(mm, &address, ptep)) {
4537 pages++;
4538 spin_unlock(ptl);
4539 shared_pmd = true;
4540 continue;
4541 }
4542 pte = huge_ptep_get(ptep);
4543 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4544 spin_unlock(ptl);
4545 continue;
4546 }
4547 if (unlikely(is_hugetlb_entry_migration(pte))) {
4548 swp_entry_t entry = pte_to_swp_entry(pte);
4549
4550 if (is_write_migration_entry(entry)) {
4551 pte_t newpte;
4552
4553 make_migration_entry_read(&entry);
4554 newpte = swp_entry_to_pte(entry);
4555 set_huge_swap_pte_at(mm, address, ptep,
4556 newpte, huge_page_size(h));
4557 pages++;
4558 }
4559 spin_unlock(ptl);
4560 continue;
4561 }
4562 if (!huge_pte_none(pte)) {
4563 pte_t old_pte;
4564
4565 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4566 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4567 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4568 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4569 pages++;
4570 }
4571 spin_unlock(ptl);
4572 }
4573 /*
4574 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4575 * may have cleared our pud entry and done put_page on the page table:
4576 * once we release i_mmap_rwsem, another task can do the final put_page
4577 * and that page table be reused and filled with junk. If we actually
4578 * did unshare a page of pmds, flush the range corresponding to the pud.
4579 */
4580 if (shared_pmd)
4581 flush_hugetlb_tlb_range(vma, range.start, range.end);
4582 else
4583 flush_hugetlb_tlb_range(vma, start, end);
4584 /*
4585 * No need to call mmu_notifier_invalidate_range() we are downgrading
4586 * page table protection not changing it to point to a new page.
4587 *
4588 * See Documentation/vm/mmu_notifier.rst
4589 */
4590 i_mmap_unlock_write(vma->vm_file->f_mapping);
4591 mmu_notifier_invalidate_range_end(&range);
4592
4593 return pages << h->order;
4594}
4595
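/*
 * Reserve huge pages for the file range [from, to).  Shared mappings
 * record the range in the inode's resv_map; private mappings get a fresh
 * resv_map attached to the VMA with HPAGE_RESV_OWNER set.  The charge is
 * then pushed into the subpool and the global pool, and unwound on error.
 */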
4596int hugetlb_reserve_pages(struct inode *inode,
4597 long from, long to,
4598 struct vm_area_struct *vma,
4599 vm_flags_t vm_flags)
4600{
4601 long ret, chg;
4602 struct hstate *h = hstate_inode(inode);
4603 struct hugepage_subpool *spool = subpool_inode(inode);
4604 struct resv_map *resv_map;
4605 long gbl_reserve;
4606
4607 /* This should never happen */
4608 if (from > to) {
4609 VM_WARN(1, "%s called with a negative range\n", __func__);
4610 return -EINVAL;
4611 }
4612
4613 /*
4614 * Only apply hugepage reservation if asked. At fault time, an
4615 * attempt will be made for VM_NORESERVE to allocate a page
4616 * without using reserves
4617 */
4618 if (vm_flags & VM_NORESERVE)
4619 return 0;
4620
4621 /*
4622 * Shared mappings base their reservation on the number of pages that
4623 * are already allocated on behalf of the file. Private mappings need
4624 * to reserve the full area even if read-only as mprotect() may be
4625 * called to make the mapping read-write. Assume !vma is a shm mapping
4626 */
4627 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4628 /*
4629 * resv_map can not be NULL as hugetlb_reserve_pages is only
4630 * called for inodes for which resv_maps were created (see
4631 * hugetlbfs_get_inode).
4632 */
4633 resv_map = inode_resv_map(inode);
4634
4635 chg = region_chg(resv_map, from, to);
4636
4637 } else {
4638 resv_map = resv_map_alloc();
4639 if (!resv_map)
4640 return -ENOMEM;
4641
4642 chg = to - from;
4643
4644 set_vma_resv_map(vma, resv_map);
4645 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4646 }
4647
4648 if (chg < 0) {
4649 ret = chg;
4650 goto out_err;
4651 }
4652
4653 /*
4654 * There must be enough pages in the subpool for the mapping. If
4655 * the subpool has a minimum size, there may be some global
4656 * reservations already in place (gbl_reserve).
4657 */
4658 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4659 if (gbl_reserve < 0) {
4660 ret = -ENOSPC;
4661 goto out_err;
4662 }
4663
4664 /*
4665	 * Check that enough hugepages are available for the reservation.
4666	 * Hand the pages back to the subpool if there are not.
4667 */
4668 ret = hugetlb_acct_memory(h, gbl_reserve);
4669 if (ret < 0) {
4670 /* put back original number of pages, chg */
4671 (void)hugepage_subpool_put_pages(spool, chg);
4672 goto out_err;
4673 }
4674
4675 /*
4676 * Account for the reservations made. Shared mappings record regions
4677 * that have reservations as they are shared by multiple VMAs.
4678 * When the last VMA disappears, the region map says how much
4679 * the reservation was and the page cache tells how much of
4680 * the reservation was consumed. Private mappings are per-VMA and
4681 * only the consumed reservations are tracked. When the VMA
4682 * disappears, the original reservation is the VMA size and the
4683 * consumed reservations are stored in the map. Hence, nothing
4684 * else has to be done for private mappings here
4685 */
4686 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4687 long add = region_add(resv_map, from, to);
4688
4689 if (unlikely(chg > add)) {
4690 /*
4691 * pages in this range were added to the reserve
4692 * map between region_chg and region_add. This
4693 * indicates a race with alloc_huge_page. Adjust
4694 * the subpool and reserve counts modified above
4695 * based on the difference.
4696 */
4697 long rsv_adjust;
4698
4699 rsv_adjust = hugepage_subpool_put_pages(spool,
4700 chg - add);
4701 hugetlb_acct_memory(h, -rsv_adjust);
4702 }
4703 }
4704 return 0;
4705out_err:
4706 if (!vma || vma->vm_flags & VM_MAYSHARE)
4707 /* Don't call region_abort if region_chg failed */
4708 if (chg >= 0)
4709 region_abort(resv_map, from, to);
4710 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4711 kref_put(&resv_map->refs, resv_map_release);
4712 return ret;
4713}
4714
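/*
 * Drop reservations for the inode range [start, end), called when pages
 * are removed from a hugetlbfs file (e.g. on truncate or inode eviction):
 * trim the reserve map, reduce the inode block count by @freed huge
 * pages, and return the surplus to the subpool and the global pool.
 */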
4715long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4716 long freed)
4717{
4718 struct hstate *h = hstate_inode(inode);
4719 struct resv_map *resv_map = inode_resv_map(inode);
4720 long chg = 0;
4721 struct hugepage_subpool *spool = subpool_inode(inode);
4722 long gbl_reserve;
4723
4724 /*
4725 * Since this routine can be called in the evict inode path for all
4726 * hugetlbfs inodes, resv_map could be NULL.
4727 */
4728 if (resv_map) {
4729 chg = region_del(resv_map, start, end);
4730 /*
4731 * region_del() can fail in the rare case where a region
4732 * must be split and another region descriptor can not be
4733 * allocated. If end == LONG_MAX, it will not fail.
4734 */
4735 if (chg < 0)
4736 return chg;
4737 }
4738
4739 spin_lock(&inode->i_lock);
4740 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4741 spin_unlock(&inode->i_lock);
4742
4743 /*
4744 * If the subpool has a minimum size, the number of global
4745 * reservations to be released may be adjusted.
4746 */
4747 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4748 hugetlb_acct_memory(h, -gbl_reserve);
4749
4750 return 0;
4751}
4752
4753#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
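/*
 * Return the address in @svma that maps the same file offset as @addr does
 * in @vma, provided the two VMAs could share a pmd page table there: the
 * pmd slot within the pud must match, the vm_flags (ignoring mlock bits)
 * must match, and the whole pud-aligned range must lie inside @svma.
 * Returns 0 if sharing is not possible.
 */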
4754static unsigned long page_table_shareable(struct vm_area_struct *svma,
4755 struct vm_area_struct *vma,
4756 unsigned long addr, pgoff_t idx)
4757{
4758 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4759 svma->vm_start;
4760 unsigned long sbase = saddr & PUD_MASK;
4761 unsigned long s_end = sbase + PUD_SIZE;
4762
4763 /* Allow segments to share if only one is marked locked */
4764 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4765 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4766
4767 /*
4768 * match the virtual addresses, permission and the alignment of the
4769 * page table page.
4770 */
4771 if (pmd_index(addr) != pmd_index(saddr) ||
4772 vm_flags != svm_flags ||
4773 sbase < svma->vm_start || svma->vm_end < s_end)
4774 return 0;
4775
4776 return saddr;
4777}
4778
4779static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4780{
4781 unsigned long base = addr & PUD_MASK;
4782 unsigned long end = base + PUD_SIZE;
4783
4784 /*
4785 * check on proper vm_flags and page table alignment
4786 */
4787 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4788 return true;
4789 return false;
4790}
4791
4792/*
4793 * Determine if start,end range within vma could be mapped by shared pmd.
4794 * If yes, adjust start and end to cover range associated with possible
4795 * shared pmd mappings.
4796 */
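/*
 * Example: with 2MB huge pages and a 1GB pud (typical x86_64 sizes), a
 * range of [1GB + 2MB, 1GB + 6MB) inside a large enough MAP_SHARED
 * mapping is widened to [1GB, 2GB), because huge_pmd_unshare() may drop
 * the shared pmd page table and thereby affect the whole pud range.
 */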
4797void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4798 unsigned long *start, unsigned long *end)
4799{
4800 unsigned long check_addr = *start;
4801
4802 if (!(vma->vm_flags & VM_MAYSHARE))
4803 return;
4804
4805 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4806 unsigned long a_start = check_addr & PUD_MASK;
4807 unsigned long a_end = a_start + PUD_SIZE;
4808
4809 /*
4810 * If sharing is possible, adjust start/end if necessary.
4811 */
4812 if (range_in_vma(vma, a_start, a_end)) {
4813 if (a_start < *start)
4814 *start = a_start;
4815 if (a_end > *end)
4816 *end = a_end;
4817 }
4818 }
4819}
4820
4821/*
4822 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4823 * and returns the corresponding pte. While this is not necessary for the
4824 * !shared pmd case because we can allocate the pmd later as well, it makes the
4825 * code much cleaner. pmd allocation is essential for the shared case because
4826 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4827 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4828 * bad pmd for sharing.
4829 */
4830pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4831{
4832 struct vm_area_struct *vma = find_vma(mm, addr);
4833 struct address_space *mapping = vma->vm_file->f_mapping;
4834 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4835 vma->vm_pgoff;
4836 struct vm_area_struct *svma;
4837 unsigned long saddr;
4838 pte_t *spte = NULL;
4839 pte_t *pte;
4840 spinlock_t *ptl;
4841
4842 if (!vma_shareable(vma, addr))
4843 return (pte_t *)pmd_alloc(mm, pud, addr);
4844
4845 i_mmap_lock_write(mapping);
4846 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4847 if (svma == vma)
4848 continue;
4849
4850 saddr = page_table_shareable(svma, vma, addr, idx);
4851 if (saddr) {
4852 spte = huge_pte_offset(svma->vm_mm, saddr,
4853 vma_mmu_pagesize(svma));
4854 if (spte) {
4855 get_page(virt_to_page(spte));
4856 break;
4857 }
4858 }
4859 }
4860
4861 if (!spte)
4862 goto out;
4863
4864 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4865 if (pud_none(*pud)) {
4866 pud_populate(mm, pud,
4867 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4868 mm_inc_nr_pmds(mm);
4869 } else {
4870 put_page(virt_to_page(spte));
4871 }
4872 spin_unlock(ptl);
4873out:
4874 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4875 i_mmap_unlock_write(mapping);
4876 return pte;
4877}
4878
4879/*
4880 * unmap huge page backed by shared pte.
4881 *
4882 * Hugetlb pte page is ref counted at the time of mapping. If the pte is shared,
4883 * as indicated by page_count > 1, unmap is achieved by clearing the pud and
4884 * decrementing the ref count. If count == 1, the pte page is not shared.
4885 *
4886 * called with page table lock held.
4887 *
4888 * returns: 1 successfully unmapped a shared pte page
4889 * 0 the underlying pte page is not shared, or it is the last user
4890 */
4891int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4892{
4893 pgd_t *pgd = pgd_offset(mm, *addr);
4894 p4d_t *p4d = p4d_offset(pgd, *addr);
4895 pud_t *pud = pud_offset(p4d, *addr);
4896
4897 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4898 if (page_count(virt_to_page(ptep)) == 1)
4899 return 0;
4900
4901 pud_clear(pud);
4902 put_page(virt_to_page(ptep));
4903 mm_dec_nr_pmds(mm);
4904 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4905 return 1;
4906}
4907#define want_pmd_share() (1)
4908#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4909pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4910{
4911 return NULL;
4912}
4913
4914int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4915{
4916 return 0;
4917}
4918
4919void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4920 unsigned long *start, unsigned long *end)
4921{
4922}
4923#define want_pmd_share() (0)
4924#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4925
4926#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
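/*
 * Generic page table allocation for a huge page at @addr: PUD_SIZE pages
 * use the pud entry itself; PMD_SIZE pages get a pmd, shared with other
 * mappings of the same file via huge_pmd_share() where possible.
 */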
4927pte_t *huge_pte_alloc(struct mm_struct *mm,
4928 unsigned long addr, unsigned long sz)
4929{
4930 pgd_t *pgd;
4931 p4d_t *p4d;
4932 pud_t *pud;
4933 pte_t *pte = NULL;
4934
4935 pgd = pgd_offset(mm, addr);
4936 p4d = p4d_alloc(mm, pgd, addr);
4937 if (!p4d)
4938 return NULL;
4939 pud = pud_alloc(mm, p4d, addr);
4940 if (pud) {
4941 if (sz == PUD_SIZE) {
4942 pte = (pte_t *)pud;
4943 } else {
4944 BUG_ON(sz != PMD_SIZE);
4945 if (want_pmd_share() && pud_none(*pud))
4946 pte = huge_pmd_share(mm, addr, pud);
4947 else
4948 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4949 }
4950 }
4951 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4952
4953 return pte;
4954}
4955
4956/*
4957 * huge_pte_offset() - Walk the page table to resolve the hugepage
4958 * entry at address @addr
4959 *
4960 * Return: Pointer to page table or swap entry (PUD or PMD) for
4961 * address @addr, or NULL if a p*d_none() entry is encountered and the
4962 * size @sz doesn't match the hugepage size at this level of the page
4963 * table.
4964 */
4965pte_t *huge_pte_offset(struct mm_struct *mm,
4966 unsigned long addr, unsigned long sz)
4967{
4968 pgd_t *pgd;
4969 p4d_t *p4d;
4970 pud_t *pud;
4971 pmd_t *pmd;
4972
4973 pgd = pgd_offset(mm, addr);
4974 if (!pgd_present(*pgd))
4975 return NULL;
4976 p4d = p4d_offset(pgd, addr);
4977 if (!p4d_present(*p4d))
4978 return NULL;
4979
4980 pud = pud_offset(p4d, addr);
4981 if (sz != PUD_SIZE && pud_none(*pud))
4982 return NULL;
4983 /* hugepage or swap? */
4984 if (pud_huge(*pud) || !pud_present(*pud))
4985 return (pte_t *)pud;
4986
4987 pmd = pmd_offset(pud, addr);
4988 if (sz != PMD_SIZE && pmd_none(*pmd))
4989 return NULL;
4990 /* hugepage or swap? */
4991 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4992 return (pte_t *)pmd;
4993
4994 return NULL;
4995}
4996
4997#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4998
4999/*
5000 * These functions are overwritable if your architecture needs its own
5001 * behavior.
5002 */
5003struct page * __weak
5004follow_huge_addr(struct mm_struct *mm, unsigned long address,
5005 int write)
5006{
5007 return ERR_PTR(-EINVAL);
5008}
5009
5010struct page * __weak
5011follow_huge_pd(struct vm_area_struct *vma,
5012 unsigned long address, hugepd_t hpd, int flags, int pdshift)
5013{
5014 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5015 return NULL;
5016}
5017
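/*
 * Default follow_page() helper for a pmd-mapped huge page: return the
 * subpage of the huge page that backs @address, taking a reference when
 * FOLL_GET is set.  If the entry is a migration entry, wait for the
 * migration to finish and retry; hwpoisoned entries return NULL.
 */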
5018struct page * __weak
5019follow_huge_pmd(struct mm_struct *mm, unsigned long address,
5020 pmd_t *pmd, int flags)
5021{
5022 struct page *page = NULL;
5023 spinlock_t *ptl;
5024 pte_t pte;
5025retry:
5026 ptl = pmd_lockptr(mm, pmd);
5027 spin_lock(ptl);
5028 /*
5029 * make sure that the address range covered by this pmd is not
5030 * unmapped from other threads.
5031 */
5032 if (!pmd_huge(*pmd))
5033 goto out;
5034 pte = huge_ptep_get((pte_t *)pmd);
5035 if (pte_present(pte)) {
5036 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
5037 if (flags & FOLL_GET)
5038 get_page(page);
5039 } else {
5040 if (is_hugetlb_entry_migration(pte)) {
5041 spin_unlock(ptl);
5042 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
5043 goto retry;
5044 }
5045 /*
5046 * hwpoisoned entry is treated as no_page_table in
5047 * follow_page_mask().
5048 */
5049 }
5050out:
5051 spin_unlock(ptl);
5052 return page;
5053}
5054
5055struct page * __weak
5056follow_huge_pud(struct mm_struct *mm, unsigned long address,
5057 pud_t *pud, int flags)
5058{
5059 if (flags & FOLL_GET)
5060 return NULL;
5061
5062 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5063}
5064
5065struct page * __weak
5066follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5067{
5068 if (flags & FOLL_GET)
5069 return NULL;
5070
5071 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5072}
5073
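/*
 * Isolate an active huge page for migration: take a reference, clear its
 * "huge active" state and move it off its current list onto @list.
 * Fails (returns false) if the page is not active or its refcount is
 * already zero.
 */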
5074bool isolate_huge_page(struct page *page, struct list_head *list)
5075{
5076 bool ret = true;
5077
5078 VM_BUG_ON_PAGE(!PageHead(page), page);
5079 spin_lock(&hugetlb_lock);
5080 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
5081 ret = false;
5082 goto unlock;
5083 }
5084 clear_page_huge_active(page);
5085 list_move_tail(&page->lru, list);
5086unlock:
5087 spin_unlock(&hugetlb_lock);
5088 return ret;
5089}
5090
5091void putback_active_hugepage(struct page *page)
5092{
5093 VM_BUG_ON_PAGE(!PageHead(page), page);
5094 spin_lock(&hugetlb_lock);
5095 set_page_huge_active(page);
5096 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5097 spin_unlock(&hugetlb_lock);
5098 put_page(page);
5099}
5100
5101void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5102{
5103 struct hstate *h = page_hstate(oldpage);
5104
5105 hugetlb_cgroup_migrate(oldpage, newpage);
5106 set_page_owner_migrate_reason(newpage, reason);
5107
5108 /*
5109	 * Transfer the temporary state of the new huge page. This is
5110	 * the reverse of other transitions because the newpage is going to
5111	 * be final while the old one will be freed, so the old page takes over
5112	 * the temporary status.
5113 *
5114 * Also note that we have to transfer the per-node surplus state
5115 * here as well otherwise the global surplus count will not match
5116 * the per-node's.
5117 */
5118 if (PageHugeTemporary(newpage)) {
5119 int old_nid = page_to_nid(oldpage);
5120 int new_nid = page_to_nid(newpage);
5121
5122 SetPageHugeTemporary(oldpage);
5123 ClearPageHugeTemporary(newpage);
5124
5125 spin_lock(&hugetlb_lock);
5126 if (h->surplus_huge_pages_node[old_nid]) {
5127 h->surplus_huge_pages_node[old_nid]--;
5128 h->surplus_huge_pages_node[new_nid]++;
5129 }
5130 spin_unlock(&hugetlb_lock);
5131 }
5132}
1/*
2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
4 */
5#include <linux/list.h>
6#include <linux/init.h>
7#include <linux/mm.h>
8#include <linux/seq_file.h>
9#include <linux/sysctl.h>
10#include <linux/highmem.h>
11#include <linux/mmu_notifier.h>
12#include <linux/nodemask.h>
13#include <linux/pagemap.h>
14#include <linux/mempolicy.h>
15#include <linux/compiler.h>
16#include <linux/cpuset.h>
17#include <linux/mutex.h>
18#include <linux/bootmem.h>
19#include <linux/sysfs.h>
20#include <linux/slab.h>
21#include <linux/rmap.h>
22#include <linux/swap.h>
23#include <linux/swapops.h>
24#include <linux/page-isolation.h>
25#include <linux/jhash.h>
26
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/tlb.h>
30
31#include <linux/io.h>
32#include <linux/hugetlb.h>
33#include <linux/hugetlb_cgroup.h>
34#include <linux/node.h>
35#include "internal.h"
36
37int hugepages_treat_as_movable;
38
39int hugetlb_max_hstate __read_mostly;
40unsigned int default_hstate_idx;
41struct hstate hstates[HUGE_MAX_HSTATE];
42/*
43 * Minimum page order among possible hugepage sizes, set to a proper value
44 * at boot time.
45 */
46static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48__initdata LIST_HEAD(huge_boot_pages);
49
50/* for command line parsing */
51static struct hstate * __initdata parsed_hstate;
52static unsigned long __initdata default_hstate_max_huge_pages;
53static unsigned long __initdata default_hstate_size;
54static bool __initdata parsed_valid_hugepagesz = true;
55
56/*
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
59 */
60DEFINE_SPINLOCK(hugetlb_lock);
61
62/*
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
65 */
66static int num_fault_mutexes;
67struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69/* Forward declaration */
70static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73{
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76 spin_unlock(&spool->lock);
77
78 /* If no pages are used, and no other handles to the subpool
79 * remain, give up any reservations mased on minimum size and
80 * free the subpool */
81 if (free) {
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
84 -spool->min_hpages);
85 kfree(spool);
86 }
87}
88
89struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 long min_hpages)
91{
92 struct hugepage_subpool *spool;
93
94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95 if (!spool)
96 return NULL;
97
98 spin_lock_init(&spool->lock);
99 spool->count = 1;
100 spool->max_hpages = max_hpages;
101 spool->hstate = h;
102 spool->min_hpages = min_hpages;
103
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 kfree(spool);
106 return NULL;
107 }
108 spool->rsv_hpages = min_hpages;
109
110 return spool;
111}
112
113void hugepage_put_subpool(struct hugepage_subpool *spool)
114{
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
117 spool->count--;
118 unlock_or_release_subpool(spool);
119}
120
121/*
122 * Subpool accounting for allocating and reserving pages.
123 * Return -ENOMEM if there are not enough resources to satisfy the
124 * the request. Otherwise, return the number of pages by which the
125 * global pools must be adjusted (upward). The returned value may
126 * only be different than the passed value (delta) in the case where
127 * a subpool minimum size must be manitained.
128 */
129static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130 long delta)
131{
132 long ret = delta;
133
134 if (!spool)
135 return ret;
136
137 spin_lock(&spool->lock);
138
139 if (spool->max_hpages != -1) { /* maximum size accounting */
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
142 else {
143 ret = -ENOMEM;
144 goto unlock_ret;
145 }
146 }
147
148 /* minimum size accounting */
149 if (spool->min_hpages != -1 && spool->rsv_hpages) {
150 if (delta > spool->rsv_hpages) {
151 /*
152 * Asking for more reserves than those already taken on
153 * behalf of subpool. Return difference.
154 */
155 ret = delta - spool->rsv_hpages;
156 spool->rsv_hpages = 0;
157 } else {
158 ret = 0; /* reserves already accounted for */
159 spool->rsv_hpages -= delta;
160 }
161 }
162
163unlock_ret:
164 spin_unlock(&spool->lock);
165 return ret;
166}
167
168/*
169 * Subpool accounting for freeing and unreserving pages.
170 * Return the number of global page reservations that must be dropped.
171 * The return value may only be different than the passed value (delta)
172 * in the case where a subpool minimum size must be maintained.
173 */
174static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175 long delta)
176{
177 long ret = delta;
178
179 if (!spool)
180 return delta;
181
182 spin_lock(&spool->lock);
183
184 if (spool->max_hpages != -1) /* maximum size accounting */
185 spool->used_hpages -= delta;
186
187 /* minimum size accounting */
188 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189 if (spool->rsv_hpages + delta <= spool->min_hpages)
190 ret = 0;
191 else
192 ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194 spool->rsv_hpages += delta;
195 if (spool->rsv_hpages > spool->min_hpages)
196 spool->rsv_hpages = spool->min_hpages;
197 }
198
199 /*
200 * If hugetlbfs_put_super couldn't free spool due to an outstanding
201 * quota reference, free it now.
202 */
203 unlock_or_release_subpool(spool);
204
205 return ret;
206}
207
208static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209{
210 return HUGETLBFS_SB(inode->i_sb)->spool;
211}
212
213static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214{
215 return subpool_inode(file_inode(vma->vm_file));
216}
217
218/*
219 * Region tracking -- allows tracking of reservations and instantiated pages
220 * across the pages in a mapping.
221 *
222 * The region data structures are embedded into a resv_map and protected
223 * by a resv_map's lock. The set of regions within the resv_map represent
224 * reservations for huge pages, or huge pages that have already been
225 * instantiated within the map. The from and to elements are huge page
226 * indicies into the associated mapping. from indicates the starting index
227 * of the region. to represents the first index past the end of the region.
228 *
229 * For example, a file region structure with from == 0 and to == 4 represents
230 * four huge pages in a mapping. It is important to note that the to element
231 * represents the first element past the end of the region. This is used in
232 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233 *
234 * Interval notation of the form [from, to) will be used to indicate that
235 * the endpoint from is inclusive and to is exclusive.
236 */
237struct file_region {
238 struct list_head link;
239 long from;
240 long to;
241};
242
243/*
244 * Add the huge page range represented by [f, t) to the reserve
245 * map. In the normal case, existing regions will be expanded
246 * to accommodate the specified range. Sufficient regions should
247 * exist for expansion due to the previous call to region_chg
248 * with the same range. However, it is possible that region_del
249 * could have been called after region_chg and modifed the map
250 * in such a way that no region exists to be expanded. In this
251 * case, pull a region descriptor from the cache associated with
252 * the map and use that for the new range.
253 *
254 * Return the number of new huge pages added to the map. This
255 * number is greater than or equal to zero.
256 */
257static long region_add(struct resv_map *resv, long f, long t)
258{
259 struct list_head *head = &resv->regions;
260 struct file_region *rg, *nrg, *trg;
261 long add = 0;
262
263 spin_lock(&resv->lock);
264 /* Locate the region we are either in or before. */
265 list_for_each_entry(rg, head, link)
266 if (f <= rg->to)
267 break;
268
269 /*
270 * If no region exists which can be expanded to include the
271 * specified range, the list must have been modified by an
272 * interleving call to region_del(). Pull a region descriptor
273 * from the cache and use it for this range.
274 */
275 if (&rg->link == head || t < rg->from) {
276 VM_BUG_ON(resv->region_cache_count <= 0);
277
278 resv->region_cache_count--;
279 nrg = list_first_entry(&resv->region_cache, struct file_region,
280 link);
281 list_del(&nrg->link);
282
283 nrg->from = f;
284 nrg->to = t;
285 list_add(&nrg->link, rg->link.prev);
286
287 add += t - f;
288 goto out_locked;
289 }
290
291 /* Round our left edge to the current segment if it encloses us. */
292 if (f > rg->from)
293 f = rg->from;
294
295 /* Check for and consume any regions we now overlap with. */
296 nrg = rg;
297 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298 if (&rg->link == head)
299 break;
300 if (rg->from > t)
301 break;
302
303 /* If this area reaches higher then extend our area to
304 * include it completely. If this is not the first area
305 * which we intend to reuse, free it. */
306 if (rg->to > t)
307 t = rg->to;
308 if (rg != nrg) {
309 /* Decrement return value by the deleted range.
310 * Another range will span this area so that by
311 * end of routine add will be >= zero
312 */
313 add -= (rg->to - rg->from);
314 list_del(&rg->link);
315 kfree(rg);
316 }
317 }
318
319 add += (nrg->from - f); /* Added to beginning of region */
320 nrg->from = f;
321 add += t - nrg->to; /* Added to end of region */
322 nrg->to = t;
323
324out_locked:
325 resv->adds_in_progress--;
326 spin_unlock(&resv->lock);
327 VM_BUG_ON(add < 0);
328 return add;
329}
330
331/*
332 * Examine the existing reserve map and determine how many
333 * huge pages in the specified range [f, t) are NOT currently
334 * represented. This routine is called before a subsequent
335 * call to region_add that will actually modify the reserve
336 * map to add the specified range [f, t). region_chg does
337 * not change the number of huge pages represented by the
338 * map. However, if the existing regions in the map can not
339 * be expanded to represent the new range, a new file_region
340 * structure is added to the map as a placeholder. This is
341 * so that the subsequent region_add call will have all the
342 * regions it needs and will not fail.
343 *
344 * Upon entry, region_chg will also examine the cache of region descriptors
345 * associated with the map. If there are not enough descriptors cached, one
346 * will be allocated for the in progress add operation.
347 *
348 * Returns the number of huge pages that need to be added to the existing
349 * reservation map for the range [f, t). This number is greater or equal to
350 * zero. -ENOMEM is returned if a new file_region structure or cache entry
351 * is needed and can not be allocated.
352 */
353static long region_chg(struct resv_map *resv, long f, long t)
354{
355 struct list_head *head = &resv->regions;
356 struct file_region *rg, *nrg = NULL;
357 long chg = 0;
358
359retry:
360 spin_lock(&resv->lock);
361retry_locked:
362 resv->adds_in_progress++;
363
364 /*
365 * Check for sufficient descriptors in the cache to accommodate
366 * the number of in progress add operations.
367 */
368 if (resv->adds_in_progress > resv->region_cache_count) {
369 struct file_region *trg;
370
371 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372 /* Must drop lock to allocate a new descriptor. */
373 resv->adds_in_progress--;
374 spin_unlock(&resv->lock);
375
376 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377 if (!trg) {
378 kfree(nrg);
379 return -ENOMEM;
380 }
381
382 spin_lock(&resv->lock);
383 list_add(&trg->link, &resv->region_cache);
384 resv->region_cache_count++;
385 goto retry_locked;
386 }
387
388 /* Locate the region we are before or in. */
389 list_for_each_entry(rg, head, link)
390 if (f <= rg->to)
391 break;
392
393 /* If we are below the current region then a new region is required.
394 * Subtle, allocate a new region at the position but make it zero
395 * size such that we can guarantee to record the reservation. */
396 if (&rg->link == head || t < rg->from) {
397 if (!nrg) {
398 resv->adds_in_progress--;
399 spin_unlock(&resv->lock);
400 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401 if (!nrg)
402 return -ENOMEM;
403
404 nrg->from = f;
405 nrg->to = f;
406 INIT_LIST_HEAD(&nrg->link);
407 goto retry;
408 }
409
410 list_add(&nrg->link, rg->link.prev);
411 chg = t - f;
412 goto out_nrg;
413 }
414
415 /* Round our left edge to the current segment if it encloses us. */
416 if (f > rg->from)
417 f = rg->from;
418 chg = t - f;
419
420 /* Check for and consume any regions we now overlap with. */
421 list_for_each_entry(rg, rg->link.prev, link) {
422 if (&rg->link == head)
423 break;
424 if (rg->from > t)
425 goto out;
426
427 /* We overlap with this area, if it extends further than
428 * us then we must extend ourselves. Account for its
429 * existing reservation. */
430 if (rg->to > t) {
431 chg += rg->to - t;
432 t = rg->to;
433 }
434 chg -= rg->to - rg->from;
435 }
436
437out:
438 spin_unlock(&resv->lock);
439 /* We already know we raced and no longer need the new region */
440 kfree(nrg);
441 return chg;
442out_nrg:
443 spin_unlock(&resv->lock);
444 return chg;
445}
446
447/*
448 * Abort the in progress add operation. The adds_in_progress field
449 * of the resv_map keeps track of the operations in progress between
450 * calls to region_chg and region_add. Operations are sometimes
451 * aborted after the call to region_chg. In such cases, region_abort
452 * is called to decrement the adds_in_progress counter.
453 *
454 * NOTE: The range arguments [f, t) are not needed or used in this
455 * routine. They are kept to make reading the calling code easier as
456 * arguments will match the associated region_chg call.
457 */
458static void region_abort(struct resv_map *resv, long f, long t)
459{
460 spin_lock(&resv->lock);
461 VM_BUG_ON(!resv->region_cache_count);
462 resv->adds_in_progress--;
463 spin_unlock(&resv->lock);
464}
465
466/*
467 * Delete the specified range [f, t) from the reserve map. If the
468 * t parameter is LONG_MAX, this indicates that ALL regions after f
469 * should be deleted. Locate the regions which intersect [f, t)
470 * and either trim, delete or split the existing regions.
471 *
472 * Returns the number of huge pages deleted from the reserve map.
473 * In the normal case, the return value is zero or more. In the
474 * case where a region must be split, a new region descriptor must
475 * be allocated. If the allocation fails, -ENOMEM will be returned.
476 * NOTE: If the parameter t == LONG_MAX, then we will never split
477 * a region and thus will never return -ENOMEM. Callers specifying
478 * t == LONG_MAX do not need to check for -ENOMEM error.
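 *
 * For example, with a single region [0, 10) in the map,
 * region_del(resv, 4, 6) must split that region: the original entry is
 * trimmed to [0, 4), a new entry [6, 10) is inserted after it, and 2 is
 * returned.  Calling region_del(resv, 0, LONG_MAX) instead removes
 * every entry and returns the total number of pages that were
 * represented.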
479 */
480static long region_del(struct resv_map *resv, long f, long t)
481{
482 struct list_head *head = &resv->regions;
483 struct file_region *rg, *trg;
484 struct file_region *nrg = NULL;
485 long del = 0;
486
487retry:
488 spin_lock(&resv->lock);
489 list_for_each_entry_safe(rg, trg, head, link) {
490 /*
491 * Skip regions before the range to be deleted. file_region
492 * ranges are normally of the form [from, to). However, there
493 * may be a "placeholder" entry in the map which is of the form
494 * (from, to) with from == to. Check for placeholder entries
495 * at the beginning of the range to be deleted.
496 */
497 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498 continue;
499
500 if (rg->from >= t)
501 break;
502
503 if (f > rg->from && t < rg->to) { /* Must split region */
504 /*
505 * Check for an entry in the cache before dropping
506 * lock and attempting allocation.
507 */
508 if (!nrg &&
509 resv->region_cache_count > resv->adds_in_progress) {
510 nrg = list_first_entry(&resv->region_cache,
511 struct file_region,
512 link);
513 list_del(&nrg->link);
514 resv->region_cache_count--;
515 }
516
517 if (!nrg) {
518 spin_unlock(&resv->lock);
519 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520 if (!nrg)
521 return -ENOMEM;
522 goto retry;
523 }
524
525 del += t - f;
526
527 /* New entry for end of split region */
528 nrg->from = t;
529 nrg->to = rg->to;
530 INIT_LIST_HEAD(&nrg->link);
531
532 /* Original entry is trimmed */
533 rg->to = f;
534
535 list_add(&nrg->link, &rg->link);
536 nrg = NULL;
537 break;
538 }
539
540 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541 del += rg->to - rg->from;
542 list_del(&rg->link);
543 kfree(rg);
544 continue;
545 }
546
547 if (f <= rg->from) { /* Trim beginning of region */
548 del += t - rg->from;
549 rg->from = t;
550 } else { /* Trim end of region */
551 del += rg->to - f;
552 rg->to = f;
553 }
554 }
555
556 spin_unlock(&resv->lock);
557 kfree(nrg);
558 return del;
559}
560
561/*
562 * A rare out-of-memory error was encountered which prevented removal of
563 * the reserve map region for a page. The huge page itself was freed
564 * and removed from the page cache. This routine will adjust the subpool
565 * usage count, and the global reserve count if needed. By incrementing
566 * these counts, the reserve map entry which could not be deleted will
567 * appear as a "reserved" entry instead of simply dangling with incorrect
568 * counts.
569 */
570void hugetlb_fix_reserve_counts(struct inode *inode)
571{
572 struct hugepage_subpool *spool = subpool_inode(inode);
573 long rsv_adjust;
574
575 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576 if (rsv_adjust) {
577 struct hstate *h = hstate_inode(inode);
578
579 hugetlb_acct_memory(h, 1);
580 }
581}
582
583/*
584 * Count and return the number of huge pages in the reserve map
585 * that intersect with the range [f, t).
586 */
587static long region_count(struct resv_map *resv, long f, long t)
588{
589 struct list_head *head = &resv->regions;
590 struct file_region *rg;
591 long chg = 0;
592
593 spin_lock(&resv->lock);
594 /* Locate each segment we overlap with, and count that overlap. */
595 list_for_each_entry(rg, head, link) {
596 long seg_from;
597 long seg_to;
598
599 if (rg->to <= f)
600 continue;
601 if (rg->from >= t)
602 break;
603
604 seg_from = max(rg->from, f);
605 seg_to = min(rg->to, t);
606
607 chg += seg_to - seg_from;
608 }
609 spin_unlock(&resv->lock);
610
611 return chg;
612}
613
614/*
615 * Convert the address within this vma to the page offset within
616 * the mapping, in pagecache page units; huge pages here.
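 *
 * For example, with 2 MB huge pages (huge_page_shift == 21 and, assuming
 * a 4 KB base page, huge_page_order == 9), an address 3 MB past vm_start
 * contributes index 1 and a vm_pgoff of 1024 base pages (4 MB into the
 * file) contributes 1024 >> 9 == 2, for a huge-page offset of 3.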
617 */
618static pgoff_t vma_hugecache_offset(struct hstate *h,
619 struct vm_area_struct *vma, unsigned long address)
620{
621 return ((address - vma->vm_start) >> huge_page_shift(h)) +
622 (vma->vm_pgoff >> huge_page_order(h));
623}
624
625pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626 unsigned long address)
627{
628 return vma_hugecache_offset(hstate_vma(vma), vma, address);
629}
630EXPORT_SYMBOL_GPL(linear_hugepage_index);
631
632/*
633 * Return the size of the pages allocated when backing a VMA. In the majority
634 * of cases this will be the same size as that used by the page table entries.
635 */
636unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
637{
638 struct hstate *hstate;
639
640 if (!is_vm_hugetlb_page(vma))
641 return PAGE_SIZE;
642
643 hstate = hstate_vma(vma);
644
645 return 1UL << huge_page_shift(hstate);
646}
647EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648
649/*
650 * Return the page size being used by the MMU to back a VMA. In the majority
651 * of cases, the page size used by the kernel matches the MMU size. On
652 * architectures where it differs, an architecture-specific version of this
653 * function is required.
654 */
655#ifndef vma_mmu_pagesize
656unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
657{
658 return vma_kernel_pagesize(vma);
659}
660#endif
661
662/*
663 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
664 * bits of the reservation map pointer, which are always clear due to
665 * alignment.
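 *
 * For an owning MAP_PRIVATE VMA, vm_private_data therefore holds the
 * resv_map pointer with HPAGE_RESV_OWNER or'ed into its low bits, and
 * vma_resv_map() below masks off HPAGE_RESV_MASK to recover the pointer.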
666 */
667#define HPAGE_RESV_OWNER (1UL << 0)
668#define HPAGE_RESV_UNMAPPED (1UL << 1)
669#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
670
671/*
672 * These helpers are used to track how many pages are reserved for
673 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
674 * is guaranteed to have its future faults succeed.
675 *
676 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
677 * the reserve counters are updated with the hugetlb_lock held. It is safe
678 * to reset the VMA at fork() time as it is not in use yet and there is no
679 * chance of the global counters getting corrupted as a result of the values.
680 *
681 * The private mapping reservation is represented in a subtly different
682 * manner to a shared mapping. A shared mapping has a region map associated
683 * with the underlying file; this region map represents the backing file
684 * pages which have ever had a reservation assigned, and it persists even
685 * after the page is instantiated. A private mapping has a region map
686 * associated with the original mmap which is attached to all VMAs that
687 * reference it; this region map represents those offsets which have
688 * consumed a reservation, i.e. where pages have been instantiated.
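 *
 * As a concrete contrast: for a shared mapping, an entry covering an
 * offset means a reservation exists for that offset, while for a private
 * mapping an entry means the reservation has already been consumed, so
 * it is the absence of an entry that indicates an unused reservation.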
689 */
690static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691{
692 return (unsigned long)vma->vm_private_data;
693}
694
695static void set_vma_private_data(struct vm_area_struct *vma,
696 unsigned long value)
697{
698 vma->vm_private_data = (void *)value;
699}
700
701struct resv_map *resv_map_alloc(void)
702{
703 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
704 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
705
706 if (!resv_map || !rg) {
707 kfree(resv_map);
708 kfree(rg);
709 return NULL;
710 }
711
712 kref_init(&resv_map->refs);
713 spin_lock_init(&resv_map->lock);
714 INIT_LIST_HEAD(&resv_map->regions);
715
716 resv_map->adds_in_progress = 0;
717
718 INIT_LIST_HEAD(&resv_map->region_cache);
719 list_add(&rg->link, &resv_map->region_cache);
720 resv_map->region_cache_count = 1;
721
722 return resv_map;
723}
724
725void resv_map_release(struct kref *ref)
726{
727 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
728 struct list_head *head = &resv_map->region_cache;
729 struct file_region *rg, *trg;
730
731 /* Clear out any active regions before we release the map. */
732 region_del(resv_map, 0, LONG_MAX);
733
734 /* ... and any entries left in the cache */
735 list_for_each_entry_safe(rg, trg, head, link) {
736 list_del(&rg->link);
737 kfree(rg);
738 }
739
740 VM_BUG_ON(resv_map->adds_in_progress);
741
742 kfree(resv_map);
743}
744
745static inline struct resv_map *inode_resv_map(struct inode *inode)
746{
747 return inode->i_mapping->private_data;
748}
749
750static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751{
752 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
753 if (vma->vm_flags & VM_MAYSHARE) {
754 struct address_space *mapping = vma->vm_file->f_mapping;
755 struct inode *inode = mapping->host;
756
757 return inode_resv_map(inode);
758
759 } else {
760 return (struct resv_map *)(get_vma_private_data(vma) &
761 ~HPAGE_RESV_MASK);
762 }
763}
764
765static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766{
767 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
768 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769
770 set_vma_private_data(vma, (get_vma_private_data(vma) &
771 HPAGE_RESV_MASK) | (unsigned long)map);
772}
773
774static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
775{
776 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
777 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778
779 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
780}
781
782static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
783{
784 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785
786 return (get_vma_private_data(vma) & flag) != 0;
787}
788
789/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
790void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791{
792 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793 if (!(vma->vm_flags & VM_MAYSHARE))
794 vma->vm_private_data = (void *)0;
795}
796
797/* Returns true if the VMA has associated reserve pages */
798static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799{
800 if (vma->vm_flags & VM_NORESERVE) {
801 /*
802		 * This address is already reserved by another process (chg == 0),
803		 * so we should decrement the reserved count. Without decrementing,
804		 * the reserve count remains after releasing the inode, because this
805		 * allocated page will go into the page cache and be regarded as
806		 * coming from the reserved pool when it is released. Currently, we
807		 * don't have any other way to deal with this situation
808		 * properly, so add a work-around here.
809 */
810 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811 return true;
812 else
813 return false;
814 }
815
816 /* Shared mappings always use reserves */
817 if (vma->vm_flags & VM_MAYSHARE) {
818 /*
819 * We know VM_NORESERVE is not set. Therefore, there SHOULD
820 * be a region map for all pages. The only situation where
821 * there is no region map is if a hole was punched via
822		 * fallocate. In this case, there really are no reserves to
823 * use. This situation is indicated if chg != 0.
824 */
825 if (chg)
826 return false;
827 else
828 return true;
829 }
830
831 /*
832 * Only the process that called mmap() has reserves for
833 * private mappings.
834 */
835 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
836 /*
837 * Like the shared case above, a hole punch or truncate
838 * could have been performed on the private mapping.
839 * Examine the value of chg to determine if reserves
840 * actually exist or were previously consumed.
841 * Very Subtle - The value of chg comes from a previous
842		 * call to vma_needs_reservation(). The reserve map for
843 * private mappings has different (opposite) semantics
844		 * than that of shared mappings. vma_needs_reservation()
845 * has already taken this difference in semantics into
846 * account. Therefore, the meaning of chg is the same
847 * as in the shared case above. Code could easily be
848 * combined, but keeping it separate draws attention to
849 * subtle differences.
850 */
851 if (chg)
852 return false;
853 else
854 return true;
855 }
856
857 return false;
858}
859
860static void enqueue_huge_page(struct hstate *h, struct page *page)
861{
862 int nid = page_to_nid(page);
863 list_move(&page->lru, &h->hugepage_freelists[nid]);
864 h->free_huge_pages++;
865 h->free_huge_pages_node[nid]++;
866}
867
868static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
869{
870 struct page *page;
871
872 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
873 if (!is_migrate_isolate_page(page))
874 break;
875 /*
876	 * If a non-isolated free hugepage is not found on the list,
877	 * the allocation fails.
878 */
879 if (&h->hugepage_freelists[nid] == &page->lru)
880 return NULL;
881 list_move(&page->lru, &h->hugepage_activelist);
882 set_page_refcounted(page);
883 h->free_huge_pages--;
884 h->free_huge_pages_node[nid]--;
885 return page;
886}
887
888/* Movability of hugepages depends on migration support. */
889static inline gfp_t htlb_alloc_mask(struct hstate *h)
890{
891 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
892 return GFP_HIGHUSER_MOVABLE;
893 else
894 return GFP_HIGHUSER;
895}
896
897static struct page *dequeue_huge_page_vma(struct hstate *h,
898 struct vm_area_struct *vma,
899 unsigned long address, int avoid_reserve,
900 long chg)
901{
902 struct page *page = NULL;
903 struct mempolicy *mpol;
904 nodemask_t *nodemask;
905 struct zonelist *zonelist;
906 struct zone *zone;
907 struct zoneref *z;
908 unsigned int cpuset_mems_cookie;
909
910 /*
911	 * A child process with MAP_PRIVATE mappings created by its parent
912	 * has no page reserves. This check ensures that reservations are
913	 * not "stolen". The child may still get SIGKILLed.
914 */
915 if (!vma_has_reserves(vma, chg) &&
916 h->free_huge_pages - h->resv_huge_pages == 0)
917 goto err;
918
919 /* If reserves cannot be used, ensure enough pages are in the pool */
920 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
921 goto err;
922
923retry_cpuset:
924 cpuset_mems_cookie = read_mems_allowed_begin();
925 zonelist = huge_zonelist(vma, address,
926 htlb_alloc_mask(h), &mpol, &nodemask);
927
928 for_each_zone_zonelist_nodemask(zone, z, zonelist,
929 MAX_NR_ZONES - 1, nodemask) {
930 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
931 page = dequeue_huge_page_node(h, zone_to_nid(zone));
932 if (page) {
933 if (avoid_reserve)
934 break;
935 if (!vma_has_reserves(vma, chg))
936 break;
937
938 SetPagePrivate(page);
939 h->resv_huge_pages--;
940 break;
941 }
942 }
943 }
944
945 mpol_cond_put(mpol);
946 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
947 goto retry_cpuset;
948 return page;
949
950err:
951 return NULL;
952}
953
954/*
955 * common helper functions for hstate_next_node_to_{alloc|free}.
956 * We may have allocated or freed a huge page based on a different
957 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
958 * be outside of *nodes_allowed. Ensure that we use an allowed
959 * node for alloc or free.
960 */
961static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
962{
963 nid = next_node_in(nid, *nodes_allowed);
964 VM_BUG_ON(nid >= MAX_NUMNODES);
965
966 return nid;
967}
968
969static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
970{
971 if (!node_isset(nid, *nodes_allowed))
972 nid = next_node_allowed(nid, nodes_allowed);
973 return nid;
974}
975
976/*
977 * returns the previously saved node ["this node"] from which to
978 * allocate a persistent huge page for the pool and advances the
979 * next node from which to allocate, handling wrap at the end of
980 * the node mask.
981 */
982static int hstate_next_node_to_alloc(struct hstate *h,
983 nodemask_t *nodes_allowed)
984{
985 int nid;
986
987 VM_BUG_ON(!nodes_allowed);
988
989 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
990 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
991
992 return nid;
993}
994
995/*
996 * helper for free_pool_huge_page() - return the previously saved
997 * node ["this node"] from which to free a huge page. Advance the
998 * next node id whether or not we find a free huge page to free so
999 * that the next attempt to free addresses the next node.
1000 */
1001static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1002{
1003 int nid;
1004
1005 VM_BUG_ON(!nodes_allowed);
1006
1007 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1008 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1009
1010 return nid;
1011}
1012
1013#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1014 for (nr_nodes = nodes_weight(*mask); \
1015 nr_nodes > 0 && \
1016 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1017 nr_nodes--)
1018
1019#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1020 for (nr_nodes = nodes_weight(*mask); \
1021 nr_nodes > 0 && \
1022 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1023 nr_nodes--)
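
/*
 * Example of the round-robin behaviour: with nodes_allowed = {0, 2, 3}
 * and h->next_nid_to_alloc == 1, successive passes of
 * for_each_node_mask_to_alloc() yield node 2 (the next allowed node),
 * then node 3, then wrap around to node 0, and the loop terminates
 * after nodes_weight() == 3 iterations.
 */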
1024
1025#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
1026 ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
1027 defined(CONFIG_CMA))
1028static void destroy_compound_gigantic_page(struct page *page,
1029 unsigned int order)
1030{
1031 int i;
1032 int nr_pages = 1 << order;
1033 struct page *p = page + 1;
1034
1035 atomic_set(compound_mapcount_ptr(page), 0);
1036 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1037 clear_compound_head(p);
1038 set_page_refcounted(p);
1039 }
1040
1041 set_compound_order(page, 0);
1042 __ClearPageHead(page);
1043}
1044
1045static void free_gigantic_page(struct page *page, unsigned int order)
1046{
1047 free_contig_range(page_to_pfn(page), 1 << order);
1048}
1049
1050static int __alloc_gigantic_page(unsigned long start_pfn,
1051 unsigned long nr_pages)
1052{
1053 unsigned long end_pfn = start_pfn + nr_pages;
1054 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1055}
1056
1057static bool pfn_range_valid_gigantic(struct zone *z,
1058 unsigned long start_pfn, unsigned long nr_pages)
1059{
1060 unsigned long i, end_pfn = start_pfn + nr_pages;
1061 struct page *page;
1062
1063 for (i = start_pfn; i < end_pfn; i++) {
1064 if (!pfn_valid(i))
1065 return false;
1066
1067 page = pfn_to_page(i);
1068
1069 if (page_zone(page) != z)
1070 return false;
1071
1072 if (PageReserved(page))
1073 return false;
1074
1075 if (page_count(page) > 0)
1076 return false;
1077
1078 if (PageHuge(page))
1079 return false;
1080 }
1081
1082 return true;
1083}
1084
1085static bool zone_spans_last_pfn(const struct zone *zone,
1086 unsigned long start_pfn, unsigned long nr_pages)
1087{
1088 unsigned long last_pfn = start_pfn + nr_pages - 1;
1089 return zone_spans_pfn(zone, last_pfn);
1090}
1091
1092static struct page *alloc_gigantic_page(int nid, unsigned int order)
1093{
1094 unsigned long nr_pages = 1 << order;
1095 unsigned long ret, pfn, flags;
1096 struct zone *z;
1097
1098 z = NODE_DATA(nid)->node_zones;
1099 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1100 spin_lock_irqsave(&z->lock, flags);
1101
1102 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1103 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1104 if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1105 /*
1106 * We release the zone lock here because
1107 * alloc_contig_range() will also lock the zone
1108 * at some point. If there's an allocation
1109 * spinning on this lock, it may win the race
1110 * and cause alloc_contig_range() to fail...
1111 */
1112 spin_unlock_irqrestore(&z->lock, flags);
1113 ret = __alloc_gigantic_page(pfn, nr_pages);
1114 if (!ret)
1115 return pfn_to_page(pfn);
1116 spin_lock_irqsave(&z->lock, flags);
1117 }
1118 pfn += nr_pages;
1119 }
1120
1121 spin_unlock_irqrestore(&z->lock, flags);
1122 }
1123
1124 return NULL;
1125}
1126
1127static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1128static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1129
1130static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1131{
1132 struct page *page;
1133
1134 page = alloc_gigantic_page(nid, huge_page_order(h));
1135 if (page) {
1136 prep_compound_gigantic_page(page, huge_page_order(h));
1137 prep_new_huge_page(h, page, nid);
1138 }
1139
1140 return page;
1141}
1142
1143static int alloc_fresh_gigantic_page(struct hstate *h,
1144 nodemask_t *nodes_allowed)
1145{
1146 struct page *page = NULL;
1147 int nr_nodes, node;
1148
1149 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1150 page = alloc_fresh_gigantic_page_node(h, node);
1151 if (page)
1152 return 1;
1153 }
1154
1155 return 0;
1156}
1157
1158static inline bool gigantic_page_supported(void) { return true; }
1159#else
1160static inline bool gigantic_page_supported(void) { return false; }
1161static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1162static inline void destroy_compound_gigantic_page(struct page *page,
1163 unsigned int order) { }
1164static inline int alloc_fresh_gigantic_page(struct hstate *h,
1165 nodemask_t *nodes_allowed) { return 0; }
1166#endif
1167
1168static void update_and_free_page(struct hstate *h, struct page *page)
1169{
1170 int i;
1171
1172 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1173 return;
1174
1175 h->nr_huge_pages--;
1176 h->nr_huge_pages_node[page_to_nid(page)]--;
1177 for (i = 0; i < pages_per_huge_page(h); i++) {
1178 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1179 1 << PG_referenced | 1 << PG_dirty |
1180 1 << PG_active | 1 << PG_private |
1181 1 << PG_writeback);
1182 }
1183 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1184 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1185 set_page_refcounted(page);
1186 if (hstate_is_gigantic(h)) {
1187 destroy_compound_gigantic_page(page, huge_page_order(h));
1188 free_gigantic_page(page, huge_page_order(h));
1189 } else {
1190 __free_pages(page, huge_page_order(h));
1191 }
1192}
1193
1194struct hstate *size_to_hstate(unsigned long size)
1195{
1196 struct hstate *h;
1197
1198 for_each_hstate(h) {
1199 if (huge_page_size(h) == size)
1200 return h;
1201 }
1202 return NULL;
1203}
1204
1205/*
1206 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1207 * to hstate->hugepage_activelist).
1208 *
1209 * This function can be called for tail pages, but never returns true for them.
1210 */
1211bool page_huge_active(struct page *page)
1212{
1213 VM_BUG_ON_PAGE(!PageHuge(page), page);
1214 return PageHead(page) && PagePrivate(&page[1]);
1215}
1216
1217/* never called for tail page */
1218static void set_page_huge_active(struct page *page)
1219{
1220 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1221 SetPagePrivate(&page[1]);
1222}
1223
1224static void clear_page_huge_active(struct page *page)
1225{
1226 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227 ClearPagePrivate(&page[1]);
1228}
1229
1230void free_huge_page(struct page *page)
1231{
1232 /*
1233 * Can't pass hstate in here because it is called from the
1234 * compound page destructor.
1235 */
1236 struct hstate *h = page_hstate(page);
1237 int nid = page_to_nid(page);
1238 struct hugepage_subpool *spool =
1239 (struct hugepage_subpool *)page_private(page);
1240 bool restore_reserve;
1241
1242 set_page_private(page, 0);
1243 page->mapping = NULL;
1244 VM_BUG_ON_PAGE(page_count(page), page);
1245 VM_BUG_ON_PAGE(page_mapcount(page), page);
1246 restore_reserve = PagePrivate(page);
1247 ClearPagePrivate(page);
1248
1249 /*
1250 * A return code of zero implies that the subpool will be under its
1251 * minimum size if the reservation is not restored after page is free.
1252 * Therefore, force restore_reserve operation.
1253 */
1254 if (hugepage_subpool_put_pages(spool, 1) == 0)
1255 restore_reserve = true;
1256
1257 spin_lock(&hugetlb_lock);
1258 clear_page_huge_active(page);
1259 hugetlb_cgroup_uncharge_page(hstate_index(h),
1260 pages_per_huge_page(h), page);
1261 if (restore_reserve)
1262 h->resv_huge_pages++;
1263
1264 if (h->surplus_huge_pages_node[nid]) {
1265 /* remove the page from active list */
1266 list_del(&page->lru);
1267 update_and_free_page(h, page);
1268 h->surplus_huge_pages--;
1269 h->surplus_huge_pages_node[nid]--;
1270 } else {
1271 arch_clear_hugepage_flags(page);
1272 enqueue_huge_page(h, page);
1273 }
1274 spin_unlock(&hugetlb_lock);
1275}
1276
1277static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1278{
1279 INIT_LIST_HEAD(&page->lru);
1280 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1281 spin_lock(&hugetlb_lock);
1282 set_hugetlb_cgroup(page, NULL);
1283 h->nr_huge_pages++;
1284 h->nr_huge_pages_node[nid]++;
1285 spin_unlock(&hugetlb_lock);
1286 put_page(page); /* free it into the hugepage allocator */
1287}
1288
1289static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1290{
1291 int i;
1292 int nr_pages = 1 << order;
1293 struct page *p = page + 1;
1294
1295 /* we rely on prep_new_huge_page to set the destructor */
1296 set_compound_order(page, order);
1297 __ClearPageReserved(page);
1298 __SetPageHead(page);
1299 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1300 /*
1301 * For gigantic hugepages allocated through bootmem at
1302 * boot, it's safer to be consistent with the not-gigantic
1303 * hugepages and clear the PG_reserved bit from all tail pages
1304		 * too. Otherwise drivers using get_user_pages() to access tail
1305 * pages may get the reference counting wrong if they see
1306 * PG_reserved set on a tail page (despite the head page not
1307 * having PG_reserved set). Enforcing this consistency between
1308 * head and tail pages allows drivers to optimize away a check
1309		 * on the head page when they need to know if put_page() is needed
1310 * after get_user_pages().
1311 */
1312 __ClearPageReserved(p);
1313 set_page_count(p, 0);
1314 set_compound_head(p, page);
1315 }
1316 atomic_set(compound_mapcount_ptr(page), -1);
1317}
1318
1319/*
1320 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1321 * transparent huge pages. See the PageTransHuge() documentation for more
1322 * details.
1323 */
1324int PageHuge(struct page *page)
1325{
1326 if (!PageCompound(page))
1327 return 0;
1328
1329 page = compound_head(page);
1330 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1331}
1332EXPORT_SYMBOL_GPL(PageHuge);
1333
1334/*
1335 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1336 * normal or transparent huge pages.
1337 */
1338int PageHeadHuge(struct page *page_head)
1339{
1340 if (!PageHead(page_head))
1341 return 0;
1342
1343 return get_compound_page_dtor(page_head) == free_huge_page;
1344}
1345
1346pgoff_t __basepage_index(struct page *page)
1347{
1348 struct page *page_head = compound_head(page);
1349 pgoff_t index = page_index(page_head);
1350 unsigned long compound_idx;
1351
1352 if (!PageHuge(page_head))
1353 return page_index(page);
1354
1355 if (compound_order(page_head) >= MAX_ORDER)
1356 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1357 else
1358 compound_idx = page - page_head;
1359
1360 return (index << compound_order(page_head)) + compound_idx;
1361}
1362
1363static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1364{
1365 struct page *page;
1366
1367 page = __alloc_pages_node(nid,
1368 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1369 __GFP_REPEAT|__GFP_NOWARN,
1370 huge_page_order(h));
1371 if (page) {
1372 prep_new_huge_page(h, page, nid);
1373 }
1374
1375 return page;
1376}
1377
1378static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1379{
1380 struct page *page;
1381 int nr_nodes, node;
1382 int ret = 0;
1383
1384 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1385 page = alloc_fresh_huge_page_node(h, node);
1386 if (page) {
1387 ret = 1;
1388 break;
1389 }
1390 }
1391
1392 if (ret)
1393 count_vm_event(HTLB_BUDDY_PGALLOC);
1394 else
1395 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1396
1397 return ret;
1398}
1399
1400/*
1401 * Free a huge page from the pool, starting from the next node to free.
1402 * Attempt to keep persistent huge pages more or less
1403 * balanced over allowed nodes.
1404 * Called with hugetlb_lock locked.
1405 */
1406static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1407 bool acct_surplus)
1408{
1409 int nr_nodes, node;
1410 int ret = 0;
1411
1412 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1413 /*
1414 * If we're returning unused surplus pages, only examine
1415 * nodes with surplus pages.
1416 */
1417 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1418 !list_empty(&h->hugepage_freelists[node])) {
1419 struct page *page =
1420 list_entry(h->hugepage_freelists[node].next,
1421 struct page, lru);
1422 list_del(&page->lru);
1423 h->free_huge_pages--;
1424 h->free_huge_pages_node[node]--;
1425 if (acct_surplus) {
1426 h->surplus_huge_pages--;
1427 h->surplus_huge_pages_node[node]--;
1428 }
1429 update_and_free_page(h, page);
1430 ret = 1;
1431 break;
1432 }
1433 }
1434
1435 return ret;
1436}
1437
1438/*
1439 * Dissolve a given free hugepage into free buddy pages. This function does
1440 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1441 * number of free hugepages would be reduced below the number of reserved
1442 * hugepages.
1443 */
1444static int dissolve_free_huge_page(struct page *page)
1445{
1446 int rc = 0;
1447
1448 spin_lock(&hugetlb_lock);
1449 if (PageHuge(page) && !page_count(page)) {
1450 struct page *head = compound_head(page);
1451 struct hstate *h = page_hstate(head);
1452 int nid = page_to_nid(head);
1453 if (h->free_huge_pages - h->resv_huge_pages == 0) {
1454 rc = -EBUSY;
1455 goto out;
1456 }
1457 list_del(&head->lru);
1458 h->free_huge_pages--;
1459 h->free_huge_pages_node[nid]--;
1460 h->max_huge_pages--;
1461 update_and_free_page(h, head);
1462 }
1463out:
1464 spin_unlock(&hugetlb_lock);
1465 return rc;
1466}
1467
1468/*
1469 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1470 * make specified memory blocks removable from the system.
1471 * Note that this will dissolve a free gigantic hugepage completely, if any
1472 * part of it lies within the given range.
1473 * Also note that if dissolve_free_huge_page() returns with an error, all
1474 * free hugepages that were dissolved before that error are lost.
1475 */
1476int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1477{
1478 unsigned long pfn;
1479 struct page *page;
1480 int rc = 0;
1481
1482 if (!hugepages_supported())
1483 return rc;
1484
1485 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1486 page = pfn_to_page(pfn);
1487 if (PageHuge(page) && !page_count(page)) {
1488 rc = dissolve_free_huge_page(page);
1489 if (rc)
1490 break;
1491 }
1492 }
1493
1494 return rc;
1495}
1496
1497/*
1498 * There are 3 ways this can get called:
1499 * 1. With vma+addr: we use the VMA's memory policy
1500 * 2. With !vma, but nid=NUMA_NO_NODE: We try to allocate a huge
1501 * page from any node, and let the buddy allocator itself figure
1502 * it out.
1503 * 3. With !vma, but nid!=NUMA_NO_NODE. We allocate a huge page
1504 * strictly from 'nid'
1505 */
1506static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1507 struct vm_area_struct *vma, unsigned long addr, int nid)
1508{
1509 int order = huge_page_order(h);
1510 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1511 unsigned int cpuset_mems_cookie;
1512
1513 /*
1514 * We need a VMA to get a memory policy. If we do not
1515 * have one, we use the 'nid' argument.
1516 *
1517 * The mempolicy stuff below has some non-inlined bits
1518 * and calls ->vm_ops. That makes it hard to optimize at
1519 * compile-time, even when NUMA is off and it does
1520 * nothing. This helps the compiler optimize it out.
1521 */
1522 if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1523 /*
1524 * If a specific node is requested, make sure to
1525 * get memory from there, but only when a node
1526 * is explicitly specified.
1527 */
1528 if (nid != NUMA_NO_NODE)
1529 gfp |= __GFP_THISNODE;
1530 /*
1531 * Make sure to call something that can handle
1532 * nid=NUMA_NO_NODE
1533 */
1534 return alloc_pages_node(nid, gfp, order);
1535 }
1536
1537 /*
1538 * OK, so we have a VMA. Fetch the mempolicy and try to
1539 * allocate a huge page with it. We will only reach this
1540 * when CONFIG_NUMA=y.
1541 */
1542 do {
1543 struct page *page;
1544 struct mempolicy *mpol;
1545 struct zonelist *zl;
1546 nodemask_t *nodemask;
1547
1548 cpuset_mems_cookie = read_mems_allowed_begin();
1549 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1550 mpol_cond_put(mpol);
1551 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1552 if (page)
1553 return page;
1554 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1555
1556 return NULL;
1557}
1558
1559/*
1560 * There are two ways to allocate a huge page:
1561 * 1. When you have a VMA and an address (like a fault)
1562 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1563 *
1564 * 'vma' and 'addr' are only for (1). 'nid' is always NUMA_NO_NODE in
1565 * this case which signifies that the allocation should be done with
1566 * respect for the VMA's memory policy.
1567 *
1568 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1569 * implies that memory policies will not be taken into account.
1570 */
1571static struct page *__alloc_buddy_huge_page(struct hstate *h,
1572 struct vm_area_struct *vma, unsigned long addr, int nid)
1573{
1574 struct page *page;
1575 unsigned int r_nid;
1576
1577 if (hstate_is_gigantic(h))
1578 return NULL;
1579
1580 /*
1581 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1582 * This makes sure the caller is picking _one_ of the modes with which
1583 * we can call this function, not both.
1584 */
1585 if (vma || (addr != -1)) {
1586 VM_WARN_ON_ONCE(addr == -1);
1587 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1588 }
1589 /*
1590 * Assume we will successfully allocate the surplus page to
1591 * prevent racing processes from causing the surplus to exceed
1592 * overcommit
1593 *
1594 * This however introduces a different race, where a process B
1595 * tries to grow the static hugepage pool while alloc_pages() is
1596 * called by process A. B will only examine the per-node
1597 * counters in determining if surplus huge pages can be
1598 * converted to normal huge pages in adjust_pool_surplus(). A
1599 * won't be able to increment the per-node counter, until the
1600 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1601 * no more huge pages can be converted from surplus to normal
1602 * state (and doesn't try to convert again). Thus, we have a
1603 * case where a surplus huge page exists, the pool is grown, and
1604 * the surplus huge page still exists after, even though it
1605 * should just have been converted to a normal huge page. This
1606 * does not leak memory, though, as the hugepage will be freed
1607 * once it is out of use. It also does not allow the counters to
1608 * go out of whack in adjust_pool_surplus() as we don't modify
1609 * the node values until we've gotten the hugepage and only the
1610 * per-node value is checked there.
1611 */
1612 spin_lock(&hugetlb_lock);
1613 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1614 spin_unlock(&hugetlb_lock);
1615 return NULL;
1616 } else {
1617 h->nr_huge_pages++;
1618 h->surplus_huge_pages++;
1619 }
1620 spin_unlock(&hugetlb_lock);
1621
1622 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1623
1624 spin_lock(&hugetlb_lock);
1625 if (page) {
1626 INIT_LIST_HEAD(&page->lru);
1627 r_nid = page_to_nid(page);
1628 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1629 set_hugetlb_cgroup(page, NULL);
1630 /*
1631 * We incremented the global counters already
1632 */
1633 h->nr_huge_pages_node[r_nid]++;
1634 h->surplus_huge_pages_node[r_nid]++;
1635 __count_vm_event(HTLB_BUDDY_PGALLOC);
1636 } else {
1637 h->nr_huge_pages--;
1638 h->surplus_huge_pages--;
1639 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1640 }
1641 spin_unlock(&hugetlb_lock);
1642
1643 return page;
1644}
1645
1646/*
1647 * Allocate a huge page from 'nid'. Note, 'nid' may be
1648 * NUMA_NO_NODE, which means that it may be allocated
1649 * anywhere.
1650 */
1651static
1652struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1653{
1654 unsigned long addr = -1;
1655
1656 return __alloc_buddy_huge_page(h, NULL, addr, nid);
1657}
1658
1659/*
1660 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1661 */
1662static
1663struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1664 struct vm_area_struct *vma, unsigned long addr)
1665{
1666 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1667}
1668
1669/*
1670 * This allocation function is useful in the context where vma is irrelevant.
1671 * E.g. soft-offlining uses this function because it only cares about the
1672 * physical address of the error page.
1673 */
1674struct page *alloc_huge_page_node(struct hstate *h, int nid)
1675{
1676 struct page *page = NULL;
1677
1678 spin_lock(&hugetlb_lock);
1679 if (h->free_huge_pages - h->resv_huge_pages > 0)
1680 page = dequeue_huge_page_node(h, nid);
1681 spin_unlock(&hugetlb_lock);
1682
1683 if (!page)
1684 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1685
1686 return page;
1687}
1688
1689/*
1690 * Increase the hugetlb pool such that it can accommodate a reservation
1691 * of size 'delta'.
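 *
 * For example, with resv_huge_pages == 2, free_huge_pages == 1 and
 * delta == 3, 'needed' starts at 4 and up to 4 surplus pages are
 * allocated from the buddy allocator.  If, after hugetlb_lock is
 * retaken, enough pages have been freed that only 2 of the new pages
 * are still needed, 2 are enqueued into the pool and the rest are
 * handed back to the buddy allocator.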
1692 */
1693static int gather_surplus_pages(struct hstate *h, int delta)
1694{
1695 struct list_head surplus_list;
1696 struct page *page, *tmp;
1697 int ret, i;
1698 int needed, allocated;
1699 bool alloc_ok = true;
1700
1701 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1702 if (needed <= 0) {
1703 h->resv_huge_pages += delta;
1704 return 0;
1705 }
1706
1707 allocated = 0;
1708 INIT_LIST_HEAD(&surplus_list);
1709
1710 ret = -ENOMEM;
1711retry:
1712 spin_unlock(&hugetlb_lock);
1713 for (i = 0; i < needed; i++) {
1714 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1715 if (!page) {
1716 alloc_ok = false;
1717 break;
1718 }
1719 list_add(&page->lru, &surplus_list);
1720 }
1721 allocated += i;
1722
1723 /*
1724 * After retaking hugetlb_lock, we need to recalculate 'needed'
1725 * because either resv_huge_pages or free_huge_pages may have changed.
1726 */
1727 spin_lock(&hugetlb_lock);
1728 needed = (h->resv_huge_pages + delta) -
1729 (h->free_huge_pages + allocated);
1730 if (needed > 0) {
1731 if (alloc_ok)
1732 goto retry;
1733 /*
1734 * We were not able to allocate enough pages to
1735 * satisfy the entire reservation so we free what
1736 * we've allocated so far.
1737 */
1738 goto free;
1739 }
1740 /*
1741 * The surplus_list now contains _at_least_ the number of extra pages
1742 * needed to accommodate the reservation. Add the appropriate number
1743 * of pages to the hugetlb pool and free the extras back to the buddy
1744 * allocator. Commit the entire reservation here to prevent another
1745 * process from stealing the pages as they are added to the pool but
1746 * before they are reserved.
1747 */
1748 needed += allocated;
1749 h->resv_huge_pages += delta;
1750 ret = 0;
1751
1752 /* Free the needed pages to the hugetlb pool */
1753 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1754 if ((--needed) < 0)
1755 break;
1756 /*
1757 * This page is now managed by the hugetlb allocator and has
1758 * no users -- drop the buddy allocator's reference.
1759 */
1760 put_page_testzero(page);
1761 VM_BUG_ON_PAGE(page_count(page), page);
1762 enqueue_huge_page(h, page);
1763 }
1764free:
1765 spin_unlock(&hugetlb_lock);
1766
1767 /* Free unnecessary surplus pages to the buddy allocator */
1768 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1769 put_page(page);
1770 spin_lock(&hugetlb_lock);
1771
1772 return ret;
1773}
1774
1775/*
1776 * This routine has two main purposes:
1777 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1778 * in unused_resv_pages. This corresponds to the prior adjustments made
1779 * to the associated reservation map.
1780 * 2) Free any unused surplus pages that may have been allocated to satisfy
1781 * the reservation. As many as unused_resv_pages may be freed.
1782 *
1783 * Called with hugetlb_lock held. However, the lock could be dropped (and
1784 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1785 * we must make sure nobody else can claim pages we are in the process of
1786 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1787 * number of huge pages we plan to free when dropping the lock.
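 *
 * For example, with unused_resv_pages == 5 and surplus_huge_pages == 3,
 * at most 3 surplus pages are freed (resv_huge_pages dropping by one
 * for each) and the final adjustment below uncommits the remaining
 * 2 pages of the reservation.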
1788 */
1789static void return_unused_surplus_pages(struct hstate *h,
1790 unsigned long unused_resv_pages)
1791{
1792 unsigned long nr_pages;
1793
1794 /* Cannot return gigantic pages currently */
1795 if (hstate_is_gigantic(h))
1796 goto out;
1797
1798 /*
1799 * Part (or even all) of the reservation could have been backed
1800 * by pre-allocated pages. Only free surplus pages.
1801 */
1802 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1803
1804 /*
1805 * We want to release as many surplus pages as possible, spread
1806 * evenly across all nodes with memory. Iterate across these nodes
1807 * until we can no longer free unreserved surplus pages. This occurs
1808 * when the nodes with surplus pages have no free pages.
1809	 * free_pool_huge_page() will balance the freed pages across the
1810 * on-line nodes with memory and will handle the hstate accounting.
1811 *
1812 * Note that we decrement resv_huge_pages as we free the pages. If
1813 * we drop the lock, resv_huge_pages will still be sufficiently large
1814 * to cover subsequent pages we may free.
1815 */
1816 while (nr_pages--) {
1817 h->resv_huge_pages--;
1818 unused_resv_pages--;
1819 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1820 goto out;
1821 cond_resched_lock(&hugetlb_lock);
1822 }
1823
1824out:
1825 /* Fully uncommit the reservation */
1826 h->resv_huge_pages -= unused_resv_pages;
1827}
1828
1829
1830/*
1831 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1832 * are used by the huge page allocation routines to manage reservations.
1833 *
1834 * vma_needs_reservation is called to determine if the huge page at addr
1835 * within the vma has an associated reservation. If a reservation is
1836 * needed, the value 1 is returned. The caller is then responsible for
1837 * managing the global reservation and subpool usage counts. After
1838 * the huge page has been allocated, vma_commit_reservation is called
1839 * to add the page to the reservation map. If the page allocation fails,
1840 * the reservation must be ended instead of committed. vma_end_reservation
1841 * is called in such cases.
1842 *
1843 * In the normal case, vma_commit_reservation returns the same value
1844 * as the preceding vma_needs_reservation call. The only time this
1845 * is not the case is if a reserve map was changed between calls. It
1846 * is the responsibility of the caller to notice the difference and
1847 * take appropriate action.
1848 *
1849 * vma_add_reservation is used in error paths where a reservation must
1850 * be restored when a newly allocated huge page must be freed. It is
1851 * to be called after calling vma_needs_reservation to determine if a
1852 * reservation exists.
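 *
 * A typical calling sequence, mirroring alloc_huge_page() below, is:
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	... allocate the huge page ...
 *	if (the allocation succeeded)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 *
 * with vma_add_reservation() reserved for error paths such as
 * restore_reserve_on_error() below.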
1853 */
1854enum vma_resv_mode {
1855 VMA_NEEDS_RESV,
1856 VMA_COMMIT_RESV,
1857 VMA_END_RESV,
1858 VMA_ADD_RESV,
1859};
1860static long __vma_reservation_common(struct hstate *h,
1861 struct vm_area_struct *vma, unsigned long addr,
1862 enum vma_resv_mode mode)
1863{
1864 struct resv_map *resv;
1865 pgoff_t idx;
1866 long ret;
1867
1868 resv = vma_resv_map(vma);
1869 if (!resv)
1870 return 1;
1871
1872 idx = vma_hugecache_offset(h, vma, addr);
1873 switch (mode) {
1874 case VMA_NEEDS_RESV:
1875 ret = region_chg(resv, idx, idx + 1);
1876 break;
1877 case VMA_COMMIT_RESV:
1878 ret = region_add(resv, idx, idx + 1);
1879 break;
1880 case VMA_END_RESV:
1881 region_abort(resv, idx, idx + 1);
1882 ret = 0;
1883 break;
1884 case VMA_ADD_RESV:
1885 if (vma->vm_flags & VM_MAYSHARE)
1886 ret = region_add(resv, idx, idx + 1);
1887 else {
1888 region_abort(resv, idx, idx + 1);
1889 ret = region_del(resv, idx, idx + 1);
1890 }
1891 break;
1892 default:
1893 BUG();
1894 }
1895
1896 if (vma->vm_flags & VM_MAYSHARE)
1897 return ret;
1898 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1899 /*
1900 * In most cases, reserves always exist for private mappings.
1901		 * However, a file associated with the mapping could have been
1902		 * hole punched or truncated after reserves were consumed,
1903		 * so a subsequent fault on such a range will not use reserves.
1904 * Subtle - The reserve map for private mappings has the
1905 * opposite meaning than that of shared mappings. If NO
1906 * entry is in the reserve map, it means a reservation exists.
1907 * If an entry exists in the reserve map, it means the
1908 * reservation has already been consumed. As a result, the
1909 * return value of this routine is the opposite of the
1910 * value returned from reserve map manipulation routines above.
1911 */
1912 if (ret)
1913 return 0;
1914 else
1915 return 1;
1916 }
1917 else
1918 return ret < 0 ? ret : 0;
1919}
1920
1921static long vma_needs_reservation(struct hstate *h,
1922 struct vm_area_struct *vma, unsigned long addr)
1923{
1924 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1925}
1926
1927static long vma_commit_reservation(struct hstate *h,
1928 struct vm_area_struct *vma, unsigned long addr)
1929{
1930 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1931}
1932
1933static void vma_end_reservation(struct hstate *h,
1934 struct vm_area_struct *vma, unsigned long addr)
1935{
1936 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1937}
1938
1939static long vma_add_reservation(struct hstate *h,
1940 struct vm_area_struct *vma, unsigned long addr)
1941{
1942 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1943}
1944
1945/*
1946 * This routine is called to restore a reservation on error paths. In the
1947 * specific error paths, a huge page was allocated (via alloc_huge_page)
1948 * and is about to be freed. If a reservation for the page existed,
1949 * alloc_huge_page would have consumed the reservation and set PagePrivate
1950 * in the newly allocated page. When the page is freed via free_huge_page,
1951 * the global reservation count will be incremented if PagePrivate is set.
1952 * However, free_huge_page can not adjust the reserve map. Adjust the
1953 * reserve map here to be consistent with global reserve count adjustments
1954 * to be made by free_huge_page.
1955 */
1956static void restore_reserve_on_error(struct hstate *h,
1957 struct vm_area_struct *vma, unsigned long address,
1958 struct page *page)
1959{
1960 if (unlikely(PagePrivate(page))) {
1961 long rc = vma_needs_reservation(h, vma, address);
1962
1963 if (unlikely(rc < 0)) {
1964 /*
1965 * Rare out of memory condition in reserve map
1966 * manipulation. Clear PagePrivate so that
1967 * global reserve count will not be incremented
1968 * by free_huge_page. This will make it appear
1969 * as though the reservation for this page was
1970 * consumed. This may prevent the task from
1971 * faulting in the page at a later time. This
1972 * is better than inconsistent global huge page
1973 * accounting of reserve counts.
1974 */
1975 ClearPagePrivate(page);
1976 } else if (rc) {
1977 rc = vma_add_reservation(h, vma, address);
1978 if (unlikely(rc < 0))
1979 /*
1980 * See above comment about rare out of
1981 * memory condition.
1982 */
1983 ClearPagePrivate(page);
1984 } else
1985 vma_end_reservation(h, vma, address);
1986 }
1987}
1988
1989struct page *alloc_huge_page(struct vm_area_struct *vma,
1990 unsigned long addr, int avoid_reserve)
1991{
1992 struct hugepage_subpool *spool = subpool_vma(vma);
1993 struct hstate *h = hstate_vma(vma);
1994 struct page *page;
1995 long map_chg, map_commit;
1996 long gbl_chg;
1997 int ret, idx;
1998 struct hugetlb_cgroup *h_cg;
1999
2000 idx = hstate_index(h);
2001 /*
2002 * Examine the region/reserve map to determine if the process
2003 * has a reservation for the page to be allocated. A return
2004 * code of zero indicates a reservation exists (no change).
2005 */
2006 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2007 if (map_chg < 0)
2008 return ERR_PTR(-ENOMEM);
2009
2010 /*
2011 * Processes that did not create the mapping will have no
2012 * reserves as indicated by the region/reserve map. Check
2013 * that the allocation will not exceed the subpool limit.
2014 * Allocations for MAP_NORESERVE mappings also need to be
2015 * checked against any subpool limit.
2016 */
2017 if (map_chg || avoid_reserve) {
2018 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2019 if (gbl_chg < 0) {
2020 vma_end_reservation(h, vma, addr);
2021 return ERR_PTR(-ENOSPC);
2022 }
2023
2024 /*
2025 * Even though there was no reservation in the region/reserve
2026 * map, there could be reservations associated with the
2027 * subpool that can be used. This would be indicated if the
2028 * return value of hugepage_subpool_get_pages() is zero.
2029 * However, if avoid_reserve is specified we still avoid even
2030 * the subpool reservations.
2031 */
2032 if (avoid_reserve)
2033 gbl_chg = 1;
2034 }
2035
2036 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2037 if (ret)
2038 goto out_subpool_put;
2039
2040 spin_lock(&hugetlb_lock);
2041 /*
2042	 * gbl_chg is passed to indicate whether or not a page must be taken
2043 * from the global free pool (global change). gbl_chg == 0 indicates
2044 * a reservation exists for the allocation.
2045 */
2046 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2047 if (!page) {
2048 spin_unlock(&hugetlb_lock);
2049 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
2050 if (!page)
2051 goto out_uncharge_cgroup;
2052 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2053 SetPagePrivate(page);
2054 h->resv_huge_pages--;
2055 }
2056 spin_lock(&hugetlb_lock);
2057 list_move(&page->lru, &h->hugepage_activelist);
2058 /* Fall through */
2059 }
2060 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2061 spin_unlock(&hugetlb_lock);
2062
2063 set_page_private(page, (unsigned long)spool);
2064
2065 map_commit = vma_commit_reservation(h, vma, addr);
2066 if (unlikely(map_chg > map_commit)) {
2067 /*
2068 * The page was added to the reservation map between
2069 * vma_needs_reservation and vma_commit_reservation.
2070 * This indicates a race with hugetlb_reserve_pages.
2071 * Adjust for the subpool count incremented above AND
2072 * in hugetlb_reserve_pages for the same page. Also,
2073 * the reservation count added in hugetlb_reserve_pages
2074 * no longer applies.
2075 */
2076 long rsv_adjust;
2077
2078 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2079 hugetlb_acct_memory(h, -rsv_adjust);
2080 }
2081 return page;
2082
2083out_uncharge_cgroup:
2084 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2085out_subpool_put:
2086 if (map_chg || avoid_reserve)
2087 hugepage_subpool_put_pages(spool, 1);
2088 vma_end_reservation(h, vma, addr);
2089 return ERR_PTR(-ENOSPC);
2090}
2091
2092/*
2093 * A wrapper around alloc_huge_page() which simply returns the page if the
2094 * allocation succeeds, otherwise NULL. This function is called from
2095 * new_vma_page(), where no error value is expected to be returned.
2096 */
2097struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2098 unsigned long addr, int avoid_reserve)
2099{
2100 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2101 if (IS_ERR(page))
2102 page = NULL;
2103 return page;
2104}
2105
2106int __weak alloc_bootmem_huge_page(struct hstate *h)
2107{
2108 struct huge_bootmem_page *m;
2109 int nr_nodes, node;
2110
2111 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2112 void *addr;
2113
2114 addr = memblock_virt_alloc_try_nid_nopanic(
2115 huge_page_size(h), huge_page_size(h),
2116 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2117 if (addr) {
2118 /*
2119 * Use the beginning of the huge page to store the
2120 * huge_bootmem_page struct (until gather_bootmem
2121 * puts them into the mem_map).
2122 */
2123 m = addr;
2124 goto found;
2125 }
2126 }
2127 return 0;
2128
2129found:
2130 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2131 /* Put them into a private list first because mem_map is not up yet */
2132 list_add(&m->list, &huge_boot_pages);
2133 m->hstate = h;
2134 return 1;
2135}
2136
2137static void __init prep_compound_huge_page(struct page *page,
2138 unsigned int order)
2139{
2140 if (unlikely(order > (MAX_ORDER - 1)))
2141 prep_compound_gigantic_page(page, order);
2142 else
2143 prep_compound_page(page, order);
2144}
2145
2146/* Put bootmem huge pages into the standard lists after mem_map is up */
2147static void __init gather_bootmem_prealloc(void)
2148{
2149 struct huge_bootmem_page *m;
2150
2151 list_for_each_entry(m, &huge_boot_pages, list) {
2152 struct hstate *h = m->hstate;
2153 struct page *page;
2154
2155#ifdef CONFIG_HIGHMEM
2156 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2157 memblock_free_late(__pa(m),
2158 sizeof(struct huge_bootmem_page));
2159#else
2160 page = virt_to_page(m);
2161#endif
2162 WARN_ON(page_count(page) != 1);
2163 prep_compound_huge_page(page, h->order);
2164 WARN_ON(PageReserved(page));
2165 prep_new_huge_page(h, page, page_to_nid(page));
2166 /*
2167 * If we had gigantic hugepages allocated at boot time, we need
2168 * to restore the 'stolen' pages to totalram_pages in order to
2169		 * fix confusing memory reports from free(1) and other
2170		 * side effects, like CommitLimit going negative.
2171 */
2172 if (hstate_is_gigantic(h))
2173 adjust_managed_page_count(page, 1 << h->order);
2174 }
2175}
2176
2177static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2178{
2179 unsigned long i;
2180
2181 for (i = 0; i < h->max_huge_pages; ++i) {
2182 if (hstate_is_gigantic(h)) {
2183 if (!alloc_bootmem_huge_page(h))
2184 break;
2185 } else if (!alloc_fresh_huge_page(h,
2186 &node_states[N_MEMORY]))
2187 break;
2188 }
2189 h->max_huge_pages = i;
2190}
2191
2192static void __init hugetlb_init_hstates(void)
2193{
2194 struct hstate *h;
2195
2196 for_each_hstate(h) {
2197 if (minimum_order > huge_page_order(h))
2198 minimum_order = huge_page_order(h);
2199
2200 /* oversize hugepages were init'ed in early boot */
2201 if (!hstate_is_gigantic(h))
2202 hugetlb_hstate_alloc_pages(h);
2203 }
2204 VM_BUG_ON(minimum_order == UINT_MAX);
2205}
2206
2207static char * __init memfmt(char *buf, unsigned long n)
2208{
2209 if (n >= (1UL << 30))
2210 sprintf(buf, "%lu GB", n >> 30);
2211 else if (n >= (1UL << 20))
2212 sprintf(buf, "%lu MB", n >> 20);
2213 else
2214 sprintf(buf, "%lu KB", n >> 10);
2215 return buf;
2216}
2217
2218static void __init report_hugepages(void)
2219{
2220 struct hstate *h;
2221
2222 for_each_hstate(h) {
2223 char buf[32];
2224 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2225 memfmt(buf, huge_page_size(h)),
2226 h->free_huge_pages);
2227 }
2228}
2229
2230#ifdef CONFIG_HIGHMEM
2231static void try_to_free_low(struct hstate *h, unsigned long count,
2232 nodemask_t *nodes_allowed)
2233{
2234 int i;
2235
2236 if (hstate_is_gigantic(h))
2237 return;
2238
2239 for_each_node_mask(i, *nodes_allowed) {
2240 struct page *page, *next;
2241 struct list_head *freel = &h->hugepage_freelists[i];
2242 list_for_each_entry_safe(page, next, freel, lru) {
2243 if (count >= h->nr_huge_pages)
2244 return;
2245 if (PageHighMem(page))
2246 continue;
2247 list_del(&page->lru);
2248 update_and_free_page(h, page);
2249 h->free_huge_pages--;
2250 h->free_huge_pages_node[page_to_nid(page)]--;
2251 }
2252 }
2253}
2254#else
2255static inline void try_to_free_low(struct hstate *h, unsigned long count,
2256 nodemask_t *nodes_allowed)
2257{
2258}
2259#endif
2260
2261/*
2262 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2263 * balanced by operating on them in a round-robin fashion.
2264 * Returns 1 if an adjustment was made.
2265 */
2266static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2267 int delta)
2268{
2269 int nr_nodes, node;
2270
2271 VM_BUG_ON(delta != -1 && delta != 1);
2272
2273 if (delta < 0) {
2274 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2275 if (h->surplus_huge_pages_node[node])
2276 goto found;
2277 }
2278 } else {
2279 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2280 if (h->surplus_huge_pages_node[node] <
2281 h->nr_huge_pages_node[node])
2282 goto found;
2283 }
2284 }
2285 return 0;
2286
2287found:
2288 h->surplus_huge_pages += delta;
2289 h->surplus_huge_pages_node[node] += delta;
2290 return 1;
2291}
2292
2293#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2294static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2295 nodemask_t *nodes_allowed)
2296{
2297 unsigned long min_count, ret;
2298
2299 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2300 return h->max_huge_pages;
2301
2302 /*
2303 * Increase the pool size
2304 * First take pages out of surplus state. Then make up the
2305 * remaining difference by allocating fresh huge pages.
2306 *
2307 * We might race with __alloc_buddy_huge_page() here and be unable
2308 * to convert a surplus huge page to a normal huge page. That is
2309 * not critical, though, it just means the overall size of the
2310 * pool might be one hugepage larger than it needs to be, but
2311 * within all the constraints specified by the sysctls.
2312 */
2313 spin_lock(&hugetlb_lock);
2314 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2315 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2316 break;
2317 }
2318
2319 while (count > persistent_huge_pages(h)) {
2320 /*
2321 * If this allocation races such that we no longer need the
2322 * page, free_huge_page will handle it by freeing the page
2323 * and reducing the surplus.
2324 */
2325 spin_unlock(&hugetlb_lock);
2326
2327 /* yield cpu to avoid soft lockup */
2328 cond_resched();
2329
2330 if (hstate_is_gigantic(h))
2331 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2332 else
2333 ret = alloc_fresh_huge_page(h, nodes_allowed);
2334 spin_lock(&hugetlb_lock);
2335 if (!ret)
2336 goto out;
2337
2338 /* Bail for signals. Probably ctrl-c from user */
2339 if (signal_pending(current))
2340 goto out;
2341 }
2342
2343 /*
2344 * Decrease the pool size
2345 * First return free pages to the buddy allocator (being careful
2346 * to keep enough around to satisfy reservations). Then place
2347 * pages into surplus state as needed so the pool will shrink
2348 * to the desired size as pages become free.
2349 *
2350 * By placing pages into the surplus state independent of the
2351 * overcommit value, we are allowing the surplus pool size to
2352 * exceed overcommit. There are few sane options here. Since
2353 * __alloc_buddy_huge_page() is checking the global counter,
2354 * though, we'll note that we're not allowed to exceed surplus
2355 * and won't grow the pool anywhere else. Not until one of the
2356	 * sysctls is changed, or the surplus pages go out of use.
2357 */
2358 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2359 min_count = max(count, min_count);
2360 try_to_free_low(h, min_count, nodes_allowed);
2361 while (min_count < persistent_huge_pages(h)) {
2362 if (!free_pool_huge_page(h, nodes_allowed, 0))
2363 break;
2364 cond_resched_lock(&hugetlb_lock);
2365 }
2366 while (count < persistent_huge_pages(h)) {
2367 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2368 break;
2369 }
2370out:
2371 ret = persistent_huge_pages(h);
2372 spin_unlock(&hugetlb_lock);
2373 return ret;
2374}
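/*
 * set_max_huge_pages() is ultimately driven from userspace. A minimal usage
 * sketch (paths shown for the common 2 MB hstate; adjust for other sizes):
 *
 *	# grow/shrink the persistent pool of one hstate via sysfs
 *	echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 *	# or adjust the default hstate through the legacy sysctl
 *	sysctl vm.nr_hugepages=1024
 *
 * Both paths end up in __nr_hugepages_store_common() below, which calls
 * set_max_huge_pages() with an appropriate nodes_allowed mask.
 */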
2375
2376#define HSTATE_ATTR_RO(_name) \
2377 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2378
2379#define HSTATE_ATTR(_name) \
2380 static struct kobj_attribute _name##_attr = \
2381 __ATTR(_name, 0644, _name##_show, _name##_store)
2382
2383static struct kobject *hugepages_kobj;
2384static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2385
2386static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2387
2388static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2389{
2390 int i;
2391
2392 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2393 if (hstate_kobjs[i] == kobj) {
2394 if (nidp)
2395 *nidp = NUMA_NO_NODE;
2396 return &hstates[i];
2397 }
2398
2399 return kobj_to_node_hstate(kobj, nidp);
2400}
2401
2402static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2403 struct kobj_attribute *attr, char *buf)
2404{
2405 struct hstate *h;
2406 unsigned long nr_huge_pages;
2407 int nid;
2408
2409 h = kobj_to_hstate(kobj, &nid);
2410 if (nid == NUMA_NO_NODE)
2411 nr_huge_pages = h->nr_huge_pages;
2412 else
2413 nr_huge_pages = h->nr_huge_pages_node[nid];
2414
2415 return sprintf(buf, "%lu\n", nr_huge_pages);
2416}
2417
2418static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2419 struct hstate *h, int nid,
2420 unsigned long count, size_t len)
2421{
2422 int err;
2423 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2424
2425 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2426 err = -EINVAL;
2427 goto out;
2428 }
2429
2430 if (nid == NUMA_NO_NODE) {
2431 /*
2432 * global hstate attribute
2433 */
2434 if (!(obey_mempolicy &&
2435 init_nodemask_of_mempolicy(nodes_allowed))) {
2436 NODEMASK_FREE(nodes_allowed);
2437 nodes_allowed = &node_states[N_MEMORY];
2438 }
2439 } else if (nodes_allowed) {
2440 /*
2441 * per node hstate attribute: adjust count to global,
2442 * but restrict alloc/free to the specified node.
2443 */
2444 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2445 init_nodemask_of_node(nodes_allowed, nid);
2446 } else
2447 nodes_allowed = &node_states[N_MEMORY];
2448
2449 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2450
2451 if (nodes_allowed != &node_states[N_MEMORY])
2452 NODEMASK_FREE(nodes_allowed);
2453
2454 return len;
2455out:
2456 NODEMASK_FREE(nodes_allowed);
2457 return err;
2458}
2459
2460static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2461 struct kobject *kobj, const char *buf,
2462 size_t len)
2463{
2464 struct hstate *h;
2465 unsigned long count;
2466 int nid;
2467 int err;
2468
2469 err = kstrtoul(buf, 10, &count);
2470 if (err)
2471 return err;
2472
2473 h = kobj_to_hstate(kobj, &nid);
2474 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2475}
2476
2477static ssize_t nr_hugepages_show(struct kobject *kobj,
2478 struct kobj_attribute *attr, char *buf)
2479{
2480 return nr_hugepages_show_common(kobj, attr, buf);
2481}
2482
2483static ssize_t nr_hugepages_store(struct kobject *kobj,
2484 struct kobj_attribute *attr, const char *buf, size_t len)
2485{
2486 return nr_hugepages_store_common(false, kobj, buf, len);
2487}
2488HSTATE_ATTR(nr_hugepages);
2489
2490#ifdef CONFIG_NUMA
2491
2492/*
2493 * hstate attribute for optionally mempolicy-based constraint on persistent
2494 * huge page alloc/free.
2495 */
2496static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2497 struct kobj_attribute *attr, char *buf)
2498{
2499 return nr_hugepages_show_common(kobj, attr, buf);
2500}
2501
2502static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2503 struct kobj_attribute *attr, const char *buf, size_t len)
2504{
2505 return nr_hugepages_store_common(true, kobj, buf, len);
2506}
2507HSTATE_ATTR(nr_hugepages_mempolicy);
2508#endif
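/*
 * Illustrative use of nr_hugepages_mempolicy (assuming a NUMA system and the
 * numactl utility): the write is constrained to the nodes of the caller's
 * mempolicy rather than spread over all memory nodes, e.g.
 *
 *	numactl -m 0 sh -c \
 *	  'echo 16 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 *
 * adjusts the pool toward 16 pages, allocating or freeing only on node 0.
 */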
2509
2510
2511static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2512 struct kobj_attribute *attr, char *buf)
2513{
2514 struct hstate *h = kobj_to_hstate(kobj, NULL);
2515 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2516}
2517
2518static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2519 struct kobj_attribute *attr, const char *buf, size_t count)
2520{
2521 int err;
2522 unsigned long input;
2523 struct hstate *h = kobj_to_hstate(kobj, NULL);
2524
2525 if (hstate_is_gigantic(h))
2526 return -EINVAL;
2527
2528 err = kstrtoul(buf, 10, &input);
2529 if (err)
2530 return err;
2531
2532 spin_lock(&hugetlb_lock);
2533 h->nr_overcommit_huge_pages = input;
2534 spin_unlock(&hugetlb_lock);
2535
2536 return count;
2537}
2538HSTATE_ATTR(nr_overcommit_hugepages);
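/*
 * Example (illustrative): allow up to 64 surplus 2 MB pages to be allocated
 * from the buddy allocator on demand, on top of the persistent pool:
 *
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *
 * The equivalent sysctl for the default hstate is vm.nr_overcommit_hugepages,
 * handled by hugetlb_overcommit_handler() further down.
 */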
2539
2540static ssize_t free_hugepages_show(struct kobject *kobj,
2541 struct kobj_attribute *attr, char *buf)
2542{
2543 struct hstate *h;
2544 unsigned long free_huge_pages;
2545 int nid;
2546
2547 h = kobj_to_hstate(kobj, &nid);
2548 if (nid == NUMA_NO_NODE)
2549 free_huge_pages = h->free_huge_pages;
2550 else
2551 free_huge_pages = h->free_huge_pages_node[nid];
2552
2553 return sprintf(buf, "%lu\n", free_huge_pages);
2554}
2555HSTATE_ATTR_RO(free_hugepages);
2556
2557static ssize_t resv_hugepages_show(struct kobject *kobj,
2558 struct kobj_attribute *attr, char *buf)
2559{
2560 struct hstate *h = kobj_to_hstate(kobj, NULL);
2561 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2562}
2563HSTATE_ATTR_RO(resv_hugepages);
2564
2565static ssize_t surplus_hugepages_show(struct kobject *kobj,
2566 struct kobj_attribute *attr, char *buf)
2567{
2568 struct hstate *h;
2569 unsigned long surplus_huge_pages;
2570 int nid;
2571
2572 h = kobj_to_hstate(kobj, &nid);
2573 if (nid == NUMA_NO_NODE)
2574 surplus_huge_pages = h->surplus_huge_pages;
2575 else
2576 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2577
2578 return sprintf(buf, "%lu\n", surplus_huge_pages);
2579}
2580HSTATE_ATTR_RO(surplus_hugepages);
2581
2582static struct attribute *hstate_attrs[] = {
2583 &nr_hugepages_attr.attr,
2584 &nr_overcommit_hugepages_attr.attr,
2585 &free_hugepages_attr.attr,
2586 &resv_hugepages_attr.attr,
2587 &surplus_hugepages_attr.attr,
2588#ifdef CONFIG_NUMA
2589 &nr_hugepages_mempolicy_attr.attr,
2590#endif
2591 NULL,
2592};
2593
2594static struct attribute_group hstate_attr_group = {
2595 .attrs = hstate_attrs,
2596};
2597
2598static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2599 struct kobject **hstate_kobjs,
2600 struct attribute_group *hstate_attr_group)
2601{
2602 int retval;
2603 int hi = hstate_index(h);
2604
2605 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2606 if (!hstate_kobjs[hi])
2607 return -ENOMEM;
2608
2609 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2610 if (retval)
2611 kobject_put(hstate_kobjs[hi]);
2612
2613 return retval;
2614}
2615
2616static void __init hugetlb_sysfs_init(void)
2617{
2618 struct hstate *h;
2619 int err;
2620
2621 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2622 if (!hugepages_kobj)
2623 return;
2624
2625 for_each_hstate(h) {
2626 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2627 hstate_kobjs, &hstate_attr_group);
2628 if (err)
2629			pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2630 }
2631}
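/*
 * The resulting sysfs layout, for illustration (one directory per hstate,
 * directory names generated by hugetlb_add_hstate()):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/
 *		nr_hugepages
 *		nr_hugepages_mempolicy		(CONFIG_NUMA only)
 *		nr_overcommit_hugepages
 *		free_hugepages
 *		resv_hugepages
 *		surplus_hugepages
 */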
2632
2633#ifdef CONFIG_NUMA
2634
2635/*
2636 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2637 * with node devices in node_devices[] using a parallel array. The array
2638 * index of a node device or _hstate == node id.
2639 * This is here to avoid any static dependency of the node device driver, in
2640 * the base kernel, on the hugetlb module.
2641 */
2642struct node_hstate {
2643 struct kobject *hugepages_kobj;
2644 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2645};
2646static struct node_hstate node_hstates[MAX_NUMNODES];
2647
2648/*
2649 * A subset of global hstate attributes for node devices
2650 */
2651static struct attribute *per_node_hstate_attrs[] = {
2652 &nr_hugepages_attr.attr,
2653 &free_hugepages_attr.attr,
2654 &surplus_hugepages_attr.attr,
2655 NULL,
2656};
2657
2658static struct attribute_group per_node_hstate_attr_group = {
2659 .attrs = per_node_hstate_attrs,
2660};
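/*
 * For illustration: the per-node subset above appears underneath each node
 * device, e.g.
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *
 * Writing to the per-node nr_hugepages adjusts the global pool but restricts
 * the allocation/freeing to that node (see __nr_hugepages_store_common()).
 */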
2661
2662/*
2663 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2664 * Returns node id via non-NULL nidp.
2665 */
2666static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2667{
2668 int nid;
2669
2670 for (nid = 0; nid < nr_node_ids; nid++) {
2671 struct node_hstate *nhs = &node_hstates[nid];
2672 int i;
2673 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2674 if (nhs->hstate_kobjs[i] == kobj) {
2675 if (nidp)
2676 *nidp = nid;
2677 return &hstates[i];
2678 }
2679 }
2680
2681 BUG();
2682 return NULL;
2683}
2684
2685/*
2686 * Unregister hstate attributes from a single node device.
2687 * No-op if no hstate attributes attached.
2688 */
2689static void hugetlb_unregister_node(struct node *node)
2690{
2691 struct hstate *h;
2692 struct node_hstate *nhs = &node_hstates[node->dev.id];
2693
2694 if (!nhs->hugepages_kobj)
2695 return; /* no hstate attributes */
2696
2697 for_each_hstate(h) {
2698 int idx = hstate_index(h);
2699 if (nhs->hstate_kobjs[idx]) {
2700 kobject_put(nhs->hstate_kobjs[idx]);
2701 nhs->hstate_kobjs[idx] = NULL;
2702 }
2703 }
2704
2705 kobject_put(nhs->hugepages_kobj);
2706 nhs->hugepages_kobj = NULL;
2707}
2708
2709
2710/*
2711 * Register hstate attributes for a single node device.
2712 * No-op if attributes already registered.
2713 */
2714static void hugetlb_register_node(struct node *node)
2715{
2716 struct hstate *h;
2717 struct node_hstate *nhs = &node_hstates[node->dev.id];
2718 int err;
2719
2720 if (nhs->hugepages_kobj)
2721 return; /* already allocated */
2722
2723 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2724 &node->dev.kobj);
2725 if (!nhs->hugepages_kobj)
2726 return;
2727
2728 for_each_hstate(h) {
2729 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2730 nhs->hstate_kobjs,
2731 &per_node_hstate_attr_group);
2732 if (err) {
2733 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2734 h->name, node->dev.id);
2735 hugetlb_unregister_node(node);
2736 break;
2737 }
2738 }
2739}
2740
2741/*
2742 * hugetlb init time: register hstate attributes for all registered node
2743 * devices of nodes that have memory. All on-line nodes should have
2744 * registered their associated device by this time.
2745 */
2746static void __init hugetlb_register_all_nodes(void)
2747{
2748 int nid;
2749
2750 for_each_node_state(nid, N_MEMORY) {
2751 struct node *node = node_devices[nid];
2752 if (node->dev.id == nid)
2753 hugetlb_register_node(node);
2754 }
2755
2756 /*
2757 * Let the node device driver know we're here so it can
2758 * [un]register hstate attributes on node hotplug.
2759 */
2760 register_hugetlbfs_with_node(hugetlb_register_node,
2761 hugetlb_unregister_node);
2762}
2763#else /* !CONFIG_NUMA */
2764
2765static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2766{
2767 BUG();
2768 if (nidp)
2769 *nidp = -1;
2770 return NULL;
2771}
2772
2773static void hugetlb_register_all_nodes(void) { }
2774
2775#endif
2776
2777static int __init hugetlb_init(void)
2778{
2779 int i;
2780
2781 if (!hugepages_supported())
2782 return 0;
2783
2784 if (!size_to_hstate(default_hstate_size)) {
2785 default_hstate_size = HPAGE_SIZE;
2786 if (!size_to_hstate(default_hstate_size))
2787 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2788 }
2789 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2790 if (default_hstate_max_huge_pages) {
2791 if (!default_hstate.max_huge_pages)
2792 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2793 }
2794
2795 hugetlb_init_hstates();
2796 gather_bootmem_prealloc();
2797 report_hugepages();
2798
2799 hugetlb_sysfs_init();
2800 hugetlb_register_all_nodes();
2801 hugetlb_cgroup_file_init();
2802
2803#ifdef CONFIG_SMP
2804 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2805#else
2806 num_fault_mutexes = 1;
2807#endif
2808 hugetlb_fault_mutex_table =
2809 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2810 BUG_ON(!hugetlb_fault_mutex_table);
2811
2812 for (i = 0; i < num_fault_mutexes; i++)
2813 mutex_init(&hugetlb_fault_mutex_table[i]);
2814 return 0;
2815}
2816subsys_initcall(hugetlb_init);
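/*
 * Sizing note (illustrative arithmetic): on SMP the fault mutex table holds
 * 8 mutexes per possible CPU, rounded up to a power of two so that
 * hugetlb_fault_mutex_hash() can mask instead of dividing. For example, a
 * 6-CPU machine gets roundup_pow_of_two(8 * 6) = 64 mutexes.
 */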
2817
2818/* Should be called on processing a hugepagesz=... option */
2819void __init hugetlb_bad_size(void)
2820{
2821 parsed_valid_hugepagesz = false;
2822}
2823
2824void __init hugetlb_add_hstate(unsigned int order)
2825{
2826 struct hstate *h;
2827 unsigned long i;
2828
2829 if (size_to_hstate(PAGE_SIZE << order)) {
2830 pr_warn("hugepagesz= specified twice, ignoring\n");
2831 return;
2832 }
2833 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2834 BUG_ON(order == 0);
2835 h = &hstates[hugetlb_max_hstate++];
2836 h->order = order;
2837 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2838 h->nr_huge_pages = 0;
2839 h->free_huge_pages = 0;
2840 for (i = 0; i < MAX_NUMNODES; ++i)
2841 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2842 INIT_LIST_HEAD(&h->hugepage_activelist);
2843 h->next_nid_to_alloc = first_memory_node;
2844 h->next_nid_to_free = first_memory_node;
2845 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2846 huge_page_size(h)/1024);
2847
2848 parsed_hstate = h;
2849}
2850
2851static int __init hugetlb_nrpages_setup(char *s)
2852{
2853 unsigned long *mhp;
2854 static unsigned long *last_mhp;
2855
2856 if (!parsed_valid_hugepagesz) {
2857 pr_warn("hugepages = %s preceded by "
2858 "an unsupported hugepagesz, ignoring\n", s);
2859 parsed_valid_hugepagesz = true;
2860 return 1;
2861 }
2862 /*
2863 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2864 * so this hugepages= parameter goes to the "default hstate".
2865 */
2866 else if (!hugetlb_max_hstate)
2867 mhp = &default_hstate_max_huge_pages;
2868 else
2869 mhp = &parsed_hstate->max_huge_pages;
2870
2871 if (mhp == last_mhp) {
2872 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2873 return 1;
2874 }
2875
2876 if (sscanf(s, "%lu", mhp) <= 0)
2877 *mhp = 0;
2878
2879 /*
2880 * Global state is always initialized later in hugetlb_init.
2881 * But we need to allocate >= MAX_ORDER hstates here early to still
2882 * use the bootmem allocator.
2883 */
2884 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2885 hugetlb_hstate_alloc_pages(parsed_hstate);
2886
2887 last_mhp = mhp;
2888
2889 return 1;
2890}
2891__setup("hugepages=", hugetlb_nrpages_setup);
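/*
 * Boot command line examples (illustrative; see also
 * Documentation/vm/hugetlbpage.txt):
 *
 *	hugepages=512
 *		512 pages of the default size for the default hstate.
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=256
 *		hugepages= applies to the most recently parsed hugepagesz=,
 *		so this reserves two 1 GB pages and 256 2 MB pages.
 */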
2892
2893static int __init hugetlb_default_setup(char *s)
2894{
2895 default_hstate_size = memparse(s, &s);
2896 return 1;
2897}
2898__setup("default_hugepagesz=", hugetlb_default_setup);
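/*
 * Example (illustrative): make 1 GB pages the default hstate, so that
 * vm.nr_hugepages, /proc/meminfo and hugetlbfs mount-time defaults refer to
 * them:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=2
 */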
2899
2900static unsigned int cpuset_mems_nr(unsigned int *array)
2901{
2902 int node;
2903 unsigned int nr = 0;
2904
2905 for_each_node_mask(node, cpuset_current_mems_allowed)
2906 nr += array[node];
2907
2908 return nr;
2909}
2910
2911#ifdef CONFIG_SYSCTL
2912static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2913 struct ctl_table *table, int write,
2914 void __user *buffer, size_t *length, loff_t *ppos)
2915{
2916 struct hstate *h = &default_hstate;
2917 unsigned long tmp = h->max_huge_pages;
2918 int ret;
2919
2920 if (!hugepages_supported())
2921 return -EOPNOTSUPP;
2922
2923 table->data = &tmp;
2924 table->maxlen = sizeof(unsigned long);
2925 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2926 if (ret)
2927 goto out;
2928
2929 if (write)
2930 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2931 NUMA_NO_NODE, tmp, *length);
2932out:
2933 return ret;
2934}
2935
2936int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2937 void __user *buffer, size_t *length, loff_t *ppos)
2938{
2939
2940 return hugetlb_sysctl_handler_common(false, table, write,
2941 buffer, length, ppos);
2942}
2943
2944#ifdef CONFIG_NUMA
2945int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2946 void __user *buffer, size_t *length, loff_t *ppos)
2947{
2948 return hugetlb_sysctl_handler_common(true, table, write,
2949 buffer, length, ppos);
2950}
2951#endif /* CONFIG_NUMA */
2952
2953int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2954 void __user *buffer,
2955 size_t *length, loff_t *ppos)
2956{
2957 struct hstate *h = &default_hstate;
2958 unsigned long tmp;
2959 int ret;
2960
2961 if (!hugepages_supported())
2962 return -EOPNOTSUPP;
2963
2964 tmp = h->nr_overcommit_huge_pages;
2965
2966 if (write && hstate_is_gigantic(h))
2967 return -EINVAL;
2968
2969 table->data = &tmp;
2970 table->maxlen = sizeof(unsigned long);
2971 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2972 if (ret)
2973 goto out;
2974
2975 if (write) {
2976 spin_lock(&hugetlb_lock);
2977 h->nr_overcommit_huge_pages = tmp;
2978 spin_unlock(&hugetlb_lock);
2979 }
2980out:
2981 return ret;
2982}
2983
2984#endif /* CONFIG_SYSCTL */
2985
2986void hugetlb_report_meminfo(struct seq_file *m)
2987{
2988 struct hstate *h = &default_hstate;
2989 if (!hugepages_supported())
2990 return;
2991 seq_printf(m,
2992 "HugePages_Total: %5lu\n"
2993 "HugePages_Free: %5lu\n"
2994 "HugePages_Rsvd: %5lu\n"
2995 "HugePages_Surp: %5lu\n"
2996 "Hugepagesize: %8lu kB\n",
2997 h->nr_huge_pages,
2998 h->free_huge_pages,
2999 h->resv_huge_pages,
3000 h->surplus_huge_pages,
3001 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3002}
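/*
 * Sample /proc/meminfo output produced by the above (values are illustrative
 * only):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      512
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */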
3003
3004int hugetlb_report_node_meminfo(int nid, char *buf)
3005{
3006 struct hstate *h = &default_hstate;
3007 if (!hugepages_supported())
3008 return 0;
3009 return sprintf(buf,
3010 "Node %d HugePages_Total: %5u\n"
3011 "Node %d HugePages_Free: %5u\n"
3012 "Node %d HugePages_Surp: %5u\n",
3013 nid, h->nr_huge_pages_node[nid],
3014 nid, h->free_huge_pages_node[nid],
3015 nid, h->surplus_huge_pages_node[nid]);
3016}
3017
3018void hugetlb_show_meminfo(void)
3019{
3020 struct hstate *h;
3021 int nid;
3022
3023 if (!hugepages_supported())
3024 return;
3025
3026 for_each_node_state(nid, N_MEMORY)
3027 for_each_hstate(h)
3028 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3029 nid,
3030 h->nr_huge_pages_node[nid],
3031 h->free_huge_pages_node[nid],
3032 h->surplus_huge_pages_node[nid],
3033 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3034}
3035
3036void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3037{
3038 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3039 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3040}
3041
3042/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3043unsigned long hugetlb_total_pages(void)
3044{
3045 struct hstate *h;
3046 unsigned long nr_total_pages = 0;
3047
3048 for_each_hstate(h)
3049 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3050 return nr_total_pages;
3051}
3052
3053static int hugetlb_acct_memory(struct hstate *h, long delta)
3054{
3055 int ret = -ENOMEM;
3056
3057 spin_lock(&hugetlb_lock);
3058 /*
3059 * When cpuset is configured, it breaks the strict hugetlb page
3060 * reservation as the accounting is done on a global variable. Such
3061 * reservation is completely rubbish in the presence of cpuset because
3062 * the reservation is not checked against page availability for the
3063	 * current cpuset. An application can still be OOM'ed by the kernel if
3064	 * there are not enough free huge pages in the cpuset that the task is in.
3065	 * Attempting to enforce strict accounting with cpuset is almost
3066	 * impossible (or too ugly) because cpusets are too fluid: tasks and
3067	 * memory nodes can be dynamically moved between cpusets.
3068 *
3069 * The change of semantics for shared hugetlb mapping with cpuset is
3070 * undesirable. However, in order to preserve some of the semantics,
3071	 * we fall back to checking against the current free page availability
3072	 * as a best effort, hopefully minimizing the impact of the semantic
3073	 * changes that cpuset introduces.
3074 */
3075 if (delta > 0) {
3076 if (gather_surplus_pages(h, delta) < 0)
3077 goto out;
3078
3079 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3080 return_unused_surplus_pages(h, delta);
3081 goto out;
3082 }
3083 }
3084
3085 ret = 0;
3086 if (delta < 0)
3087 return_unused_surplus_pages(h, (unsigned long) -delta);
3088
3089out:
3090 spin_unlock(&hugetlb_lock);
3091 return ret;
3092}
3093
3094static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3095{
3096 struct resv_map *resv = vma_resv_map(vma);
3097
3098 /*
3099	 * This new VMA should share its sibling's reservation map if present.
3100 * The VMA will only ever have a valid reservation map pointer where
3101 * it is being copied for another still existing VMA. As that VMA
3102 * has a reference to the reservation map it cannot disappear until
3103 * after this open call completes. It is therefore safe to take a
3104 * new reference here without additional locking.
3105 */
3106 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3107 kref_get(&resv->refs);
3108}
3109
3110static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3111{
3112 struct hstate *h = hstate_vma(vma);
3113 struct resv_map *resv = vma_resv_map(vma);
3114 struct hugepage_subpool *spool = subpool_vma(vma);
3115 unsigned long reserve, start, end;
3116 long gbl_reserve;
3117
3118 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3119 return;
3120
3121 start = vma_hugecache_offset(h, vma, vma->vm_start);
3122 end = vma_hugecache_offset(h, vma, vma->vm_end);
3123
3124 reserve = (end - start) - region_count(resv, start, end);
3125
3126 kref_put(&resv->refs, resv_map_release);
3127
3128 if (reserve) {
3129 /*
3130 * Decrement reserve counts. The global reserve count may be
3131 * adjusted if the subpool has a minimum size.
3132 */
3133 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3134 hugetlb_acct_memory(h, -gbl_reserve);
3135 }
3136}
3137
3138/*
3139 * We cannot handle pagefaults against hugetlb pages at all. They cause
3140 * handle_mm_fault() to try to instantiate regular-sized pages in the
3141 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3142 * this far.
3143 */
3144static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3145{
3146 BUG();
3147 return 0;
3148}
3149
3150const struct vm_operations_struct hugetlb_vm_ops = {
3151 .fault = hugetlb_vm_op_fault,
3152 .open = hugetlb_vm_op_open,
3153 .close = hugetlb_vm_op_close,
3154};
3155
3156static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3157 int writable)
3158{
3159 pte_t entry;
3160
3161 if (writable) {
3162 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3163 vma->vm_page_prot)));
3164 } else {
3165 entry = huge_pte_wrprotect(mk_huge_pte(page,
3166 vma->vm_page_prot));
3167 }
3168 entry = pte_mkyoung(entry);
3169 entry = pte_mkhuge(entry);
3170 entry = arch_make_huge_pte(entry, vma, page, writable);
3171
3172 return entry;
3173}
3174
3175static void set_huge_ptep_writable(struct vm_area_struct *vma,
3176 unsigned long address, pte_t *ptep)
3177{
3178 pte_t entry;
3179
3180 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3181 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3182 update_mmu_cache(vma, address, ptep);
3183}
3184
3185static int is_hugetlb_entry_migration(pte_t pte)
3186{
3187 swp_entry_t swp;
3188
3189 if (huge_pte_none(pte) || pte_present(pte))
3190 return 0;
3191 swp = pte_to_swp_entry(pte);
3192 if (non_swap_entry(swp) && is_migration_entry(swp))
3193 return 1;
3194 else
3195 return 0;
3196}
3197
3198static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3199{
3200 swp_entry_t swp;
3201
3202 if (huge_pte_none(pte) || pte_present(pte))
3203 return 0;
3204 swp = pte_to_swp_entry(pte);
3205 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3206 return 1;
3207 else
3208 return 0;
3209}
3210
3211int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3212 struct vm_area_struct *vma)
3213{
3214 pte_t *src_pte, *dst_pte, entry;
3215 struct page *ptepage;
3216 unsigned long addr;
3217 int cow;
3218 struct hstate *h = hstate_vma(vma);
3219 unsigned long sz = huge_page_size(h);
3220 unsigned long mmun_start; /* For mmu_notifiers */
3221 unsigned long mmun_end; /* For mmu_notifiers */
3222 int ret = 0;
3223
3224 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3225
3226 mmun_start = vma->vm_start;
3227 mmun_end = vma->vm_end;
3228 if (cow)
3229 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3230
3231 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3232 spinlock_t *src_ptl, *dst_ptl;
3233 src_pte = huge_pte_offset(src, addr);
3234 if (!src_pte)
3235 continue;
3236 dst_pte = huge_pte_alloc(dst, addr, sz);
3237 if (!dst_pte) {
3238 ret = -ENOMEM;
3239 break;
3240 }
3241
3242 /* If the pagetables are shared don't copy or take references */
3243 if (dst_pte == src_pte)
3244 continue;
3245
3246 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3247 src_ptl = huge_pte_lockptr(h, src, src_pte);
3248 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3249 entry = huge_ptep_get(src_pte);
3250 if (huge_pte_none(entry)) { /* skip none entry */
3251 ;
3252 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3253 is_hugetlb_entry_hwpoisoned(entry))) {
3254 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3255
3256 if (is_write_migration_entry(swp_entry) && cow) {
3257 /*
3258 * COW mappings require pages in both
3259 * parent and child to be set to read.
3260 */
3261 make_migration_entry_read(&swp_entry);
3262 entry = swp_entry_to_pte(swp_entry);
3263 set_huge_pte_at(src, addr, src_pte, entry);
3264 }
3265 set_huge_pte_at(dst, addr, dst_pte, entry);
3266 } else {
3267 if (cow) {
3268 huge_ptep_set_wrprotect(src, addr, src_pte);
3269 mmu_notifier_invalidate_range(src, mmun_start,
3270 mmun_end);
3271 }
3272 entry = huge_ptep_get(src_pte);
3273 ptepage = pte_page(entry);
3274 get_page(ptepage);
3275 page_dup_rmap(ptepage, true);
3276 set_huge_pte_at(dst, addr, dst_pte, entry);
3277 hugetlb_count_add(pages_per_huge_page(h), dst);
3278 }
3279 spin_unlock(src_ptl);
3280 spin_unlock(dst_ptl);
3281 }
3282
3283 if (cow)
3284 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3285
3286 return ret;
3287}
3288
3289void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3290 unsigned long start, unsigned long end,
3291 struct page *ref_page)
3292{
3293 struct mm_struct *mm = vma->vm_mm;
3294 unsigned long address;
3295 pte_t *ptep;
3296 pte_t pte;
3297 spinlock_t *ptl;
3298 struct page *page;
3299 struct hstate *h = hstate_vma(vma);
3300 unsigned long sz = huge_page_size(h);
3301 const unsigned long mmun_start = start; /* For mmu_notifiers */
3302 const unsigned long mmun_end = end; /* For mmu_notifiers */
3303
3304 WARN_ON(!is_vm_hugetlb_page(vma));
3305 BUG_ON(start & ~huge_page_mask(h));
3306 BUG_ON(end & ~huge_page_mask(h));
3307
3308 /*
3309	 * This is a hugetlb vma, so all the pte entries should point
3310	 * to huge pages.
3311 */
3312 tlb_remove_check_page_size_change(tlb, sz);
3313 tlb_start_vma(tlb, vma);
3314 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3315 address = start;
3316 for (; address < end; address += sz) {
3317 ptep = huge_pte_offset(mm, address);
3318 if (!ptep)
3319 continue;
3320
3321 ptl = huge_pte_lock(h, mm, ptep);
3322 if (huge_pmd_unshare(mm, &address, ptep)) {
3323 spin_unlock(ptl);
3324 continue;
3325 }
3326
3327 pte = huge_ptep_get(ptep);
3328 if (huge_pte_none(pte)) {
3329 spin_unlock(ptl);
3330 continue;
3331 }
3332
3333 /*
3334 * Migrating hugepage or HWPoisoned hugepage is already
3335 * unmapped and its refcount is dropped, so just clear pte here.
3336 */
3337 if (unlikely(!pte_present(pte))) {
3338 huge_pte_clear(mm, address, ptep);
3339 spin_unlock(ptl);
3340 continue;
3341 }
3342
3343 page = pte_page(pte);
3344 /*
3345 * If a reference page is supplied, it is because a specific
3346 * page is being unmapped, not a range. Ensure the page we
3347 * are about to unmap is the actual page of interest.
3348 */
3349 if (ref_page) {
3350 if (page != ref_page) {
3351 spin_unlock(ptl);
3352 continue;
3353 }
3354 /*
3355 * Mark the VMA as having unmapped its page so that
3356 * future faults in this VMA will fail rather than
3357 * looking like data was lost
3358 */
3359 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3360 }
3361
3362 pte = huge_ptep_get_and_clear(mm, address, ptep);
3363 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3364 if (huge_pte_dirty(pte))
3365 set_page_dirty(page);
3366
3367 hugetlb_count_sub(pages_per_huge_page(h), mm);
3368 page_remove_rmap(page, true);
3369
3370 spin_unlock(ptl);
3371 tlb_remove_page_size(tlb, page, huge_page_size(h));
3372 /*
3373 * Bail out after unmapping reference page if supplied
3374 */
3375 if (ref_page)
3376 break;
3377 }
3378 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3379 tlb_end_vma(tlb, vma);
3380}
3381
3382void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3383 struct vm_area_struct *vma, unsigned long start,
3384 unsigned long end, struct page *ref_page)
3385{
3386 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3387
3388 /*
3389 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3390 * test will fail on a vma being torn down, and not grab a page table
3391 * on its way out. We're lucky that the flag has such an appropriate
3392 * name, and can in fact be safely cleared here. We could clear it
3393 * before the __unmap_hugepage_range above, but all that's necessary
3394 * is to clear it before releasing the i_mmap_rwsem. This works
3395 * because in the context this is called, the VMA is about to be
3396 * destroyed and the i_mmap_rwsem is held.
3397 */
3398 vma->vm_flags &= ~VM_MAYSHARE;
3399}
3400
3401void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3402 unsigned long end, struct page *ref_page)
3403{
3404 struct mm_struct *mm;
3405 struct mmu_gather tlb;
3406
3407 mm = vma->vm_mm;
3408
3409 tlb_gather_mmu(&tlb, mm, start, end);
3410 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3411 tlb_finish_mmu(&tlb, start, end);
3412}
3413
3414/*
3415 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3416 * mapping it owns the reserve page for. The intention is to unmap the page
3417 * from other VMAs and let the children be SIGKILLed if they are faulting the
3418 * same region.
3419 */
3420static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3421 struct page *page, unsigned long address)
3422{
3423 struct hstate *h = hstate_vma(vma);
3424 struct vm_area_struct *iter_vma;
3425 struct address_space *mapping;
3426 pgoff_t pgoff;
3427
3428 /*
3429 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3430 * from page cache lookup which is in HPAGE_SIZE units.
3431 */
3432 address = address & huge_page_mask(h);
3433 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3434 vma->vm_pgoff;
3435 mapping = vma->vm_file->f_mapping;
3436
3437 /*
3438 * Take the mapping lock for the duration of the table walk. As
3439 * this mapping should be shared between all the VMAs,
3440 * __unmap_hugepage_range() is called as the lock is already held
3441 */
3442 i_mmap_lock_write(mapping);
3443 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3444 /* Do not unmap the current VMA */
3445 if (iter_vma == vma)
3446 continue;
3447
3448 /*
3449 * Shared VMAs have their own reserves and do not affect
3450 * MAP_PRIVATE accounting but it is possible that a shared
3451 * VMA is using the same page so check and skip such VMAs.
3452 */
3453 if (iter_vma->vm_flags & VM_MAYSHARE)
3454 continue;
3455
3456 /*
3457 * Unmap the page from other VMAs without their own reserves.
3458 * They get marked to be SIGKILLed if they fault in these
3459 * areas. This is because a future no-page fault on this VMA
3460 * could insert a zeroed page instead of the data existing
3461 * from the time of fork. This would look like data corruption
3462 */
3463 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3464 unmap_hugepage_range(iter_vma, address,
3465 address + huge_page_size(h), page);
3466 }
3467 i_mmap_unlock_write(mapping);
3468}
3469
3470/*
3471 * hugetlb_cow() should be called with page lock of the original hugepage held.
3472 * Called with the hugetlb fault mutex held and pte_page locked so we
3473 * cannot race with other handlers or page migration.
3474 * Keep the pte_same checks anyway to make transition from the mutex easier.
3475 */
3476static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3477 unsigned long address, pte_t *ptep,
3478 struct page *pagecache_page, spinlock_t *ptl)
3479{
3480 pte_t pte;
3481 struct hstate *h = hstate_vma(vma);
3482 struct page *old_page, *new_page;
3483 int ret = 0, outside_reserve = 0;
3484 unsigned long mmun_start; /* For mmu_notifiers */
3485 unsigned long mmun_end; /* For mmu_notifiers */
3486
3487 pte = huge_ptep_get(ptep);
3488 old_page = pte_page(pte);
3489
3490retry_avoidcopy:
3491 /* If no-one else is actually using this page, avoid the copy
3492 * and just make the page writable */
3493 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3494 page_move_anon_rmap(old_page, vma);
3495 set_huge_ptep_writable(vma, address, ptep);
3496 return 0;
3497 }
3498
3499 /*
3500 * If the process that created a MAP_PRIVATE mapping is about to
3501 * perform a COW due to a shared page count, attempt to satisfy
3502 * the allocation without using the existing reserves. The pagecache
3503 * page is used to determine if the reserve at this address was
3504 * consumed or not. If reserves were used, a partial faulted mapping
3505 * at the time of fork() could consume its reserves on COW instead
3506 * of the full address range.
3507 */
3508 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3509 old_page != pagecache_page)
3510 outside_reserve = 1;
3511
3512 get_page(old_page);
3513
3514 /*
3515 * Drop page table lock as buddy allocator may be called. It will
3516 * be acquired again before returning to the caller, as expected.
3517 */
3518 spin_unlock(ptl);
3519 new_page = alloc_huge_page(vma, address, outside_reserve);
3520
3521 if (IS_ERR(new_page)) {
3522 /*
3523 * If a process owning a MAP_PRIVATE mapping fails to COW,
3524 * it is due to references held by a child and an insufficient
3525		 * huge page pool. To guarantee the original mapper's
3526		 * reliability, unmap the page from child processes. The child
3527 * may get SIGKILLed if it later faults.
3528 */
3529 if (outside_reserve) {
3530 put_page(old_page);
3531 BUG_ON(huge_pte_none(pte));
3532 unmap_ref_private(mm, vma, old_page, address);
3533 BUG_ON(huge_pte_none(pte));
3534 spin_lock(ptl);
3535 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3536 if (likely(ptep &&
3537 pte_same(huge_ptep_get(ptep), pte)))
3538 goto retry_avoidcopy;
3539 /*
3540 * race occurs while re-acquiring page table
3541 * lock, and our job is done.
3542 */
3543 return 0;
3544 }
3545
3546 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3547 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3548 goto out_release_old;
3549 }
3550
3551 /*
3552 * When the original hugepage is shared one, it does not have
3553 * anon_vma prepared.
3554 */
3555 if (unlikely(anon_vma_prepare(vma))) {
3556 ret = VM_FAULT_OOM;
3557 goto out_release_all;
3558 }
3559
3560 copy_user_huge_page(new_page, old_page, address, vma,
3561 pages_per_huge_page(h));
3562 __SetPageUptodate(new_page);
3563 set_page_huge_active(new_page);
3564
3565 mmun_start = address & huge_page_mask(h);
3566 mmun_end = mmun_start + huge_page_size(h);
3567 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3568
3569 /*
3570 * Retake the page table lock to check for racing updates
3571 * before the page tables are altered
3572 */
3573 spin_lock(ptl);
3574 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3575 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3576 ClearPagePrivate(new_page);
3577
3578 /* Break COW */
3579 huge_ptep_clear_flush(vma, address, ptep);
3580 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3581 set_huge_pte_at(mm, address, ptep,
3582 make_huge_pte(vma, new_page, 1));
3583 page_remove_rmap(old_page, true);
3584 hugepage_add_new_anon_rmap(new_page, vma, address);
3585 /* Make the old page be freed below */
3586 new_page = old_page;
3587 }
3588 spin_unlock(ptl);
3589 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3590out_release_all:
3591 restore_reserve_on_error(h, vma, address, new_page);
3592 put_page(new_page);
3593out_release_old:
3594 put_page(old_page);
3595
3596 spin_lock(ptl); /* Caller expects lock to be held */
3597 return ret;
3598}
3599
3600/* Return the pagecache page at a given address within a VMA */
3601static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3602 struct vm_area_struct *vma, unsigned long address)
3603{
3604 struct address_space *mapping;
3605 pgoff_t idx;
3606
3607 mapping = vma->vm_file->f_mapping;
3608 idx = vma_hugecache_offset(h, vma, address);
3609
3610 return find_lock_page(mapping, idx);
3611}
3612
3613/*
3614 * Return whether there is a pagecache page to back given address within VMA.
3615 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3616 */
3617static bool hugetlbfs_pagecache_present(struct hstate *h,
3618 struct vm_area_struct *vma, unsigned long address)
3619{
3620 struct address_space *mapping;
3621 pgoff_t idx;
3622 struct page *page;
3623
3624 mapping = vma->vm_file->f_mapping;
3625 idx = vma_hugecache_offset(h, vma, address);
3626
3627 page = find_get_page(mapping, idx);
3628 if (page)
3629 put_page(page);
3630 return page != NULL;
3631}
3632
3633int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3634 pgoff_t idx)
3635{
3636 struct inode *inode = mapping->host;
3637 struct hstate *h = hstate_inode(inode);
3638 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3639
3640 if (err)
3641 return err;
3642 ClearPagePrivate(page);
3643
3644 spin_lock(&inode->i_lock);
3645 inode->i_blocks += blocks_per_huge_page(h);
3646 spin_unlock(&inode->i_lock);
3647 return 0;
3648}
3649
3650static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3651 struct address_space *mapping, pgoff_t idx,
3652 unsigned long address, pte_t *ptep, unsigned int flags)
3653{
3654 struct hstate *h = hstate_vma(vma);
3655 int ret = VM_FAULT_SIGBUS;
3656 int anon_rmap = 0;
3657 unsigned long size;
3658 struct page *page;
3659 pte_t new_pte;
3660 spinlock_t *ptl;
3661
3662 /*
3663 * Currently, we are forced to kill the process in the event the
3664 * original mapper has unmapped pages from the child due to a failed
3665 * COW. Warn that such a situation has occurred as it may not be obvious
3666 */
3667 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3668 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3669 current->pid);
3670 return ret;
3671 }
3672
3673 /*
3674 * Use page lock to guard against racing truncation
3675 * before we get page_table_lock.
3676 */
3677retry:
3678 page = find_lock_page(mapping, idx);
3679 if (!page) {
3680 size = i_size_read(mapping->host) >> huge_page_shift(h);
3681 if (idx >= size)
3682 goto out;
3683 page = alloc_huge_page(vma, address, 0);
3684 if (IS_ERR(page)) {
3685 ret = PTR_ERR(page);
3686 if (ret == -ENOMEM)
3687 ret = VM_FAULT_OOM;
3688 else
3689 ret = VM_FAULT_SIGBUS;
3690 goto out;
3691 }
3692 clear_huge_page(page, address, pages_per_huge_page(h));
3693 __SetPageUptodate(page);
3694 set_page_huge_active(page);
3695
3696 if (vma->vm_flags & VM_MAYSHARE) {
3697 int err = huge_add_to_page_cache(page, mapping, idx);
3698 if (err) {
3699 put_page(page);
3700 if (err == -EEXIST)
3701 goto retry;
3702 goto out;
3703 }
3704 } else {
3705 lock_page(page);
3706 if (unlikely(anon_vma_prepare(vma))) {
3707 ret = VM_FAULT_OOM;
3708 goto backout_unlocked;
3709 }
3710 anon_rmap = 1;
3711 }
3712 } else {
3713		 * If a memory error occurred between mmap() and fault, some processes
3714		 * may not have a hwpoisoned swap entry for the errored virtual address.
3715		 * So we need to block the hugepage fault by checking the PG_hwpoison bit.
3716 * So we need to block hugepage fault by PG_hwpoison bit check.
3717 */
3718 if (unlikely(PageHWPoison(page))) {
3719 ret = VM_FAULT_HWPOISON |
3720 VM_FAULT_SET_HINDEX(hstate_index(h));
3721 goto backout_unlocked;
3722 }
3723 }
3724
3725 /*
3726 * If we are going to COW a private mapping later, we examine the
3727 * pending reservations for this page now. This will ensure that
3728 * any allocations necessary to record that reservation occur outside
3729 * the spinlock.
3730 */
3731 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3732 if (vma_needs_reservation(h, vma, address) < 0) {
3733 ret = VM_FAULT_OOM;
3734 goto backout_unlocked;
3735 }
3736 /* Just decrements count, does not deallocate */
3737 vma_end_reservation(h, vma, address);
3738 }
3739
3740 ptl = huge_pte_lock(h, mm, ptep);
3741 size = i_size_read(mapping->host) >> huge_page_shift(h);
3742 if (idx >= size)
3743 goto backout;
3744
3745 ret = 0;
3746 if (!huge_pte_none(huge_ptep_get(ptep)))
3747 goto backout;
3748
3749 if (anon_rmap) {
3750 ClearPagePrivate(page);
3751 hugepage_add_new_anon_rmap(page, vma, address);
3752 } else
3753 page_dup_rmap(page, true);
3754 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3755 && (vma->vm_flags & VM_SHARED)));
3756 set_huge_pte_at(mm, address, ptep, new_pte);
3757
3758 hugetlb_count_add(pages_per_huge_page(h), mm);
3759 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3760 /* Optimization, do the COW without a second fault */
3761 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3762 }
3763
3764 spin_unlock(ptl);
3765 unlock_page(page);
3766out:
3767 return ret;
3768
3769backout:
3770 spin_unlock(ptl);
3771backout_unlocked:
3772 unlock_page(page);
3773 restore_reserve_on_error(h, vma, address, page);
3774 put_page(page);
3775 goto out;
3776}
3777
3778#ifdef CONFIG_SMP
3779u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3780 struct vm_area_struct *vma,
3781 struct address_space *mapping,
3782 pgoff_t idx, unsigned long address)
3783{
3784 unsigned long key[2];
3785 u32 hash;
3786
3787 if (vma->vm_flags & VM_SHARED) {
3788 key[0] = (unsigned long) mapping;
3789 key[1] = idx;
3790 } else {
3791 key[0] = (unsigned long) mm;
3792 key[1] = address >> huge_page_shift(h);
3793 }
3794
3795 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3796
3797 return hash & (num_fault_mutexes - 1);
3798}
3799#else
3800/*
3801 * For uniprocessor systems we always use a single mutex, so just
3802 * return 0 and avoid the hashing overhead.
3803 */
3804u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3805 struct vm_area_struct *vma,
3806 struct address_space *mapping,
3807 pgoff_t idx, unsigned long address)
3808{
3809 return 0;
3810}
3811#endif
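/*
 * A short worked example of the hash above (illustrative): num_fault_mutexes
 * is always a power of two (see hugetlb_init()), so
 * "hash & (num_fault_mutexes - 1)" is equivalent to
 * "hash % num_fault_mutexes"; with 64 mutexes the mask is 0x3f. Shared
 * mappings hash on (mapping, index), so concurrent faults on the same file
 * offset serialize on the same mutex, while private mappings hash on
 * (mm, address >> huge_page_shift).
 */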
3812
3813int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3814 unsigned long address, unsigned int flags)
3815{
3816 pte_t *ptep, entry;
3817 spinlock_t *ptl;
3818 int ret;
3819 u32 hash;
3820 pgoff_t idx;
3821 struct page *page = NULL;
3822 struct page *pagecache_page = NULL;
3823 struct hstate *h = hstate_vma(vma);
3824 struct address_space *mapping;
3825 int need_wait_lock = 0;
3826
3827 address &= huge_page_mask(h);
3828
3829 ptep = huge_pte_offset(mm, address);
3830 if (ptep) {
3831 entry = huge_ptep_get(ptep);
3832 if (unlikely(is_hugetlb_entry_migration(entry))) {
3833 migration_entry_wait_huge(vma, mm, ptep);
3834 return 0;
3835 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3836 return VM_FAULT_HWPOISON_LARGE |
3837 VM_FAULT_SET_HINDEX(hstate_index(h));
3838 } else {
3839 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3840 if (!ptep)
3841 return VM_FAULT_OOM;
3842 }
3843
3844 mapping = vma->vm_file->f_mapping;
3845 idx = vma_hugecache_offset(h, vma, address);
3846
3847 /*
3848 * Serialize hugepage allocation and instantiation, so that we don't
3849 * get spurious allocation failures if two CPUs race to instantiate
3850 * the same page in the page cache.
3851 */
3852 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3853 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3854
3855 entry = huge_ptep_get(ptep);
3856 if (huge_pte_none(entry)) {
3857 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3858 goto out_mutex;
3859 }
3860
3861 ret = 0;
3862
3863 /*
3864 * entry could be a migration/hwpoison entry at this point, so this
3865	 * check prevents the kernel from going below, assuming that we have
3866	 * an active hugepage in the page cache. This goto expects the 2nd page fault,
3867 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3868 * handle it.
3869 */
3870 if (!pte_present(entry))
3871 goto out_mutex;
3872
3873 /*
3874 * If we are going to COW the mapping later, we examine the pending
3875 * reservations for this page now. This will ensure that any
3876 * allocations necessary to record that reservation occur outside the
3877 * spinlock. For private mappings, we also lookup the pagecache
3878 * page now as it is used to determine if a reservation has been
3879 * consumed.
3880 */
3881 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3882 if (vma_needs_reservation(h, vma, address) < 0) {
3883 ret = VM_FAULT_OOM;
3884 goto out_mutex;
3885 }
3886 /* Just decrements count, does not deallocate */
3887 vma_end_reservation(h, vma, address);
3888
3889 if (!(vma->vm_flags & VM_MAYSHARE))
3890 pagecache_page = hugetlbfs_pagecache_page(h,
3891 vma, address);
3892 }
3893
3894 ptl = huge_pte_lock(h, mm, ptep);
3895
3896 /* Check for a racing update before calling hugetlb_cow */
3897 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3898 goto out_ptl;
3899
3900 /*
3901 * hugetlb_cow() requires page locks of pte_page(entry) and
3902 * pagecache_page, so here we need take the former one
3903 * when page != pagecache_page or !pagecache_page.
3904 */
3905 page = pte_page(entry);
3906 if (page != pagecache_page)
3907 if (!trylock_page(page)) {
3908 need_wait_lock = 1;
3909 goto out_ptl;
3910 }
3911
3912 get_page(page);
3913
3914 if (flags & FAULT_FLAG_WRITE) {
3915 if (!huge_pte_write(entry)) {
3916 ret = hugetlb_cow(mm, vma, address, ptep,
3917 pagecache_page, ptl);
3918 goto out_put_page;
3919 }
3920 entry = huge_pte_mkdirty(entry);
3921 }
3922 entry = pte_mkyoung(entry);
3923 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3924 flags & FAULT_FLAG_WRITE))
3925 update_mmu_cache(vma, address, ptep);
3926out_put_page:
3927 if (page != pagecache_page)
3928 unlock_page(page);
3929 put_page(page);
3930out_ptl:
3931 spin_unlock(ptl);
3932
3933 if (pagecache_page) {
3934 unlock_page(pagecache_page);
3935 put_page(pagecache_page);
3936 }
3937out_mutex:
3938 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3939 /*
3940	 * Generally it's safe to hold a refcount while waiting for the page lock.
3941	 * But here we only wait to defer the next page fault and avoid a busy loop;
3942	 * the page is not used after being unlocked before returning from the
3943	 * current page fault. So we are safe from accessing a freed page, even if
3944	 * we wait here without taking a refcount.
3945 */
3946 if (need_wait_lock)
3947 wait_on_page_locked(page);
3948 return ret;
3949}
3950
3951long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3952 struct page **pages, struct vm_area_struct **vmas,
3953 unsigned long *position, unsigned long *nr_pages,
3954 long i, unsigned int flags)
3955{
3956 unsigned long pfn_offset;
3957 unsigned long vaddr = *position;
3958 unsigned long remainder = *nr_pages;
3959 struct hstate *h = hstate_vma(vma);
3960
3961 while (vaddr < vma->vm_end && remainder) {
3962 pte_t *pte;
3963 spinlock_t *ptl = NULL;
3964 int absent;
3965 struct page *page;
3966
3967 /*
3968 * If we have a pending SIGKILL, don't keep faulting pages and
3969 * potentially allocating memory.
3970 */
3971 if (unlikely(fatal_signal_pending(current))) {
3972 remainder = 0;
3973 break;
3974 }
3975
3976 /*
3977 * Some archs (sparc64, sh*) have multiple pte_ts to
3978 * each hugepage. We have to make sure we get the
3979 * first, for the page indexing below to work.
3980 *
3981 * Note that page table lock is not held when pte is null.
3982 */
3983 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3984 if (pte)
3985 ptl = huge_pte_lock(h, mm, pte);
3986 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3987
3988 /*
3989 * When coredumping, it suits get_dump_page if we just return
3990 * an error where there's an empty slot with no huge pagecache
3991 * to back it. This way, we avoid allocating a hugepage, and
3992 * the sparse dumpfile avoids allocating disk blocks, but its
3993 * huge holes still show up with zeroes where they need to be.
3994 */
3995 if (absent && (flags & FOLL_DUMP) &&
3996 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3997 if (pte)
3998 spin_unlock(ptl);
3999 remainder = 0;
4000 break;
4001 }
4002
4003 /*
4004		 * We need to call hugetlb_fault for both hugepages under migration
4005		 * (in which case hugetlb_fault waits for the migration) and
4006		 * hwpoisoned hugepages (in which case we need to prevent the
4007		 * caller from accessing them). To do this, we use is_swap_pte
4008		 * here instead of is_hugetlb_entry_migration and
4009		 * is_hugetlb_entry_hwpoisoned, because it simply covers
4010		 * both cases, and because we can't follow correct pages
4011		 * directly from any kind of swap entry.
4012 */
4013 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4014 ((flags & FOLL_WRITE) &&
4015 !huge_pte_write(huge_ptep_get(pte)))) {
4016 int ret;
4017
4018 if (pte)
4019 spin_unlock(ptl);
4020 ret = hugetlb_fault(mm, vma, vaddr,
4021 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
4022 if (!(ret & VM_FAULT_ERROR))
4023 continue;
4024
4025 remainder = 0;
4026 break;
4027 }
4028
4029 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4030 page = pte_page(huge_ptep_get(pte));
4031same_page:
4032 if (pages) {
4033 pages[i] = mem_map_offset(page, pfn_offset);
4034 get_page(pages[i]);
4035 }
4036
4037 if (vmas)
4038 vmas[i] = vma;
4039
4040 vaddr += PAGE_SIZE;
4041 ++pfn_offset;
4042 --remainder;
4043 ++i;
4044 if (vaddr < vma->vm_end && remainder &&
4045 pfn_offset < pages_per_huge_page(h)) {
4046 /*
4047 * We use pfn_offset to avoid touching the pageframes
4048 * of this compound page.
4049 */
4050 goto same_page;
4051 }
4052 spin_unlock(ptl);
4053 }
4054 *nr_pages = remainder;
4055 *position = vaddr;
4056
4057 return i ? i : -EFAULT;
4058}
4059
4060#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4061/*
4062 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4063 * implement this.
4064 */
4065#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4066#endif
4067
4068unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4069 unsigned long address, unsigned long end, pgprot_t newprot)
4070{
4071 struct mm_struct *mm = vma->vm_mm;
4072 unsigned long start = address;
4073 pte_t *ptep;
4074 pte_t pte;
4075 struct hstate *h = hstate_vma(vma);
4076 unsigned long pages = 0;
4077
4078 BUG_ON(address >= end);
4079 flush_cache_range(vma, address, end);
4080
4081 mmu_notifier_invalidate_range_start(mm, start, end);
4082 i_mmap_lock_write(vma->vm_file->f_mapping);
4083 for (; address < end; address += huge_page_size(h)) {
4084 spinlock_t *ptl;
4085 ptep = huge_pte_offset(mm, address);
4086 if (!ptep)
4087 continue;
4088 ptl = huge_pte_lock(h, mm, ptep);
4089 if (huge_pmd_unshare(mm, &address, ptep)) {
4090 pages++;
4091 spin_unlock(ptl);
4092 continue;
4093 }
4094 pte = huge_ptep_get(ptep);
4095 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4096 spin_unlock(ptl);
4097 continue;
4098 }
4099 if (unlikely(is_hugetlb_entry_migration(pte))) {
4100 swp_entry_t entry = pte_to_swp_entry(pte);
4101
4102 if (is_write_migration_entry(entry)) {
4103 pte_t newpte;
4104
4105 make_migration_entry_read(&entry);
4106 newpte = swp_entry_to_pte(entry);
4107 set_huge_pte_at(mm, address, ptep, newpte);
4108 pages++;
4109 }
4110 spin_unlock(ptl);
4111 continue;
4112 }
4113 if (!huge_pte_none(pte)) {
4114 pte = huge_ptep_get_and_clear(mm, address, ptep);
4115 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4116 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4117 set_huge_pte_at(mm, address, ptep, pte);
4118 pages++;
4119 }
4120 spin_unlock(ptl);
4121 }
4122 /*
4123 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4124 * may have cleared our pud entry and done put_page on the page table:
4125 * once we release i_mmap_rwsem, another task can do the final put_page
4126 * and that page table be reused and filled with junk.
4127 */
4128 flush_hugetlb_tlb_range(vma, start, end);
4129 mmu_notifier_invalidate_range(mm, start, end);
4130 i_mmap_unlock_write(vma->vm_file->f_mapping);
4131 mmu_notifier_invalidate_range_end(mm, start, end);
4132
4133 return pages << h->order;
4134}
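/*
 * Note on the return value (illustrative arithmetic): 'pages' counts huge
 * pages whose entries were changed, and the shift by h->order converts that
 * into base pages, matching the units used by the normal mprotect() path.
 * For 2 MB pages (order 9), changing 3 entries reports 3 << 9 = 1536 pages.
 */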
4135
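/*
 * Usage sketch (assumed calling convention; the actual call sites live in
 * fs/hugetlbfs): the range [from, to) is given in huge-page-sized units of
 * the file, so reserving the first 8 huge pages of an inode would look
 * roughly like
 *
 *	err = hugetlb_reserve_pages(inode, 0, 8, vma, vma->vm_flags);
 *
 * A zero return means the reservation, if one was required at all, is now in
 * place.
 */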
4136int hugetlb_reserve_pages(struct inode *inode,
4137 long from, long to,
4138 struct vm_area_struct *vma,
4139 vm_flags_t vm_flags)
4140{
4141 long ret, chg;
4142 struct hstate *h = hstate_inode(inode);
4143 struct hugepage_subpool *spool = subpool_inode(inode);
4144 struct resv_map *resv_map;
4145 long gbl_reserve;
4146
4147 /*
4148 * Only apply hugepage reservation if asked. At fault time, an
4149 * attempt will be made for VM_NORESERVE to allocate a page
4150 * without using reserves
4151 */
4152 if (vm_flags & VM_NORESERVE)
4153 return 0;
4154
4155 /*
4156 * Shared mappings base their reservation on the number of pages that
4157 * are already allocated on behalf of the file. Private mappings need
4158 * to reserve the full area even if read-only as mprotect() may be
4159 * called to make the mapping read-write. Assume !vma is a shm mapping
4160 */
4161 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4162 resv_map = inode_resv_map(inode);
4163
4164 chg = region_chg(resv_map, from, to);
4165
4166 } else {
4167 resv_map = resv_map_alloc();
4168 if (!resv_map)
4169 return -ENOMEM;
4170
4171 chg = to - from;
4172
4173 set_vma_resv_map(vma, resv_map);
4174 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4175 }
4176
4177 if (chg < 0) {
4178 ret = chg;
4179 goto out_err;
4180 }
4181
4182 /*
4183 * There must be enough pages in the subpool for the mapping. If
4184 * the subpool has a minimum size, there may be some global
4185 * reservations already in place (gbl_reserve).
4186 */
4187 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4188 if (gbl_reserve < 0) {
4189 ret = -ENOSPC;
4190 goto out_err;
4191 }
4192
4193 /*
4194 * Check enough hugepages are available for the reservation.
4195 * Hand the pages back to the subpool if there are not
4196 */
4197 ret = hugetlb_acct_memory(h, gbl_reserve);
4198 if (ret < 0) {
4199 /* put back original number of pages, chg */
4200 (void)hugepage_subpool_put_pages(spool, chg);
4201 goto out_err;
4202 }
4203
4204 /*
4205 * Account for the reservations made. Shared mappings record regions
4206 * that have reservations as they are shared by multiple VMAs.
4207 * When the last VMA disappears, the region map says how much
4208 * the reservation was and the page cache tells how much of
4209 * the reservation was consumed. Private mappings are per-VMA and
4210 * only the consumed reservations are tracked. When the VMA
4211 * disappears, the original reservation is the VMA size and the
4212 * consumed reservations are stored in the map. Hence, nothing
4213 * else has to be done for private mappings here
4214 */
4215 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4216 long add = region_add(resv_map, from, to);
4217
4218 if (unlikely(chg > add)) {
4219 /*
4220 * pages in this range were added to the reserve
4221 * map between region_chg and region_add. This
4222 * indicates a race with alloc_huge_page. Adjust
4223 * the subpool and reserve counts modified above
4224 * based on the difference.
4225 */
4226 long rsv_adjust;
4227
4228 rsv_adjust = hugepage_subpool_put_pages(spool,
4229 chg - add);
4230 hugetlb_acct_memory(h, -rsv_adjust);
4231 }
4232 }
4233 return 0;
4234out_err:
4235 if (!vma || vma->vm_flags & VM_MAYSHARE)
4236 region_abort(resv_map, from, to);
4237 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4238 kref_put(&resv_map->refs, resv_map_release);
4239 return ret;
4240}
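/*
 * Worked example for the chg > add race above (hypothetical numbers): if
 * region_chg() reported chg = 10, but a racing alloc_huge_page() added two of
 * those pages to the reserve map before region_add() ran, region_add()
 * returns add = 8. The two reservations that were charged twice are then
 * handed back to the subpool and the global reserve through the rsv_adjust
 * path above.
 */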
4241
4242long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4243 long freed)
4244{
4245 struct hstate *h = hstate_inode(inode);
4246 struct resv_map *resv_map = inode_resv_map(inode);
4247 long chg = 0;
4248 struct hugepage_subpool *spool = subpool_inode(inode);
4249 long gbl_reserve;
4250
4251 if (resv_map) {
4252 chg = region_del(resv_map, start, end);
4253 /*
4254 * region_del() can fail in the rare case where a region
4255 * must be split and another region descriptor can not be
4256 * allocated. If end == LONG_MAX, it will not fail.
4257 */
4258 if (chg < 0)
4259 return chg;
4260 }
4261
4262 spin_lock(&inode->i_lock);
4263 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4264 spin_unlock(&inode->i_lock);
4265
4266 /*
4267 * If the subpool has a minimum size, the number of global
4268 * reservations to be released may be adjusted.
4269 */
4270 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4271 hugetlb_acct_memory(h, -gbl_reserve);
4272
4273 return 0;
4274}
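/*
 * Illustrative arithmetic: blocks_per_huge_page(h) is the huge page size in
 * 512-byte blocks, so releasing one 2 MB page drops inode->i_blocks by 4096.
 * The (chg - freed) handed to the subpool is the count of reservations that
 * were taken out for this range but never consumed by an allocated page.
 */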
4275
4276#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4277static unsigned long page_table_shareable(struct vm_area_struct *svma,
4278 struct vm_area_struct *vma,
4279 unsigned long addr, pgoff_t idx)
4280{
4281 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4282 svma->vm_start;
4283 unsigned long sbase = saddr & PUD_MASK;
4284 unsigned long s_end = sbase + PUD_SIZE;
4285
4286 /* Allow segments to share if only one is marked locked */
4287 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4288 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4289
4290 /*
4291 * match the virtual addresses, permission and the alignment of the
4292 * page table page.
4293 */
4294 if (pmd_index(addr) != pmd_index(saddr) ||
4295 vm_flags != svm_flags ||
4296 sbase < svma->vm_start || svma->vm_end < s_end)
4297 return 0;
4298
4299 return saddr;
4300}
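/*
 * Worked example (hypothetical addresses): with svma->vm_start =
 * 0x7f0000000000, svma->vm_pgoff = 0 and idx = 0x40000 (the base page 1 GB
 * into the file), saddr works out to 0x7f0040000000. It is only returned as
 * a sharing candidate when addr and saddr occupy the same slot in their PMD
 * tables, the lock-masked vm_flags of both VMAs match, and svma spans the
 * whole PUD-aligned region [sbase, s_end) around saddr.
 */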
4301
4302static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4303{
4304 unsigned long base = addr & PUD_MASK;
4305 unsigned long end = base + PUD_SIZE;
4306
4307 /*
4308 * check on proper vm_flags and page table alignment
4309 */
4310 if (vma->vm_flags & VM_MAYSHARE &&
4311 vma->vm_start <= base && end <= vma->vm_end)
4312 return true;
4313 return false;
4314}
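/*
 * Illustrative note: with 4-level paging on x86-64, PUD_SIZE is 1 GB, so a
 * shared hugetlbfs mapping only qualifies for PMD sharing at a given address
 * when the VMA covers the full 1 GB-aligned region containing that address;
 * anything smaller or misaligned falls back to an ordinary private PMD.
 */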
4315
4316/*
4317 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4318 * and returns the corresponding pte. While this is not necessary for the
4319 * !shared pmd case because we can allocate the pmd later as well, it makes the
4320 * code much cleaner. pmd allocation is essential for the shared case because
4321 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4322 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4323 * bad pmd for sharing.
4324 */
4325pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4326{
4327 struct vm_area_struct *vma = find_vma(mm, addr);
4328 struct address_space *mapping = vma->vm_file->f_mapping;
4329 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4330 vma->vm_pgoff;
4331 struct vm_area_struct *svma;
4332 unsigned long saddr;
4333 pte_t *spte = NULL;
4334 pte_t *pte;
4335 spinlock_t *ptl;
4336
4337 if (!vma_shareable(vma, addr))
4338 return (pte_t *)pmd_alloc(mm, pud, addr);
4339
4340 i_mmap_lock_write(mapping);
4341 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4342 if (svma == vma)
4343 continue;
4344
4345 saddr = page_table_shareable(svma, vma, addr, idx);
4346 if (saddr) {
4347 spte = huge_pte_offset(svma->vm_mm, saddr);
4348 if (spte) {
4349 get_page(virt_to_page(spte));
4350 break;
4351 }
4352 }
4353 }
4354
4355 if (!spte)
4356 goto out;
4357
4358 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4359 if (pud_none(*pud)) {
4360 pud_populate(mm, pud,
4361 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4362 mm_inc_nr_pmds(mm);
4363 } else {
4364 put_page(virt_to_page(spte));
4365 }
4366 spin_unlock(ptl);
4367out:
4368 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4369 i_mmap_unlock_write(mapping);
4370 return pte;
4371}
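/*
 * Sharing life cycle, summarised informally: when two tasks map the same
 * hugetlbfs file across the same PUD-aligned range, the second task finds the
 * first task's PMD page above, takes a reference with
 * get_page(virt_to_page(spte)) and points its own PUD at it, so page_count()
 * of that PMD page tracks how many page tables reference it; that is the
 * count huge_pmd_unshare() tests below.
 */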
4372
4373/*
4374 * unmap huge page backed by shared pte.
4375 *
4376 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
4377 * (indicated by page_count > 1), unmap is achieved by clearing pud and
4378 * decrementing the ref count. If count == 1, the pte page is not shared.
4379 *
4380 * called with page table lock held.
4381 *
4382 * returns: 1 successfully unmapped a shared pte page
4383 * 0 the underlying pte page is not shared, or it is the last user
4384 */
4385int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4386{
4387 pgd_t *pgd = pgd_offset(mm, *addr);
4388 pud_t *pud = pud_offset(pgd, *addr);
4389
4390 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4391 if (page_count(virt_to_page(ptep)) == 1)
4392 return 0;
4393
4394 pud_clear(pud);
4395 put_page(virt_to_page(ptep));
4396 mm_dec_nr_pmds(mm);
4397 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4398 return 1;
4399}
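/*
 * Worked example for the *addr rewind above (x86-64 numbers, 2 MB pages):
 * HPAGE_SIZE * PTRS_PER_PTE is 2 MB * 512 = 1 GB = PUD_SIZE. With *addr =
 * 0x40200000, ALIGN() yields 0x80000000 and subtracting HPAGE_SIZE leaves
 * 0x7fe00000, so when the caller's loop next adds huge_page_size() it resumes
 * at 0x80000000, the start of the next PUD region, skipping the rest of the
 * range whose PUD entry was just cleared.
 */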
4400#define want_pmd_share() (1)
4401#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4402pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4403{
4404 return NULL;
4405}
4406
4407int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4408{
4409 return 0;
4410}
4411#define want_pmd_share() (0)
4412#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4413
4414#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4415pte_t *huge_pte_alloc(struct mm_struct *mm,
4416 unsigned long addr, unsigned long sz)
4417{
4418 pgd_t *pgd;
4419 pud_t *pud;
4420 pte_t *pte = NULL;
4421
4422 pgd = pgd_offset(mm, addr);
4423 pud = pud_alloc(mm, pgd, addr);
4424 if (pud) {
4425 if (sz == PUD_SIZE) {
4426 pte = (pte_t *)pud;
4427 } else {
4428 BUG_ON(sz != PMD_SIZE);
4429 if (want_pmd_share() && pud_none(*pud))
4430 pte = huge_pmd_share(mm, addr, pud);
4431 else
4432 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4433 }
4434 }
4435 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4436
4437 return pte;
4438}
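/*
 * Illustrative note: the "pte" handed back here is the entry at whichever
 * level maps the huge page; for sz == PUD_SIZE (1 GB pages on x86-64) it is
 * the PUD slot itself, and for PMD_SIZE (2 MB pages) it is a PMD slot that
 * either comes from a shared PMD page via huge_pmd_share() or is allocated
 * fresh with pmd_alloc().
 */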
4439
4440pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4441{
4442 pgd_t *pgd;
4443 pud_t *pud;
4444 pmd_t *pmd = NULL;
4445
4446 pgd = pgd_offset(mm, addr);
4447 if (pgd_present(*pgd)) {
4448 pud = pud_offset(pgd, addr);
4449 if (pud_present(*pud)) {
4450 if (pud_huge(*pud))
4451 return (pte_t *)pud;
4452 pmd = pmd_offset(pud, addr);
4453 }
4454 }
4455 return (pte_t *) pmd;
4456}
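/*
 * Illustrative note: the pointer returned here may be NULL (no table present
 * at that level yet) or may reference an entry that is none, a present huge
 * mapping, or a swap/migration entry; callers are expected to take the page
 * table lock and re-examine the entry before acting on it.
 */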
4457
4458#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4459
4460/*
4461 * These functions can be overridden if your architecture needs its own
4462 * behavior.
4463 */
4464struct page * __weak
4465follow_huge_addr(struct mm_struct *mm, unsigned long address,
4466 int write)
4467{
4468 return ERR_PTR(-EINVAL);
4469}
4470
4471struct page * __weak
4472follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4473 pmd_t *pmd, int flags)
4474{
4475 struct page *page = NULL;
4476 spinlock_t *ptl;
4477 pte_t pte;
4478retry:
4479 ptl = pmd_lockptr(mm, pmd);
4480 spin_lock(ptl);
4481 /*
4482 * make sure that the address range covered by this pmd is not
4483 * unmapped from other threads.
4484 */
4485 if (!pmd_huge(*pmd))
4486 goto out;
4487 pte = huge_ptep_get((pte_t *)pmd);
4488 if (pte_present(pte)) {
4489 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4490 if (flags & FOLL_GET)
4491 get_page(page);
4492 } else {
4493 if (is_hugetlb_entry_migration(pte)) {
4494 spin_unlock(ptl);
4495 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4496 goto retry;
4497 }
4498 /*
4499 * hwpoisoned entry is treated as no_page_table in
4500 * follow_page_mask().
4501 */
4502 }
4503out:
4504 spin_unlock(ptl);
4505 return page;
4506}
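/*
 * Illustrative note: when the entry turns out to be a migration entry, the
 * lock is dropped and __migration_entry_wait() sleeps until the huge page
 * migration finishes, after which the lookup retries from the top; a
 * hwpoisoned entry instead falls through and NULL is returned, as if no page
 * were mapped at that address.
 */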
4507
4508struct page * __weak
4509follow_huge_pud(struct mm_struct *mm, unsigned long address,
4510 pud_t *pud, int flags)
4511{
4512 if (flags & FOLL_GET)
4513 return NULL;
4514
4515 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4516}
4517
4518#ifdef CONFIG_MEMORY_FAILURE
4519
4520/*
4521 * This function is called from memory failure code.
4522 */
4523int dequeue_hwpoisoned_huge_page(struct page *hpage)
4524{
4525 struct hstate *h = page_hstate(hpage);
4526 int nid = page_to_nid(hpage);
4527 int ret = -EBUSY;
4528
4529 spin_lock(&hugetlb_lock);
4530 /*
4531 * Just checking !page_huge_active is not enough, because that could be
4532 * an isolated/hwpoisoned hugepage (which have >0 refcount).
4533 */
4534 if (!page_huge_active(hpage) && !page_count(hpage)) {
4535 /*
4536 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4537 * but dangling hpage->lru can trigger list-debug warnings
4538 * (this happens when we call unpoison_memory() on it),
4539 * so let it point to itself with list_del_init().
4540 */
4541 list_del_init(&hpage->lru);
4542 set_page_refcounted(hpage);
4543 h->free_huge_pages--;
4544 h->free_huge_pages_node[nid]--;
4545 ret = 0;
4546 }
4547 spin_unlock(&hugetlb_lock);
4548 return ret;
4549}
4550#endif
4551
4552bool isolate_huge_page(struct page *page, struct list_head *list)
4553{
4554 bool ret = true;
4555
4556 VM_BUG_ON_PAGE(!PageHead(page), page);
4557 spin_lock(&hugetlb_lock);
4558 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4559 ret = false;
4560 goto unlock;
4561 }
4562 clear_page_huge_active(page);
4563 list_move_tail(&page->lru, list);
4564unlock:
4565 spin_unlock(&hugetlb_lock);
4566 return ret;
4567}
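/*
 * Illustrative note: isolate_huge_page() pulls a page off its hstate's active
 * list onto the caller's private 'list' with an extra reference held;
 * putback_active_hugepage() below is the inverse, re-activating the page,
 * returning it to the hstate's active list and dropping that reference.
 */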
4568
4569void putback_active_hugepage(struct page *page)
4570{
4571 VM_BUG_ON_PAGE(!PageHead(page), page);
4572 spin_lock(&hugetlb_lock);
4573 set_page_huge_active(page);
4574 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4575 spin_unlock(&hugetlb_lock);
4576 put_page(page);
4577}