// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page. This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/*
	 * If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}
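
/*
 * Usage sketch (illustrative only, not a caller in this file): hugetlbfs
 * creates one subpool per mount and drops its reference at unmount time,
 * roughly:
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * The final put frees the subpool only once used_hpages has also dropped
 * to zero; see unlock_or_release_subpool() above.
 */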

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy
 * the request. Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward). The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of the subpool. Return the difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}
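
/*
 * Worked example (illustrative numbers only, assuming max_hpages == -1
 * and used_hpages below min_hpages throughout): take a subpool with
 * min_hpages == 4 and rsv_hpages == 4, i.e. four global reservations
 * are currently held on the subpool's behalf.
 *
 *	hugepage_subpool_get_pages(spool, 2)	returns 0
 *		(fully covered by held reserves; rsv_hpages becomes 2)
 *	hugepage_subpool_get_pages(spool, 3)	returns 1
 *		(2 covered by reserves, 1 must be charged globally)
 *	hugepage_subpool_put_pages(spool, 3)	returns 0
 *		(all 3 pages are retained as reserves; rsv_hpages becomes 3,
 *		 still below the min_hpages cap)
 */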

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map. In the normal case, existing regions will be expanded
 * to accommodate the specified range. Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range. However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded. In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map. This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del(). Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/*
		 * If this area reaches higher, extend our area to
		 * include it completely. If this is not the first area
		 * which we intend to reuse, free it.
		 */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/*
			 * Decrement the return value by the deleted range.
			 * Another range will span this area so that by
			 * the end of the routine add will be >= zero.
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}
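
/*
 * Coalescing example (illustrative): with existing regions [0, 2) and
 * [5, 7), region_add(resv, 1, 6) expands the first region, consumes the
 * second, and leaves the single region [0, 7). The return value is 3:
 * pages 2, 3 and 4 are the only ones newly added to the map.
 */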

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented. This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t). region_chg does
 * not change the number of huge pages represented by the
 * map. However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder. This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map. If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t). This number is greater or equal to
 * zero. -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If we are below the current region then a new region is required.
	 * Subtle: allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation.
	 */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/*
		 * We overlap with this area; if it extends further than
		 * us then we must extend ourselves. Account for its
		 * existing reservation.
		 */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}
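
/*
 * Pairing example (illustrative): for the regions [0, 2) and [5, 7)
 * used in the region_add() example above, region_chg(resv, 1, 6) walks
 * the same overlap logic without modifying the map and returns 3, the
 * number of pages (2, 3 and 4) that a subsequent region_add(resv, 1, 6)
 * would actually add.
 */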

/*
 * Abort the in progress add operation. The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add. Operations are sometimes
 * aborted after the call to region_chg. In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted. file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to. Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
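
/*
 * Split example (illustrative): with a single region [0, 10),
 * region_del(resv, 3, 7) trims the original entry to [0, 3), inserts a
 * new descriptor [7, 10) after it, and returns 4 (pages 3..6 were
 * removed from the map).
 */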

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page. The huge page itself was freed
 * and removed from the page cache. This routine will adjust the subpool
 * usage count, and the global reserve count if needed. By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
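
/*
 * Example (illustrative, 2 MB huge pages): for a vma with vm_pgoff == 0,
 * the address vm_start + 5 * 2MB maps to huge page index 5. The byte
 * offset into the vma is shifted right by huge_page_shift(h) (21 for
 * 2 MB pages), and vm_pgoff is rescaled from PAGE_SIZE units to huge
 * page units by huge_page_order(h) (9 for 2 MB pages).
 */
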
628
629pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
630 unsigned long address)
631{
632 return vma_hugecache_offset(hstate_vma(vma), vma, address);
633}
634EXPORT_SYMBOL_GPL(linear_hugepage_index);
635
636/*
637 * Return the size of the pages allocated when backing a VMA. In the majority
638 * cases this will be same size as used by the page table entries.
639 */
640unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
641{
642 if (vma->vm_ops && vma->vm_ops->pagesize)
643 return vma->vm_ops->pagesize(vma);
644 return PAGE_SIZE;
645}
646EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
648/*
649 * Return the page size being used by the MMU to back a VMA. In the majority
650 * of cases, the page size used by the kernel matches the MMU size. On
651 * architectures where it differs, an architecture-specific 'strong'
652 * version of this symbol is required.
653 */
654__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
655{
656 return vma_kernel_pagesize(vma);
657}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
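
/*
 * Packing example (illustrative): since a resv_map is at least
 * word-aligned, the two low bits of its pointer are free, so a private
 * mapping can carry both the map and its flags in vm_private_data:
 *
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *	resv = vma_resv_map(vma);	(masks HPAGE_RESV_MASK back off)
 *
 * The helpers used above are defined below.
 */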

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode. This original address space
	 * contains the pointer to the resv_map. So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the release step. Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set. Therefore, there SHOULD
		 * be a region map for all pages. The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate. In this case, there really are no reserves to
		 * use. This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves(). The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings. vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account. Therefore, the meaning of chg is the same
		 * as in the shared case above. Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!PageHWPoison(page))
			break;
	/*
	 * If no suitable (non-HWPoison) free hugepage was found on the
	 * list, the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * No need to ask again on the same node. The pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * Common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * Returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at the end of the
 * node mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

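/*
 * Usage sketch (illustrative): the macros above implement one
 * round-robin pass over the allowed nodes, e.g.:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 *		if (try_alloc_on(node))		(hypothetical helper)
 *			break;
 *	}
 *
 * Each iteration returns the saved "next" node and advances it, so
 * successive passes start where the previous one left off.
 */
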
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb-specific page flag. Do not use outside of the
 * hugetlb code.
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	set_page_private(page, 0);
	page->mapping = NULL;
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * If PagePrivate() was set on the page, page allocation consumed a
	 * reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed. Therefore, force the
		 * restore_reserve operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too. Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set). Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
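
/*
 * Index example (illustrative, 2 MB huge pages, order below MAX_ORDER):
 * for a tail page 37 base pages into a huge page whose head has
 * page_index() == 3, __basepage_index() returns (3 << 9) + 37 == 1573,
 * the offset of that base page within the file in PAGE_SIZE units.
 */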

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and the previous
	 * allocation failed, do not continue to try hard on the same node.
	 * Use the node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page,
	 * this indicates an overall state change. Clear the bit so that we
	 * resume normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set the bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	return page;
}
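
/*
 * State sketch (illustrative): the per-node noretry bit implements a
 * small hysteresis for pool-resize loops:
 *
 *	bit clear, alloc fails -> set bit   (stop trying hard on this node)
 *	bit set,   alloc works -> clear bit (node recovered, try hard again)
 *
 * so one hopeless node cannot stall a loop that adjusts nr_huge_pages
 * across many nodes.
 */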

/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages.
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
						node_alloc_noretry);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *          (allocated or reserved.)
 *       0: successfully dissolved free hugepages or the page is not a
 *          hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!PageHuge(page))
		return 0;

	spin_lock(&hugetlb_lock);
	if (!PageHuge(page)) {
		rc = 0;
		goto out;
	}

	if (!page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * the temporary page flag to work around the nasty free_huge_page
	 * code flow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		spin_unlock(&hugetlb_lock);
		put_page(page);
		return NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation. Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator. Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}
1850
1851/*
1852 * This routine has two main purposes:
1853 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1854 * in unused_resv_pages. This corresponds to the prior adjustments made
1855 * to the associated reservation map.
1856 * 2) Free any unused surplus pages that may have been allocated to satisfy
1857 * the reservation. As many as unused_resv_pages may be freed.
1858 *
1859 * Called with hugetlb_lock held. However, the lock could be dropped (and
1860 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1861 * we must make sure nobody else can claim pages we are in the process of
1862 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1863 * number of huge pages we plan to free when dropping the lock.
1864 */
1865static void return_unused_surplus_pages(struct hstate *h,
1866 unsigned long unused_resv_pages)
1867{
1868 unsigned long nr_pages;
1869
1870 /* Cannot return gigantic pages currently */
1871 if (hstate_is_gigantic(h))
1872 goto out;
1873
1874 /*
1875 * Part (or even all) of the reservation could have been backed
1876 * by pre-allocated pages. Only free surplus pages.
1877 */
1878 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1879
1880 /*
1881 * We want to release as many surplus pages as possible, spread
1882 * evenly across all nodes with memory. Iterate across these nodes
1883 * until we can no longer free unreserved surplus pages. This occurs
1884 * when the nodes with surplus pages have no free pages.
1885 * free_pool_huge_page() will balance the freed pages across the
1886 * on-line nodes with memory and will handle the hstate accounting.
1887 *
1888 * Note that we decrement resv_huge_pages as we free the pages. If
1889 * we drop the lock, resv_huge_pages will still be sufficiently large
1890 * to cover subsequent pages we may free.
1891 */
1892 while (nr_pages--) {
1893 h->resv_huge_pages--;
1894 unused_resv_pages--;
1895 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1896 goto out;
1897 cond_resched_lock(&hugetlb_lock);
1898 }
1899
1900out:
1901 /* Fully uncommit the reservation */
1902 h->resv_huge_pages -= unused_resv_pages;
1903}
1904
1905
1906/*
1907 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1908 * are used by the huge page allocation routines to manage reservations.
1909 *
1910 * vma_needs_reservation is called to determine if the huge page at addr
1911 * within the vma has an associated reservation. If a reservation is
1912 * needed, the value 1 is returned. The caller is then responsible for
1913 * managing the global reservation and subpool usage counts. After
1914 * the huge page has been allocated, vma_commit_reservation is called
1915 * to add the page to the reservation map. If the page allocation fails,
1916 * the reservation must be ended instead of committed. vma_end_reservation
1917 * is called in such cases.
1918 *
1919 * In the normal case, vma_commit_reservation returns the same value
1920 * as the preceding vma_needs_reservation call. The only time this
1921 * is not the case is if a reserve map was changed between calls. It
1922 * is the responsibility of the caller to notice the difference and
1923 * take appropriate action.
1924 *
1925 * vma_add_reservation is used in error paths where a reservation must
1926 * be restored when a newly allocated huge page must be freed. It is
1927 * to be called after calling vma_needs_reservation to determine if a
1928 * reservation exists.
1929 */
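/*
 * A minimal sketch of the expected calling sequence (illustrative only;
 * alloc_huge_page() below is the real consumer of these helpers):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = <allocate a huge page, charging the subpool if chg != 0>;
 *	if (!page)
 *		vma_end_reservation(h, vma, addr);	(abort the region_chg)
 *	else
 *		vma_commit_reservation(h, vma, addr);	(finalize via region_add)
 */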
1930enum vma_resv_mode {
1931 VMA_NEEDS_RESV,
1932 VMA_COMMIT_RESV,
1933 VMA_END_RESV,
1934 VMA_ADD_RESV,
1935};
1936static long __vma_reservation_common(struct hstate *h,
1937 struct vm_area_struct *vma, unsigned long addr,
1938 enum vma_resv_mode mode)
1939{
1940 struct resv_map *resv;
1941 pgoff_t idx;
1942 long ret;
1943
1944 resv = vma_resv_map(vma);
1945 if (!resv)
1946 return 1;
1947
1948 idx = vma_hugecache_offset(h, vma, addr);
1949 switch (mode) {
1950 case VMA_NEEDS_RESV:
1951 ret = region_chg(resv, idx, idx + 1);
1952 break;
1953 case VMA_COMMIT_RESV:
1954 ret = region_add(resv, idx, idx + 1);
1955 break;
1956 case VMA_END_RESV:
1957 region_abort(resv, idx, idx + 1);
1958 ret = 0;
1959 break;
1960 case VMA_ADD_RESV:
1961 if (vma->vm_flags & VM_MAYSHARE)
1962 ret = region_add(resv, idx, idx + 1);
1963 else {
1964 region_abort(resv, idx, idx + 1);
1965 ret = region_del(resv, idx, idx + 1);
1966 }
1967 break;
1968 default:
1969 BUG();
1970 }
1971
1972 if (vma->vm_flags & VM_MAYSHARE)
1973 return ret;
1974 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1975 /*
1976 * In most cases, reserves always exist for private mappings.
1977 * However, the file associated with the mapping could have
1978 * been hole punched or truncated after reserves were consumed.
1979 * A subsequent fault on such a range will not use reserves.
1980 * Subtle - The reserve map for private mappings has the
1981 * opposite meaning from that of shared mappings. If NO
1982 * entry is in the reserve map, it means a reservation exists.
1983 * If an entry exists in the reserve map, it means the
1984 * reservation has already been consumed. As a result, the
1985 * return value of this routine is the opposite of the
1986 * value returned from reserve map manipulation routines above.
1987 */
1988 if (ret)
1989 return 0;
1990 else
1991 return 1;
1992 }
1993 else
1994 return ret < 0 ? ret : 0;
1995}
1996
1997static long vma_needs_reservation(struct hstate *h,
1998 struct vm_area_struct *vma, unsigned long addr)
1999{
2000 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2001}
2002
2003static long vma_commit_reservation(struct hstate *h,
2004 struct vm_area_struct *vma, unsigned long addr)
2005{
2006 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2007}
2008
2009static void vma_end_reservation(struct hstate *h,
2010 struct vm_area_struct *vma, unsigned long addr)
2011{
2012 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2013}
2014
2015static long vma_add_reservation(struct hstate *h,
2016 struct vm_area_struct *vma, unsigned long addr)
2017{
2018 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2019}
2020
2021/*
2022 * This routine is called to restore a reservation on error paths. In the
2023 * specific error paths, a huge page was allocated (via alloc_huge_page)
2024 * and is about to be freed. If a reservation for the page existed,
2025 * alloc_huge_page would have consumed the reservation and set PagePrivate
2026 * in the newly allocated page. When the page is freed via free_huge_page,
2027 * the global reservation count will be incremented if PagePrivate is set.
2028 * However, free_huge_page cannot adjust the reserve map. Adjust the
2029 * reserve map here to be consistent with global reserve count adjustments
2030 * to be made by free_huge_page.
2031 */
2032static void restore_reserve_on_error(struct hstate *h,
2033 struct vm_area_struct *vma, unsigned long address,
2034 struct page *page)
2035{
2036 if (unlikely(PagePrivate(page))) {
2037 long rc = vma_needs_reservation(h, vma, address);
2038
2039 if (unlikely(rc < 0)) {
2040 /*
2041 * Rare out of memory condition in reserve map
2042 * manipulation. Clear PagePrivate so that
2043 * global reserve count will not be incremented
2044 * by free_huge_page. This will make it appear
2045 * as though the reservation for this page was
2046 * consumed. This may prevent the task from
2047 * faulting in the page at a later time. This
2048 * is better than inconsistent global huge page
2049 * accounting of reserve counts.
2050 */
2051 ClearPagePrivate(page);
2052 } else if (rc) {
2053 rc = vma_add_reservation(h, vma, address);
2054 if (unlikely(rc < 0))
2055 /*
2056 * See above comment about rare out of
2057 * memory condition.
2058 */
2059 ClearPagePrivate(page);
2060 } else
2061 vma_end_reservation(h, vma, address);
2062 }
2063}
2064
2065struct page *alloc_huge_page(struct vm_area_struct *vma,
2066 unsigned long addr, int avoid_reserve)
2067{
2068 struct hugepage_subpool *spool = subpool_vma(vma);
2069 struct hstate *h = hstate_vma(vma);
2070 struct page *page;
2071 long map_chg, map_commit;
2072 long gbl_chg;
2073 int ret, idx;
2074 struct hugetlb_cgroup *h_cg;
2075
2076 idx = hstate_index(h);
2077 /*
2078 * Examine the region/reserve map to determine if the process
2079 * has a reservation for the page to be allocated. A return
2080 * code of zero indicates a reservation exists (no change).
2081 */
2082 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2083 if (map_chg < 0)
2084 return ERR_PTR(-ENOMEM);
2085
2086 /*
2087 * Processes that did not create the mapping will have no
2088 * reserves as indicated by the region/reserve map. Check
2089 * that the allocation will not exceed the subpool limit.
2090 * Allocations for MAP_NORESERVE mappings also need to be
2091 * checked against any subpool limit.
2092 */
2093 if (map_chg || avoid_reserve) {
2094 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2095 if (gbl_chg < 0) {
2096 vma_end_reservation(h, vma, addr);
2097 return ERR_PTR(-ENOSPC);
2098 }
2099
2100 /*
2101 * Even though there was no reservation in the region/reserve
2102 * map, there could be reservations associated with the
2103 * subpool that can be used. This would be indicated if the
2104 * return value of hugepage_subpool_get_pages() is zero.
2105 * However, if avoid_reserve is specified we still avoid even
2106 * the subpool reservations.
2107 */
2108 if (avoid_reserve)
2109 gbl_chg = 1;
2110 }
2111
2112 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2113 if (ret)
2114 goto out_subpool_put;
2115
2116 spin_lock(&hugetlb_lock);
2117 /*
2118 * gbl_chg is passed to indicate whether or not a page must be taken
2119 * from the global free pool (global change). gbl_chg == 0 indicates
2120 * a reservation exists for the allocation.
2121 */
2122 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2123 if (!page) {
2124 spin_unlock(&hugetlb_lock);
2125 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2126 if (!page)
2127 goto out_uncharge_cgroup;
2128 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2129 SetPagePrivate(page);
2130 h->resv_huge_pages--;
2131 }
2132 spin_lock(&hugetlb_lock);
2133 list_move(&page->lru, &h->hugepage_activelist);
2134 /* Fall through */
2135 }
2136 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2137 spin_unlock(&hugetlb_lock);
2138
2139 set_page_private(page, (unsigned long)spool);
2140
2141 map_commit = vma_commit_reservation(h, vma, addr);
2142 if (unlikely(map_chg > map_commit)) {
2143 /*
2144 * The page was added to the reservation map between
2145 * vma_needs_reservation and vma_commit_reservation.
2146 * This indicates a race with hugetlb_reserve_pages.
2147 * Adjust for the subpool count incremented above AND
2148 * in hugetlb_reserve_pages for the same page. Also,
2149 * the reservation count added in hugetlb_reserve_pages
2150 * no longer applies.
2151 */
2152 long rsv_adjust;
2153
2154 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2155 hugetlb_acct_memory(h, -rsv_adjust);
2156 }
2157 return page;
2158
2159out_uncharge_cgroup:
2160 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2161out_subpool_put:
2162 if (map_chg || avoid_reserve)
2163 hugepage_subpool_put_pages(spool, 1);
2164 vma_end_reservation(h, vma, addr);
2165 return ERR_PTR(-ENOSPC);
2166}
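/*
 * Note for callers (a sketch of the pattern used by the fault paths):
 * alloc_huge_page() never returns NULL, it returns either a page or an
 * ERR_PTR() value, so the result must be tested with IS_ERR():
 *
 *	page = alloc_huge_page(vma, haddr, 0);
 *	if (IS_ERR(page))
 *		ret = vmf_error(PTR_ERR(page));
 */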
2167
2168int alloc_bootmem_huge_page(struct hstate *h)
2169 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2170int __alloc_bootmem_huge_page(struct hstate *h)
2171{
2172 struct huge_bootmem_page *m;
2173 int nr_nodes, node;
2174
2175 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2176 void *addr;
2177
2178 addr = memblock_alloc_try_nid_raw(
2179 huge_page_size(h), huge_page_size(h),
2180 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2181 if (addr) {
2182 /*
2183 * Use the beginning of the huge page to store the
2184 * huge_bootmem_page struct (until gather_bootmem
2185 * puts them into the mem_map).
2186 */
2187 m = addr;
2188 goto found;
2189 }
2190 }
2191 return 0;
2192
2193found:
2194 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2195 /* Put them into a private list first because mem_map is not up yet */
2196 INIT_LIST_HEAD(&m->list);
2197 list_add(&m->list, &huge_boot_pages);
2198 m->hstate = h;
2199 return 1;
2200}
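/*
 * Gigantic pages (order >= MAX_ORDER) can never come from the buddy
 * allocator, so they are carved out of memblock above while boot memory
 * is still available; gather_bootmem_prealloc() below later turns these
 * raw ranges into proper compound pages.
 */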
2201
2202static void __init prep_compound_huge_page(struct page *page,
2203 unsigned int order)
2204{
2205 if (unlikely(order > (MAX_ORDER - 1)))
2206 prep_compound_gigantic_page(page, order);
2207 else
2208 prep_compound_page(page, order);
2209}
2210
2211/* Put bootmem huge pages into the standard lists after mem_map is up */
2212static void __init gather_bootmem_prealloc(void)
2213{
2214 struct huge_bootmem_page *m;
2215
2216 list_for_each_entry(m, &huge_boot_pages, list) {
2217 struct page *page = virt_to_page(m);
2218 struct hstate *h = m->hstate;
2219
2220 WARN_ON(page_count(page) != 1);
2221 prep_compound_huge_page(page, h->order);
2222 WARN_ON(PageReserved(page));
2223 prep_new_huge_page(h, page, page_to_nid(page));
2224 put_page(page); /* free it into the hugepage allocator */
2225
2226 /*
2227 * If we had gigantic hugepages allocated at boot time, we need
2228 * to restore the 'stolen' pages to totalram_pages in order to
2229 * fix confusing memory reports from free(1) and other
2230 * side effects, like CommitLimit going negative.
2231 */
2232 if (hstate_is_gigantic(h))
2233 adjust_managed_page_count(page, 1 << h->order);
2234 cond_resched();
2235 }
2236}
2237
2238static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2239{
2240 unsigned long i;
2241 nodemask_t *node_alloc_noretry;
2242
2243 if (!hstate_is_gigantic(h)) {
2244 /*
2245 * Bit mask controlling how hard we retry per-node allocations.
2246 * Ignore errors as lower level routines can deal with
2247 * node_alloc_noretry == NULL. If this kmalloc fails at boot
2248 * time, we are likely in bigger trouble.
2249 */
2250 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2251 GFP_KERNEL);
2252 } else {
2253 /* allocations done at boot time */
2254 node_alloc_noretry = NULL;
2255 }
2256
2257 /* bit mask controlling how hard we retry per-node allocations */
2258 if (node_alloc_noretry)
2259 nodes_clear(*node_alloc_noretry);
2260
2261 for (i = 0; i < h->max_huge_pages; ++i) {
2262 if (hstate_is_gigantic(h)) {
2263 if (!alloc_bootmem_huge_page(h))
2264 break;
2265 } else if (!alloc_pool_huge_page(h,
2266 &node_states[N_MEMORY],
2267 node_alloc_noretry))
2268 break;
2269 cond_resched();
2270 }
2271 if (i < h->max_huge_pages) {
2272 char buf[32];
2273
2274 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2275 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2276 h->max_huge_pages, buf, i);
2277 h->max_huge_pages = i;
2278 }
2279
2280 kfree(node_alloc_noretry);
2281}
2282
2283static void __init hugetlb_init_hstates(void)
2284{
2285 struct hstate *h;
2286
2287 for_each_hstate(h) {
2288 if (minimum_order > huge_page_order(h))
2289 minimum_order = huge_page_order(h);
2290
2291 /* oversize hugepages were init'ed in early boot */
2292 if (!hstate_is_gigantic(h))
2293 hugetlb_hstate_alloc_pages(h);
2294 }
2295 VM_BUG_ON(minimum_order == UINT_MAX);
2296}
2297
2298static void __init report_hugepages(void)
2299{
2300 struct hstate *h;
2301
2302 for_each_hstate(h) {
2303 char buf[32];
2304
2305 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2306 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2307 buf, h->free_huge_pages);
2308 }
2309}
2310
2311#ifdef CONFIG_HIGHMEM
2312static void try_to_free_low(struct hstate *h, unsigned long count,
2313 nodemask_t *nodes_allowed)
2314{
2315 int i;
2316
2317 if (hstate_is_gigantic(h))
2318 return;
2319
2320 for_each_node_mask(i, *nodes_allowed) {
2321 struct page *page, *next;
2322 struct list_head *freel = &h->hugepage_freelists[i];
2323 list_for_each_entry_safe(page, next, freel, lru) {
2324 if (count >= h->nr_huge_pages)
2325 return;
2326 if (PageHighMem(page))
2327 continue;
2328 list_del(&page->lru);
2329 update_and_free_page(h, page);
2330 h->free_huge_pages--;
2331 h->free_huge_pages_node[page_to_nid(page)]--;
2332 }
2333 }
2334}
2335#else
2336static inline void try_to_free_low(struct hstate *h, unsigned long count,
2337 nodemask_t *nodes_allowed)
2338{
2339}
2340#endif
2341
2342/*
2343 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2344 * balanced by operating on them in a round-robin fashion.
2345 * Returns 1 if an adjustment was made.
2346 */
2347static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2348 int delta)
2349{
2350 int nr_nodes, node;
2351
2352 VM_BUG_ON(delta != -1 && delta != 1);
2353
2354 if (delta < 0) {
2355 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2356 if (h->surplus_huge_pages_node[node])
2357 goto found;
2358 }
2359 } else {
2360 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2361 if (h->surplus_huge_pages_node[node] <
2362 h->nr_huge_pages_node[node])
2363 goto found;
2364 }
2365 }
2366 return 0;
2367
2368found:
2369 h->surplus_huge_pages += delta;
2370 h->surplus_huge_pages_node[node] += delta;
2371 return 1;
2372}
2373
2374#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2375static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2376 nodemask_t *nodes_allowed)
2377{
2378 unsigned long min_count, ret;
2379 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2380
2381 /*
2382 * Bit mask controlling how hard we retry per-node allocations.
2383 * If we can not allocate the bit mask, do not attempt to allocate
2384 * the requested huge pages.
2385 */
2386 if (node_alloc_noretry)
2387 nodes_clear(*node_alloc_noretry);
2388 else
2389 return -ENOMEM;
2390
2391 spin_lock(&hugetlb_lock);
2392
2393 /*
2394 * Check for a node specific request.
2395 * Changing node specific huge page count may require a corresponding
2396 * change to the global count. In any case, the passed node mask
2397 * (nodes_allowed) will restrict alloc/free to the specified node.
2398 */
2399 if (nid != NUMA_NO_NODE) {
2400 unsigned long old_count = count;
2401
2402 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2403 /*
2404 * User may have specified a large count value which caused the
2405 * above calculation to overflow. In this case, they wanted
2406 * to allocate as many huge pages as possible. Set count to
2407 * largest possible value to align with their intention.
2408 */
2409 if (count < old_count)
2410 count = ULONG_MAX;
2411 }
2412
2413 /*
2414 * Runtime allocation of gigantic pages depends on the capability for
2415 * large page range allocation.
2416 * If the system does not provide this feature, return an error when
2417 * the user tries to allocate gigantic pages, but let the user free the
2418 * boot-time allocated gigantic pages.
2419 */
2420 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2421 if (count > persistent_huge_pages(h)) {
2422 spin_unlock(&hugetlb_lock);
2423 NODEMASK_FREE(node_alloc_noretry);
2424 return -EINVAL;
2425 }
2426 /* Fall through to decrease pool */
2427 }
2428
2429 /*
2430 * Increase the pool size
2431 * First take pages out of surplus state. Then make up the
2432 * remaining difference by allocating fresh huge pages.
2433 *
2434 * We might race with alloc_surplus_huge_page() here and be unable
2435 * to convert a surplus huge page to a normal huge page. That is
2436 * not critical, though; it just means the overall size of the
2437 * pool might be one hugepage larger than it needs to be, but
2438 * within all the constraints specified by the sysctls.
2439 */
2440 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2441 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2442 break;
2443 }
2444
2445 while (count > persistent_huge_pages(h)) {
2446 /*
2447 * If this allocation races such that we no longer need the
2448 * page, free_huge_page will handle it by freeing the page
2449 * and reducing the surplus.
2450 */
2451 spin_unlock(&hugetlb_lock);
2452
2453 /* yield cpu to avoid soft lockup */
2454 cond_resched();
2455
2456 ret = alloc_pool_huge_page(h, nodes_allowed,
2457 node_alloc_noretry);
2458 spin_lock(&hugetlb_lock);
2459 if (!ret)
2460 goto out;
2461
2462 /* Bail for signals. Probably ctrl-c from user */
2463 if (signal_pending(current))
2464 goto out;
2465 }
2466
2467 /*
2468 * Decrease the pool size
2469 * First return free pages to the buddy allocator (being careful
2470 * to keep enough around to satisfy reservations). Then place
2471 * pages into surplus state as needed so the pool will shrink
2472 * to the desired size as pages become free.
2473 *
2474 * By placing pages into the surplus state independent of the
2475 * overcommit value, we are allowing the surplus pool size to
2476 * exceed overcommit. There are few sane options here. Since
2477 * alloc_surplus_huge_page() is checking the global counter,
2478 * though, we'll note that we're not allowed to exceed surplus
2479 * and won't grow the pool anywhere else. Not until one of the
2480 * sysctls are changed, or the surplus pages go out of use.
2481 */
2482 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2483 min_count = max(count, min_count);
2484 try_to_free_low(h, min_count, nodes_allowed);
2485 while (min_count < persistent_huge_pages(h)) {
2486 if (!free_pool_huge_page(h, nodes_allowed, 0))
2487 break;
2488 cond_resched_lock(&hugetlb_lock);
2489 }
2490 while (count < persistent_huge_pages(h)) {
2491 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2492 break;
2493 }
2494out:
2495 h->max_huge_pages = persistent_huge_pages(h);
2496 spin_unlock(&hugetlb_lock);
2497
2498 NODEMASK_FREE(node_alloc_noretry);
2499
2500 return 0;
2501}
2502
2503#define HSTATE_ATTR_RO(_name) \
2504 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2505
2506#define HSTATE_ATTR(_name) \
2507 static struct kobj_attribute _name##_attr = \
2508 __ATTR(_name, 0644, _name##_show, _name##_store)
2509
2510static struct kobject *hugepages_kobj;
2511static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2512
2513static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2514
2515static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2516{
2517 int i;
2518
2519 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2520 if (hstate_kobjs[i] == kobj) {
2521 if (nidp)
2522 *nidp = NUMA_NO_NODE;
2523 return &hstates[i];
2524 }
2525
2526 return kobj_to_node_hstate(kobj, nidp);
2527}
2528
2529static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2530 struct kobj_attribute *attr, char *buf)
2531{
2532 struct hstate *h;
2533 unsigned long nr_huge_pages;
2534 int nid;
2535
2536 h = kobj_to_hstate(kobj, &nid);
2537 if (nid == NUMA_NO_NODE)
2538 nr_huge_pages = h->nr_huge_pages;
2539 else
2540 nr_huge_pages = h->nr_huge_pages_node[nid];
2541
2542 return sprintf(buf, "%lu\n", nr_huge_pages);
2543}
2544
2545static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2546 struct hstate *h, int nid,
2547 unsigned long count, size_t len)
2548{
2549 int err;
2550 nodemask_t nodes_allowed, *n_mask;
2551
2552 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2553 return -EINVAL;
2554
2555 if (nid == NUMA_NO_NODE) {
2556 /*
2557 * global hstate attribute
2558 */
2559 if (!(obey_mempolicy &&
2560 init_nodemask_of_mempolicy(&nodes_allowed)))
2561 n_mask = &node_states[N_MEMORY];
2562 else
2563 n_mask = &nodes_allowed;
2564 } else {
2565 /*
2566 * Node specific request. count adjustment happens in
2567 * set_max_huge_pages() after acquiring hugetlb_lock.
2568 */
2569 init_nodemask_of_node(&nodes_allowed, nid);
2570 n_mask = &nodes_allowed;
2571 }
2572
2573 err = set_max_huge_pages(h, count, nid, n_mask);
2574
2575 return err ? err : len;
2576}
2577
2578static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2579 struct kobject *kobj, const char *buf,
2580 size_t len)
2581{
2582 struct hstate *h;
2583 unsigned long count;
2584 int nid;
2585 int err;
2586
2587 err = kstrtoul(buf, 10, &count);
2588 if (err)
2589 return err;
2590
2591 h = kobj_to_hstate(kobj, &nid);
2592 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2593}
2594
2595static ssize_t nr_hugepages_show(struct kobject *kobj,
2596 struct kobj_attribute *attr, char *buf)
2597{
2598 return nr_hugepages_show_common(kobj, attr, buf);
2599}
2600
2601static ssize_t nr_hugepages_store(struct kobject *kobj,
2602 struct kobj_attribute *attr, const char *buf, size_t len)
2603{
2604 return nr_hugepages_store_common(false, kobj, buf, len);
2605}
2606HSTATE_ATTR(nr_hugepages);
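/*
 * Example usage from user space (paths follow the sysfs layout created
 * by hugetlb_sysfs_init() below; sizes are illustrative): resize the
 * 2MB pool to 1024 pages with
 *
 *	echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * A read returns the current pool size for the hstate.
 */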
2607
2608#ifdef CONFIG_NUMA
2609
2610/*
2611 * hstate attribute for optionally mempolicy-based constraint on persistent
2612 * huge page alloc/free.
2613 */
2614static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2615 struct kobj_attribute *attr, char *buf)
2616{
2617 return nr_hugepages_show_common(kobj, attr, buf);
2618}
2619
2620static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2621 struct kobj_attribute *attr, const char *buf, size_t len)
2622{
2623 return nr_hugepages_store_common(true, kobj, buf, len);
2624}
2625HSTATE_ATTR(nr_hugepages_mempolicy);
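/*
 * Illustrative example, in the spirit of
 * Documentation/admin-guide/mm/hugetlbpage.rst: apply the adjustment
 * only to the nodes in the calling task's mempolicy, e.g.
 *
 *	numactl --interleave=0,1 sh -c \
 *		'echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages_mempolicy'
 */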
2626#endif
2627
2628
2629static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2630 struct kobj_attribute *attr, char *buf)
2631{
2632 struct hstate *h = kobj_to_hstate(kobj, NULL);
2633 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2634}
2635
2636static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2637 struct kobj_attribute *attr, const char *buf, size_t count)
2638{
2639 int err;
2640 unsigned long input;
2641 struct hstate *h = kobj_to_hstate(kobj, NULL);
2642
2643 if (hstate_is_gigantic(h))
2644 return -EINVAL;
2645
2646 err = kstrtoul(buf, 10, &input);
2647 if (err)
2648 return err;
2649
2650 spin_lock(&hugetlb_lock);
2651 h->nr_overcommit_huge_pages = input;
2652 spin_unlock(&hugetlb_lock);
2653
2654 return count;
2655}
2656HSTATE_ATTR(nr_overcommit_hugepages);
2657
2658static ssize_t free_hugepages_show(struct kobject *kobj,
2659 struct kobj_attribute *attr, char *buf)
2660{
2661 struct hstate *h;
2662 unsigned long free_huge_pages;
2663 int nid;
2664
2665 h = kobj_to_hstate(kobj, &nid);
2666 if (nid == NUMA_NO_NODE)
2667 free_huge_pages = h->free_huge_pages;
2668 else
2669 free_huge_pages = h->free_huge_pages_node[nid];
2670
2671 return sprintf(buf, "%lu\n", free_huge_pages);
2672}
2673HSTATE_ATTR_RO(free_hugepages);
2674
2675static ssize_t resv_hugepages_show(struct kobject *kobj,
2676 struct kobj_attribute *attr, char *buf)
2677{
2678 struct hstate *h = kobj_to_hstate(kobj, NULL);
2679 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2680}
2681HSTATE_ATTR_RO(resv_hugepages);
2682
2683static ssize_t surplus_hugepages_show(struct kobject *kobj,
2684 struct kobj_attribute *attr, char *buf)
2685{
2686 struct hstate *h;
2687 unsigned long surplus_huge_pages;
2688 int nid;
2689
2690 h = kobj_to_hstate(kobj, &nid);
2691 if (nid == NUMA_NO_NODE)
2692 surplus_huge_pages = h->surplus_huge_pages;
2693 else
2694 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2695
2696 return sprintf(buf, "%lu\n", surplus_huge_pages);
2697}
2698HSTATE_ATTR_RO(surplus_hugepages);
2699
2700static struct attribute *hstate_attrs[] = {
2701 &nr_hugepages_attr.attr,
2702 &nr_overcommit_hugepages_attr.attr,
2703 &free_hugepages_attr.attr,
2704 &resv_hugepages_attr.attr,
2705 &surplus_hugepages_attr.attr,
2706#ifdef CONFIG_NUMA
2707 &nr_hugepages_mempolicy_attr.attr,
2708#endif
2709 NULL,
2710};
2711
2712static const struct attribute_group hstate_attr_group = {
2713 .attrs = hstate_attrs,
2714};
2715
2716static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2717 struct kobject **hstate_kobjs,
2718 const struct attribute_group *hstate_attr_group)
2719{
2720 int retval;
2721 int hi = hstate_index(h);
2722
2723 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2724 if (!hstate_kobjs[hi])
2725 return -ENOMEM;
2726
2727 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2728 if (retval)
2729 kobject_put(hstate_kobjs[hi]);
2730
2731 return retval;
2732}
2733
2734static void __init hugetlb_sysfs_init(void)
2735{
2736 struct hstate *h;
2737 int err;
2738
2739 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2740 if (!hugepages_kobj)
2741 return;
2742
2743 for_each_hstate(h) {
2744 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2745 hstate_kobjs, &hstate_attr_group);
2746 if (err)
2747 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2748 }
2749}
2750
2751#ifdef CONFIG_NUMA
2752
2753/*
2754 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2755 * with node devices in node_devices[] using a parallel array. The array
2756 * index of a node device or node_hstate equals the node id.
2757 * This is here to avoid any static dependency of the node device driver, in
2758 * the base kernel, on the hugetlb module.
2759 */
2760struct node_hstate {
2761 struct kobject *hugepages_kobj;
2762 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2763};
2764static struct node_hstate node_hstates[MAX_NUMNODES];
2765
2766/*
2767 * A subset of global hstate attributes for node devices
2768 */
2769static struct attribute *per_node_hstate_attrs[] = {
2770 &nr_hugepages_attr.attr,
2771 &free_hugepages_attr.attr,
2772 &surplus_hugepages_attr.attr,
2773 NULL,
2774};
2775
2776static const struct attribute_group per_node_hstate_attr_group = {
2777 .attrs = per_node_hstate_attrs,
2778};
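/*
 * These attributes appear under each node device, e.g. for node 0 with
 * 2MB huge pages (illustrative paths):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */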
2779
2780/*
2781 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2782 * Returns node id via non-NULL nidp.
2783 */
2784static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2785{
2786 int nid;
2787
2788 for (nid = 0; nid < nr_node_ids; nid++) {
2789 struct node_hstate *nhs = &node_hstates[nid];
2790 int i;
2791 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2792 if (nhs->hstate_kobjs[i] == kobj) {
2793 if (nidp)
2794 *nidp = nid;
2795 return &hstates[i];
2796 }
2797 }
2798
2799 BUG();
2800 return NULL;
2801}
2802
2803/*
2804 * Unregister hstate attributes from a single node device.
2805 * No-op if no hstate attributes attached.
2806 */
2807static void hugetlb_unregister_node(struct node *node)
2808{
2809 struct hstate *h;
2810 struct node_hstate *nhs = &node_hstates[node->dev.id];
2811
2812 if (!nhs->hugepages_kobj)
2813 return; /* no hstate attributes */
2814
2815 for_each_hstate(h) {
2816 int idx = hstate_index(h);
2817 if (nhs->hstate_kobjs[idx]) {
2818 kobject_put(nhs->hstate_kobjs[idx]);
2819 nhs->hstate_kobjs[idx] = NULL;
2820 }
2821 }
2822
2823 kobject_put(nhs->hugepages_kobj);
2824 nhs->hugepages_kobj = NULL;
2825}
2826
2827
2828/*
2829 * Register hstate attributes for a single node device.
2830 * No-op if attributes already registered.
2831 */
2832static void hugetlb_register_node(struct node *node)
2833{
2834 struct hstate *h;
2835 struct node_hstate *nhs = &node_hstates[node->dev.id];
2836 int err;
2837
2838 if (nhs->hugepages_kobj)
2839 return; /* already allocated */
2840
2841 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2842 &node->dev.kobj);
2843 if (!nhs->hugepages_kobj)
2844 return;
2845
2846 for_each_hstate(h) {
2847 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2848 nhs->hstate_kobjs,
2849 &per_node_hstate_attr_group);
2850 if (err) {
2851 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2852 h->name, node->dev.id);
2853 hugetlb_unregister_node(node);
2854 break;
2855 }
2856 }
2857}
2858
2859/*
2860 * hugetlb init time: register hstate attributes for all registered node
2861 * devices of nodes that have memory. All on-line nodes should have
2862 * registered their associated device by this time.
2863 */
2864static void __init hugetlb_register_all_nodes(void)
2865{
2866 int nid;
2867
2868 for_each_node_state(nid, N_MEMORY) {
2869 struct node *node = node_devices[nid];
2870 if (node->dev.id == nid)
2871 hugetlb_register_node(node);
2872 }
2873
2874 /*
2875 * Let the node device driver know we're here so it can
2876 * [un]register hstate attributes on node hotplug.
2877 */
2878 register_hugetlbfs_with_node(hugetlb_register_node,
2879 hugetlb_unregister_node);
2880}
2881#else /* !CONFIG_NUMA */
2882
2883static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2884{
2885 BUG();
2886 if (nidp)
2887 *nidp = -1;
2888 return NULL;
2889}
2890
2891static void hugetlb_register_all_nodes(void) { }
2892
2893#endif
2894
2895static int __init hugetlb_init(void)
2896{
2897 int i;
2898
2899 if (!hugepages_supported())
2900 return 0;
2901
2902 if (!size_to_hstate(default_hstate_size)) {
2903 if (default_hstate_size != 0) {
2904 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2905 default_hstate_size, HPAGE_SIZE);
2906 }
2907
2908 default_hstate_size = HPAGE_SIZE;
2909 if (!size_to_hstate(default_hstate_size))
2910 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2911 }
2912 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2913 if (default_hstate_max_huge_pages) {
2914 if (!default_hstate.max_huge_pages)
2915 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2916 }
2917
2918 hugetlb_init_hstates();
2919 gather_bootmem_prealloc();
2920 report_hugepages();
2921
2922 hugetlb_sysfs_init();
2923 hugetlb_register_all_nodes();
2924 hugetlb_cgroup_file_init();
2925
2926#ifdef CONFIG_SMP
2927 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2928#else
2929 num_fault_mutexes = 1;
2930#endif
2931 hugetlb_fault_mutex_table =
2932 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2933 GFP_KERNEL);
2934 BUG_ON(!hugetlb_fault_mutex_table);
2935
2936 for (i = 0; i < num_fault_mutexes; i++)
2937 mutex_init(&hugetlb_fault_mutex_table[i]);
2938 return 0;
2939}
2940subsys_initcall(hugetlb_init);
2941
2942/* Should be called on processing a hugepagesz=... option */
2943void __init hugetlb_bad_size(void)
2944{
2945 parsed_valid_hugepagesz = false;
2946}
2947
2948void __init hugetlb_add_hstate(unsigned int order)
2949{
2950 struct hstate *h;
2951 unsigned long i;
2952
2953 if (size_to_hstate(PAGE_SIZE << order)) {
2954 pr_warn("hugepagesz= specified twice, ignoring\n");
2955 return;
2956 }
2957 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2958 BUG_ON(order == 0);
2959 h = &hstates[hugetlb_max_hstate++];
2960 h->order = order;
2961 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2962 h->nr_huge_pages = 0;
2963 h->free_huge_pages = 0;
2964 for (i = 0; i < MAX_NUMNODES; ++i)
2965 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2966 INIT_LIST_HEAD(&h->hugepage_activelist);
2967 h->next_nid_to_alloc = first_memory_node;
2968 h->next_nid_to_free = first_memory_node;
2969 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2970 huge_page_size(h)/1024);
2971
2972 parsed_hstate = h;
2973}
2974
2975static int __init hugetlb_nrpages_setup(char *s)
2976{
2977 unsigned long *mhp;
2978 static unsigned long *last_mhp;
2979
2980 if (!parsed_valid_hugepagesz) {
2981 pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
2982 s);
2983 parsed_valid_hugepagesz = true;
2984 return 1;
2985 }
2986 /*
2987 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2988 * so this hugepages= parameter goes to the "default hstate".
2989 */
2990 else if (!hugetlb_max_hstate)
2991 mhp = &default_hstate_max_huge_pages;
2992 else
2993 mhp = &parsed_hstate->max_huge_pages;
2994
2995 if (mhp == last_mhp) {
2996 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2997 return 1;
2998 }
2999
3000 if (sscanf(s, "%lu", mhp) <= 0)
3001 *mhp = 0;
3002
3003 /*
3004 * Global state is always initialized later in hugetlb_init.
3005 * But we need to allocate pages for order >= MAX_ORDER hstates here,
3006 * early, while the bootmem allocator is still usable.
3007 */
3008 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3009 hugetlb_hstate_alloc_pages(parsed_hstate);
3010
3011 last_mhp = mhp;
3012
3013 return 1;
3014}
3015__setup("hugepages=", hugetlb_nrpages_setup);
3016
3017static int __init hugetlb_default_setup(char *s)
3018{
3019 default_hstate_size = memparse(s, &s);
3020 return 1;
3021}
3022__setup("default_hugepagesz=", hugetlb_default_setup);
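/*
 * Boot command line example (illustrative sizes): pre-allocate sixteen
 * 1GB pages and four 2MB pages, and make 1GB the default size.  Note
 * that each hugepages= parameter applies to the hugepagesz= that most
 * recently precedes it:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=16 hugepagesz=2M hugepages=4
 */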
3023
3024static unsigned int cpuset_mems_nr(unsigned int *array)
3025{
3026 int node;
3027 unsigned int nr = 0;
3028
3029 for_each_node_mask(node, cpuset_current_mems_allowed)
3030 nr += array[node];
3031
3032 return nr;
3033}
3034
3035#ifdef CONFIG_SYSCTL
3036static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3037 struct ctl_table *table, int write,
3038 void __user *buffer, size_t *length, loff_t *ppos)
3039{
3040 struct hstate *h = &default_hstate;
3041 unsigned long tmp = h->max_huge_pages;
3042 int ret;
3043
3044 if (!hugepages_supported())
3045 return -EOPNOTSUPP;
3046
3047 table->data = &tmp;
3048 table->maxlen = sizeof(unsigned long);
3049 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3050 if (ret)
3051 goto out;
3052
3053 if (write)
3054 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3055 NUMA_NO_NODE, tmp, *length);
3056out:
3057 return ret;
3058}
3059
3060int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3061 void __user *buffer, size_t *length, loff_t *ppos)
3062{
3063
3064 return hugetlb_sysctl_handler_common(false, table, write,
3065 buffer, length, ppos);
3066}
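/*
 * This handler backs /proc/sys/vm/nr_hugepages, so the default hstate's
 * pool can equally be resized via sysctl (illustrative):
 *
 *	sysctl vm.nr_hugepages=512
 *
 * which is equivalent to writing 512 to the global sysfs nr_hugepages
 * attribute above.
 */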
3067
3068#ifdef CONFIG_NUMA
3069int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3070 void __user *buffer, size_t *length, loff_t *ppos)
3071{
3072 return hugetlb_sysctl_handler_common(true, table, write,
3073 buffer, length, ppos);
3074}
3075#endif /* CONFIG_NUMA */
3076
3077int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3078 void __user *buffer,
3079 size_t *length, loff_t *ppos)
3080{
3081 struct hstate *h = &default_hstate;
3082 unsigned long tmp;
3083 int ret;
3084
3085 if (!hugepages_supported())
3086 return -EOPNOTSUPP;
3087
3088 tmp = h->nr_overcommit_huge_pages;
3089
3090 if (write && hstate_is_gigantic(h))
3091 return -EINVAL;
3092
3093 table->data = &tmp;
3094 table->maxlen = sizeof(unsigned long);
3095 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3096 if (ret)
3097 goto out;
3098
3099 if (write) {
3100 spin_lock(&hugetlb_lock);
3101 h->nr_overcommit_huge_pages = tmp;
3102 spin_unlock(&hugetlb_lock);
3103 }
3104out:
3105 return ret;
3106}
3107
3108#endif /* CONFIG_SYSCTL */
3109
3110void hugetlb_report_meminfo(struct seq_file *m)
3111{
3112 struct hstate *h;
3113 unsigned long total = 0;
3114
3115 if (!hugepages_supported())
3116 return;
3117
3118 for_each_hstate(h) {
3119 unsigned long count = h->nr_huge_pages;
3120
3121 total += (PAGE_SIZE << huge_page_order(h)) * count;
3122
3123 if (h == &default_hstate)
3124 seq_printf(m,
3125 "HugePages_Total: %5lu\n"
3126 "HugePages_Free: %5lu\n"
3127 "HugePages_Rsvd: %5lu\n"
3128 "HugePages_Surp: %5lu\n"
3129 "Hugepagesize: %8lu kB\n",
3130 count,
3131 h->free_huge_pages,
3132 h->resv_huge_pages,
3133 h->surplus_huge_pages,
3134 (PAGE_SIZE << huge_page_order(h)) / 1024);
3135 }
3136
3137 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3138}
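/*
 * Sample /proc/meminfo output produced by the above (values are
 * illustrative; 64 pages of 2048 kB account for the 131072 kB total):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       32
 *	HugePages_Rsvd:        8
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:          131072 kB
 */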
3139
3140int hugetlb_report_node_meminfo(int nid, char *buf)
3141{
3142 struct hstate *h = &default_hstate;
3143 if (!hugepages_supported())
3144 return 0;
3145 return sprintf(buf,
3146 "Node %d HugePages_Total: %5u\n"
3147 "Node %d HugePages_Free: %5u\n"
3148 "Node %d HugePages_Surp: %5u\n",
3149 nid, h->nr_huge_pages_node[nid],
3150 nid, h->free_huge_pages_node[nid],
3151 nid, h->surplus_huge_pages_node[nid]);
3152}
3153
3154void hugetlb_show_meminfo(void)
3155{
3156 struct hstate *h;
3157 int nid;
3158
3159 if (!hugepages_supported())
3160 return;
3161
3162 for_each_node_state(nid, N_MEMORY)
3163 for_each_hstate(h)
3164 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3165 nid,
3166 h->nr_huge_pages_node[nid],
3167 h->free_huge_pages_node[nid],
3168 h->surplus_huge_pages_node[nid],
3169 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3170}
3171
3172void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3173{
3174 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3175 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3176}
3177
3178/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3179unsigned long hugetlb_total_pages(void)
3180{
3181 struct hstate *h;
3182 unsigned long nr_total_pages = 0;
3183
3184 for_each_hstate(h)
3185 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3186 return nr_total_pages;
3187}
3188
3189static int hugetlb_acct_memory(struct hstate *h, long delta)
3190{
3191 int ret = -ENOMEM;
3192
3193 spin_lock(&hugetlb_lock);
3194 /*
3195 * When cpuset is configured, it breaks the strict hugetlb page
3196 * reservation as the accounting is done on a global variable. Such
3197 * reservation is completely rubbish in the presence of cpuset because
3198 * the reservation is not checked against page availability for the
3199 * current cpuset. An application can still potentially be OOM-killed
3200 * by the kernel for lack of free hugetlb pages in the cpuset that the
3201 * task is in. Attempting to enforce strict accounting with cpuset is
3202 * almost impossible (or too ugly) because cpusets are so fluid that a
3203 * task or memory node can be dynamically moved between them.
3204 *
3205 * The change of semantics for shared hugetlb mapping with cpuset is
3206 * undesirable. However, in order to preserve some of the semantics,
3207 * we fall back to check against current free page availability as
3208 * a best attempt and hopefully to minimize the impact of changing
3209 * semantics that cpuset has.
3210 */
3211 if (delta > 0) {
3212 if (gather_surplus_pages(h, delta) < 0)
3213 goto out;
3214
3215 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3216 return_unused_surplus_pages(h, delta);
3217 goto out;
3218 }
3219 }
3220
3221 ret = 0;
3222 if (delta < 0)
3223 return_unused_surplus_pages(h, (unsigned long) -delta);
3224
3225out:
3226 spin_unlock(&hugetlb_lock);
3227 return ret;
3228}
3229
3230static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3231{
3232 struct resv_map *resv = vma_resv_map(vma);
3233
3234 /*
3235 * This new VMA should share its sibling's reservation map if present.
3236 * The VMA will only ever have a valid reservation map pointer where
3237 * it is being copied for another still existing VMA. As that VMA
3238 * has a reference to the reservation map it cannot disappear until
3239 * after this open call completes. It is therefore safe to take a
3240 * new reference here without additional locking.
3241 */
3242 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3243 kref_get(&resv->refs);
3244}
3245
3246static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3247{
3248 struct hstate *h = hstate_vma(vma);
3249 struct resv_map *resv = vma_resv_map(vma);
3250 struct hugepage_subpool *spool = subpool_vma(vma);
3251 unsigned long reserve, start, end;
3252 long gbl_reserve;
3253
3254 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3255 return;
3256
3257 start = vma_hugecache_offset(h, vma, vma->vm_start);
3258 end = vma_hugecache_offset(h, vma, vma->vm_end);
3259
3260 reserve = (end - start) - region_count(resv, start, end);
3261
3262 kref_put(&resv->refs, resv_map_release);
3263
3264 if (reserve) {
3265 /*
3266 * Decrement reserve counts. The global reserve count may be
3267 * adjusted if the subpool has a minimum size.
3268 */
3269 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3270 hugetlb_acct_memory(h, -gbl_reserve);
3271 }
3272}
3273
3274static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3275{
3276 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3277 return -EINVAL;
3278 return 0;
3279}
3280
3281static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3282{
3283 struct hstate *hstate = hstate_vma(vma);
3284
3285 return 1UL << huge_page_shift(hstate);
3286}
3287
3288/*
3289 * We cannot handle pagefaults against hugetlb pages at all. They cause
3290 * handle_mm_fault() to try to instantiate regular-sized pages in the
3291 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
3292 * this far.
3293 */
3294static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3295{
3296 BUG();
3297 return 0;
3298}
3299
3300/*
3301 * When a new function is introduced to vm_operations_struct and added
3302 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3303 * This is because under System V memory model, mappings created via
3304 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3305 * and their original vm_ops are overwritten with shm_vm_ops.
3306 */
3307const struct vm_operations_struct hugetlb_vm_ops = {
3308 .fault = hugetlb_vm_op_fault,
3309 .open = hugetlb_vm_op_open,
3310 .close = hugetlb_vm_op_close,
3311 .split = hugetlb_vm_op_split,
3312 .pagesize = hugetlb_vm_op_pagesize,
3313};
3314
3315static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3316 int writable)
3317{
3318 pte_t entry;
3319
3320 if (writable) {
3321 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3322 vma->vm_page_prot)));
3323 } else {
3324 entry = huge_pte_wrprotect(mk_huge_pte(page,
3325 vma->vm_page_prot));
3326 }
3327 entry = pte_mkyoung(entry);
3328 entry = pte_mkhuge(entry);
3329 entry = arch_make_huge_pte(entry, vma, page, writable);
3330
3331 return entry;
3332}
3333
3334static void set_huge_ptep_writable(struct vm_area_struct *vma,
3335 unsigned long address, pte_t *ptep)
3336{
3337 pte_t entry;
3338
3339 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3340 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3341 update_mmu_cache(vma, address, ptep);
3342}
3343
3344bool is_hugetlb_entry_migration(pte_t pte)
3345{
3346 swp_entry_t swp;
3347
3348 if (huge_pte_none(pte) || pte_present(pte))
3349 return false;
3350 swp = pte_to_swp_entry(pte);
3351 if (non_swap_entry(swp) && is_migration_entry(swp))
3352 return true;
3353 else
3354 return false;
3355}
3356
3357static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3358{
3359 swp_entry_t swp;
3360
3361 if (huge_pte_none(pte) || pte_present(pte))
3362 return 0;
3363 swp = pte_to_swp_entry(pte);
3364 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3365 return 1;
3366 else
3367 return 0;
3368}
3369
3370int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3371 struct vm_area_struct *vma)
3372{
3373 pte_t *src_pte, *dst_pte, entry, dst_entry;
3374 struct page *ptepage;
3375 unsigned long addr;
3376 int cow;
3377 struct hstate *h = hstate_vma(vma);
3378 unsigned long sz = huge_page_size(h);
3379 struct mmu_notifier_range range;
3380 int ret = 0;
3381
3382 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3383
3384 if (cow) {
3385 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3386 vma->vm_start,
3387 vma->vm_end);
3388 mmu_notifier_invalidate_range_start(&range);
3389 }
3390
3391 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3392 spinlock_t *src_ptl, *dst_ptl;
3393 src_pte = huge_pte_offset(src, addr, sz);
3394 if (!src_pte)
3395 continue;
3396 dst_pte = huge_pte_alloc(dst, addr, sz);
3397 if (!dst_pte) {
3398 ret = -ENOMEM;
3399 break;
3400 }
3401
3402 /*
3403 * If the pagetables are shared don't copy or take references.
3404 * dst_pte == src_pte is the common case of src/dest sharing.
3405 *
3406 * However, src could have 'unshared' and dst shares with
3407 * another vma. If dst_pte !none, this implies sharing.
3408 * Check here before taking page table lock, and once again
3409 * after taking the lock below.
3410 */
3411 dst_entry = huge_ptep_get(dst_pte);
3412 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3413 continue;
3414
3415 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3416 src_ptl = huge_pte_lockptr(h, src, src_pte);
3417 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3418 entry = huge_ptep_get(src_pte);
3419 dst_entry = huge_ptep_get(dst_pte);
3420 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3421 /*
3422 * Skip if src entry none. Also, skip in the
3423 * unlikely case dst entry !none as this implies
3424 * sharing with another vma.
3425 */
3426 ;
3427 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3428 is_hugetlb_entry_hwpoisoned(entry))) {
3429 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3430
3431 if (is_write_migration_entry(swp_entry) && cow) {
3432 /*
3433 * COW mappings require pages in both
3434 * parent and child to be set to read.
3435 */
3436 make_migration_entry_read(&swp_entry);
3437 entry = swp_entry_to_pte(swp_entry);
3438 set_huge_swap_pte_at(src, addr, src_pte,
3439 entry, sz);
3440 }
3441 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3442 } else {
3443 if (cow) {
3444 /*
3445 * No need to notify as we are downgrading page
3446 * table protection not changing it to point
3447 * to a new page.
3448 *
3449 * See Documentation/vm/mmu_notifier.rst
3450 */
3451 huge_ptep_set_wrprotect(src, addr, src_pte);
3452 }
3453 entry = huge_ptep_get(src_pte);
3454 ptepage = pte_page(entry);
3455 get_page(ptepage);
3456 page_dup_rmap(ptepage, true);
3457 set_huge_pte_at(dst, addr, dst_pte, entry);
3458 hugetlb_count_add(pages_per_huge_page(h), dst);
3459 }
3460 spin_unlock(src_ptl);
3461 spin_unlock(dst_ptl);
3462 }
3463
3464 if (cow)
3465 mmu_notifier_invalidate_range_end(&range);
3466
3467 return ret;
3468}
3469
3470void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3471 unsigned long start, unsigned long end,
3472 struct page *ref_page)
3473{
3474 struct mm_struct *mm = vma->vm_mm;
3475 unsigned long address;
3476 pte_t *ptep;
3477 pte_t pte;
3478 spinlock_t *ptl;
3479 struct page *page;
3480 struct hstate *h = hstate_vma(vma);
3481 unsigned long sz = huge_page_size(h);
3482 struct mmu_notifier_range range;
3483
3484 WARN_ON(!is_vm_hugetlb_page(vma));
3485 BUG_ON(start & ~huge_page_mask(h));
3486 BUG_ON(end & ~huge_page_mask(h));
3487
3488 /*
3489 * This is a hugetlb vma, all the pte entries should point
3490 * to huge pages.
3491 */
3492 tlb_change_page_size(tlb, sz);
3493 tlb_start_vma(tlb, vma);
3494
3495 /*
3496 * If sharing is possible, alert mmu notifiers of the worst case.
3497 */
3498 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3499 end);
3500 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3501 mmu_notifier_invalidate_range_start(&range);
3502 address = start;
3503 for (; address < end; address += sz) {
3504 ptep = huge_pte_offset(mm, address, sz);
3505 if (!ptep)
3506 continue;
3507
3508 ptl = huge_pte_lock(h, mm, ptep);
3509 if (huge_pmd_unshare(mm, &address, ptep)) {
3510 spin_unlock(ptl);
3511 /*
3512 * We just unmapped a page of PMDs by clearing a PUD.
3513 * The caller's TLB flush range should cover this area.
3514 */
3515 continue;
3516 }
3517
3518 pte = huge_ptep_get(ptep);
3519 if (huge_pte_none(pte)) {
3520 spin_unlock(ptl);
3521 continue;
3522 }
3523
3524 /*
3525 * A migrating or HWPoisoned hugepage is already unmapped
3526 * and its refcount is dropped, so just clear the pte here.
3527 */
3528 if (unlikely(!pte_present(pte))) {
3529 huge_pte_clear(mm, address, ptep, sz);
3530 spin_unlock(ptl);
3531 continue;
3532 }
3533
3534 page = pte_page(pte);
3535 /*
3536 * If a reference page is supplied, it is because a specific
3537 * page is being unmapped, not a range. Ensure the page we
3538 * are about to unmap is the actual page of interest.
3539 */
3540 if (ref_page) {
3541 if (page != ref_page) {
3542 spin_unlock(ptl);
3543 continue;
3544 }
3545 /*
3546 * Mark the VMA as having unmapped its page so that
3547 * future faults in this VMA will fail rather than
3548 * look like data was lost.
3549 */
3550 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3551 }
3552
3553 pte = huge_ptep_get_and_clear(mm, address, ptep);
3554 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3555 if (huge_pte_dirty(pte))
3556 set_page_dirty(page);
3557
3558 hugetlb_count_sub(pages_per_huge_page(h), mm);
3559 page_remove_rmap(page, true);
3560
3561 spin_unlock(ptl);
3562 tlb_remove_page_size(tlb, page, huge_page_size(h));
3563 /*
3564 * Bail out after unmapping the reference page, if supplied
3565 */
3566 if (ref_page)
3567 break;
3568 }
3569 mmu_notifier_invalidate_range_end(&range);
3570 tlb_end_vma(tlb, vma);
3571}
3572
3573void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3574 struct vm_area_struct *vma, unsigned long start,
3575 unsigned long end, struct page *ref_page)
3576{
3577 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3578
3579 /*
3580 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3581 * test will fail on a vma being torn down, and not grab a page table
3582 * on its way out. We're lucky that the flag has such an appropriate
3583 * name, and can in fact be safely cleared here. We could clear it
3584 * before the __unmap_hugepage_range above, but all that's necessary
3585 * is to clear it before releasing the i_mmap_rwsem. This works
3586 * because in the context this is called, the VMA is about to be
3587 * destroyed and the i_mmap_rwsem is held.
3588 */
3589 vma->vm_flags &= ~VM_MAYSHARE;
3590}
3591
3592void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3593 unsigned long end, struct page *ref_page)
3594{
3595 struct mm_struct *mm;
3596 struct mmu_gather tlb;
3597 unsigned long tlb_start = start;
3598 unsigned long tlb_end = end;
3599
3600 /*
3601 * If shared PMDs were possibly used within this vma range, adjust
3602 * start/end for worst case tlb flushing.
3603 * Note that we cannot be sure if PMDs are shared until we try to
3604 * unmap pages. However, we want to make sure TLB flushing covers
3605 * the largest possible range.
3606 */
3607 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3608
3609 mm = vma->vm_mm;
3610
3611 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3612 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3613 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3614}
3615
3616/*
3617 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3618 * mapping it owns the reserve page for. The intention is to unmap the page
3619 * from other VMAs and let the children be SIGKILLed if they are faulting the
3620 * same region.
3621 */
3622static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3623 struct page *page, unsigned long address)
3624{
3625 struct hstate *h = hstate_vma(vma);
3626 struct vm_area_struct *iter_vma;
3627 struct address_space *mapping;
3628 pgoff_t pgoff;
3629
3630 /*
3631 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3632 * from page cache lookup which is in HPAGE_SIZE units.
3633 */
3634 address = address & huge_page_mask(h);
3635 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3636 vma->vm_pgoff;
3637 mapping = vma->vm_file->f_mapping;
3638
3639 /*
3640 * Take the mapping lock for the duration of the table walk. As
3641 * this mapping should be shared between all the VMAs,
3642 * __unmap_hugepage_range() is called with the lock already held.
3643 */
3644 i_mmap_lock_write(mapping);
3645 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3646 /* Do not unmap the current VMA */
3647 if (iter_vma == vma)
3648 continue;
3649
3650 /*
3651 * Shared VMAs have their own reserves and do not affect
3652 * MAP_PRIVATE accounting but it is possible that a shared
3653 * VMA is using the same page so check and skip such VMAs.
3654 */
3655 if (iter_vma->vm_flags & VM_MAYSHARE)
3656 continue;
3657
3658 /*
3659 * Unmap the page from other VMAs without their own reserves.
3660 * They get marked to be SIGKILLed if they fault in these
3661 * areas. This is because a future no-page fault on this VMA
3662 * could insert a zeroed page instead of the data existing
3663 * from the time of fork. This would look like data corruption.
3664 */
3665 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3666 unmap_hugepage_range(iter_vma, address,
3667 address + huge_page_size(h), page);
3668 }
3669 i_mmap_unlock_write(mapping);
3670}
3671
3672/*
3673 * Hugetlb_cow() should be called with page lock of the original hugepage held.
3674 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3675 * cannot race with other handlers or page migration.
3676 * Keep the pte_same checks anyway to make transition from the mutex easier.
3677 */
3678static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3679 unsigned long address, pte_t *ptep,
3680 struct page *pagecache_page, spinlock_t *ptl)
3681{
3682 pte_t pte;
3683 struct hstate *h = hstate_vma(vma);
3684 struct page *old_page, *new_page;
3685 int outside_reserve = 0;
3686 vm_fault_t ret = 0;
3687 unsigned long haddr = address & huge_page_mask(h);
3688 struct mmu_notifier_range range;
3689
3690 pte = huge_ptep_get(ptep);
3691 old_page = pte_page(pte);
3692
3693retry_avoidcopy:
3694 /* If no-one else is actually using this page, avoid the copy
3695 * and just make the page writable */
3696 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3697 page_move_anon_rmap(old_page, vma);
3698 set_huge_ptep_writable(vma, haddr, ptep);
3699 return 0;
3700 }
3701
3702 /*
3703 * If the process that created a MAP_PRIVATE mapping is about to
3704 * perform a COW due to a shared page count, attempt to satisfy
3705 * the allocation without using the existing reserves. The pagecache
3706 * page is used to determine if the reserve at this address was
3707 * consumed or not. If reserves were used, a partial faulted mapping
3708 * at the time of fork() could consume its reserves on COW instead
3709 * of the full address range.
3710 */
3711 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3712 old_page != pagecache_page)
3713 outside_reserve = 1;
3714
3715 get_page(old_page);
3716
3717 /*
3718 * Drop page table lock as buddy allocator may be called. It will
3719 * be acquired again before returning to the caller, as expected.
3720 */
3721 spin_unlock(ptl);
3722 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3723
3724 if (IS_ERR(new_page)) {
3725 /*
3726 * If a process owning a MAP_PRIVATE mapping fails to COW,
3727 * it is due to references held by a child and an insufficient
3728		 * huge page pool. To guarantee the original mapper's
3729 * reliability, unmap the page from child processes. The child
3730 * may get SIGKILLed if it later faults.
3731 */
3732 if (outside_reserve) {
3733 put_page(old_page);
3734 BUG_ON(huge_pte_none(pte));
3735 unmap_ref_private(mm, vma, old_page, haddr);
3736 BUG_ON(huge_pte_none(pte));
3737 spin_lock(ptl);
3738 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3739 if (likely(ptep &&
3740 pte_same(huge_ptep_get(ptep), pte)))
3741 goto retry_avoidcopy;
3742 /*
3743			 * A race occurred while re-acquiring the page
3744			 * table lock; our job is done.
3745 */
3746 return 0;
3747 }
3748
3749 ret = vmf_error(PTR_ERR(new_page));
3750 goto out_release_old;
3751 }
3752
3753 /*
3754	 * When the original hugepage is a shared one, it does not have
3755 * anon_vma prepared.
3756 */
3757 if (unlikely(anon_vma_prepare(vma))) {
3758 ret = VM_FAULT_OOM;
3759 goto out_release_all;
3760 }
3761
3762 copy_user_huge_page(new_page, old_page, address, vma,
3763 pages_per_huge_page(h));
3764 __SetPageUptodate(new_page);
3765
3766 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3767 haddr + huge_page_size(h));
3768 mmu_notifier_invalidate_range_start(&range);
3769
3770 /*
3771 * Retake the page table lock to check for racing updates
3772 * before the page tables are altered
3773 */
3774 spin_lock(ptl);
3775 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3776 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3777 ClearPagePrivate(new_page);
3778
3779 /* Break COW */
3780 huge_ptep_clear_flush(vma, haddr, ptep);
3781 mmu_notifier_invalidate_range(mm, range.start, range.end);
3782 set_huge_pte_at(mm, haddr, ptep,
3783 make_huge_pte(vma, new_page, 1));
3784 page_remove_rmap(old_page, true);
3785 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3786 set_page_huge_active(new_page);
3787 /* Make the old page be freed below */
3788 new_page = old_page;
3789 }
3790 spin_unlock(ptl);
3791 mmu_notifier_invalidate_range_end(&range);
3792out_release_all:
3793 restore_reserve_on_error(h, vma, haddr, new_page);
3794 put_page(new_page);
3795out_release_old:
3796 put_page(old_page);
3797
3798 spin_lock(ptl); /* Caller expects lock to be held */
3799 return ret;
3800}
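/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the drop-lock/allocate/recheck pattern hugetlb_cow() depends on.
 * Whenever the page table lock is released so the allocator may sleep,
 * the pte must be re-read and compared with the cached value before any
 * update is committed. The replacement helpers are hypothetical.
 */
#if 0
	spin_unlock(ptl);			/* allocation may sleep */
	new_page = allocate_replacement();	/* hypothetical helper */
	spin_lock(ptl);
	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
	if (ptep && pte_same(huge_ptep_get(ptep), pte))
		install_replacement(new_page);	/* no racing update */
	else
		discard_replacement(new_page);	/* lost the race */
#endif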
3801
3802/* Return the pagecache page at a given address within a VMA */
3803static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3804 struct vm_area_struct *vma, unsigned long address)
3805{
3806 struct address_space *mapping;
3807 pgoff_t idx;
3808
3809 mapping = vma->vm_file->f_mapping;
3810 idx = vma_hugecache_offset(h, vma, address);
3811
3812 return find_lock_page(mapping, idx);
3813}
3814
3815/*
3816 * Return whether there is a pagecache page to back given address within VMA.
3817 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3818 */
3819static bool hugetlbfs_pagecache_present(struct hstate *h,
3820 struct vm_area_struct *vma, unsigned long address)
3821{
3822 struct address_space *mapping;
3823 pgoff_t idx;
3824 struct page *page;
3825
3826 mapping = vma->vm_file->f_mapping;
3827 idx = vma_hugecache_offset(h, vma, address);
3828
3829 page = find_get_page(mapping, idx);
3830 if (page)
3831 put_page(page);
3832 return page != NULL;
3833}
3834
3835int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3836 pgoff_t idx)
3837{
3838 struct inode *inode = mapping->host;
3839 struct hstate *h = hstate_inode(inode);
3840 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3841
3842 if (err)
3843 return err;
3844 ClearPagePrivate(page);
3845
3846 /*
3847 * set page dirty so that it will not be removed from cache/file
3848 * by non-hugetlbfs specific code paths.
3849 */
3850 set_page_dirty(page);
3851
3852 spin_lock(&inode->i_lock);
3853 inode->i_blocks += blocks_per_huge_page(h);
3854 spin_unlock(&inode->i_lock);
3855 return 0;
3856}
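/*
 * Worked example (editor's addition): inode->i_blocks counts 512-byte
 * sectors, so each 2MB huge page added to the cache above contributes
 * 2MB / 512 = 4096 to i_blocks, assuming blocks_per_huge_page() is
 * huge_page_size(h) / 512.
 */
#if 0
	unsigned long sectors_per_2mb_page = (2UL << 20) >> 9;	/* 4096 */
#endif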
3857
3858static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3859 struct vm_area_struct *vma,
3860 struct address_space *mapping, pgoff_t idx,
3861 unsigned long address, pte_t *ptep, unsigned int flags)
3862{
3863 struct hstate *h = hstate_vma(vma);
3864 vm_fault_t ret = VM_FAULT_SIGBUS;
3865 int anon_rmap = 0;
3866 unsigned long size;
3867 struct page *page;
3868 pte_t new_pte;
3869 spinlock_t *ptl;
3870 unsigned long haddr = address & huge_page_mask(h);
3871 bool new_page = false;
3872
3873 /*
3874 * Currently, we are forced to kill the process in the event the
3875 * original mapper has unmapped pages from the child due to a failed
3876	 * COW. Warn that such a situation has occurred, as it may not be obvious.
3877 */
3878 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3879 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3880 current->pid);
3881 return ret;
3882 }
3883
3884 /*
3885 * Use page lock to guard against racing truncation
3886 * before we get page_table_lock.
3887 */
3888retry:
3889 page = find_lock_page(mapping, idx);
3890 if (!page) {
3891 size = i_size_read(mapping->host) >> huge_page_shift(h);
3892 if (idx >= size)
3893 goto out;
3894
3895 /*
3896 * Check for page in userfault range
3897 */
3898 if (userfaultfd_missing(vma)) {
3899 u32 hash;
3900 struct vm_fault vmf = {
3901 .vma = vma,
3902 .address = haddr,
3903 .flags = flags,
3904 /*
3905 * Hard to debug if it ends up being
3906 * used by a callee that assumes
3907 * something about the other
3908 * uninitialized fields... same as in
3909 * memory.c
3910 */
3911 };
3912
3913 /*
3914 * hugetlb_fault_mutex must be dropped before
3915 * handling userfault. Reacquire after handling
3916 * fault to make calling code simpler.
3917 */
3918 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3919 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3920 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3921 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3922 goto out;
3923 }
3924
3925 page = alloc_huge_page(vma, haddr, 0);
3926 if (IS_ERR(page)) {
3927 /*
3928			 * Returning an error will result in the faulting task
3929			 * being sent SIGBUS. The hugetlb fault mutex prevents
3930			 * two tasks from racing to fault in the same page, which
3931			 * could result in spurious "unable to allocate" errors.
3932			 * Page migration does not take the fault mutex, but
3933			 * does a clear-then-write of ptes under the page table
3934			 * lock. Page fault code could race with migration,
3935			 * notice the cleared pte and try to allocate a page
3936			 * here. Before returning an error, take ptl and make
3937			 * sure there really is no pte entry.
3938 */
3939 ptl = huge_pte_lock(h, mm, ptep);
3940 if (!huge_pte_none(huge_ptep_get(ptep))) {
3941 ret = 0;
3942 spin_unlock(ptl);
3943 goto out;
3944 }
3945 spin_unlock(ptl);
3946 ret = vmf_error(PTR_ERR(page));
3947 goto out;
3948 }
3949 clear_huge_page(page, address, pages_per_huge_page(h));
3950 __SetPageUptodate(page);
3951 new_page = true;
3952
3953 if (vma->vm_flags & VM_MAYSHARE) {
3954 int err = huge_add_to_page_cache(page, mapping, idx);
3955 if (err) {
3956 put_page(page);
3957 if (err == -EEXIST)
3958 goto retry;
3959 goto out;
3960 }
3961 } else {
3962 lock_page(page);
3963 if (unlikely(anon_vma_prepare(vma))) {
3964 ret = VM_FAULT_OOM;
3965 goto backout_unlocked;
3966 }
3967 anon_rmap = 1;
3968 }
3969 } else {
3970 /*
3971		 * If a memory error occurs between mmap() and fault, some
3972		 * processes don't have a hwpoisoned swap entry for the errored
3973		 * virtual address. So block the hugepage fault with a PG_hwpoison check.
3974 */
3975 if (unlikely(PageHWPoison(page))) {
3976 ret = VM_FAULT_HWPOISON |
3977 VM_FAULT_SET_HINDEX(hstate_index(h));
3978 goto backout_unlocked;
3979 }
3980 }
3981
3982 /*
3983 * If we are going to COW a private mapping later, we examine the
3984 * pending reservations for this page now. This will ensure that
3985 * any allocations necessary to record that reservation occur outside
3986 * the spinlock.
3987 */
3988 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3989 if (vma_needs_reservation(h, vma, haddr) < 0) {
3990 ret = VM_FAULT_OOM;
3991 goto backout_unlocked;
3992 }
3993 /* Just decrements count, does not deallocate */
3994 vma_end_reservation(h, vma, haddr);
3995 }
3996
3997 ptl = huge_pte_lock(h, mm, ptep);
3998 size = i_size_read(mapping->host) >> huge_page_shift(h);
3999 if (idx >= size)
4000 goto backout;
4001
4002 ret = 0;
4003 if (!huge_pte_none(huge_ptep_get(ptep)))
4004 goto backout;
4005
4006 if (anon_rmap) {
4007 ClearPagePrivate(page);
4008 hugepage_add_new_anon_rmap(page, vma, haddr);
4009 } else
4010 page_dup_rmap(page, true);
4011 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4012 && (vma->vm_flags & VM_SHARED)));
4013 set_huge_pte_at(mm, haddr, ptep, new_pte);
4014
4015 hugetlb_count_add(pages_per_huge_page(h), mm);
4016 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4017 /* Optimization, do the COW without a second fault */
4018 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4019 }
4020
4021 spin_unlock(ptl);
4022
4023 /*
4024 * Only make newly allocated pages active. Existing pages found
4025 * in the pagecache could be !page_huge_active() if they have been
4026 * isolated for migration.
4027 */
4028 if (new_page)
4029 set_page_huge_active(page);
4030
4031 unlock_page(page);
4032out:
4033 return ret;
4034
4035backout:
4036 spin_unlock(ptl);
4037backout_unlocked:
4038 unlock_page(page);
4039 restore_reserve_on_error(h, vma, haddr, page);
4040 put_page(page);
4041 goto out;
4042}
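/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * the retry-on-EEXIST idiom hugetlb_no_page() uses when two tasks race to
 * instantiate the same index. Only one insertion into the page cache
 * wins; the loser drops its page and re-looks-up the winner's. The
 * allocate/insert helpers are hypothetical.
 */
#if 0
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		page = allocate_and_clear();			/* hypothetical */
		if (insert_into_cache(page, mapping, idx) == -EEXIST) {
			put_page(page);
			goto retry;	/* someone else inserted first */
		}
	}
#endif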
4043
4044#ifdef CONFIG_SMP
4045u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4046 pgoff_t idx, unsigned long address)
4047{
4048 unsigned long key[2];
4049 u32 hash;
4050
4051 key[0] = (unsigned long) mapping;
4052 key[1] = idx;
4053
4054 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
4055
4056 return hash & (num_fault_mutexes - 1);
4057}
4058#else
4059/*
4060 * For uniprocessor systems we always use a single mutex, so just
4061 * return 0 and avoid the hashing overhead.
4062 */
4063u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
4064 pgoff_t idx, unsigned long address)
4065{
4066 return 0;
4067}
4068#endif
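/*
 * Worked example (editor's addition): the "hash & (num_fault_mutexes - 1)"
 * above relies on num_fault_mutexes being a power of two, in which case
 * the mask is equivalent to a modulo. With 256 mutexes, a jhash2 result
 * of 0x12345 selects bucket 0x45.
 */
#if 0
	u32 bucket = 0x12345 & (256 - 1);	/* == 0x45 == 0x12345 % 256 */
#endif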
4069
4070vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4071 unsigned long address, unsigned int flags)
4072{
4073 pte_t *ptep, entry;
4074 spinlock_t *ptl;
4075 vm_fault_t ret;
4076 u32 hash;
4077 pgoff_t idx;
4078 struct page *page = NULL;
4079 struct page *pagecache_page = NULL;
4080 struct hstate *h = hstate_vma(vma);
4081 struct address_space *mapping;
4082 int need_wait_lock = 0;
4083 unsigned long haddr = address & huge_page_mask(h);
4084
4085 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4086 if (ptep) {
4087 entry = huge_ptep_get(ptep);
4088 if (unlikely(is_hugetlb_entry_migration(entry))) {
4089 migration_entry_wait_huge(vma, mm, ptep);
4090 return 0;
4091 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4092 return VM_FAULT_HWPOISON_LARGE |
4093 VM_FAULT_SET_HINDEX(hstate_index(h));
4094 } else {
4095 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4096 if (!ptep)
4097 return VM_FAULT_OOM;
4098 }
4099
4100 mapping = vma->vm_file->f_mapping;
4101 idx = vma_hugecache_offset(h, vma, haddr);
4102
4103 /*
4104 * Serialize hugepage allocation and instantiation, so that we don't
4105 * get spurious allocation failures if two CPUs race to instantiate
4106 * the same page in the page cache.
4107 */
4108 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
4109 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4110
4111 entry = huge_ptep_get(ptep);
4112 if (huge_pte_none(entry)) {
4113 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4114 goto out_mutex;
4115 }
4116
4117 ret = 0;
4118
4119 /*
4120 * entry could be a migration/hwpoison entry at this point, so this
4121 * check prevents the kernel from going below assuming that we have
4122	 * an active hugepage in pagecache. This goto expects the second
4123	 * page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
4124	 * check will properly handle it.
4125 */
4126 if (!pte_present(entry))
4127 goto out_mutex;
4128
4129 /*
4130 * If we are going to COW the mapping later, we examine the pending
4131 * reservations for this page now. This will ensure that any
4132 * allocations necessary to record that reservation occur outside the
4133 * spinlock. For private mappings, we also lookup the pagecache
4134 * page now as it is used to determine if a reservation has been
4135 * consumed.
4136 */
4137 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4138 if (vma_needs_reservation(h, vma, haddr) < 0) {
4139 ret = VM_FAULT_OOM;
4140 goto out_mutex;
4141 }
4142 /* Just decrements count, does not deallocate */
4143 vma_end_reservation(h, vma, haddr);
4144
4145 if (!(vma->vm_flags & VM_MAYSHARE))
4146 pagecache_page = hugetlbfs_pagecache_page(h,
4147 vma, haddr);
4148 }
4149
4150 ptl = huge_pte_lock(h, mm, ptep);
4151
4152 /* Check for a racing update before calling hugetlb_cow */
4153 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4154 goto out_ptl;
4155
4156 /*
4157 * hugetlb_cow() requires page locks of pte_page(entry) and
4158	 * pagecache_page, so here we need to take the former one
4159	 * when page != pagecache_page or !pagecache_page.
4160 */
4161 page = pte_page(entry);
4162 if (page != pagecache_page)
4163 if (!trylock_page(page)) {
4164 need_wait_lock = 1;
4165 goto out_ptl;
4166 }
4167
4168 get_page(page);
4169
4170 if (flags & FAULT_FLAG_WRITE) {
4171 if (!huge_pte_write(entry)) {
4172 ret = hugetlb_cow(mm, vma, address, ptep,
4173 pagecache_page, ptl);
4174 goto out_put_page;
4175 }
4176 entry = huge_pte_mkdirty(entry);
4177 }
4178 entry = pte_mkyoung(entry);
4179 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4180 flags & FAULT_FLAG_WRITE))
4181 update_mmu_cache(vma, haddr, ptep);
4182out_put_page:
4183 if (page != pagecache_page)
4184 unlock_page(page);
4185 put_page(page);
4186out_ptl:
4187 spin_unlock(ptl);
4188
4189 if (pagecache_page) {
4190 unlock_page(pagecache_page);
4191 put_page(pagecache_page);
4192 }
4193out_mutex:
4194 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4195 /*
4196	 * Generally it's safe to hold a refcount while waiting for the page
4197	 * lock. But here we only wait to defer the next page fault and avoid
4198	 * a busy loop; the page is not used after it is unlocked, before we
4199	 * return from the current page fault. So we are safe from accessing a
4200	 * freed page even though we wait here without taking a refcount.
4201 */
4202 if (need_wait_lock)
4203 wait_on_page_locked(page);
4204 return ret;
4205}
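/*
 * Usage sketch (editor's addition): the serialization protocol around
 * hugetlb_no_page() as followed by hugetlb_fault() above. All sleepable
 * instantiation work for a given (mapping, index) pair runs under one
 * hashed mutex, taken and released in the same function.
 */
#if 0
	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... allocate/instantiate the page for (mapping, idx) ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
#endif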
4206
4207/*
4208 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4209 * modifications for huge pages.
4210 */
4211int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4212 pte_t *dst_pte,
4213 struct vm_area_struct *dst_vma,
4214 unsigned long dst_addr,
4215 unsigned long src_addr,
4216 struct page **pagep)
4217{
4218 struct address_space *mapping;
4219 pgoff_t idx;
4220 unsigned long size;
4221 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4222 struct hstate *h = hstate_vma(dst_vma);
4223 pte_t _dst_pte;
4224 spinlock_t *ptl;
4225 int ret;
4226 struct page *page;
4227
4228 if (!*pagep) {
4229 ret = -ENOMEM;
4230 page = alloc_huge_page(dst_vma, dst_addr, 0);
4231 if (IS_ERR(page))
4232 goto out;
4233
4234 ret = copy_huge_page_from_user(page,
4235 (const void __user *) src_addr,
4236 pages_per_huge_page(h), false);
4237
4238 /* fallback to copy_from_user outside mmap_sem */
4239 if (unlikely(ret)) {
4240 ret = -ENOENT;
4241 *pagep = page;
4242 /* don't free the page */
4243 goto out;
4244 }
4245 } else {
4246 page = *pagep;
4247 *pagep = NULL;
4248 }
4249
4250 /*
4251 * The memory barrier inside __SetPageUptodate makes sure that
4252 * preceding stores to the page contents become visible before
4253 * the set_pte_at() write.
4254 */
4255 __SetPageUptodate(page);
4256
4257 mapping = dst_vma->vm_file->f_mapping;
4258 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4259
4260 /*
4261 * If shared, add to page cache
4262 */
4263 if (vm_shared) {
4264 size = i_size_read(mapping->host) >> huge_page_shift(h);
4265 ret = -EFAULT;
4266 if (idx >= size)
4267 goto out_release_nounlock;
4268
4269 /*
4270 * Serialization between remove_inode_hugepages() and
4271 * huge_add_to_page_cache() below happens through the
4272		 * hugetlb_fault_mutex_table, which must be held by
4273 * the caller.
4274 */
4275 ret = huge_add_to_page_cache(page, mapping, idx);
4276 if (ret)
4277 goto out_release_nounlock;
4278 }
4279
4280 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4281 spin_lock(ptl);
4282
4283 /*
4284 * Recheck the i_size after holding PT lock to make sure not
4285 * to leave any page mapped (as page_mapped()) beyond the end
4286 * of the i_size (remove_inode_hugepages() is strict about
4287 * enforcing that). If we bail out here, we'll also leave a
4288 * page in the radix tree in the vm_shared case beyond the end
4289 * of the i_size, but remove_inode_hugepages() will take care
4290 * of it as soon as we drop the hugetlb_fault_mutex_table.
4291 */
4292 size = i_size_read(mapping->host) >> huge_page_shift(h);
4293 ret = -EFAULT;
4294 if (idx >= size)
4295 goto out_release_unlock;
4296
4297 ret = -EEXIST;
4298 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4299 goto out_release_unlock;
4300
4301 if (vm_shared) {
4302 page_dup_rmap(page, true);
4303 } else {
4304 ClearPagePrivate(page);
4305 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4306 }
4307
4308 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4309 if (dst_vma->vm_flags & VM_WRITE)
4310 _dst_pte = huge_pte_mkdirty(_dst_pte);
4311 _dst_pte = pte_mkyoung(_dst_pte);
4312
4313 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4314
4315 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4316 dst_vma->vm_flags & VM_WRITE);
4317 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4318
4319 /* No need to invalidate - it was non-present before */
4320 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4321
4322 spin_unlock(ptl);
4323 set_page_huge_active(page);
4324 if (vm_shared)
4325 unlock_page(page);
4326 ret = 0;
4327out:
4328 return ret;
4329out_release_unlock:
4330 spin_unlock(ptl);
4331 if (vm_shared)
4332 unlock_page(page);
4333out_release_nounlock:
4334 put_page(page);
4335 goto out;
4336}
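/*
 * Userspace sketch (editor's addition, not part of the kernel source):
 * how hugetlb_mcopy_atomic_pte() is typically reached. A monitor that
 * registered a hugetlbfs range with a userfaultfd resolves a missing
 * fault with UFFDIO_COPY; dst and len must be huge-page aligned. The
 * variables uffd, dst_addr, src_buf and huge_sz are assumed to be set up
 * elsewhere.
 */
#if 0
	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	struct uffdio_copy copy = {
		.dst = (__u64)dst_addr,	/* huge-page aligned */
		.src = (__u64)src_buf,
		.len = huge_sz,		/* multiple of the huge page size */
		.mode = 0,
	};
	if (ioctl(uffd, UFFDIO_COPY, &copy))
		perror("UFFDIO_COPY");
#endif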
4337
4338long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4339 struct page **pages, struct vm_area_struct **vmas,
4340 unsigned long *position, unsigned long *nr_pages,
4341 long i, unsigned int flags, int *nonblocking)
4342{
4343 unsigned long pfn_offset;
4344 unsigned long vaddr = *position;
4345 unsigned long remainder = *nr_pages;
4346 struct hstate *h = hstate_vma(vma);
4347 int err = -EFAULT;
4348
4349 while (vaddr < vma->vm_end && remainder) {
4350 pte_t *pte;
4351 spinlock_t *ptl = NULL;
4352 int absent;
4353 struct page *page;
4354
4355 /*
4356 * If we have a pending SIGKILL, don't keep faulting pages and
4357 * potentially allocating memory.
4358 */
4359 if (fatal_signal_pending(current)) {
4360 remainder = 0;
4361 break;
4362 }
4363
4364 /*
4365 * Some archs (sparc64, sh*) have multiple pte_ts to
4366 * each hugepage. We have to make sure we get the
4367 * first, for the page indexing below to work.
4368 *
4369 * Note that page table lock is not held when pte is null.
4370 */
4371 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4372 huge_page_size(h));
4373 if (pte)
4374 ptl = huge_pte_lock(h, mm, pte);
4375 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4376
4377 /*
4378 * When coredumping, it suits get_dump_page if we just return
4379 * an error where there's an empty slot with no huge pagecache
4380 * to back it. This way, we avoid allocating a hugepage, and
4381 * the sparse dumpfile avoids allocating disk blocks, but its
4382 * huge holes still show up with zeroes where they need to be.
4383 */
4384 if (absent && (flags & FOLL_DUMP) &&
4385 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4386 if (pte)
4387 spin_unlock(ptl);
4388 remainder = 0;
4389 break;
4390 }
4391
4392 /*
4393		 * We need to call hugetlb_fault for both hugepages under
4394		 * migration (in which case hugetlb_fault waits for the
4395		 * migration) and hwpoisoned hugepages (in which case we need
4396		 * to prevent the caller from accessing them). To do this, we
4397		 * use is_swap_pte here instead of is_hugetlb_entry_migration
4398		 * and is_hugetlb_entry_hwpoisoned: it simply covers both
4399		 * cases, and we can't follow correct pages directly from any
4400		 * kind of swap entry anyway.
4401 */
4402 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4403 ((flags & FOLL_WRITE) &&
4404 !huge_pte_write(huge_ptep_get(pte)))) {
4405 vm_fault_t ret;
4406 unsigned int fault_flags = 0;
4407
4408 if (pte)
4409 spin_unlock(ptl);
4410 if (flags & FOLL_WRITE)
4411 fault_flags |= FAULT_FLAG_WRITE;
4412 if (nonblocking)
4413 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4414 if (flags & FOLL_NOWAIT)
4415 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4416 FAULT_FLAG_RETRY_NOWAIT;
4417 if (flags & FOLL_TRIED) {
4418 VM_WARN_ON_ONCE(fault_flags &
4419 FAULT_FLAG_ALLOW_RETRY);
4420 fault_flags |= FAULT_FLAG_TRIED;
4421 }
4422 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4423 if (ret & VM_FAULT_ERROR) {
4424 err = vm_fault_to_errno(ret, flags);
4425 remainder = 0;
4426 break;
4427 }
4428 if (ret & VM_FAULT_RETRY) {
4429 if (nonblocking &&
4430 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4431 *nonblocking = 0;
4432 *nr_pages = 0;
4433 /*
4434 * VM_FAULT_RETRY must not return an
4435 * error, it will return zero
4436 * instead.
4437 *
4438 * No need to update "position" as the
4439 * caller will not check it after
4440 * *nr_pages is set to 0.
4441 */
4442 return i;
4443 }
4444 continue;
4445 }
4446
4447 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4448 page = pte_page(huge_ptep_get(pte));
4449
4450 /*
4451 * Instead of doing 'try_get_page()' below in the same_page
4452 * loop, just check the count once here.
4453 */
4454 if (unlikely(page_count(page) <= 0)) {
4455 if (pages) {
4456 spin_unlock(ptl);
4457 remainder = 0;
4458 err = -ENOMEM;
4459 break;
4460 }
4461 }
4462same_page:
4463 if (pages) {
4464 pages[i] = mem_map_offset(page, pfn_offset);
4465 get_page(pages[i]);
4466 }
4467
4468 if (vmas)
4469 vmas[i] = vma;
4470
4471 vaddr += PAGE_SIZE;
4472 ++pfn_offset;
4473 --remainder;
4474 ++i;
4475 if (vaddr < vma->vm_end && remainder &&
4476 pfn_offset < pages_per_huge_page(h)) {
4477 /*
4478 * We use pfn_offset to avoid touching the pageframes
4479 * of this compound page.
4480 */
4481 goto same_page;
4482 }
4483 spin_unlock(ptl);
4484 }
4485 *nr_pages = remainder;
4486 /*
4487	 * Setting position is actually required only if remainder is
4488	 * not zero, but it's faster not to add an "if (remainder)"
4489	 * branch.
4490 */
4491 *position = vaddr;
4492
4493 return i ? i : err;
4494}
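/*
 * Worked example (editor's addition): the same_page loop above steps
 * through base pages inside one huge page via pfn_offset, so with 2MB
 * huge pages a single page table walk can satisfy up to 512 consecutive
 * GUP slots before the next pte lookup is needed.
 */
#if 0
	unsigned long slots = (2UL << 20) >> PAGE_SHIFT;	/* 512 with 4KB pages */
#endif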
4495
4496#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4497/*
4498 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4499 * implement this.
4500 */
4501#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4502#endif
4503
4504unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4505 unsigned long address, unsigned long end, pgprot_t newprot)
4506{
4507 struct mm_struct *mm = vma->vm_mm;
4508 unsigned long start = address;
4509 pte_t *ptep;
4510 pte_t pte;
4511 struct hstate *h = hstate_vma(vma);
4512 unsigned long pages = 0;
4513 bool shared_pmd = false;
4514 struct mmu_notifier_range range;
4515
4516 /*
4517 * In the case of shared PMDs, the area to flush could be beyond
4518 * start/end. Set range.start/range.end to cover the maximum possible
4519 * range if PMD sharing is possible.
4520 */
4521 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4522 0, vma, mm, start, end);
4523 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4524
4525 BUG_ON(address >= end);
4526 flush_cache_range(vma, range.start, range.end);
4527
4528 mmu_notifier_invalidate_range_start(&range);
4529 i_mmap_lock_write(vma->vm_file->f_mapping);
4530 for (; address < end; address += huge_page_size(h)) {
4531 spinlock_t *ptl;
4532 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4533 if (!ptep)
4534 continue;
4535 ptl = huge_pte_lock(h, mm, ptep);
4536 if (huge_pmd_unshare(mm, &address, ptep)) {
4537 pages++;
4538 spin_unlock(ptl);
4539 shared_pmd = true;
4540 continue;
4541 }
4542 pte = huge_ptep_get(ptep);
4543 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4544 spin_unlock(ptl);
4545 continue;
4546 }
4547 if (unlikely(is_hugetlb_entry_migration(pte))) {
4548 swp_entry_t entry = pte_to_swp_entry(pte);
4549
4550 if (is_write_migration_entry(entry)) {
4551 pte_t newpte;
4552
4553 make_migration_entry_read(&entry);
4554 newpte = swp_entry_to_pte(entry);
4555 set_huge_swap_pte_at(mm, address, ptep,
4556 newpte, huge_page_size(h));
4557 pages++;
4558 }
4559 spin_unlock(ptl);
4560 continue;
4561 }
4562 if (!huge_pte_none(pte)) {
4563 pte_t old_pte;
4564
4565 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4566 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4567 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4568 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4569 pages++;
4570 }
4571 spin_unlock(ptl);
4572 }
4573 /*
4574 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4575 * may have cleared our pud entry and done put_page on the page table:
4576 * once we release i_mmap_rwsem, another task can do the final put_page
4577 * and that page table be reused and filled with junk. If we actually
4578 * did unshare a page of pmds, flush the range corresponding to the pud.
4579 */
4580 if (shared_pmd)
4581 flush_hugetlb_tlb_range(vma, range.start, range.end);
4582 else
4583 flush_hugetlb_tlb_range(vma, start, end);
4584 /*
4585 * No need to call mmu_notifier_invalidate_range() we are downgrading
4586 * page table protection not changing it to point to a new page.
4587 *
4588 * See Documentation/vm/mmu_notifier.rst
4589 */
4590 i_mmap_unlock_write(vma->vm_file->f_mapping);
4591 mmu_notifier_invalidate_range_end(&range);
4592
4593 return pages << h->order;
4594}
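/*
 * Worked example (editor's addition): the return value above is in base
 * pages ("pages << h->order"). Changing the protection of three 2MB huge
 * pages (order 9 with 4KB base pages) therefore reports 3 << 9 = 1536
 * base pages to the caller.
 */
#if 0
	unsigned long base_pages = 3UL << 9;	/* 1536 */
#endif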
4595
4596int hugetlb_reserve_pages(struct inode *inode,
4597 long from, long to,
4598 struct vm_area_struct *vma,
4599 vm_flags_t vm_flags)
4600{
4601 long ret, chg;
4602 struct hstate *h = hstate_inode(inode);
4603 struct hugepage_subpool *spool = subpool_inode(inode);
4604 struct resv_map *resv_map;
4605 long gbl_reserve;
4606
4607 /* This should never happen */
4608 if (from > to) {
4609 VM_WARN(1, "%s called with a negative range\n", __func__);
4610 return -EINVAL;
4611 }
4612
4613 /*
4614 * Only apply hugepage reservation if asked. At fault time, an
4615 * attempt will be made for VM_NORESERVE to allocate a page
4616	 * without using reserves.
4617 */
4618 if (vm_flags & VM_NORESERVE)
4619 return 0;
4620
4621 /*
4622 * Shared mappings base their reservation on the number of pages that
4623 * are already allocated on behalf of the file. Private mappings need
4624 * to reserve the full area even if read-only as mprotect() may be
4625 * called to make the mapping read-write. Assume !vma is a shm mapping
4626 */
4627 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4628 /*
4629		 * resv_map cannot be NULL as hugetlb_reserve_pages is only
4630 * called for inodes for which resv_maps were created (see
4631 * hugetlbfs_get_inode).
4632 */
4633 resv_map = inode_resv_map(inode);
4634
4635 chg = region_chg(resv_map, from, to);
4636
4637 } else {
4638 resv_map = resv_map_alloc();
4639 if (!resv_map)
4640 return -ENOMEM;
4641
4642 chg = to - from;
4643
4644 set_vma_resv_map(vma, resv_map);
4645 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4646 }
4647
4648 if (chg < 0) {
4649 ret = chg;
4650 goto out_err;
4651 }
4652
4653 /*
4654 * There must be enough pages in the subpool for the mapping. If
4655 * the subpool has a minimum size, there may be some global
4656 * reservations already in place (gbl_reserve).
4657 */
4658 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4659 if (gbl_reserve < 0) {
4660 ret = -ENOSPC;
4661 goto out_err;
4662 }
4663
4664 /*
4665 * Check enough hugepages are available for the reservation.
4666 * Hand the pages back to the subpool if there are not
4667 */
4668 ret = hugetlb_acct_memory(h, gbl_reserve);
4669 if (ret < 0) {
4670 /* put back original number of pages, chg */
4671 (void)hugepage_subpool_put_pages(spool, chg);
4672 goto out_err;
4673 }
4674
4675 /*
4676 * Account for the reservations made. Shared mappings record regions
4677 * that have reservations as they are shared by multiple VMAs.
4678 * When the last VMA disappears, the region map says how much
4679 * the reservation was and the page cache tells how much of
4680 * the reservation was consumed. Private mappings are per-VMA and
4681 * only the consumed reservations are tracked. When the VMA
4682 * disappears, the original reservation is the VMA size and the
4683 * consumed reservations are stored in the map. Hence, nothing
4684 * else has to be done for private mappings here
4685 */
4686 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4687 long add = region_add(resv_map, from, to);
4688
4689 if (unlikely(chg > add)) {
4690 /*
4691 * pages in this range were added to the reserve
4692 * map between region_chg and region_add. This
4693 * indicates a race with alloc_huge_page. Adjust
4694 * the subpool and reserve counts modified above
4695 * based on the difference.
4696 */
4697 long rsv_adjust;
4698
4699 rsv_adjust = hugepage_subpool_put_pages(spool,
4700 chg - add);
4701 hugetlb_acct_memory(h, -rsv_adjust);
4702 }
4703 }
4704 return 0;
4705out_err:
4706 if (!vma || vma->vm_flags & VM_MAYSHARE)
4707 /* Don't call region_abort if region_chg failed */
4708 if (chg >= 0)
4709 region_abort(resv_map, from, to);
4710 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4711 kref_put(&resv_map->refs, resv_map_release);
4712 return ret;
4713}
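/*
 * Worked example (editor's addition): the race adjustment above, in
 * numbers. If region_chg() computed chg = 10 but racing allocations added
 * entries so region_add() returns add = 8, then rsv_adjust = 10 - 8 = 2
 * pages are handed back to the subpool and the global reserve count is
 * decremented accordingly.
 */
#if 0
	long rsv_adjust = 10 - 8;	/* chg - add */
#endif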
4714
4715long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4716 long freed)
4717{
4718 struct hstate *h = hstate_inode(inode);
4719 struct resv_map *resv_map = inode_resv_map(inode);
4720 long chg = 0;
4721 struct hugepage_subpool *spool = subpool_inode(inode);
4722 long gbl_reserve;
4723
4724 /*
4725 * Since this routine can be called in the evict inode path for all
4726 * hugetlbfs inodes, resv_map could be NULL.
4727 */
4728 if (resv_map) {
4729 chg = region_del(resv_map, start, end);
4730 /*
4731 * region_del() can fail in the rare case where a region
4732 * must be split and another region descriptor can not be
4733 * allocated. If end == LONG_MAX, it will not fail.
4734 */
4735 if (chg < 0)
4736 return chg;
4737 }
4738
4739 spin_lock(&inode->i_lock);
4740 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4741 spin_unlock(&inode->i_lock);
4742
4743 /*
4744 * If the subpool has a minimum size, the number of global
4745 * reservations to be released may be adjusted.
4746 */
4747 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4748 hugetlb_acct_memory(h, -gbl_reserve);
4749
4750 return 0;
4751}
4752
4753#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4754static unsigned long page_table_shareable(struct vm_area_struct *svma,
4755 struct vm_area_struct *vma,
4756 unsigned long addr, pgoff_t idx)
4757{
4758 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4759 svma->vm_start;
4760 unsigned long sbase = saddr & PUD_MASK;
4761 unsigned long s_end = sbase + PUD_SIZE;
4762
4763 /* Allow segments to share if only one is marked locked */
4764 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4765 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4766
4767 /*
4768 * match the virtual addresses, permission and the alignment of the
4769 * page table page.
4770 */
4771 if (pmd_index(addr) != pmd_index(saddr) ||
4772 vm_flags != svm_flags ||
4773 sbase < svma->vm_start || svma->vm_end < s_end)
4774 return 0;
4775
4776 return saddr;
4777}
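/*
 * Worked example (editor's addition): on x86-64, PUD_SIZE is 1GB, so
 * sbase = saddr & PUD_MASK rounds the candidate's address down to a 1GB
 * boundary and [sbase, s_end) is the 1GB region that would share one pmd
 * page. Sharing is refused unless that whole region lies inside the
 * candidate VMA.
 */
#if 0
	unsigned long sbase = saddr & ~((1UL << 30) - 1);	/* 1GB align */
	unsigned long s_end = sbase + (1UL << 30);
#endif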
4778
4779static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4780{
4781 unsigned long base = addr & PUD_MASK;
4782 unsigned long end = base + PUD_SIZE;
4783
4784 /*
4785 * check on proper vm_flags and page table alignment
4786 */
4787 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4788 return true;
4789 return false;
4790}
4791
4792/*
4793 * Determine if start,end range within vma could be mapped by shared pmd.
4794 * If yes, adjust start and end to cover range associated with possible
4795 * shared pmd mappings.
4796 */
4797void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4798 unsigned long *start, unsigned long *end)
4799{
4800 unsigned long check_addr = *start;
4801
4802 if (!(vma->vm_flags & VM_MAYSHARE))
4803 return;
4804
4805 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4806 unsigned long a_start = check_addr & PUD_MASK;
4807 unsigned long a_end = a_start + PUD_SIZE;
4808
4809 /*
4810 * If sharing is possible, adjust start/end if necessary.
4811 */
4812 if (range_in_vma(vma, a_start, a_end)) {
4813 if (a_start < *start)
4814 *start = a_start;
4815 if (a_end > *end)
4816 *end = a_end;
4817 }
4818 }
4819}
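/*
 * Worked example (editor's addition), assuming a 1GB PUD_SIZE: for a VMA
 * covering [0x40000000, 0x80000000) and an unmap request for
 * [0x40100000, 0x40200000), the loop above widens the range to the full
 * [0x40000000, 0x80000000) because that 1GB-aligned region fits in the
 * VMA and may be mapped by a shared pmd page.
 */
#if 0
	unsigned long a_start = 0x40100000UL & ~((1UL << 30) - 1);
	/* a_start == 0x40000000, a_end == a_start + 1GB == 0x80000000 */
#endif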
4820
4821/*
4822 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4823 * Search for a shareable pmd page for hugetlb. In any case this calls pmd_alloc()
4824 * !shared pmd case because we can allocate the pmd later as well, it makes the
4825 * code much cleaner. pmd allocation is essential for the shared case because
4826 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4827 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4828 * bad pmd for sharing.
4829 */
4830pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4831{
4832 struct vm_area_struct *vma = find_vma(mm, addr);
4833 struct address_space *mapping = vma->vm_file->f_mapping;
4834 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4835 vma->vm_pgoff;
4836 struct vm_area_struct *svma;
4837 unsigned long saddr;
4838 pte_t *spte = NULL;
4839 pte_t *pte;
4840 spinlock_t *ptl;
4841
4842 if (!vma_shareable(vma, addr))
4843 return (pte_t *)pmd_alloc(mm, pud, addr);
4844
4845 i_mmap_lock_write(mapping);
4846 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4847 if (svma == vma)
4848 continue;
4849
4850 saddr = page_table_shareable(svma, vma, addr, idx);
4851 if (saddr) {
4852 spte = huge_pte_offset(svma->vm_mm, saddr,
4853 vma_mmu_pagesize(svma));
4854 if (spte) {
4855 get_page(virt_to_page(spte));
4856 break;
4857 }
4858 }
4859 }
4860
4861 if (!spte)
4862 goto out;
4863
4864 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4865 if (pud_none(*pud)) {
4866 pud_populate(mm, pud,
4867 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4868 mm_inc_nr_pmds(mm);
4869 } else {
4870 put_page(virt_to_page(spte));
4871 }
4872 spin_unlock(ptl);
4873out:
4874 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4875 i_mmap_unlock_write(mapping);
4876 return pte;
4877}
4878
4879/*
4880 * unmap huge page backed by shared pte.
4881 *
4882 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
4883 * indicated by page_count > 1, unmap is achieved by clearing pud and
4884 * decrementing the ref count. If count == 1, the pte page is not shared.
4885 *
4886 * called with page table lock held.
4887 *
4888 * returns: 1 successfully unmapped a shared pte page
4889 * 0 the underlying pte page is not shared, or it is the last user
4890 */
4891int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4892{
4893 pgd_t *pgd = pgd_offset(mm, *addr);
4894 p4d_t *p4d = p4d_offset(pgd, *addr);
4895 pud_t *pud = pud_offset(p4d, *addr);
4896
4897 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4898 if (page_count(virt_to_page(ptep)) == 1)
4899 return 0;
4900
4901 pud_clear(pud);
4902 put_page(virt_to_page(ptep));
4903 mm_dec_nr_pmds(mm);
4904 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4905 return 1;
4906}
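/*
 * Worked example (editor's addition): the *addr update above rewinds the
 * caller's loop to the pud boundary. With 2MB huge pages and 512 pmd
 * entries, HPAGE_SIZE * PTRS_PER_PTE is 1GB, so ALIGN() rounds *addr up
 * to the next 1GB boundary and one HPAGE_SIZE step back makes the
 * caller's "addr += huge_page_size(h)" land exactly on that boundary,
 * skipping the rest of the unshared pud range.
 */
#if 0
	unsigned long next = ALIGN(0x40300000UL, 1UL << 30);	/* 0x80000000 */
	unsigned long addr = next - (2UL << 20);		/* 0x7fe00000 */
#endif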
4907#define want_pmd_share() (1)
4908#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4909pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4910{
4911 return NULL;
4912}
4913
4914int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4915{
4916 return 0;
4917}
4918
4919void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4920 unsigned long *start, unsigned long *end)
4921{
4922}
4923#define want_pmd_share() (0)
4924#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4925
4926#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4927pte_t *huge_pte_alloc(struct mm_struct *mm,
4928 unsigned long addr, unsigned long sz)
4929{
4930 pgd_t *pgd;
4931 p4d_t *p4d;
4932 pud_t *pud;
4933 pte_t *pte = NULL;
4934
4935 pgd = pgd_offset(mm, addr);
4936 p4d = p4d_alloc(mm, pgd, addr);
4937 if (!p4d)
4938 return NULL;
4939 pud = pud_alloc(mm, p4d, addr);
4940 if (pud) {
4941 if (sz == PUD_SIZE) {
4942 pte = (pte_t *)pud;
4943 } else {
4944 BUG_ON(sz != PMD_SIZE);
4945 if (want_pmd_share() && pud_none(*pud))
4946 pte = huge_pmd_share(mm, addr, pud);
4947 else
4948 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4949 }
4950 }
4951 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4952
4953 return pte;
4954}
4955
4956/*
4957 * huge_pte_offset() - Walk the page table to resolve the hugepage
4958 * entry at address @addr
4959 *
4960 * Return: Pointer to page table or swap entry (PUD or PMD) for
4961 * address @addr, or NULL if a p*d_none() entry is encountered and the
4962 * size @sz doesn't match the hugepage size at this level of the page
4963 * table.
4964 */
4965pte_t *huge_pte_offset(struct mm_struct *mm,
4966 unsigned long addr, unsigned long sz)
4967{
4968 pgd_t *pgd;
4969 p4d_t *p4d;
4970 pud_t *pud;
4971 pmd_t *pmd;
4972
4973 pgd = pgd_offset(mm, addr);
4974 if (!pgd_present(*pgd))
4975 return NULL;
4976 p4d = p4d_offset(pgd, addr);
4977 if (!p4d_present(*p4d))
4978 return NULL;
4979
4980 pud = pud_offset(p4d, addr);
4981 if (sz != PUD_SIZE && pud_none(*pud))
4982 return NULL;
4983 /* hugepage or swap? */
4984 if (pud_huge(*pud) || !pud_present(*pud))
4985 return (pte_t *)pud;
4986
4987 pmd = pmd_offset(pud, addr);
4988 if (sz != PMD_SIZE && pmd_none(*pmd))
4989 return NULL;
4990 /* hugepage or swap? */
4991 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4992 return (pte_t *)pmd;
4993
4994 return NULL;
4995}
4996
4997#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4998
4999/*
5000 * These functions are overridable if your architecture needs its own
5001 * behavior.
5002 */
5003struct page * __weak
5004follow_huge_addr(struct mm_struct *mm, unsigned long address,
5005 int write)
5006{
5007 return ERR_PTR(-EINVAL);
5008}
5009
5010struct page * __weak
5011follow_huge_pd(struct vm_area_struct *vma,
5012 unsigned long address, hugepd_t hpd, int flags, int pdshift)
5013{
5014 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5015 return NULL;
5016}
5017
5018struct page * __weak
5019follow_huge_pmd(struct mm_struct *mm, unsigned long address,
5020 pmd_t *pmd, int flags)
5021{
5022 struct page *page = NULL;
5023 spinlock_t *ptl;
5024 pte_t pte;
5025retry:
5026 ptl = pmd_lockptr(mm, pmd);
5027 spin_lock(ptl);
5028 /*
5029 * make sure that the address range covered by this pmd is not
5030	 * unmapped by other threads.
5031 */
5032 if (!pmd_huge(*pmd))
5033 goto out;
5034 pte = huge_ptep_get((pte_t *)pmd);
5035 if (pte_present(pte)) {
5036 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
5037 if (flags & FOLL_GET)
5038 get_page(page);
5039 } else {
5040 if (is_hugetlb_entry_migration(pte)) {
5041 spin_unlock(ptl);
5042 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
5043 goto retry;
5044 }
5045 /*
5046 * hwpoisoned entry is treated as no_page_table in
5047 * follow_page_mask().
5048 */
5049 }
5050out:
5051 spin_unlock(ptl);
5052 return page;
5053}
5054
5055struct page * __weak
5056follow_huge_pud(struct mm_struct *mm, unsigned long address,
5057 pud_t *pud, int flags)
5058{
5059 if (flags & FOLL_GET)
5060 return NULL;
5061
5062 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5063}
5064
5065struct page * __weak
5066follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5067{
5068 if (flags & FOLL_GET)
5069 return NULL;
5070
5071 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5072}
5073
5074bool isolate_huge_page(struct page *page, struct list_head *list)
5075{
5076 bool ret = true;
5077
5078 VM_BUG_ON_PAGE(!PageHead(page), page);
5079 spin_lock(&hugetlb_lock);
5080 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
5081 ret = false;
5082 goto unlock;
5083 }
5084 clear_page_huge_active(page);
5085 list_move_tail(&page->lru, list);
5086unlock:
5087 spin_unlock(&hugetlb_lock);
5088 return ret;
5089}
5090
5091void putback_active_hugepage(struct page *page)
5092{
5093 VM_BUG_ON_PAGE(!PageHead(page), page);
5094 spin_lock(&hugetlb_lock);
5095 set_page_huge_active(page);
5096 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5097 spin_unlock(&hugetlb_lock);
5098 put_page(page);
5099}
5100
5101void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5102{
5103 struct hstate *h = page_hstate(oldpage);
5104
5105 hugetlb_cgroup_migrate(oldpage, newpage);
5106 set_page_owner_migrate_reason(newpage, reason);
5107
5108 /*
5109	 * Transfer the temporary state of the new huge page. This is
5110	 * the reverse of other transitions because the newpage is going
5111	 * to be final while the old one will be freed, so the old page
5112	 * takes over the temporary status.
5113 *
5114 * Also note that we have to transfer the per-node surplus state
5115 * here as well otherwise the global surplus count will not match
5116 * the per-node's.
5117 */
5118 if (PageHugeTemporary(newpage)) {
5119 int old_nid = page_to_nid(oldpage);
5120 int new_nid = page_to_nid(newpage);
5121
5122 SetPageHugeTemporary(oldpage);
5123 ClearPageHugeTemporary(newpage);
5124
5125 spin_lock(&hugetlb_lock);
5126 if (h->surplus_huge_pages_node[old_nid]) {
5127 h->surplus_huge_pages_node[old_nid]--;
5128 h->surplus_huge_pages_node[new_nid]++;
5129 }
5130 spin_unlock(&hugetlb_lock);
5131 }
5132}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Generic hugetlb support.
4 * (C) Nadia Yvette Chambers, April 2004
5 */
6#include <linux/list.h>
7#include <linux/init.h>
8#include <linux/mm.h>
9#include <linux/seq_file.h>
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
12#include <linux/mmu_notifier.h>
13#include <linux/nodemask.h>
14#include <linux/pagemap.h>
15#include <linux/mempolicy.h>
16#include <linux/compiler.h>
17#include <linux/cpuset.h>
18#include <linux/mutex.h>
19#include <linux/memblock.h>
20#include <linux/sysfs.h>
21#include <linux/slab.h>
22#include <linux/sched/mm.h>
23#include <linux/mmdebug.h>
24#include <linux/sched/signal.h>
25#include <linux/rmap.h>
26#include <linux/string_helpers.h>
27#include <linux/swap.h>
28#include <linux/swapops.h>
29#include <linux/jhash.h>
30#include <linux/numa.h>
31#include <linux/llist.h>
32#include <linux/cma.h>
33#include <linux/migrate.h>
34#include <linux/nospec.h>
35#include <linux/delayacct.h>
36#include <linux/memory.h>
37#include <linux/mm_inline.h>
38
39#include <asm/page.h>
40#include <asm/pgalloc.h>
41#include <asm/tlb.h>
42
43#include <linux/io.h>
44#include <linux/hugetlb.h>
45#include <linux/hugetlb_cgroup.h>
46#include <linux/node.h>
47#include <linux/page_owner.h>
48#include "internal.h"
49#include "hugetlb_vmemmap.h"
50
51int hugetlb_max_hstate __read_mostly;
52unsigned int default_hstate_idx;
53struct hstate hstates[HUGE_MAX_HSTATE];
54
55#ifdef CONFIG_CMA
56static struct cma *hugetlb_cma[MAX_NUMNODES];
57static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
58static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
59{
60 return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
61 1 << order);
62}
63#else
64static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
65{
66 return false;
67}
68#endif
69static unsigned long hugetlb_cma_size __initdata;
70
71__initdata LIST_HEAD(huge_boot_pages);
72
73/* for command line parsing */
74static struct hstate * __initdata parsed_hstate;
75static unsigned long __initdata default_hstate_max_huge_pages;
76static bool __initdata parsed_valid_hugepagesz = true;
77static bool __initdata parsed_default_hugepagesz;
78static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
79
80/*
81 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
82 * free_huge_pages, and surplus_huge_pages.
83 */
84DEFINE_SPINLOCK(hugetlb_lock);
85
86/*
87 * Serializes faults on the same logical page. This is used to
88 * prevent spurious OOMs when the hugepage pool is fully utilized.
89 */
90static int num_fault_mutexes;
91struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
92
93/* Forward declaration */
94static int hugetlb_acct_memory(struct hstate *h, long delta);
95static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
96static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
97static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
98static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
99 unsigned long start, unsigned long end);
100static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
101
102static inline bool subpool_is_free(struct hugepage_subpool *spool)
103{
104 if (spool->count)
105 return false;
106 if (spool->max_hpages != -1)
107 return spool->used_hpages == 0;
108 if (spool->min_hpages != -1)
109 return spool->rsv_hpages == spool->min_hpages;
110
111 return true;
112}
113
114static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
115 unsigned long irq_flags)
116{
117 spin_unlock_irqrestore(&spool->lock, irq_flags);
118
119 /* If no pages are used, and no other handles to the subpool
120 * remain, give up any reservations based on minimum size and
121 * free the subpool */
122 if (subpool_is_free(spool)) {
123 if (spool->min_hpages != -1)
124 hugetlb_acct_memory(spool->hstate,
125 -spool->min_hpages);
126 kfree(spool);
127 }
128}
129
130struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
131 long min_hpages)
132{
133 struct hugepage_subpool *spool;
134
135 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
136 if (!spool)
137 return NULL;
138
139 spin_lock_init(&spool->lock);
140 spool->count = 1;
141 spool->max_hpages = max_hpages;
142 spool->hstate = h;
143 spool->min_hpages = min_hpages;
144
145 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
146 kfree(spool);
147 return NULL;
148 }
149 spool->rsv_hpages = min_hpages;
150
151 return spool;
152}
153
154void hugepage_put_subpool(struct hugepage_subpool *spool)
155{
156 unsigned long flags;
157
158 spin_lock_irqsave(&spool->lock, flags);
159 BUG_ON(!spool->count);
160 spool->count--;
161 unlock_or_release_subpool(spool, flags);
162}
163
164/*
165 * Subpool accounting for allocating and reserving pages.
166 * Return -ENOMEM if there are not enough resources to satisfy the
167 * request. Otherwise, return the number of pages by which the
168 * global pools must be adjusted (upward). The returned value may
169 * only be different than the passed value (delta) in the case where
170 * a subpool minimum size must be maintained.
171 */
172static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
173 long delta)
174{
175 long ret = delta;
176
177 if (!spool)
178 return ret;
179
180 spin_lock_irq(&spool->lock);
181
182 if (spool->max_hpages != -1) { /* maximum size accounting */
183 if ((spool->used_hpages + delta) <= spool->max_hpages)
184 spool->used_hpages += delta;
185 else {
186 ret = -ENOMEM;
187 goto unlock_ret;
188 }
189 }
190
191 /* minimum size accounting */
192 if (spool->min_hpages != -1 && spool->rsv_hpages) {
193 if (delta > spool->rsv_hpages) {
194 /*
195 * Asking for more reserves than those already taken on
196 * behalf of subpool. Return difference.
197 */
198 ret = delta - spool->rsv_hpages;
199 spool->rsv_hpages = 0;
200 } else {
201 ret = 0; /* reserves already accounted for */
202 spool->rsv_hpages -= delta;
203 }
204 }
205
206unlock_ret:
207 spin_unlock_irq(&spool->lock);
208 return ret;
209}
210
211/*
212 * Subpool accounting for freeing and unreserving pages.
213 * Return the number of global page reservations that must be dropped.
214 * The return value may only be different than the passed value (delta)
215 * in the case where a subpool minimum size must be maintained.
216 */
217static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
218 long delta)
219{
220 long ret = delta;
221 unsigned long flags;
222
223 if (!spool)
224 return delta;
225
226 spin_lock_irqsave(&spool->lock, flags);
227
228 if (spool->max_hpages != -1) /* maximum size accounting */
229 spool->used_hpages -= delta;
230
231 /* minimum size accounting */
232 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
233 if (spool->rsv_hpages + delta <= spool->min_hpages)
234 ret = 0;
235 else
236 ret = spool->rsv_hpages + delta - spool->min_hpages;
237
238 spool->rsv_hpages += delta;
239 if (spool->rsv_hpages > spool->min_hpages)
240 spool->rsv_hpages = spool->min_hpages;
241 }
242
243 /*
244 * If hugetlbfs_put_super couldn't free spool due to an outstanding
245 * quota reference, free it now.
246 */
247 unlock_or_release_subpool(spool, flags);
248
249 return ret;
250}
251
252static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
253{
254 return HUGETLBFS_SB(inode->i_sb)->spool;
255}
256
257static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
258{
259 return subpool_inode(file_inode(vma->vm_file));
260}
261
262/*
263 * hugetlb vma_lock helper routines
264 */
265void hugetlb_vma_lock_read(struct vm_area_struct *vma)
266{
267 if (__vma_shareable_lock(vma)) {
268 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
269
270 down_read(&vma_lock->rw_sema);
271 } else if (__vma_private_lock(vma)) {
272 struct resv_map *resv_map = vma_resv_map(vma);
273
274 down_read(&resv_map->rw_sema);
275 }
276}
277
278void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
279{
280 if (__vma_shareable_lock(vma)) {
281 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
282
283 up_read(&vma_lock->rw_sema);
284 } else if (__vma_private_lock(vma)) {
285 struct resv_map *resv_map = vma_resv_map(vma);
286
287 up_read(&resv_map->rw_sema);
288 }
289}
290
291void hugetlb_vma_lock_write(struct vm_area_struct *vma)
292{
293 if (__vma_shareable_lock(vma)) {
294 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
295
296 down_write(&vma_lock->rw_sema);
297 } else if (__vma_private_lock(vma)) {
298 struct resv_map *resv_map = vma_resv_map(vma);
299
300 down_write(&resv_map->rw_sema);
301 }
302}
303
304void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
305{
306 if (__vma_shareable_lock(vma)) {
307 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
308
309 up_write(&vma_lock->rw_sema);
310 } else if (__vma_private_lock(vma)) {
311 struct resv_map *resv_map = vma_resv_map(vma);
312
313 up_write(&resv_map->rw_sema);
314 }
315}
316
317int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
318{
319
320 if (__vma_shareable_lock(vma)) {
321 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
322
323 return down_write_trylock(&vma_lock->rw_sema);
324 } else if (__vma_private_lock(vma)) {
325 struct resv_map *resv_map = vma_resv_map(vma);
326
327 return down_write_trylock(&resv_map->rw_sema);
328 }
329
330 return 1;
331}
332
333void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
334{
335 if (__vma_shareable_lock(vma)) {
336 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
337
338 lockdep_assert_held(&vma_lock->rw_sema);
339 } else if (__vma_private_lock(vma)) {
340 struct resv_map *resv_map = vma_resv_map(vma);
341
342 lockdep_assert_held(&resv_map->rw_sema);
343 }
344}
345
346void hugetlb_vma_lock_release(struct kref *kref)
347{
348 struct hugetlb_vma_lock *vma_lock = container_of(kref,
349 struct hugetlb_vma_lock, refs);
350
351 kfree(vma_lock);
352}
353
354static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
355{
356 struct vm_area_struct *vma = vma_lock->vma;
357
358 /*
359 * vma_lock structure may or not be released as a result of put,
360 * it certainly will no longer be attached to vma so clear pointer.
361 * Semaphore synchronizes access to vma_lock->vma field.
362 */
363 vma_lock->vma = NULL;
364 vma->vm_private_data = NULL;
365 up_write(&vma_lock->rw_sema);
366 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
367}
368
369static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
370{
371 if (__vma_shareable_lock(vma)) {
372 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
373
374 __hugetlb_vma_unlock_write_put(vma_lock);
375 } else if (__vma_private_lock(vma)) {
376 struct resv_map *resv_map = vma_resv_map(vma);
377
378 /* no free for anon vmas, but still need to unlock */
379 up_write(&resv_map->rw_sema);
380 }
381}
382
383static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
384{
385 /*
386 * Only present in sharable vmas.
387 */
388 if (!vma || !__vma_shareable_lock(vma))
389 return;
390
391 if (vma->vm_private_data) {
392 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
393
394 down_write(&vma_lock->rw_sema);
395 __hugetlb_vma_unlock_write_put(vma_lock);
396 }
397}
398
399static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
400{
401 struct hugetlb_vma_lock *vma_lock;
402
403 /* Only establish in (flags) sharable vmas */
404 if (!vma || !(vma->vm_flags & VM_MAYSHARE))
405 return;
406
407 /* Should never get here with non-NULL vm_private_data */
408 if (vma->vm_private_data)
409 return;
410
411 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
412 if (!vma_lock) {
413 /*
414 * If we can not allocate structure, then vma can not
415 * participate in pmd sharing. This is only a possible
416 * performance enhancement and memory saving issue.
417 * However, the lock is also used to synchronize page
418 * faults with truncation. If the lock is not present,
419 * unlikely races could leave pages in a file past i_size
420 * until the file is removed. Warn in the unlikely case of
421 * allocation failure.
422 */
423 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
424 return;
425 }
426
427 kref_init(&vma_lock->refs);
428 init_rwsem(&vma_lock->rw_sema);
429 vma_lock->vma = vma;
430 vma->vm_private_data = vma_lock;
431}
432
433/* Helper that removes a struct file_region from the resv_map cache and returns
434 * it for use.
435 */
436static struct file_region *
437get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
438{
439 struct file_region *nrg;
440
441 VM_BUG_ON(resv->region_cache_count <= 0);
442
443 resv->region_cache_count--;
444 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
445 list_del(&nrg->link);
446
447 nrg->from = from;
448 nrg->to = to;
449
450 return nrg;
451}
452
453static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
454 struct file_region *rg)
455{
456#ifdef CONFIG_CGROUP_HUGETLB
457 nrg->reservation_counter = rg->reservation_counter;
458 nrg->css = rg->css;
459 if (rg->css)
460 css_get(rg->css);
461#endif
462}
463
/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

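/*
 * Merge the region with its immediate neighbours when the ranges abut
 * and the cgroup uncharge info matches, freeing the entries that were
 * absorbed. This keeps the reserve map as compact as possible.
 */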
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

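/*
 * Add a [from, to) entry to the reserve map, or only count it if the
 * caller passed a non-NULL regions_needed. New entries are taken from
 * the resv_map's cache and merged with neighbours where possible.
 * Returns the number of pages covered, i.e. (to - from).
 */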
static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map. Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del. The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map. This number is greater
 * than or equal to zero. If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented. This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t). region_chg does
 * not change the number of huge pages represented by the
 * map. A number of new file_region structures are added to the cache as
 * placeholders, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress. This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t). This number is greater than or equal
 * to zero. -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation. The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add. Operations are sometimes
 * aborted after the call to region_chg. In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

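/*
 * Typical use of the region_chg/region_add/region_abort trio (a sketch;
 * locking and the cgroup details are elided):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (<subpool or cgroup charging fails>)
 *		region_abort(resv, f, t, regions_needed);
 *	else
 *		add = region_add(resv, f, t, regions_needed, h, h_cg);
 */
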
/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and thus will never return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted. file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to. Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page. The huge page itself was freed
 * and removed from the page cache. This routine will adjust the subpool
 * usage count, and the global reserve count if needed. By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in huge page units.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode. This original address space
	 * contains the pointer to the resv_map. So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

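/*
 * Returns true when the vma is a MAP_PRIVATE hugetlb mapping that owns
 * its reserve map; for such vmas the resv_map's rw_sema acts as the
 * per-vma lock.
 */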
bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
	       get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
	       is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure the pointer is not associated with
	 *   this vma, as clearing it in that case would leak the structure.
	 *   This is the case when called via clear_vma_resv_huge_pages()
	 *   and hugetlb_vm_op_open has already been called to allocate a
	 *   new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children. Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on
 * a vma of the same size. It should never come here with the last ref
 * on the reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the release step. Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a workaround here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set. Therefore, there SHOULD
		 * be a region map for all pages. The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate. In this case, there really are no reserves to
		 * use. This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves(). The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings. vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account. Therefore, the meaning of chg is the same
		 * as in the shared case above. Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

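/*
 * Return an unused folio to the free list of its node. Requires the
 * hugetlb lock and a zero folio refcount, both asserted below.
 */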
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

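/*
 * Take a free folio from the given node's free list, skipping folios
 * that are hwpoisoned or that cannot be long-term pinned when the
 * current task requires pinnable memory (PF_MEMALLOC_PIN).
 */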
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

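/* Free huge pages not already spoken for by reservations. */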
static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if folio == NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

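/*
 * Iterate over the allowed nodes in round-robin order, starting from the
 * hstate's saved position and visiting each allowed node at most once.
 * A typical use (a sketch) is one allocation attempt per allowed node:
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		folio = ...attempt allocation on 'node'...;
 *		if (folio)
 *			break;
 *	}
 */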
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* also used to demote non-gigantic huge pages */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);

	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

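/*
 * Clear the hugetlb flag so the folio is no longer identified as a
 * hugetlb page; from this point it is treated as an ordinary compound
 * page by the rest of the kernel.
 */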
static inline void __clear_hugetlb_destructor(struct hstate *h,
						struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);

	folio_clear_hugetlb(folio);
}

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, update dtor so that the folio appears
 * as just a compound page. Otherwise, wait until after allocating vmemmap
 * to update dtor.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb destructor after allocating vmemmap
	 * pages. Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__clear_hugetlb_destructor(h, folio);

	/*
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users. Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref
		 * on the folio. In this case, we simply return as
		 * free_huge_folio() will be called when this other ref
		 * is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If folio is not vmemmap optimized (!clear_dtor), then the folio
	 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb destructor under the hugetlb lock.
	 */
	if (clear_dtor) {
		spin_lock_irq(&hugetlb_lock);
		__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(&folio->page, huge_page_order(h));
	}
}

/*
 * As update_and_free_hugetlb_folio() can be called under any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages. However, we can defer
 * the actual freeing in a workqueue to prevent from using GFP_ATOMIC to
 * allocate the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist was previously
	 * empty. Otherwise, schedule_work() has already been called but the
	 * workfn hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__clear_hugetlb_destructor(h, folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory). If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If we are able to restore vmemmap and free one hugetlb page,
		 * we quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__clear_hugetlb_destructor(h, folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}

static void update_and_free_pages_bulk(struct hstate *h,
						struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __clear_hugetlb_destructor as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}

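/* Return the hstate matching the given huge page size, or NULL if none. */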
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation. If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after the page is freed. Therefore, force the
		 * restore_reserve operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	folio_clear_hugetlb_migratable(folio);
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				      pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					  pages_per_huge_page(h), folio);
	mem_cgroup_uncharge(folio);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (folio_test_hugetlb_temporary(folio)) {
		remove_hugetlb_folio(h, folio, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_folio(h, folio, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else {
		arch_clear_hugepage_flags(&folio->page);
		enqueue_hugetlb_folio(h, folio);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

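/*
 * Mark a freshly allocated folio as hugetlb and give it a clean initial
 * state: empty lru list, no subpool, and no cgroup association.
 */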
static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	folio_set_hugetlb(folio);
	INIT_LIST_HEAD(&folio->lru);
	hugetlb_set_folio_subpool(folio, NULL);
	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
}

static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	init_new_hugetlb_folio(h, folio);
	hugetlb_vmemmap_optimize_folio(h, folio);
}

static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
{
	__prep_new_hugetlb_folio(h, folio);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}

static bool __prep_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p;

	__folio_clear_reserved(folio);
	for (i = 0; i < nr_pages; i++) {
		p = folio_page(folio, i);

		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too. Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set). Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		if (i != 0)	/* head page cleared above */
			__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted. We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero. Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1. If count
		 * is not 1, we return an error. An error return indicates
		 * the set of pages cannot be converted to a gigantic page.
		 * The caller who allocated the pages should then discard the
		 * pages using the appropriate free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		if (i != 0)
			set_compound_head(p, &folio->page);
	}
	__folio_set_head(folio);
	/* we rely on prep_new_hugetlb_folio to set the destructor */
	folio_set_order(folio, order);
	atomic_set(&folio->_entire_mapcount, -1);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);
	return true;

out_error:
	/* undo page modifications made above */
	for (j = 0; j < i; j++) {
		p = folio_page(folio, j);
		if (j != 0)
			clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++) {
		p = folio_page(folio, j);
		__ClearPageReserved(p);
	}
	return false;
}

static bool prep_compound_gigantic_folio(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, false);
}

static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
							unsigned int order)
{
	return __prep_compound_gigantic_folio(folio, order, true);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return 0;
	folio = page_folio(page);
	return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable. Due to locking order, we can only trylock_write. If we cannot
 * get the lock, simply return NULL to the caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}

2197static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2198 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2199 nodemask_t *node_alloc_noretry)
2200{
2201 int order = huge_page_order(h);
2202 struct page *page;
2203 bool alloc_try_hard = true;
2204 bool retry = true;
2205
2206 /*
2207 * By default we always try hard to allocate the page with
2208 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
2209 * a loop (to adjust global huge page counts) and previous allocation
2210 * failed, do not continue to try hard on the same node. Use the
2211 * node_alloc_noretry bitmap to manage this state information.
2212 */
2213 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2214 alloc_try_hard = false;
2215 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2216 if (alloc_try_hard)
2217 gfp_mask |= __GFP_RETRY_MAYFAIL;
2218 if (nid == NUMA_NO_NODE)
2219 nid = numa_mem_id();
2220retry:
2221 page = __alloc_pages(gfp_mask, order, nid, nmask);
2222
2223 /* Freeze head page */
2224 if (page && !page_ref_freeze(page, 1)) {
2225 __free_pages(page, order);
2226 if (retry) { /* retry once */
2227 retry = false;
2228 goto retry;
2229 }
2230 /* WOW! twice in a row. */
2231 pr_warn("HugeTLB head page unexpected inflated ref count\n");
2232 page = NULL;
2233 }
2234
2235 /*
2236	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
2237	 * indicates an overall state change. Clear the bit so that we resume
2238 * normal 'try hard' allocations.
2239 */
2240 if (node_alloc_noretry && page && !alloc_try_hard)
2241 node_clear(nid, *node_alloc_noretry);
2242
2243 /*
2244 * If we tried hard to get a page but failed, set bit so that
2245 * subsequent attempts will not try as hard until there is an
2246 * overall state change.
2247 */
2248 if (node_alloc_noretry && !page && alloc_try_hard)
2249 node_set(nid, *node_alloc_noretry);
2250
2251 if (!page) {
2252 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2253 return NULL;
2254 }
2255
2256 __count_vm_event(HTLB_BUDDY_PGALLOC);
2257 return page_folio(page);
2258}
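/*
 * Illustrative sketch (not part of the original source): a caller that
 * adjusts pool counts in a loop passes a zeroed nodemask, so that a node
 * which failed a 'try hard' allocation is skipped on later iterations:
 *
 *	NODEMASK_ALLOC(nodemask_t, noretry, GFP_KERNEL);
 *	if (noretry)
 *		nodes_clear(*noretry);
 *	while (more_pages_wanted)	(hypothetical loop condition)
 *		folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
 *						  NULL, noretry);
 *	NODEMASK_FREE(noretry);
 */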
2259
2260static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
2261 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2262 nodemask_t *node_alloc_noretry)
2263{
2264 struct folio *folio;
2265 bool retry = false;
2266
2267retry:
2268 if (hstate_is_gigantic(h))
2269 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2270 else
2271 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2272 nid, nmask, node_alloc_noretry);
2273 if (!folio)
2274 return NULL;
2275
2276 if (hstate_is_gigantic(h)) {
2277 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2278 /*
2279 * Rare failure to convert pages to compound page.
2280 * Free pages and try again - ONCE!
2281 */
2282 free_gigantic_folio(folio, huge_page_order(h));
2283 if (!retry) {
2284 retry = true;
2285 goto retry;
2286 }
2287 return NULL;
2288 }
2289 }
2290
2291 return folio;
2292}
2293
2294static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
2295 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2296 nodemask_t *node_alloc_noretry)
2297{
2298 struct folio *folio;
2299
2300 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2301 node_alloc_noretry);
2302 if (folio)
2303 init_new_hugetlb_folio(h, folio);
2304 return folio;
2305}
2306
2307/*
2308 * Common helper to allocate a fresh hugetlb page. All specific allocators
2309 * should use this function to get new hugetlb pages.
2310 *
2311 * Note that the returned page is 'frozen': the ref count of the head page
2312 * and all tail pages is zero.
2313 */
2314static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2315 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2316 nodemask_t *node_alloc_noretry)
2317{
2318 struct folio *folio;
2319
2320 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2321 node_alloc_noretry);
2322 if (!folio)
2323 return NULL;
2324
2325 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2326 return folio;
2327}
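/*
 * Illustrative sketch (not part of the original source): since fresh
 * folios are returned frozen, a caller that hands a folio out of the
 * allocator must first give it a reference, as
 * alloc_migrate_hugetlb_folio() below does:
 *
 *	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
 *	if (folio)
 *		folio_ref_unfreeze(folio, 1);
 */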
2328
2329static void prep_and_add_allocated_folios(struct hstate *h,
2330 struct list_head *folio_list)
2331{
2332 unsigned long flags;
2333 struct folio *folio, *tmp_f;
2334
2335 /* Send list for bulk vmemmap optimization processing */
2336 hugetlb_vmemmap_optimize_folios(h, folio_list);
2337
2338 /* Add all new pool pages to free lists in one lock cycle */
2339 spin_lock_irqsave(&hugetlb_lock, flags);
2340 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
2341 __prep_account_new_huge_page(h, folio_nid(folio));
2342 enqueue_hugetlb_folio(h, folio);
2343 }
2344 spin_unlock_irqrestore(&hugetlb_lock, flags);
2345}
2346
2347/*
2348 * Allocates a fresh hugetlb page in a node-interleaved manner. The page
2349 * will later be added to the appropriate hugetlb pool.
2350 */
2351static struct folio *alloc_pool_huge_folio(struct hstate *h,
2352 nodemask_t *nodes_allowed,
2353 nodemask_t *node_alloc_noretry)
2354{
2355 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2356 int nr_nodes, node;
2357
2358 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2359 struct folio *folio;
2360
2361 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2362 nodes_allowed, node_alloc_noretry);
2363 if (folio)
2364 return folio;
2365 }
2366
2367 return NULL;
2368}
2369
2370/*
2371 * Remove a huge page from the pool, taking it from the next node to free.
2372 * Attempt to keep persistent huge pages more or less balanced over the
2373 * allowed nodes. This routine only 'removes' the hugetlb page. The caller
2374 * must make an additional call to free the page to the low-level allocators.
2375 * Called with hugetlb_lock locked.
2376 */
2377static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2378 nodemask_t *nodes_allowed, bool acct_surplus)
2379{
2380 int nr_nodes, node;
2381 struct folio *folio = NULL;
2382
2383 lockdep_assert_held(&hugetlb_lock);
2384 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2385 /*
2386 * If we're returning unused surplus pages, only examine
2387 * nodes with surplus pages.
2388 */
2389 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2390 !list_empty(&h->hugepage_freelists[node])) {
2391 folio = list_entry(h->hugepage_freelists[node].next,
2392 struct folio, lru);
2393 remove_hugetlb_folio(h, folio, acct_surplus);
2394 break;
2395 }
2396 }
2397
2398 return folio;
2399}
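/*
 * Illustrative sketch (not part of the original source): the expected
 * caller pattern is remove-under-lock, free-outside-lock:
 *
 *	spin_lock_irq(&hugetlb_lock);
 *	folio = remove_pool_hugetlb_folio(h, nodes_allowed, false);
 *	spin_unlock_irq(&hugetlb_lock);
 *	if (folio)
 *		update_and_free_hugetlb_folio(h, folio, false);
 */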
2400
2401/*
2402 * Dissolve a given free hugepage into free buddy pages. This function does
2403 * nothing for in-use hugepages and non-hugepages.
2404 * This function returns values like below:
2405 *
2406 * -ENOMEM: failed to allocate the vmemmap pages needed to free the hugepage
2407 * when the system is under memory pressure and the feature of
2408 * freeing unused vmemmap pages associated with each hugetlb page
2409 * is enabled.
2410 * -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2411 * (allocated or reserved.)
2412 * 0: successfully dissolved free hugepages or the page is not a
2413 * hugepage (considered as already dissolved)
2414 */
2415int dissolve_free_huge_page(struct page *page)
2416{
2417 int rc = -EBUSY;
2418 struct folio *folio = page_folio(page);
2419
2420retry:
2421	/* Avoid disrupting the normal path by needlessly taking hugetlb_lock */
2422 if (!folio_test_hugetlb(folio))
2423 return 0;
2424
2425 spin_lock_irq(&hugetlb_lock);
2426 if (!folio_test_hugetlb(folio)) {
2427 rc = 0;
2428 goto out;
2429 }
2430
2431 if (!folio_ref_count(folio)) {
2432 struct hstate *h = folio_hstate(folio);
2433 if (!available_huge_pages(h))
2434 goto out;
2435
2436 /*
2437 * We should make sure that the page is already on the free list
2438 * when it is dissolved.
2439 */
2440 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2441 spin_unlock_irq(&hugetlb_lock);
2442 cond_resched();
2443
2444 /*
2445			 * Theoretically, we should return -EBUSY when we
2446			 * encounter this race. In practice, the race window
2447			 * is quite small, so a retry has a good chance of
2448			 * dissolving the page successfully. Seizing this
2449			 * opportunity is an optimization that increases the
2450			 * success rate of dissolving the page.
2451 */
2452 goto retry;
2453 }
2454
2455 remove_hugetlb_folio(h, folio, false);
2456 h->max_huge_pages--;
2457 spin_unlock_irq(&hugetlb_lock);
2458
2459		 * Normally update_and_free_hugetlb_folio will allocate the required
2460		 * vmemmap before freeing the page. update_and_free_hugetlb_folio will
2461		 * fail to free the page if it can not allocate the required vmemmap.
2462		 * We need to adjust max_huge_pages if the page is not freed.
2463		 * Attempt to allocate vmemmap here so that we can take
2464		 * appropriate action on failure.
2465 * appropriate action on failure.
2466 *
2467 * The folio_test_hugetlb check here is because
2468 * remove_hugetlb_folio will clear hugetlb folio flag for
2469 * non-vmemmap optimized hugetlb folios.
2470 */
2471 if (folio_test_hugetlb(folio)) {
2472 rc = hugetlb_vmemmap_restore_folio(h, folio);
2473 if (rc) {
2474 spin_lock_irq(&hugetlb_lock);
2475 add_hugetlb_folio(h, folio, false);
2476 h->max_huge_pages++;
2477 goto out;
2478 }
2479 } else
2480 rc = 0;
2481
2482 update_and_free_hugetlb_folio(h, folio, false);
2483 return rc;
2484 }
2485out:
2486 spin_unlock_irq(&hugetlb_lock);
2487 return rc;
2488}
2489
2490/*
2491 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2492 * make specified memory blocks removable from the system.
2493 * Note that this will dissolve a free gigantic hugepage completely, if any
2494 * part of it lies within the given range.
2495 * Also note that if dissolve_free_huge_page() returns with an error, all
2496 * free hugepages that were dissolved before that error are lost.
2497 */
2498int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2499{
2500 unsigned long pfn;
2501 struct page *page;
2502 int rc = 0;
2503 unsigned int order;
2504 struct hstate *h;
2505
2506 if (!hugepages_supported())
2507 return rc;
2508
2509 order = huge_page_order(&default_hstate);
2510 for_each_hstate(h)
2511 order = min(order, huge_page_order(h));
2512
2513 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2514 page = pfn_to_page(pfn);
2515 rc = dissolve_free_huge_page(page);
2516 if (rc)
2517 break;
2518 }
2519
2520 return rc;
2521}
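/*
 * Illustrative sketch (not part of the original source): memory hotplug
 * offlining calls this over the pfn range of the block being removed,
 * assuming start_pfn/nr_pages describe that block:
 *
 *	ret = dissolve_free_huge_pages(start_pfn, start_pfn + nr_pages);
 *	if (ret)
 *		return ret;		(the offline attempt fails)
 */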
2522
2523/*
2524 * Allocates a fresh surplus page from the page allocator.
2525 */
2526static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2527 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2528{
2529 struct folio *folio = NULL;
2530
2531 if (hstate_is_gigantic(h))
2532 return NULL;
2533
2534 spin_lock_irq(&hugetlb_lock);
2535 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2536 goto out_unlock;
2537 spin_unlock_irq(&hugetlb_lock);
2538
2539 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2540 if (!folio)
2541 return NULL;
2542
2543 spin_lock_irq(&hugetlb_lock);
2544 /*
2545 * We could have raced with the pool size change.
2546 * Double check that and simply deallocate the new page
2547	 * if we would end up overcommitting the surpluses. Abuse
2548	 * the temporary page flag to work around the nasty
2549	 * free_huge_folio code flow.
2550 */
2551 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2552 folio_set_hugetlb_temporary(folio);
2553 spin_unlock_irq(&hugetlb_lock);
2554 free_huge_folio(folio);
2555 return NULL;
2556 }
2557
2558 h->surplus_huge_pages++;
2559 h->surplus_huge_pages_node[folio_nid(folio)]++;
2560
2561out_unlock:
2562 spin_unlock_irq(&hugetlb_lock);
2563
2564 return folio;
2565}
2566
2567static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2568 int nid, nodemask_t *nmask)
2569{
2570 struct folio *folio;
2571
2572 if (hstate_is_gigantic(h))
2573 return NULL;
2574
2575 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2576 if (!folio)
2577 return NULL;
2578
2579 /* fresh huge pages are frozen */
2580 folio_ref_unfreeze(folio, 1);
2581 /*
2582 * We do not account these pages as surplus because they are only
2583	 * temporary and will be released properly on the last reference.
2584 */
2585 folio_set_hugetlb_temporary(folio);
2586
2587 return folio;
2588}
2589
2590/*
2591 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2592 */
2593static
2594struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2595 struct vm_area_struct *vma, unsigned long addr)
2596{
2597 struct folio *folio = NULL;
2598 struct mempolicy *mpol;
2599 gfp_t gfp_mask = htlb_alloc_mask(h);
2600 int nid;
2601 nodemask_t *nodemask;
2602
2603 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2604 if (mpol_is_preferred_many(mpol)) {
2605 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2606
2607 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2608 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2609
2610		/* Fall back to all nodes if folio == NULL */
2611 nodemask = NULL;
2612 }
2613
2614 if (!folio)
2615 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2616 mpol_cond_put(mpol);
2617 return folio;
2618}
2619
2620/* folio migration callback function */
2621struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2622 nodemask_t *nmask, gfp_t gfp_mask)
2623{
2624 spin_lock_irq(&hugetlb_lock);
2625 if (available_huge_pages(h)) {
2626 struct folio *folio;
2627
2628 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2629 preferred_nid, nmask);
2630 if (folio) {
2631 spin_unlock_irq(&hugetlb_lock);
2632 return folio;
2633 }
2634 }
2635 spin_unlock_irq(&hugetlb_lock);
2636
2637 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2638}
2639
2640/*
2641 * Increase the hugetlb pool such that it can accommodate a reservation
2642 * of size 'delta'.
2643 */
2644static int gather_surplus_pages(struct hstate *h, long delta)
2645 __must_hold(&hugetlb_lock)
2646{
2647 LIST_HEAD(surplus_list);
2648 struct folio *folio, *tmp;
2649 int ret;
2650 long i;
2651 long needed, allocated;
2652 bool alloc_ok = true;
2653
2654 lockdep_assert_held(&hugetlb_lock);
2655 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2656 if (needed <= 0) {
2657 h->resv_huge_pages += delta;
2658 return 0;
2659 }
2660
2661 allocated = 0;
2662
2663 ret = -ENOMEM;
2664retry:
2665 spin_unlock_irq(&hugetlb_lock);
2666 for (i = 0; i < needed; i++) {
2667 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2668 NUMA_NO_NODE, NULL);
2669 if (!folio) {
2670 alloc_ok = false;
2671 break;
2672 }
2673 list_add(&folio->lru, &surplus_list);
2674 cond_resched();
2675 }
2676 allocated += i;
2677
2678 /*
2679 * After retaking hugetlb_lock, we need to recalculate 'needed'
2680 * because either resv_huge_pages or free_huge_pages may have changed.
2681 */
2682 spin_lock_irq(&hugetlb_lock);
2683 needed = (h->resv_huge_pages + delta) -
2684 (h->free_huge_pages + allocated);
2685 if (needed > 0) {
2686 if (alloc_ok)
2687 goto retry;
2688 /*
2689 * We were not able to allocate enough pages to
2690 * satisfy the entire reservation so we free what
2691 * we've allocated so far.
2692 */
2693 goto free;
2694 }
2695 /*
2696 * The surplus_list now contains _at_least_ the number of extra pages
2697 * needed to accommodate the reservation. Add the appropriate number
2698 * of pages to the hugetlb pool and free the extras back to the buddy
2699 * allocator. Commit the entire reservation here to prevent another
2700 * process from stealing the pages as they are added to the pool but
2701 * before they are reserved.
2702 */
2703 needed += allocated;
2704 h->resv_huge_pages += delta;
2705 ret = 0;
2706
2707 /* Free the needed pages to the hugetlb pool */
2708 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2709 if ((--needed) < 0)
2710 break;
2711 /* Add the page to the hugetlb allocator */
2712 enqueue_hugetlb_folio(h, folio);
2713 }
2714free:
2715 spin_unlock_irq(&hugetlb_lock);
2716
2717 /*
2718 * Free unnecessary surplus pages to the buddy allocator.
2719 * Pages have no ref count, call free_huge_folio directly.
2720 */
2721 list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2722 free_huge_folio(folio);
2723 spin_lock_irq(&hugetlb_lock);
2724
2725 return ret;
2726}
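/*
 * Worked example (illustrative, not part of the original source): with
 * resv_huge_pages = 10, free_huge_pages = 12 and delta = 5, the first
 * pass computes needed = (10 + 5) - 12 = 3 and allocates 3 surplus
 * pages. If another thread consumed 2 free pages while the lock was
 * dropped, the recalculation finds needed = 15 - (10 + 3) = 2 and the
 * allocation loop is retried for the remaining 2 pages.
 */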
2727
2728/*
2729 * This routine has two main purposes:
2730 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2731 * in unused_resv_pages. This corresponds to the prior adjustments made
2732 * to the associated reservation map.
2733 * 2) Free any unused surplus pages that may have been allocated to satisfy
2734 * the reservation. As many as unused_resv_pages may be freed.
2735 */
2736static void return_unused_surplus_pages(struct hstate *h,
2737 unsigned long unused_resv_pages)
2738{
2739 unsigned long nr_pages;
2740 LIST_HEAD(page_list);
2741
2742 lockdep_assert_held(&hugetlb_lock);
2743 /* Uncommit the reservation */
2744 h->resv_huge_pages -= unused_resv_pages;
2745
2746 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2747 goto out;
2748
2749 /*
2750 * Part (or even all) of the reservation could have been backed
2751 * by pre-allocated pages. Only free surplus pages.
2752 */
2753 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2754
2755 /*
2756 * We want to release as many surplus pages as possible, spread
2757 * evenly across all nodes with memory. Iterate across these nodes
2758 * until we can no longer free unreserved surplus pages. This occurs
2759 * when the nodes with surplus pages have no free pages.
2760 * remove_pool_hugetlb_folio() will balance the freed pages across the
2761 * on-line nodes with memory and will handle the hstate accounting.
2762 */
2763 while (nr_pages--) {
2764 struct folio *folio;
2765
2766		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], true);
2767 if (!folio)
2768 goto out;
2769
2770 list_add(&folio->lru, &page_list);
2771 }
2772
2773out:
2774 spin_unlock_irq(&hugetlb_lock);
2775 update_and_free_pages_bulk(h, &page_list);
2776 spin_lock_irq(&hugetlb_lock);
2777}
2778
2779
2780/*
2781 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2782 * are used by the huge page allocation routines to manage reservations.
2783 *
2784 * vma_needs_reservation is called to determine if the huge page at addr
2785 * within the vma has an associated reservation. If a reservation is
2786 * needed, the value 1 is returned. The caller is then responsible for
2787 * managing the global reservation and subpool usage counts. After
2788 * the huge page has been allocated, vma_commit_reservation is called
2789 * to add the page to the reservation map. If the page allocation fails,
2790 * the reservation must be ended instead of committed. vma_end_reservation
2791 * is called in such cases.
2792 *
2793 * In the normal case, vma_commit_reservation returns the same value
2794 * as the preceding vma_needs_reservation call. The only time this
2795 * is not the case is if a reserve map was changed between calls. It
2796 * is the responsibility of the caller to notice the difference and
2797 * take appropriate action.
2798 *
2799 * vma_add_reservation is used in error paths where a reservation must
2800 * be restored when a newly allocated huge page must be freed. It is
2801 * to be called after calling vma_needs_reservation to determine if a
2802 * reservation exists.
2803 *
2804 * vma_del_reservation is used in error paths where an entry in the reserve
2805 * map was created during huge page allocation and must be removed. It is to
2806 * be called after calling vma_needs_reservation to determine if a reservation
2807 * exists.
2808 */
2809enum vma_resv_mode {
2810 VMA_NEEDS_RESV,
2811 VMA_COMMIT_RESV,
2812 VMA_END_RESV,
2813 VMA_ADD_RESV,
2814 VMA_DEL_RESV,
2815};
2816static long __vma_reservation_common(struct hstate *h,
2817 struct vm_area_struct *vma, unsigned long addr,
2818 enum vma_resv_mode mode)
2819{
2820 struct resv_map *resv;
2821 pgoff_t idx;
2822 long ret;
2823 long dummy_out_regions_needed;
2824
2825 resv = vma_resv_map(vma);
2826 if (!resv)
2827 return 1;
2828
2829 idx = vma_hugecache_offset(h, vma, addr);
2830 switch (mode) {
2831 case VMA_NEEDS_RESV:
2832 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2833 /* We assume that vma_reservation_* routines always operate on
2834 * 1 page, and that adding to resv map a 1 page entry can only
2835 * ever require 1 region.
2836 */
2837 VM_BUG_ON(dummy_out_regions_needed != 1);
2838 break;
2839 case VMA_COMMIT_RESV:
2840 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2841 /* region_add calls of range 1 should never fail. */
2842 VM_BUG_ON(ret < 0);
2843 break;
2844 case VMA_END_RESV:
2845 region_abort(resv, idx, idx + 1, 1);
2846 ret = 0;
2847 break;
2848 case VMA_ADD_RESV:
2849 if (vma->vm_flags & VM_MAYSHARE) {
2850 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2851 /* region_add calls of range 1 should never fail. */
2852 VM_BUG_ON(ret < 0);
2853 } else {
2854 region_abort(resv, idx, idx + 1, 1);
2855 ret = region_del(resv, idx, idx + 1);
2856 }
2857 break;
2858 case VMA_DEL_RESV:
2859 if (vma->vm_flags & VM_MAYSHARE) {
2860 region_abort(resv, idx, idx + 1, 1);
2861 ret = region_del(resv, idx, idx + 1);
2862 } else {
2863 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2864 /* region_add calls of range 1 should never fail. */
2865 VM_BUG_ON(ret < 0);
2866 }
2867 break;
2868 default:
2869 BUG();
2870 }
2871
2872 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2873 return ret;
2874 /*
2875 * We know private mapping must have HPAGE_RESV_OWNER set.
2876 *
2877 * In most cases, reserves always exist for private mappings.
2878	 * However, a file associated with the mapping could have been
2879	 * hole punched or truncated after reserves were consumed.
2880	 * A subsequent fault on such a range will not use reserves.
2881 * Subtle - The reserve map for private mappings has the
2882 * opposite meaning than that of shared mappings. If NO
2883 * entry is in the reserve map, it means a reservation exists.
2884 * If an entry exists in the reserve map, it means the
2885 * reservation has already been consumed. As a result, the
2886 * return value of this routine is the opposite of the
2887 * value returned from reserve map manipulation routines above.
2888 */
2889 if (ret > 0)
2890 return 0;
2891 if (ret == 0)
2892 return 1;
2893 return ret;
2894}
2895
2896static long vma_needs_reservation(struct hstate *h,
2897 struct vm_area_struct *vma, unsigned long addr)
2898{
2899 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2900}
2901
2902static long vma_commit_reservation(struct hstate *h,
2903 struct vm_area_struct *vma, unsigned long addr)
2904{
2905 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2906}
2907
2908static void vma_end_reservation(struct hstate *h,
2909 struct vm_area_struct *vma, unsigned long addr)
2910{
2911 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2912}
2913
2914static long vma_add_reservation(struct hstate *h,
2915 struct vm_area_struct *vma, unsigned long addr)
2916{
2917 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2918}
2919
2920static long vma_del_reservation(struct hstate *h,
2921 struct vm_area_struct *vma, unsigned long addr)
2922{
2923 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2924}
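/*
 * Illustrative sketch (not part of the original source): the typical
 * allocation-time flow over these helpers is:
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	folio = ... allocate a huge folio ...;
 *	if (folio)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 */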
2925
2926/*
2927 * This routine is called to restore reservation information on error paths.
2928 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2929 * and the hugetlb mutex should remain held when calling this routine.
2930 *
2931 * It handles two specific cases:
2932 * 1) A reservation was in place and the folio consumed the reservation.
2933 * hugetlb_restore_reserve is set in the folio.
2934 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2935 * not set. However, alloc_hugetlb_folio always updates the reserve map.
2936 *
2937 * In case 1, free_huge_folio later in the error path will increment the
2938 * global reserve count. But, free_huge_folio does not have enough context
2939 * to adjust the reservation map. This case deals primarily with private
2940 * mappings. Adjust the reserve map here to be consistent with global
2941 * reserve count adjustments to be made by free_huge_folio. Make sure the
2942 * reserve map indicates there is a reservation present.
2943 *
2944 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2945 */
2946void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2947 unsigned long address, struct folio *folio)
2948{
2949 long rc = vma_needs_reservation(h, vma, address);
2950
2951 if (folio_test_hugetlb_restore_reserve(folio)) {
2952 if (unlikely(rc < 0))
2953 /*
2954 * Rare out of memory condition in reserve map
2955 * manipulation. Clear hugetlb_restore_reserve so
2956 * that global reserve count will not be incremented
2957 * by free_huge_folio. This will make it appear
2958 * as though the reservation for this folio was
2959 * consumed. This may prevent the task from
2960 * faulting in the folio at a later time. This
2961 * is better than inconsistent global huge page
2962 * accounting of reserve counts.
2963 */
2964 folio_clear_hugetlb_restore_reserve(folio);
2965 else if (rc)
2966 (void)vma_add_reservation(h, vma, address);
2967 else
2968 vma_end_reservation(h, vma, address);
2969 } else {
2970 if (!rc) {
2971 /*
2972 * This indicates there is an entry in the reserve map
2973 * not added by alloc_hugetlb_folio. We know it was added
2974 * before the alloc_hugetlb_folio call, otherwise
2975 * hugetlb_restore_reserve would be set on the folio.
2976 * Remove the entry so that a subsequent allocation
2977 * does not consume a reservation.
2978 */
2979 rc = vma_del_reservation(h, vma, address);
2980 if (rc < 0)
2981 /*
2982 * VERY rare out of memory condition. Since
2983 * we can not delete the entry, set
2984 * hugetlb_restore_reserve so that the reserve
2985 * count will be incremented when the folio
2986 * is freed. This reserve will be consumed
2987 * on a subsequent allocation.
2988 */
2989 folio_set_hugetlb_restore_reserve(folio);
2990 } else if (rc < 0) {
2991 /*
2992 * Rare out of memory condition from
2993 * vma_needs_reservation call. Memory allocation is
2994 * only attempted if a new entry is needed. Therefore,
2995 * this implies there is not an entry in the
2996 * reserve map.
2997 *
2998 * For shared mappings, no entry in the map indicates
2999 * no reservation. We are done.
3000 */
3001 if (!(vma->vm_flags & VM_MAYSHARE))
3002 /*
3003 * For private mappings, no entry indicates
3004 * a reservation is present. Since we can
3005 * not add an entry, set hugetlb_restore_reserve
3006 * on the folio so reserve count will be
3007 * incremented when freed. This reserve will
3008 * be consumed on a subsequent allocation.
3009 */
3010 folio_set_hugetlb_restore_reserve(folio);
3011 } else
3012 /*
3013 * No reservation present, do nothing
3014 */
3015 vma_end_reservation(h, vma, address);
3016 }
3017}
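/*
 * Illustrative sketch (not part of the original source): an error path
 * that must give back a freshly allocated folio looks roughly like:
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	...
 *	if (later_step_failed) {	(hypothetical condition)
 *		restore_reserve_on_error(h, vma, addr, folio);
 *		folio_put(folio);
 *	}
 */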
3018
3019/*
3020 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
3021 * the old one
3022 * @h: struct hstate old page belongs to
3023 * @old_folio: Old folio to dissolve
3024 * @list: List to isolate the page in case we need to
3025 * Returns 0 on success, otherwise a negated error.
3026 */
3027static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
3028 struct folio *old_folio, struct list_head *list)
3029{
3030 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3031 int nid = folio_nid(old_folio);
3032 struct folio *new_folio;
3033 int ret = 0;
3034
3035 /*
3036 * Before dissolving the folio, we need to allocate a new one for the
3037 * pool to remain stable. Here, we allocate the folio and 'prep' it
3038 * by doing everything but actually updating counters and adding to
3039	 * the pool. This simplifies things and lets us do most of the
3040	 * processing under the lock.
3041 */
3042 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
3043 if (!new_folio)
3044 return -ENOMEM;
3045 __prep_new_hugetlb_folio(h, new_folio);
3046
3047retry:
3048 spin_lock_irq(&hugetlb_lock);
3049 if (!folio_test_hugetlb(old_folio)) {
3050 /*
3051 * Freed from under us. Drop new_folio too.
3052 */
3053 goto free_new;
3054 } else if (folio_ref_count(old_folio)) {
3055 bool isolated;
3056
3057 /*
3058 * Someone has grabbed the folio, try to isolate it here.
3059 * Fail with -EBUSY if not possible.
3060 */
3061 spin_unlock_irq(&hugetlb_lock);
3062 isolated = isolate_hugetlb(old_folio, list);
3063 ret = isolated ? 0 : -EBUSY;
3064 spin_lock_irq(&hugetlb_lock);
3065 goto free_new;
3066 } else if (!folio_test_hugetlb_freed(old_folio)) {
3067 /*
3068 * Folio's refcount is 0 but it has not been enqueued in the
3069 * freelist yet. Race window is small, so we can succeed here if
3070 * we retry.
3071 */
3072 spin_unlock_irq(&hugetlb_lock);
3073 cond_resched();
3074 goto retry;
3075 } else {
3076 /*
3077 * Ok, old_folio is still a genuine free hugepage. Remove it from
3078 * the freelist and decrease the counters. These will be
3079 * incremented again when calling __prep_account_new_huge_page()
3080 * and enqueue_hugetlb_folio() for new_folio. The counters will
3081 * remain stable since this happens under the lock.
3082 */
3083 remove_hugetlb_folio(h, old_folio, false);
3084
3085 /*
3086 * Ref count on new_folio is already zero as it was dropped
3087 * earlier. It can be directly added to the pool free list.
3088 */
3089 __prep_account_new_huge_page(h, nid);
3090 enqueue_hugetlb_folio(h, new_folio);
3091
3092 /*
3093 * Folio has been replaced, we can safely free the old one.
3094 */
3095 spin_unlock_irq(&hugetlb_lock);
3096 update_and_free_hugetlb_folio(h, old_folio, false);
3097 }
3098
3099 return ret;
3100
3101free_new:
3102 spin_unlock_irq(&hugetlb_lock);
3103 /* Folio has a zero ref count, but needs a ref to be freed */
3104 folio_ref_unfreeze(new_folio, 1);
3105 update_and_free_hugetlb_folio(h, new_folio, false);
3106
3107 return ret;
3108}
3109
3110int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
3111{
3112 struct hstate *h;
3113 struct folio *folio = page_folio(page);
3114 int ret = -EBUSY;
3115
3116 /*
3117 * The page might have been dissolved from under our feet, so make sure
3118 * to carefully check the state under the lock.
3119 * Return success when racing as if we dissolved the page ourselves.
3120 */
3121 spin_lock_irq(&hugetlb_lock);
3122 if (folio_test_hugetlb(folio)) {
3123 h = folio_hstate(folio);
3124 } else {
3125 spin_unlock_irq(&hugetlb_lock);
3126 return 0;
3127 }
3128 spin_unlock_irq(&hugetlb_lock);
3129
3130 /*
3131 * Fence off gigantic pages as there is a cyclic dependency between
3132 * alloc_contig_range and them. Return -ENOMEM as this has the effect
3133 * of bailing out right away without further retrying.
3134 */
3135 if (hstate_is_gigantic(h))
3136 return -ENOMEM;
3137
3138 if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
3139 ret = 0;
3140 else if (!folio_ref_count(folio))
3141 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
3142
3143 return ret;
3144}
3145
3146struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
3147 unsigned long addr, int avoid_reserve)
3148{
3149 struct hugepage_subpool *spool = subpool_vma(vma);
3150 struct hstate *h = hstate_vma(vma);
3151 struct folio *folio;
3152 long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
3153 long gbl_chg;
3154 int memcg_charge_ret, ret, idx;
3155 struct hugetlb_cgroup *h_cg = NULL;
3156 struct mem_cgroup *memcg;
3157 bool deferred_reserve;
3158 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
3159
3160 memcg = get_mem_cgroup_from_current();
3161 memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
3162 if (memcg_charge_ret == -ENOMEM) {
3163 mem_cgroup_put(memcg);
3164 return ERR_PTR(-ENOMEM);
3165 }
3166
3167 idx = hstate_index(h);
3168 /*
3169 * Examine the region/reserve map to determine if the process
3170 * has a reservation for the page to be allocated. A return
3171 * code of zero indicates a reservation exists (no change).
3172 */
3173 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3174 if (map_chg < 0) {
3175 if (!memcg_charge_ret)
3176 mem_cgroup_cancel_charge(memcg, nr_pages);
3177 mem_cgroup_put(memcg);
3178 return ERR_PTR(-ENOMEM);
3179 }
3180
3181 /*
3182 * Processes that did not create the mapping will have no
3183 * reserves as indicated by the region/reserve map. Check
3184 * that the allocation will not exceed the subpool limit.
3185 * Allocations for MAP_NORESERVE mappings also need to be
3186 * checked against any subpool limit.
3187 */
3188 if (map_chg || avoid_reserve) {
3189 gbl_chg = hugepage_subpool_get_pages(spool, 1);
3190 if (gbl_chg < 0)
3191 goto out_end_reservation;
3192
3193 /*
3194 * Even though there was no reservation in the region/reserve
3195 * map, there could be reservations associated with the
3196 * subpool that can be used. This would be indicated if the
3197 * return value of hugepage_subpool_get_pages() is zero.
3198 * However, if avoid_reserve is specified we still avoid even
3199 * the subpool reservations.
3200 */
3201 if (avoid_reserve)
3202 gbl_chg = 1;
3203 }
3204
3205 /* If this allocation is not consuming a reservation, charge it now.
3206 */
3207 deferred_reserve = map_chg || avoid_reserve;
3208 if (deferred_reserve) {
3209 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3210 idx, pages_per_huge_page(h), &h_cg);
3211 if (ret)
3212 goto out_subpool_put;
3213 }
3214
3215 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3216 if (ret)
3217 goto out_uncharge_cgroup_reservation;
3218
3219 spin_lock_irq(&hugetlb_lock);
3220 /*
3221	 * gbl_chg is passed to indicate whether or not a page must be taken
3222 * from the global free pool (global change). gbl_chg == 0 indicates
3223 * a reservation exists for the allocation.
3224 */
3225 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3226 if (!folio) {
3227 spin_unlock_irq(&hugetlb_lock);
3228 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3229 if (!folio)
3230 goto out_uncharge_cgroup;
3231 spin_lock_irq(&hugetlb_lock);
3232 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3233 folio_set_hugetlb_restore_reserve(folio);
3234 h->resv_huge_pages--;
3235 }
3236 list_add(&folio->lru, &h->hugepage_activelist);
3237 folio_ref_unfreeze(folio, 1);
3238 /* Fall through */
3239 }
3240
3241 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3242 /* If allocation is not consuming a reservation, also store the
3243 * hugetlb_cgroup pointer on the page.
3244 */
3245 if (deferred_reserve) {
3246 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3247 h_cg, folio);
3248 }
3249
3250 spin_unlock_irq(&hugetlb_lock);
3251
3252 hugetlb_set_folio_subpool(folio, spool);
3253
3254 map_commit = vma_commit_reservation(h, vma, addr);
3255 if (unlikely(map_chg > map_commit)) {
3256 /*
3257 * The page was added to the reservation map between
3258 * vma_needs_reservation and vma_commit_reservation.
3259 * This indicates a race with hugetlb_reserve_pages.
3260 * Adjust for the subpool count incremented above AND
3261 * in hugetlb_reserve_pages for the same page. Also,
3262 * the reservation count added in hugetlb_reserve_pages
3263 * no longer applies.
3264 */
3265 long rsv_adjust;
3266
3267 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3268 hugetlb_acct_memory(h, -rsv_adjust);
3269 if (deferred_reserve)
3270 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3271 pages_per_huge_page(h), folio);
3272 }
3273
3274 if (!memcg_charge_ret)
3275 mem_cgroup_commit_charge(folio, memcg);
3276 mem_cgroup_put(memcg);
3277
3278 return folio;
3279
3280out_uncharge_cgroup:
3281 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3282out_uncharge_cgroup_reservation:
3283 if (deferred_reserve)
3284 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3285 h_cg);
3286out_subpool_put:
3287 if (map_chg || avoid_reserve)
3288 hugepage_subpool_put_pages(spool, 1);
3289out_end_reservation:
3290 vma_end_reservation(h, vma, addr);
3291 if (!memcg_charge_ret)
3292 mem_cgroup_cancel_charge(memcg, nr_pages);
3293 mem_cgroup_put(memcg);
3294 return ERR_PTR(-ENOSPC);
3295}
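/*
 * Illustrative sketch (not part of the original source): fault-path
 * callers check the ERR_PTR-encoded result; a caller might map the
 * errors along these lines:
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return (PTR_ERR(folio) == -ENOMEM) ? VM_FAULT_OOM :
 *						     VM_FAULT_SIGBUS;
 */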
3296
3297int alloc_bootmem_huge_page(struct hstate *h, int nid)
3298 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3299int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3300{
3301 struct huge_bootmem_page *m = NULL; /* initialize for clang */
3302 int nr_nodes, node;
3303
3304 /* do node specific alloc */
3305 if (nid != NUMA_NO_NODE) {
3306 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3307 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3308 if (!m)
3309 return 0;
3310 goto found;
3311 }
3312 /* allocate from next node when distributing huge pages */
3313 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3314 m = memblock_alloc_try_nid_raw(
3315 huge_page_size(h), huge_page_size(h),
3316 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3317 /*
3318 * Use the beginning of the huge page to store the
3319 * huge_bootmem_page struct (until gather_bootmem
3320 * puts them into the mem_map).
3321 */
3322 if (!m)
3323 return 0;
3324 goto found;
3325 }
3326
3327found:
3328
3329 /*
3330	 * Only initialize the head struct page in memmap_init_reserved_pages;
3331	 * the rest of the struct pages will be initialized by the HugeTLB
3332	 * subsystem itself.
3333	 * The head struct page is used by the HugeTLB subsystem to get folio
3334	 * information, such as the zone id and node id.
3335 */
3336 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3337 huge_page_size(h) - PAGE_SIZE);
3338 /* Put them into a private list first because mem_map is not up yet */
3339 INIT_LIST_HEAD(&m->list);
3340 list_add(&m->list, &huge_boot_pages);
3341 m->hstate = h;
3342 return 1;
3343}
3344
3345/* Initialize the tail struct pages of a hugepage in [start_page_number, end_page_number) */
3346static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3347 unsigned long start_page_number,
3348 unsigned long end_page_number)
3349{
3350 enum zone_type zone = zone_idx(folio_zone(folio));
3351 int nid = folio_nid(folio);
3352 unsigned long head_pfn = folio_pfn(folio);
3353 unsigned long pfn, end_pfn = head_pfn + end_page_number;
3354 int ret;
3355
3356 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
3357 struct page *page = pfn_to_page(pfn);
3358
3359 __init_single_page(page, pfn, zone, nid);
3360 prep_compound_tail((struct page *)folio, pfn - head_pfn);
3361 ret = page_ref_freeze(page, 1);
3362 VM_BUG_ON(!ret);
3363 }
3364}
3365
3366static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3367 struct hstate *h,
3368 unsigned long nr_pages)
3369{
3370 int ret;
3371
3372 /* Prepare folio head */
3373 __folio_clear_reserved(folio);
3374 __folio_set_head(folio);
3375 ret = folio_ref_freeze(folio, 1);
3376 VM_BUG_ON(!ret);
3377 /* Initialize the necessary tail struct pages */
3378 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3379 prep_compound_head((struct page *)folio, huge_page_order(h));
3380}
3381
3382static void __init prep_and_add_bootmem_folios(struct hstate *h,
3383 struct list_head *folio_list)
3384{
3385 unsigned long flags;
3386 struct folio *folio, *tmp_f;
3387
3388 /* Send list for bulk vmemmap optimization processing */
3389 hugetlb_vmemmap_optimize_folios(h, folio_list);
3390
3391 /* Add all new pool pages to free lists in one lock cycle */
3392 spin_lock_irqsave(&hugetlb_lock, flags);
3393 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3394 if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3395 /*
3396 * If HVO fails, initialize all tail struct pages
3397 * We do not worry about potential long lock hold
3398 * time as this is early in boot and there should
3399 * be no contention.
3400 */
3401 hugetlb_folio_init_tail_vmemmap(folio,
3402 HUGETLB_VMEMMAP_RESERVE_PAGES,
3403 pages_per_huge_page(h));
3404 }
3405 __prep_account_new_huge_page(h, folio_nid(folio));
3406 enqueue_hugetlb_folio(h, folio);
3407 }
3408 spin_unlock_irqrestore(&hugetlb_lock, flags);
3409}
3410
3411/*
3412 * Put bootmem huge pages into the standard lists after mem_map is up.
3413 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3414 */
3415static void __init gather_bootmem_prealloc(void)
3416{
3417 LIST_HEAD(folio_list);
3418 struct huge_bootmem_page *m;
3419 struct hstate *h = NULL, *prev_h = NULL;
3420
3421 list_for_each_entry(m, &huge_boot_pages, list) {
3422 struct page *page = virt_to_page(m);
3423 struct folio *folio = (void *)page;
3424
3425 h = m->hstate;
3426 /*
3427 * It is possible to have multiple huge page sizes (hstates)
3428 * in this list. If so, process each size separately.
3429 */
3430 if (h != prev_h && prev_h != NULL)
3431 prep_and_add_bootmem_folios(prev_h, &folio_list);
3432 prev_h = h;
3433
3434 VM_BUG_ON(!hstate_is_gigantic(h));
3435 WARN_ON(folio_ref_count(folio) != 1);
3436
3437 hugetlb_folio_init_vmemmap(folio, h,
3438 HUGETLB_VMEMMAP_RESERVE_PAGES);
3439 init_new_hugetlb_folio(h, folio);
3440 list_add(&folio->lru, &folio_list);
3441
3442 /*
3443 * We need to restore the 'stolen' pages to totalram_pages
3444 * in order to fix confusing memory reports from free(1) and
3445 * other side-effects, like CommitLimit going negative.
3446 */
3447 adjust_managed_page_count(page, pages_per_huge_page(h));
3448 cond_resched();
3449 }
3450
3451 prep_and_add_bootmem_folios(h, &folio_list);
3452}
3453
3454static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3455{
3456 unsigned long i;
3457 char buf[32];
3458
3459 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3460 if (hstate_is_gigantic(h)) {
3461 if (!alloc_bootmem_huge_page(h, nid))
3462 break;
3463 } else {
3464 struct folio *folio;
3465 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3466
3467 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3468 &node_states[N_MEMORY], NULL);
3469 if (!folio)
3470 break;
3471 free_huge_folio(folio); /* free it into the hugepage allocator */
3472 }
3473 cond_resched();
3474 }
3475 if (i == h->max_huge_pages_node[nid])
3476 return;
3477
3478 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3479 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3480 h->max_huge_pages_node[nid], buf, nid, i);
3481 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3482 h->max_huge_pages_node[nid] = i;
3483}
3484
3485/*
3486 * NOTE: this routine is called in different contexts for gigantic and
3487 * non-gigantic pages.
3488 * - For gigantic pages, this is called early in the boot process and
3489 *   pages are allocated from memblock or a similar early allocator.
3490 * Gigantic pages are actually added to pools later with the routine
3491 * gather_bootmem_prealloc.
3492 * - For non-gigantic pages, this is called later in the boot process after
3493 * all of mm is up and functional. Pages are allocated from buddy and
3494 * then added to hugetlb pools.
3495 */
3496static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3497{
3498 unsigned long i;
3499 struct folio *folio;
3500 LIST_HEAD(folio_list);
3501 nodemask_t *node_alloc_noretry;
3502 bool node_specific_alloc = false;
3503
3504 /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3505 if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3506 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3507 return;
3508 }
3509
3510 /* do node specific alloc */
3511 for_each_online_node(i) {
3512 if (h->max_huge_pages_node[i] > 0) {
3513 hugetlb_hstate_alloc_pages_onenode(h, i);
3514 node_specific_alloc = true;
3515 }
3516 }
3517
3518 if (node_specific_alloc)
3519 return;
3520
3521 /* below will do all node balanced alloc */
3522 if (!hstate_is_gigantic(h)) {
3523 /*
3524 * Bit mask controlling how hard we retry per-node allocations.
3525 * Ignore errors as lower level routines can deal with
3526 * node_alloc_noretry == NULL. If this kmalloc fails at boot
3527 * time, we are likely in bigger trouble.
3528 */
3529 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3530 GFP_KERNEL);
3531 } else {
3532 /* allocations done at boot time */
3533 node_alloc_noretry = NULL;
3534 }
3535
3536 /* bit mask controlling how hard we retry per-node allocations */
3537 if (node_alloc_noretry)
3538 nodes_clear(*node_alloc_noretry);
3539
3540 for (i = 0; i < h->max_huge_pages; ++i) {
3541 if (hstate_is_gigantic(h)) {
3542 /*
3543 * gigantic pages not added to list as they are not
3544 * added to pools now.
3545 */
3546 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3547 break;
3548 } else {
3549 folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3550 node_alloc_noretry);
3551 if (!folio)
3552 break;
3553 list_add(&folio->lru, &folio_list);
3554 }
3555 cond_resched();
3556 }
3557
3558 /* list will be empty if hstate_is_gigantic */
3559 prep_and_add_allocated_folios(h, &folio_list);
3560
3561 if (i < h->max_huge_pages) {
3562 char buf[32];
3563
3564 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3565 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3566 h->max_huge_pages, buf, i);
3567 h->max_huge_pages = i;
3568 }
3569 kfree(node_alloc_noretry);
3570}
3571
3572static void __init hugetlb_init_hstates(void)
3573{
3574 struct hstate *h, *h2;
3575
3576 for_each_hstate(h) {
3577		/* oversized hugepages were initialized in early boot */
3578 if (!hstate_is_gigantic(h))
3579 hugetlb_hstate_alloc_pages(h);
3580
3581 /*
3582 * Set demote order for each hstate. Note that
3583 * h->demote_order is initially 0.
3584 * - We can not demote gigantic pages if runtime freeing
3585 * is not supported, so skip this.
3586 * - If CMA allocation is possible, we can not demote
3587 * HUGETLB_PAGE_ORDER or smaller size pages.
3588 */
3589 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3590 continue;
3591 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3592 continue;
3593 for_each_hstate(h2) {
3594 if (h2 == h)
3595 continue;
3596 if (h2->order < h->order &&
3597 h2->order > h->demote_order)
3598 h->demote_order = h2->order;
3599 }
3600 }
3601}
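/*
 * Worked example (illustrative, not part of the original source): on
 * x86-64 with 2MB (order 9) and 1GB (order 18) hstates and no CMA, the
 * loop above sets demote_order = 9 for the 1GB hstate (demote to 2MB),
 * while the 2MB hstate keeps demote_order == 0 because no smaller
 * hstate exists.
 */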
3602
3603static void __init report_hugepages(void)
3604{
3605 struct hstate *h;
3606
3607 for_each_hstate(h) {
3608 char buf[32];
3609
3610 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3611 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3612 buf, h->free_huge_pages);
3613 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3614 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3615 }
3616}
3617
3618#ifdef CONFIG_HIGHMEM
3619static void try_to_free_low(struct hstate *h, unsigned long count,
3620 nodemask_t *nodes_allowed)
3621{
3622 int i;
3623 LIST_HEAD(page_list);
3624
3625 lockdep_assert_held(&hugetlb_lock);
3626 if (hstate_is_gigantic(h))
3627 return;
3628
3629 /*
3630 * Collect pages to be freed on a list, and free after dropping lock
3631 */
3632 for_each_node_mask(i, *nodes_allowed) {
3633 struct folio *folio, *next;
3634 struct list_head *freel = &h->hugepage_freelists[i];
3635 list_for_each_entry_safe(folio, next, freel, lru) {
3636 if (count >= h->nr_huge_pages)
3637 goto out;
3638 if (folio_test_highmem(folio))
3639 continue;
3640 remove_hugetlb_folio(h, folio, false);
3641 list_add(&folio->lru, &page_list);
3642 }
3643 }
3644
3645out:
3646 spin_unlock_irq(&hugetlb_lock);
3647 update_and_free_pages_bulk(h, &page_list);
3648 spin_lock_irq(&hugetlb_lock);
3649}
3650#else
3651static inline void try_to_free_low(struct hstate *h, unsigned long count,
3652 nodemask_t *nodes_allowed)
3653{
3654}
3655#endif
3656
3657/*
3658 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3659 * balanced by operating on them in a round-robin fashion.
3660 * Returns 1 if an adjustment was made.
3661 */
3662static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3663 int delta)
3664{
3665 int nr_nodes, node;
3666
3667 lockdep_assert_held(&hugetlb_lock);
3668 VM_BUG_ON(delta != -1 && delta != 1);
3669
3670 if (delta < 0) {
3671 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3672 if (h->surplus_huge_pages_node[node])
3673 goto found;
3674 }
3675 } else {
3676 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3677 if (h->surplus_huge_pages_node[node] <
3678 h->nr_huge_pages_node[node])
3679 goto found;
3680 }
3681 }
3682 return 0;
3683
3684found:
3685 h->surplus_huge_pages += delta;
3686 h->surplus_huge_pages_node[node] += delta;
3687 return 1;
3688}
3689
3690#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3691static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3692 nodemask_t *nodes_allowed)
3693{
3694 unsigned long min_count;
3695 unsigned long allocated;
3696 struct folio *folio;
3697 LIST_HEAD(page_list);
3698 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3699
3700 /*
3701 * Bit mask controlling how hard we retry per-node allocations.
3702 * If we can not allocate the bit mask, do not attempt to allocate
3703 * the requested huge pages.
3704 */
3705 if (node_alloc_noretry)
3706 nodes_clear(*node_alloc_noretry);
3707 else
3708 return -ENOMEM;
3709
3710 /*
3711 * resize_lock mutex prevents concurrent adjustments to number of
3712 * pages in hstate via the proc/sysfs interfaces.
3713 */
3714 mutex_lock(&h->resize_lock);
3715 flush_free_hpage_work(h);
3716 spin_lock_irq(&hugetlb_lock);
3717
3718 /*
3719 * Check for a node specific request.
3720 * Changing node specific huge page count may require a corresponding
3721 * change to the global count. In any case, the passed node mask
3722 * (nodes_allowed) will restrict alloc/free to the specified node.
3723 */
3724 if (nid != NUMA_NO_NODE) {
3725 unsigned long old_count = count;
3726
3727 count += persistent_huge_pages(h) -
3728 (h->nr_huge_pages_node[nid] -
3729 h->surplus_huge_pages_node[nid]);
3730 /*
3731 * User may have specified a large count value which caused the
3732 * above calculation to overflow. In this case, they wanted
3733 * to allocate as many huge pages as possible. Set count to
3734 * largest possible value to align with their intention.
3735 */
3736 if (count < old_count)
3737 count = ULONG_MAX;
3738 }
3739
3740 /*
3741	 * Runtime allocation of gigantic pages depends on the capability for
3742	 * large page range allocation.
3743	 * If the system does not provide this feature, return an error when
3744	 * the user tries to allocate gigantic pages, but let the user free the
3745	 * boot-time allocated gigantic pages.
3746 */
3747 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3748 if (count > persistent_huge_pages(h)) {
3749 spin_unlock_irq(&hugetlb_lock);
3750 mutex_unlock(&h->resize_lock);
3751 NODEMASK_FREE(node_alloc_noretry);
3752 return -EINVAL;
3753 }
3754 /* Fall through to decrease pool */
3755 }
3756
3757 /*
3758 * Increase the pool size
3759 * First take pages out of surplus state. Then make up the
3760 * remaining difference by allocating fresh huge pages.
3761 *
3762 * We might race with alloc_surplus_hugetlb_folio() here and be unable
3763 * to convert a surplus huge page to a normal huge page. That is
3764 * not critical, though, it just means the overall size of the
3765 * pool might be one hugepage larger than it needs to be, but
3766 * within all the constraints specified by the sysctls.
3767 */
3768 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3769 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3770 break;
3771 }
3772
3773 allocated = 0;
3774 while (count > (persistent_huge_pages(h) + allocated)) {
3775 /*
3776 * If this allocation races such that we no longer need the
3777 * page, free_huge_folio will handle it by freeing the page
3778 * and reducing the surplus.
3779 */
3780 spin_unlock_irq(&hugetlb_lock);
3781
3782 /* yield cpu to avoid soft lockup */
3783 cond_resched();
3784
3785 folio = alloc_pool_huge_folio(h, nodes_allowed,
3786 node_alloc_noretry);
3787 if (!folio) {
3788 prep_and_add_allocated_folios(h, &page_list);
3789 spin_lock_irq(&hugetlb_lock);
3790 goto out;
3791 }
3792
3793 list_add(&folio->lru, &page_list);
3794 allocated++;
3795
3796 /* Bail for signals. Probably ctrl-c from user */
3797 if (signal_pending(current)) {
3798 prep_and_add_allocated_folios(h, &page_list);
3799 spin_lock_irq(&hugetlb_lock);
3800 goto out;
3801 }
3802
3803 spin_lock_irq(&hugetlb_lock);
3804 }
3805
3806 /* Add allocated pages to the pool */
3807 if (!list_empty(&page_list)) {
3808 spin_unlock_irq(&hugetlb_lock);
3809 prep_and_add_allocated_folios(h, &page_list);
3810 spin_lock_irq(&hugetlb_lock);
3811 }
3812
3813 /*
3814 * Decrease the pool size
3815 * First return free pages to the buddy allocator (being careful
3816 * to keep enough around to satisfy reservations). Then place
3817 * pages into surplus state as needed so the pool will shrink
3818 * to the desired size as pages become free.
3819 *
3820 * By placing pages into the surplus state independent of the
3821 * overcommit value, we are allowing the surplus pool size to
3822 * exceed overcommit. There are few sane options here. Since
3823 * alloc_surplus_hugetlb_folio() is checking the global counter,
3824 * though, we'll note that we're not allowed to exceed surplus
3825 * and won't grow the pool anywhere else. Not until one of the
3826 * sysctls are changed, or the surplus pages go out of use.
3827 */
3828 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3829 min_count = max(count, min_count);
3830 try_to_free_low(h, min_count, nodes_allowed);
3831
3832 /*
3833 * Collect pages to be removed on list without dropping lock
3834 */
3835 while (min_count < persistent_huge_pages(h)) {
3836 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3837 if (!folio)
3838 break;
3839
3840 list_add(&folio->lru, &page_list);
3841 }
3842 /* free the pages after dropping lock */
3843 spin_unlock_irq(&hugetlb_lock);
3844 update_and_free_pages_bulk(h, &page_list);
3845 flush_free_hpage_work(h);
3846 spin_lock_irq(&hugetlb_lock);
3847
3848 while (count < persistent_huge_pages(h)) {
3849 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3850 break;
3851 }
3852out:
3853 h->max_huge_pages = persistent_huge_pages(h);
3854 spin_unlock_irq(&hugetlb_lock);
3855 mutex_unlock(&h->resize_lock);
3856
3857 NODEMASK_FREE(node_alloc_noretry);
3858
3859 return 0;
3860}
3861
3862static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
3863{
3864 int i, nid = folio_nid(folio);
3865 struct hstate *target_hstate;
3866 struct page *subpage;
3867 struct folio *inner_folio;
3868 int rc = 0;
3869
3870 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3871
3872 remove_hugetlb_folio_for_demote(h, folio, false);
3873 spin_unlock_irq(&hugetlb_lock);
3874
3875 /*
3876 * If vmemmap already existed for folio, the remove routine above would
3877 * have cleared the hugetlb folio flag. Hence the folio is technically
3878 * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
3879 * passed hugetlb folios and will BUG otherwise.
3880 */
3881 if (folio_test_hugetlb(folio)) {
3882 rc = hugetlb_vmemmap_restore_folio(h, folio);
3883 if (rc) {
3884			/* Allocation of vmemmap failed, we can not demote the folio */
3885 spin_lock_irq(&hugetlb_lock);
3886 folio_ref_unfreeze(folio, 1);
3887 add_hugetlb_folio(h, folio, false);
3888 return rc;
3889 }
3890 }
3891
3892 /*
3893 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3894 * sizes as it will not ref count folios.
3895 */
3896 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3897
3898 /*
3899 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3900 * Without the mutex, pages added to target hstate could be marked
3901 * as surplus.
3902 *
3903 * Note that we already hold h->resize_lock. To prevent deadlock,
3904	 * use the convention of always taking the larger size hstate mutex first.
3905 */
3906 mutex_lock(&target_hstate->resize_lock);
3907 for (i = 0; i < pages_per_huge_page(h);
3908 i += pages_per_huge_page(target_hstate)) {
3909 subpage = folio_page(folio, i);
3910 inner_folio = page_folio(subpage);
3911 if (hstate_is_gigantic(target_hstate))
3912 prep_compound_gigantic_folio_for_demote(inner_folio,
3913 target_hstate->order);
3914 else
3915 prep_compound_page(subpage, target_hstate->order);
3916 folio_change_private(inner_folio, NULL);
3917 prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
3918 free_huge_folio(inner_folio);
3919 }
3920 mutex_unlock(&target_hstate->resize_lock);
3921
3922 spin_lock_irq(&hugetlb_lock);
3923
3924 /*
3925 * Not absolutely necessary, but for consistency update max_huge_pages
3926 * based on pool changes for the demoted page.
3927 */
3928 h->max_huge_pages--;
3929 target_hstate->max_huge_pages +=
3930 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3931
3932 return rc;
3933}
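/*
 * Worked example (illustrative, not part of the original source):
 * demoting one 1GB folio (262144 base pages) to 2MB folios (512 base
 * pages each) steps the loop above in increments of 512 and produces
 * 262144 / 512 = 512 2MB folios; max_huge_pages drops by 1 on the 1GB
 * hstate and grows by 512 on the 2MB hstate.
 */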
3934
3935static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3936 __must_hold(&hugetlb_lock)
3937{
3938 int nr_nodes, node;
3939 struct folio *folio;
3940
3941 lockdep_assert_held(&hugetlb_lock);
3942
3943 /* We should never get here if no demote order */
3944 if (!h->demote_order) {
3945 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3946 return -EINVAL; /* internal error */
3947 }
3948
3949 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3950 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3951 if (folio_test_hwpoison(folio))
3952 continue;
3953 return demote_free_hugetlb_folio(h, folio);
3954 }
3955 }
3956
3957 /*
3958 * Only way to get here is if all pages on free lists are poisoned.
3959 * Return -EBUSY so that caller will not retry.
3960 */
3961 return -EBUSY;
3962}
3963
3964#define HSTATE_ATTR_RO(_name) \
3965 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3966
3967#define HSTATE_ATTR_WO(_name) \
3968 static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3969
3970#define HSTATE_ATTR(_name) \
3971 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3972
3973static struct kobject *hugepages_kobj;
3974static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3975
3976static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3977
3978static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3979{
3980 int i;
3981
3982 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3983 if (hstate_kobjs[i] == kobj) {
3984 if (nidp)
3985 *nidp = NUMA_NO_NODE;
3986 return &hstates[i];
3987 }
3988
3989 return kobj_to_node_hstate(kobj, nidp);
3990}
3991
3992static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3993 struct kobj_attribute *attr, char *buf)
3994{
3995 struct hstate *h;
3996 unsigned long nr_huge_pages;
3997 int nid;
3998
3999 h = kobj_to_hstate(kobj, &nid);
4000 if (nid == NUMA_NO_NODE)
4001 nr_huge_pages = h->nr_huge_pages;
4002 else
4003 nr_huge_pages = h->nr_huge_pages_node[nid];
4004
4005 return sysfs_emit(buf, "%lu\n", nr_huge_pages);
4006}
4007
4008static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4009 struct hstate *h, int nid,
4010 unsigned long count, size_t len)
4011{
4012 int err;
4013 nodemask_t nodes_allowed, *n_mask;
4014
4015 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
4016 return -EINVAL;
4017
4018 if (nid == NUMA_NO_NODE) {
4019 /*
4020 * global hstate attribute
4021 */
4022 if (!(obey_mempolicy &&
4023 init_nodemask_of_mempolicy(&nodes_allowed)))
4024 n_mask = &node_states[N_MEMORY];
4025 else
4026 n_mask = &nodes_allowed;
4027 } else {
4028 /*
4029 * Node specific request. count adjustment happens in
4030 * set_max_huge_pages() after acquiring hugetlb_lock.
4031 */
4032 init_nodemask_of_node(&nodes_allowed, nid);
4033 n_mask = &nodes_allowed;
4034 }
4035
4036 err = set_max_huge_pages(h, count, nid, n_mask);
4037
4038 return err ? err : len;
4039}
4040
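/*
 * Common handler for writes to nr_hugepages and nr_hugepages_mempolicy,
 * e.g.:  echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */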
4041static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
4042 struct kobject *kobj, const char *buf,
4043 size_t len)
4044{
4045 struct hstate *h;
4046 unsigned long count;
4047 int nid;
4048 int err;
4049
4050 err = kstrtoul(buf, 10, &count);
4051 if (err)
4052 return err;
4053
4054 h = kobj_to_hstate(kobj, &nid);
4055 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
4056}
4057
4058static ssize_t nr_hugepages_show(struct kobject *kobj,
4059 struct kobj_attribute *attr, char *buf)
4060{
4061 return nr_hugepages_show_common(kobj, attr, buf);
4062}
4063
4064static ssize_t nr_hugepages_store(struct kobject *kobj,
4065 struct kobj_attribute *attr, const char *buf, size_t len)
4066{
4067 return nr_hugepages_store_common(false, kobj, buf, len);
4068}
4069HSTATE_ATTR(nr_hugepages);
4070
4071#ifdef CONFIG_NUMA
4072
4073/*
4074 * hstate attribute for optionally mempolicy-based constraint on persistent
4075 * huge page alloc/free.
4076 */
4077static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
4078 struct kobj_attribute *attr,
4079 char *buf)
4080{
4081 return nr_hugepages_show_common(kobj, attr, buf);
4082}
4083
4084static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4085 struct kobj_attribute *attr, const char *buf, size_t len)
4086{
4087 return nr_hugepages_store_common(true, kobj, buf, len);
4088}
4089HSTATE_ATTR(nr_hugepages_mempolicy);
4090#endif
4091
4093static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4094 struct kobj_attribute *attr, char *buf)
4095{
4096 struct hstate *h = kobj_to_hstate(kobj, NULL);
4097 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
4098}
4099
4100static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4101 struct kobj_attribute *attr, const char *buf, size_t count)
4102{
4103 int err;
4104 unsigned long input;
4105 struct hstate *h = kobj_to_hstate(kobj, NULL);
4106
4107 if (hstate_is_gigantic(h))
4108 return -EINVAL;
4109
4110 err = kstrtoul(buf, 10, &input);
4111 if (err)
4112 return err;
4113
4114 spin_lock_irq(&hugetlb_lock);
4115 h->nr_overcommit_huge_pages = input;
4116 spin_unlock_irq(&hugetlb_lock);
4117
4118 return count;
4119}
4120HSTATE_ATTR(nr_overcommit_hugepages);
4121
4122static ssize_t free_hugepages_show(struct kobject *kobj,
4123 struct kobj_attribute *attr, char *buf)
4124{
4125 struct hstate *h;
4126 unsigned long free_huge_pages;
4127 int nid;
4128
4129 h = kobj_to_hstate(kobj, &nid);
4130 if (nid == NUMA_NO_NODE)
4131 free_huge_pages = h->free_huge_pages;
4132 else
4133 free_huge_pages = h->free_huge_pages_node[nid];
4134
4135 return sysfs_emit(buf, "%lu\n", free_huge_pages);
4136}
4137HSTATE_ATTR_RO(free_hugepages);
4138
4139static ssize_t resv_hugepages_show(struct kobject *kobj,
4140 struct kobj_attribute *attr, char *buf)
4141{
4142 struct hstate *h = kobj_to_hstate(kobj, NULL);
4143 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
4144}
4145HSTATE_ATTR_RO(resv_hugepages);
4146
4147static ssize_t surplus_hugepages_show(struct kobject *kobj,
4148 struct kobj_attribute *attr, char *buf)
4149{
4150 struct hstate *h;
4151 unsigned long surplus_huge_pages;
4152 int nid;
4153
4154 h = kobj_to_hstate(kobj, &nid);
4155 if (nid == NUMA_NO_NODE)
4156 surplus_huge_pages = h->surplus_huge_pages;
4157 else
4158 surplus_huge_pages = h->surplus_huge_pages_node[nid];
4159
4160 return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
4161}
4162HSTATE_ATTR_RO(surplus_hugepages);
4163
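/*
 * Writing N to the write-only 'demote' file attempts to demote up to N
 * free (unreserved) huge pages to demote_size pages, e.g.:
 *   echo 1 > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 */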
4164static ssize_t demote_store(struct kobject *kobj,
4165 struct kobj_attribute *attr, const char *buf, size_t len)
4166{
4167 unsigned long nr_demote;
4168 unsigned long nr_available;
4169 nodemask_t nodes_allowed, *n_mask;
4170 struct hstate *h;
4171 int err;
4172 int nid;
4173
4174 err = kstrtoul(buf, 10, &nr_demote);
4175 if (err)
4176 return err;
4177 h = kobj_to_hstate(kobj, &nid);
4178
4179 if (nid != NUMA_NO_NODE) {
4180 init_nodemask_of_node(&nodes_allowed, nid);
4181 n_mask = &nodes_allowed;
4182 } else {
4183 n_mask = &node_states[N_MEMORY];
4184 }
4185
4186 /* Synchronize with other sysfs operations modifying huge pages */
4187 mutex_lock(&h->resize_lock);
4188 spin_lock_irq(&hugetlb_lock);
4189
4190 while (nr_demote) {
4191 /*
4192		 * Check for available pages to demote each time through the
4193 * loop as demote_pool_huge_page will drop hugetlb_lock.
4194 */
4195 if (nid != NUMA_NO_NODE)
4196 nr_available = h->free_huge_pages_node[nid];
4197 else
4198 nr_available = h->free_huge_pages;
4199 nr_available -= h->resv_huge_pages;
4200 if (!nr_available)
4201 break;
4202
4203 err = demote_pool_huge_page(h, n_mask);
4204 if (err)
4205 break;
4206
4207 nr_demote--;
4208 }
4209
4210 spin_unlock_irq(&hugetlb_lock);
4211 mutex_unlock(&h->resize_lock);
4212
4213 if (err)
4214 return err;
4215 return len;
4216}
4217HSTATE_ATTR_WO(demote);
4218
4219static ssize_t demote_size_show(struct kobject *kobj,
4220 struct kobj_attribute *attr, char *buf)
4221{
4222 struct hstate *h = kobj_to_hstate(kobj, NULL);
4223 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4224
4225 return sysfs_emit(buf, "%lukB\n", demote_size);
4226}
4227
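/*
 * 'demote_size' selects the target size for demotion. It must itself be
 * a supported huge page size smaller than this hstate's size, e.g.:
 *   echo 2M > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 */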
4228static ssize_t demote_size_store(struct kobject *kobj,
4229 struct kobj_attribute *attr,
4230 const char *buf, size_t count)
4231{
4232 struct hstate *h, *demote_hstate;
4233 unsigned long demote_size;
4234 unsigned int demote_order;
4235
4236 demote_size = (unsigned long)memparse(buf, NULL);
4237
4238 demote_hstate = size_to_hstate(demote_size);
4239 if (!demote_hstate)
4240 return -EINVAL;
4241 demote_order = demote_hstate->order;
4242 if (demote_order < HUGETLB_PAGE_ORDER)
4243 return -EINVAL;
4244
4245 /* demote order must be smaller than hstate order */
4246 h = kobj_to_hstate(kobj, NULL);
4247 if (demote_order >= h->order)
4248 return -EINVAL;
4249
4250 /* resize_lock synchronizes access to demote size and writes */
4251 mutex_lock(&h->resize_lock);
4252 h->demote_order = demote_order;
4253 mutex_unlock(&h->resize_lock);
4254
4255 return count;
4256}
4257HSTATE_ATTR(demote_size);
4258
4259static struct attribute *hstate_attrs[] = {
4260 &nr_hugepages_attr.attr,
4261 &nr_overcommit_hugepages_attr.attr,
4262 &free_hugepages_attr.attr,
4263 &resv_hugepages_attr.attr,
4264 &surplus_hugepages_attr.attr,
4265#ifdef CONFIG_NUMA
4266 &nr_hugepages_mempolicy_attr.attr,
4267#endif
4268 NULL,
4269};
4270
4271static const struct attribute_group hstate_attr_group = {
4272 .attrs = hstate_attrs,
4273};
4274
4275static struct attribute *hstate_demote_attrs[] = {
4276 &demote_size_attr.attr,
4277 &demote_attr.attr,
4278 NULL,
4279};
4280
4281static const struct attribute_group hstate_demote_attr_group = {
4282 .attrs = hstate_demote_attrs,
4283};
4284
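/*
 * Create the sysfs directory for a single hstate under @parent and
 * populate it with @hstate_attr_group (plus the demote attributes when
 * the hstate has a demote_order). The kobject is saved in @hstate_kobjs.
 */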
4285static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4286 struct kobject **hstate_kobjs,
4287 const struct attribute_group *hstate_attr_group)
4288{
4289 int retval;
4290 int hi = hstate_index(h);
4291
4292 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4293 if (!hstate_kobjs[hi])
4294 return -ENOMEM;
4295
4296 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
4297 if (retval) {
4298 kobject_put(hstate_kobjs[hi]);
4299 hstate_kobjs[hi] = NULL;
4300 return retval;
4301 }
4302
4303 if (h->demote_order) {
4304 retval = sysfs_create_group(hstate_kobjs[hi],
4305 &hstate_demote_attr_group);
4306 if (retval) {
4307 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
4308 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4309 kobject_put(hstate_kobjs[hi]);
4310 hstate_kobjs[hi] = NULL;
4311 return retval;
4312 }
4313 }
4314
4315 return 0;
4316}
4317
4318#ifdef CONFIG_NUMA
4319static bool hugetlb_sysfs_initialized __ro_after_init;
4320
4321/*
4322 * node_hstate/s - associate per node hstate attributes, via their kobjects,
4323 * with node devices in node_devices[] using a parallel array. The array
4324 * index of a node device or _hstate == node id.
4325 * This is here to avoid any static dependency of the node device driver, in
4326 * the base kernel, on the hugetlb module.
4327 */
4328struct node_hstate {
4329 struct kobject *hugepages_kobj;
4330 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4331};
4332static struct node_hstate node_hstates[MAX_NUMNODES];
4333
4334/*
4335 * A subset of global hstate attributes for node devices
4336 */
4337static struct attribute *per_node_hstate_attrs[] = {
4338 &nr_hugepages_attr.attr,
4339 &free_hugepages_attr.attr,
4340 &surplus_hugepages_attr.attr,
4341 NULL,
4342};
4343
4344static const struct attribute_group per_node_hstate_attr_group = {
4345 .attrs = per_node_hstate_attrs,
4346};
4347
4348/*
4349 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4350 * Returns node id via non-NULL nidp.
4351 */
4352static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4353{
4354 int nid;
4355
4356 for (nid = 0; nid < nr_node_ids; nid++) {
4357 struct node_hstate *nhs = &node_hstates[nid];
4358 int i;
4359 for (i = 0; i < HUGE_MAX_HSTATE; i++)
4360 if (nhs->hstate_kobjs[i] == kobj) {
4361 if (nidp)
4362 *nidp = nid;
4363 return &hstates[i];
4364 }
4365 }
4366
4367 BUG();
4368 return NULL;
4369}
4370
4371/*
4372 * Unregister hstate attributes from a single node device.
4373 * No-op if no hstate attributes attached.
4374 */
4375void hugetlb_unregister_node(struct node *node)
4376{
4377 struct hstate *h;
4378 struct node_hstate *nhs = &node_hstates[node->dev.id];
4379
4380 if (!nhs->hugepages_kobj)
4381 return; /* no hstate attributes */
4382
4383 for_each_hstate(h) {
4384 int idx = hstate_index(h);
4385 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4386
4387 if (!hstate_kobj)
4388 continue;
4389 if (h->demote_order)
4390 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4391 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4392 kobject_put(hstate_kobj);
4393 nhs->hstate_kobjs[idx] = NULL;
4394 }
4395
4396 kobject_put(nhs->hugepages_kobj);
4397 nhs->hugepages_kobj = NULL;
4398}
4399
4401/*
4402 * Register hstate attributes for a single node device.
4403 * No-op if attributes already registered.
4404 */
4405void hugetlb_register_node(struct node *node)
4406{
4407 struct hstate *h;
4408 struct node_hstate *nhs = &node_hstates[node->dev.id];
4409 int err;
4410
4411 if (!hugetlb_sysfs_initialized)
4412 return;
4413
4414 if (nhs->hugepages_kobj)
4415 return; /* already allocated */
4416
4417 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
4418 &node->dev.kobj);
4419 if (!nhs->hugepages_kobj)
4420 return;
4421
4422 for_each_hstate(h) {
4423 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4424 nhs->hstate_kobjs,
4425 &per_node_hstate_attr_group);
4426 if (err) {
4427 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4428 h->name, node->dev.id);
4429 hugetlb_unregister_node(node);
4430 break;
4431 }
4432 }
4433}
4434
4435/*
4436 * hugetlb init time: register hstate attributes for all registered node
4437 * devices of nodes that have memory. All on-line nodes should have
4438 * registered their associated device by this time.
4439 */
4440static void __init hugetlb_register_all_nodes(void)
4441{
4442 int nid;
4443
4444 for_each_online_node(nid)
4445 hugetlb_register_node(node_devices[nid]);
4446}
4447#else /* !CONFIG_NUMA */
4448
4449static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4450{
4451 BUG();
4452 if (nidp)
4453		*nidp = NUMA_NO_NODE;
4454 return NULL;
4455}
4456
4457static void hugetlb_register_all_nodes(void) { }
4458
4459#endif
4460
4461#ifdef CONFIG_CMA
4462static void __init hugetlb_cma_check(void);
4463#else
4464static inline __init void hugetlb_cma_check(void)
4465{
4466}
4467#endif
4468
4469static void __init hugetlb_sysfs_init(void)
4470{
4471 struct hstate *h;
4472 int err;
4473
4474 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4475 if (!hugepages_kobj)
4476 return;
4477
4478 for_each_hstate(h) {
4479 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4480 hstate_kobjs, &hstate_attr_group);
4481 if (err)
4482			pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
4483 }
4484
4485#ifdef CONFIG_NUMA
4486 hugetlb_sysfs_initialized = true;
4487#endif
4488 hugetlb_register_all_nodes();
4489}
4490
4491#ifdef CONFIG_SYSCTL
4492static void hugetlb_sysctl_init(void);
4493#else
4494static inline void hugetlb_sysctl_init(void) { }
4495#endif
4496
4497static int __init hugetlb_init(void)
4498{
4499 int i;
4500
4501 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4502 __NR_HPAGEFLAGS);
4503
4504 if (!hugepages_supported()) {
4505 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4506 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4507 return 0;
4508 }
4509
4510 /*
4511 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4512 * architectures depend on setup being done here.
4513 */
4514 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4515 if (!parsed_default_hugepagesz) {
4516 /*
4517 * If we did not parse a default huge page size, set
4518 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4519 * number of huge pages for this default size was implicitly
4520 * specified, set that here as well.
4521 * Note that the implicit setting will overwrite an explicit
4522 * setting. A warning will be printed in this case.
4523 */
4524 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4525 if (default_hstate_max_huge_pages) {
4526 if (default_hstate.max_huge_pages) {
4527 char buf[32];
4528
4529 string_get_size(huge_page_size(&default_hstate),
4530 1, STRING_UNITS_2, buf, 32);
4531 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4532 default_hstate.max_huge_pages, buf);
4533 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4534 default_hstate_max_huge_pages);
4535 }
4536 default_hstate.max_huge_pages =
4537 default_hstate_max_huge_pages;
4538
4539 for_each_online_node(i)
4540 default_hstate.max_huge_pages_node[i] =
4541 default_hugepages_in_node[i];
4542 }
4543 }
4544
4545 hugetlb_cma_check();
4546 hugetlb_init_hstates();
4547 gather_bootmem_prealloc();
4548 report_hugepages();
4549
4550 hugetlb_sysfs_init();
4551 hugetlb_cgroup_file_init();
4552 hugetlb_sysctl_init();
4553
4554#ifdef CONFIG_SMP
4555 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4556#else
4557 num_fault_mutexes = 1;
4558#endif
4559 hugetlb_fault_mutex_table =
4560 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4561 GFP_KERNEL);
4562 BUG_ON(!hugetlb_fault_mutex_table);
4563
4564 for (i = 0; i < num_fault_mutexes; i++)
4565 mutex_init(&hugetlb_fault_mutex_table[i]);
4566 return 0;
4567}
4568subsys_initcall(hugetlb_init);
4569
4570/* Overwritten by architectures with more huge page sizes */
4571bool __init __weak arch_hugetlb_valid_size(unsigned long size)
4572{
4573 return size == HPAGE_SIZE;
4574}
4575
4576void __init hugetlb_add_hstate(unsigned int order)
4577{
4578 struct hstate *h;
4579 unsigned long i;
4580
4581	if (size_to_hstate(PAGE_SIZE << order))
4582		return;
4584 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4585 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4586 h = &hstates[hugetlb_max_hstate++];
4587 mutex_init(&h->resize_lock);
4588 h->order = order;
4589 h->mask = ~(huge_page_size(h) - 1);
4590 for (i = 0; i < MAX_NUMNODES; ++i)
4591 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4592 INIT_LIST_HEAD(&h->hugepage_activelist);
4593 h->next_nid_to_alloc = first_memory_node;
4594 h->next_nid_to_free = first_memory_node;
4595 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4596 huge_page_size(h)/SZ_1K);
4597
4598 parsed_hstate = h;
4599}
4600
4601bool __init __weak hugetlb_node_alloc_supported(void)
4602{
4603 return true;
4604}
4605
4606static void __init hugepages_clear_pages_in_node(void)
4607{
4608 if (!hugetlb_max_hstate) {
4609 default_hstate_max_huge_pages = 0;
4610 memset(default_hugepages_in_node, 0,
4611 sizeof(default_hugepages_in_node));
4612 } else {
4613 parsed_hstate->max_huge_pages = 0;
4614 memset(parsed_hstate->max_huge_pages_node, 0,
4615 sizeof(parsed_hstate->max_huge_pages_node));
4616 }
4617}
4618
4619/*
4620 * hugepages command line processing
4621 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4622 * specification. If not, ignore the hugepages value. hugepages can also
4623 * be the first huge page command line option in which case it implicitly
4624 * specifies the number of huge pages for the default size.
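 *
 * For example, "hugepages=512" requests 512 default-sized pages, while
 * the node format "hugepages=0:256,1:256" splits the request across
 * nodes 0 and 1.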
4625 */
4626static int __init hugepages_setup(char *s)
4627{
4628 unsigned long *mhp;
4629 static unsigned long *last_mhp;
4630 int node = NUMA_NO_NODE;
4631 int count;
4632 unsigned long tmp;
4633 char *p = s;
4634
4635 if (!parsed_valid_hugepagesz) {
4636 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4637 parsed_valid_hugepagesz = true;
4638 return 1;
4639 }
4640
4641 /*
4642 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4643 * yet, so this hugepages= parameter goes to the "default hstate".
4644 * Otherwise, it goes with the previously parsed hugepagesz or
4645 * default_hugepagesz.
4646 */
4647 else if (!hugetlb_max_hstate)
4648 mhp = &default_hstate_max_huge_pages;
4649 else
4650 mhp = &parsed_hstate->max_huge_pages;
4651
4652 if (mhp == last_mhp) {
4653 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4654 return 1;
4655 }
4656
4657 while (*p) {
4658 count = 0;
4659 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4660 goto invalid;
4661 /* Parameter is node format */
4662 if (p[count] == ':') {
4663 if (!hugetlb_node_alloc_supported()) {
4664 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4665 return 1;
4666 }
4667 if (tmp >= MAX_NUMNODES || !node_online(tmp))
4668 goto invalid;
4669 node = array_index_nospec(tmp, MAX_NUMNODES);
4670 p += count + 1;
4671 /* Parse hugepages */
4672 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4673 goto invalid;
4674 if (!hugetlb_max_hstate)
4675 default_hugepages_in_node[node] = tmp;
4676 else
4677 parsed_hstate->max_huge_pages_node[node] = tmp;
4678 *mhp += tmp;
4679			/* Go on to parse the next node */
4680 if (p[count] == ',')
4681 p += count + 1;
4682 else
4683 break;
4684 } else {
4685 if (p != s)
4686 goto invalid;
4687 *mhp = tmp;
4688 break;
4689 }
4690 }
4691
4692 /*
4693 * Global state is always initialized later in hugetlb_init.
4694 * But we need to allocate gigantic hstates here early to still
4695 * use the bootmem allocator.
4696 */
4697 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4698 hugetlb_hstate_alloc_pages(parsed_hstate);
4699
4700 last_mhp = mhp;
4701
4702 return 1;
4703
4704invalid:
4705 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4706 hugepages_clear_pages_in_node();
4707 return 1;
4708}
4709__setup("hugepages=", hugepages_setup);
4710
4711/*
4712 * hugepagesz command line processing
4713 * A specific huge page size can only be specified once with hugepagesz.
4714 * hugepagesz is followed by hugepages on the command line. The global
4715 * variable 'parsed_valid_hugepagesz' is used to determine if the prior
4716 * hugepagesz argument was valid.
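 *
 * e.g. "hugepagesz=1G hugepages=4" requests four 1G pages, provided the
 * architecture supports that size.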
4717 */
4718static int __init hugepagesz_setup(char *s)
4719{
4720 unsigned long size;
4721 struct hstate *h;
4722
4723 parsed_valid_hugepagesz = false;
4724 size = (unsigned long)memparse(s, NULL);
4725
4726 if (!arch_hugetlb_valid_size(size)) {
4727 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4728 return 1;
4729 }
4730
4731 h = size_to_hstate(size);
4732 if (h) {
4733 /*
4734 * hstate for this size already exists. This is normally
4735 * an error, but is allowed if the existing hstate is the
4736 * default hstate. More specifically, it is only allowed if
4737 * the number of huge pages for the default hstate was not
4738 * previously specified.
4739 */
4740 if (!parsed_default_hugepagesz || h != &default_hstate ||
4741 default_hstate.max_huge_pages) {
4742 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4743 return 1;
4744 }
4745
4746 /*
4747 * No need to call hugetlb_add_hstate() as hstate already
4748 * exists. But, do set parsed_hstate so that a following
4749 * hugepages= parameter will be applied to this hstate.
4750 */
4751 parsed_hstate = h;
4752 parsed_valid_hugepagesz = true;
4753 return 1;
4754 }
4755
4756 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4757 parsed_valid_hugepagesz = true;
4758 return 1;
4759}
4760__setup("hugepagesz=", hugepagesz_setup);
4761
4762/*
4763 * default_hugepagesz command line input
4764 * Only one instance of default_hugepagesz allowed on command line.
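 *
 * e.g. "default_hugepagesz=1G" makes 1G pages the default size used when
 * no size is explicitly specified.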
4765 */
4766static int __init default_hugepagesz_setup(char *s)
4767{
4768 unsigned long size;
4769 int i;
4770
4771 parsed_valid_hugepagesz = false;
4772 if (parsed_default_hugepagesz) {
4773 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4774 return 1;
4775 }
4776
4777 size = (unsigned long)memparse(s, NULL);
4778
4779 if (!arch_hugetlb_valid_size(size)) {
4780 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4781 return 1;
4782 }
4783
4784 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4785 parsed_valid_hugepagesz = true;
4786 parsed_default_hugepagesz = true;
4787 default_hstate_idx = hstate_index(size_to_hstate(size));
4788
4789 /*
4790 * The number of default huge pages (for this size) could have been
4791 * specified as the first hugetlb parameter: hugepages=X. If so,
4792 * then default_hstate_max_huge_pages is set. If the default huge
4793 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4794 * allocated here from bootmem allocator.
4795 */
4796 if (default_hstate_max_huge_pages) {
4797 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4798 for_each_online_node(i)
4799 default_hstate.max_huge_pages_node[i] =
4800 default_hugepages_in_node[i];
4801 if (hstate_is_gigantic(&default_hstate))
4802 hugetlb_hstate_alloc_pages(&default_hstate);
4803 default_hstate_max_huge_pages = 0;
4804 }
4805
4806 return 1;
4807}
4808__setup("default_hugepagesz=", default_hugepagesz_setup);
4809
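/*
 * Return the nodemask of the current task's MPOL_BIND policy when it
 * should constrain hugetlb allocations, or NULL if no restriction applies.
 */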
4810static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4811{
4812#ifdef CONFIG_NUMA
4813 struct mempolicy *mpol = get_task_policy(current);
4814
4815 /*
4816 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4817	 * (from policy_nodemask) specifically for the hugetlb case.
4818 */
4819 if (mpol->mode == MPOL_BIND &&
4820 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4821 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4822 return &mpol->nodes;
4823#endif
4824 return NULL;
4825}
4826
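/*
 * Count the free huge pages on the nodes allowed by both the current
 * cpuset and any MPOL_BIND policy of the task.
 */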
4827static unsigned int allowed_mems_nr(struct hstate *h)
4828{
4829 int node;
4830 unsigned int nr = 0;
4831 nodemask_t *mbind_nodemask;
4832 unsigned int *array = h->free_huge_pages_node;
4833 gfp_t gfp_mask = htlb_alloc_mask(h);
4834
4835 mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4836 for_each_node_mask(node, cpuset_current_mems_allowed) {
4837 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4838 nr += array[node];
4839 }
4840
4841 return nr;
4842}
4843
4844#ifdef CONFIG_SYSCTL
4845static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4846 void *buffer, size_t *length,
4847 loff_t *ppos, unsigned long *out)
4848{
4849 struct ctl_table dup_table;
4850
4851 /*
4852 * In order to avoid races with __do_proc_doulongvec_minmax(), we
4853 * can duplicate the @table and alter the duplicate of it.
4854 */
4855 dup_table = *table;
4856 dup_table.data = out;
4857
4858 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4859}
4860
4861static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4862 struct ctl_table *table, int write,
4863 void *buffer, size_t *length, loff_t *ppos)
4864{
4865 struct hstate *h = &default_hstate;
4866 unsigned long tmp = h->max_huge_pages;
4867 int ret;
4868
4869 if (!hugepages_supported())
4870 return -EOPNOTSUPP;
4871
4872 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4873 &tmp);
4874 if (ret)
4875 goto out;
4876
4877 if (write)
4878 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4879 NUMA_NO_NODE, tmp, *length);
4880out:
4881 return ret;
4882}
4883
4884static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4885 void *buffer, size_t *length, loff_t *ppos)
4886{
4888 return hugetlb_sysctl_handler_common(false, table, write,
4889 buffer, length, ppos);
4890}
4891
4892#ifdef CONFIG_NUMA
4893static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4894 void *buffer, size_t *length, loff_t *ppos)
4895{
4896 return hugetlb_sysctl_handler_common(true, table, write,
4897 buffer, length, ppos);
4898}
4899#endif /* CONFIG_NUMA */
4900
4901static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4902 void *buffer, size_t *length, loff_t *ppos)
4903{
4904 struct hstate *h = &default_hstate;
4905 unsigned long tmp;
4906 int ret;
4907
4908 if (!hugepages_supported())
4909 return -EOPNOTSUPP;
4910
4911 tmp = h->nr_overcommit_huge_pages;
4912
4913 if (write && hstate_is_gigantic(h))
4914 return -EINVAL;
4915
4916 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4917 &tmp);
4918 if (ret)
4919 goto out;
4920
4921 if (write) {
4922 spin_lock_irq(&hugetlb_lock);
4923 h->nr_overcommit_huge_pages = tmp;
4924 spin_unlock_irq(&hugetlb_lock);
4925 }
4926out:
4927 return ret;
4928}
4929
4930static struct ctl_table hugetlb_table[] = {
4931 {
4932 .procname = "nr_hugepages",
4933 .data = NULL,
4934 .maxlen = sizeof(unsigned long),
4935 .mode = 0644,
4936 .proc_handler = hugetlb_sysctl_handler,
4937 },
4938#ifdef CONFIG_NUMA
4939 {
4940 .procname = "nr_hugepages_mempolicy",
4941 .data = NULL,
4942 .maxlen = sizeof(unsigned long),
4943 .mode = 0644,
4944 .proc_handler = &hugetlb_mempolicy_sysctl_handler,
4945 },
4946#endif
4947 {
4948 .procname = "hugetlb_shm_group",
4949 .data = &sysctl_hugetlb_shm_group,
4950 .maxlen = sizeof(gid_t),
4951 .mode = 0644,
4952 .proc_handler = proc_dointvec,
4953 },
4954 {
4955 .procname = "nr_overcommit_hugepages",
4956 .data = NULL,
4957 .maxlen = sizeof(unsigned long),
4958 .mode = 0644,
4959 .proc_handler = hugetlb_overcommit_handler,
4960 },
4961 { }
4962};
4963
4964static void hugetlb_sysctl_init(void)
4965{
4966 register_sysctl_init("vm", hugetlb_table);
4967}
4968#endif /* CONFIG_SYSCTL */
4969
4970void hugetlb_report_meminfo(struct seq_file *m)
4971{
4972 struct hstate *h;
4973 unsigned long total = 0;
4974
4975 if (!hugepages_supported())
4976 return;
4977
4978 for_each_hstate(h) {
4979 unsigned long count = h->nr_huge_pages;
4980
4981 total += huge_page_size(h) * count;
4982
4983 if (h == &default_hstate)
4984 seq_printf(m,
4985 "HugePages_Total: %5lu\n"
4986 "HugePages_Free: %5lu\n"
4987 "HugePages_Rsvd: %5lu\n"
4988 "HugePages_Surp: %5lu\n"
4989 "Hugepagesize: %8lu kB\n",
4990 count,
4991 h->free_huge_pages,
4992 h->resv_huge_pages,
4993 h->surplus_huge_pages,
4994 huge_page_size(h) / SZ_1K);
4995 }
4996
4997 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
4998}
4999
5000int hugetlb_report_node_meminfo(char *buf, int len, int nid)
5001{
5002 struct hstate *h = &default_hstate;
5003
5004 if (!hugepages_supported())
5005 return 0;
5006
5007 return sysfs_emit_at(buf, len,
5008 "Node %d HugePages_Total: %5u\n"
5009 "Node %d HugePages_Free: %5u\n"
5010 "Node %d HugePages_Surp: %5u\n",
5011 nid, h->nr_huge_pages_node[nid],
5012 nid, h->free_huge_pages_node[nid],
5013 nid, h->surplus_huge_pages_node[nid]);
5014}
5015
5016void hugetlb_show_meminfo_node(int nid)
5017{
5018 struct hstate *h;
5019
5020 if (!hugepages_supported())
5021 return;
5022
5023 for_each_hstate(h)
5024 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
5025 nid,
5026 h->nr_huge_pages_node[nid],
5027 h->free_huge_pages_node[nid],
5028 h->surplus_huge_pages_node[nid],
5029 huge_page_size(h) / SZ_1K);
5030}
5031
5032void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
5033{
5034 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
5035 K(atomic_long_read(&mm->hugetlb_usage)));
5036}
5037
5038/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
5039unsigned long hugetlb_total_pages(void)
5040{
5041 struct hstate *h;
5042 unsigned long nr_total_pages = 0;
5043
5044 for_each_hstate(h)
5045 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
5046 return nr_total_pages;
5047}
5048
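/*
 * Adjust the persistent reservation count by @delta huge pages, gathering
 * surplus pages from the buddy allocator when the pool must grow.
 * Returns 0 on success and -ENOMEM otherwise.
 */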
5049static int hugetlb_acct_memory(struct hstate *h, long delta)
5050{
5051 int ret = -ENOMEM;
5052
5053 if (!delta)
5054 return 0;
5055
5056 spin_lock_irq(&hugetlb_lock);
5057 /*
5058 * When cpuset is configured, it breaks the strict hugetlb page
5059 * reservation as the accounting is done on a global variable. Such
5060 * reservation is completely rubbish in the presence of cpuset because
5061 * the reservation is not checked against page availability for the
5062	 * current cpuset. An application can still be OOM'ed by the kernel
5063	 * for lack of free hugetlb pages in the cpuset that the task is in.
5064	 * Attempting to enforce strict accounting with cpuset is almost
5065	 * impossible (or too ugly) because cpuset is so fluid that a
5066	 * task or memory node can be dynamically moved between cpusets.
5067 *
5068 * The change of semantics for shared hugetlb mapping with cpuset is
5069 * undesirable. However, in order to preserve some of the semantics,
5070 * we fall back to check against current free page availability as
5071 * a best attempt and hopefully to minimize the impact of changing
5072 * semantics that cpuset has.
5073 *
5074	 * Apart from cpuset, the memory policy mechanism also determines
5075	 * from which node the kernel will allocate memory in a NUMA
5076	 * system. So, similarly to cpuset, we should also consider the
5077	 * memory policy of the current task, for the same reasons
5078	 * described above.
5079 */
5080 if (delta > 0) {
5081 if (gather_surplus_pages(h, delta) < 0)
5082 goto out;
5083
5084 if (delta > allowed_mems_nr(h)) {
5085 return_unused_surplus_pages(h, delta);
5086 goto out;
5087 }
5088 }
5089
5090 ret = 0;
5091 if (delta < 0)
5092 return_unused_surplus_pages(h, (unsigned long) -delta);
5093
5094out:
5095 spin_unlock_irq(&hugetlb_lock);
5096 return ret;
5097}
5098
5099static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5100{
5101 struct resv_map *resv = vma_resv_map(vma);
5102
5103 /*
5104 * HPAGE_RESV_OWNER indicates a private mapping.
5105	 * This new VMA should share its sibling's reservation map if present.
5106 * The VMA will only ever have a valid reservation map pointer where
5107 * it is being copied for another still existing VMA. As that VMA
5108 * has a reference to the reservation map it cannot disappear until
5109 * after this open call completes. It is therefore safe to take a
5110 * new reference here without additional locking.
5111 */
5112 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5113 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
5114 kref_get(&resv->refs);
5115 }
5116
5117 /*
5118 * vma_lock structure for sharable mappings is vma specific.
5119 * Clear old pointer (if copied via vm_area_dup) and allocate
5120 * new structure. Before clearing, make sure vma_lock is not
5121 * for this vma.
5122 */
5123 if (vma->vm_flags & VM_MAYSHARE) {
5124 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5125
5126 if (vma_lock) {
5127 if (vma_lock->vma != vma) {
5128 vma->vm_private_data = NULL;
5129 hugetlb_vma_lock_alloc(vma);
5130 } else
5131 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5132 } else
5133 hugetlb_vma_lock_alloc(vma);
5134 }
5135}
5136
5137static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5138{
5139 struct hstate *h = hstate_vma(vma);
5140 struct resv_map *resv;
5141 struct hugepage_subpool *spool = subpool_vma(vma);
5142 unsigned long reserve, start, end;
5143 long gbl_reserve;
5144
5145 hugetlb_vma_lock_free(vma);
5146
5147 resv = vma_resv_map(vma);
5148 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5149 return;
5150
5151 start = vma_hugecache_offset(h, vma, vma->vm_start);
5152 end = vma_hugecache_offset(h, vma, vma->vm_end);
5153
5154 reserve = (end - start) - region_count(resv, start, end);
5155 hugetlb_cgroup_uncharge_counter(resv, start, end);
5156 if (reserve) {
5157 /*
5158 * Decrement reserve counts. The global reserve count may be
5159 * adjusted if the subpool has a minimum size.
5160 */
5161 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5162 hugetlb_acct_memory(h, -gbl_reserve);
5163 }
5164
5165 kref_put(&resv->refs, resv_map_release);
5166}
5167
5168static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5169{
5170 if (addr & ~(huge_page_mask(hstate_vma(vma))))
5171 return -EINVAL;
5172
5173 /*
5174 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5175 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5176 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5177 */
5178 if (addr & ~PUD_MASK) {
5179 /*
5180 * hugetlb_vm_op_split is called right before we attempt to
5181 * split the VMA. We will need to unshare PMDs in the old and
5182 * new VMAs, so let's unshare before we split.
5183 */
5184 unsigned long floor = addr & PUD_MASK;
5185 unsigned long ceil = floor + PUD_SIZE;
5186
5187 if (floor >= vma->vm_start && ceil <= vma->vm_end)
5188 hugetlb_unshare_pmds(vma, floor, ceil);
5189 }
5190
5191 return 0;
5192}
5193
5194static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5195{
5196 return huge_page_size(hstate_vma(vma));
5197}
5198
5199/*
5200 * We cannot handle pagefaults against hugetlb pages at all. They cause
5201 * handle_mm_fault() to try to instantiate regular-sized pages in the
5202 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
5203 * this far.
5204 */
5205static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
5206{
5207 BUG();
5208 return 0;
5209}
5210
5211/*
5212 * When a new function is introduced to vm_operations_struct and added
5213 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
5214 * This is because under System V memory model, mappings created via
5215 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
5216 * their original vm_ops are overwritten with shm_vm_ops.
5217 */
5218const struct vm_operations_struct hugetlb_vm_ops = {
5219 .fault = hugetlb_vm_op_fault,
5220 .open = hugetlb_vm_op_open,
5221 .close = hugetlb_vm_op_close,
5222 .may_split = hugetlb_vm_op_split,
5223 .pagesize = hugetlb_vm_op_pagesize,
5224};
5225
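/*
 * Build a huge pte for @page: writable mappings are made dirty and
 * writable up front, read-only ones are write-protected, and
 * arch_make_huge_pte() applies any architecture-specific encoding.
 */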
5226static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
5227 int writable)
5228{
5229 pte_t entry;
5230 unsigned int shift = huge_page_shift(hstate_vma(vma));
5231
5232 if (writable) {
5233 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
5234 vma->vm_page_prot)));
5235 } else {
5236 entry = huge_pte_wrprotect(mk_huge_pte(page,
5237 vma->vm_page_prot));
5238 }
5239 entry = pte_mkyoung(entry);
5240 entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
5241
5242 return entry;
5243}
5244
5245static void set_huge_ptep_writable(struct vm_area_struct *vma,
5246 unsigned long address, pte_t *ptep)
5247{
5248 pte_t entry;
5249
5250 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
5251 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
5252 update_mmu_cache(vma, address, ptep);
5253}
5254
5255bool is_hugetlb_entry_migration(pte_t pte)
5256{
5257 swp_entry_t swp;
5258
5259 if (huge_pte_none(pte) || pte_present(pte))
5260 return false;
5261 swp = pte_to_swp_entry(pte);
5262 if (is_migration_entry(swp))
5263 return true;
5264 else
5265 return false;
5266}
5267
5268bool is_hugetlb_entry_hwpoisoned(pte_t pte)
5269{
5270 swp_entry_t swp;
5271
5272 if (huge_pte_none(pte) || pte_present(pte))
5273 return false;
5274 swp = pte_to_swp_entry(pte);
5275 if (is_hwpoison_entry(swp))
5276 return true;
5277 else
5278 return false;
5279}
5280
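/*
 * Install @new_folio at @ptep as a writable anonymous mapping of @addr,
 * carrying over the uffd-wp bit from @old when required, and bump the
 * mm's hugetlb page count.
 */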
5281static void
5282hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
5283 struct folio *new_folio, pte_t old, unsigned long sz)
5284{
5285 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
5286
5287 __folio_mark_uptodate(new_folio);
5288 hugetlb_add_new_anon_rmap(new_folio, vma, addr);
5289 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
5290 newpte = huge_pte_mkuffd_wp(newpte);
5291 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
5292 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
5293 folio_set_hugetlb_migratable(new_folio);
5294}
5295
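/*
 * Copy src_vma's huge ptes into dst at fork time. Shared page tables are
 * left alone; anon pages whose rmap cannot be duplicated (e.g. pinned
 * pages) are copied into freshly allocated huge pages instead.
 */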
5296int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
5297 struct vm_area_struct *dst_vma,
5298 struct vm_area_struct *src_vma)
5299{
5300 pte_t *src_pte, *dst_pte, entry;
5301 struct folio *pte_folio;
5302 unsigned long addr;
5303 bool cow = is_cow_mapping(src_vma->vm_flags);
5304 struct hstate *h = hstate_vma(src_vma);
5305 unsigned long sz = huge_page_size(h);
5306 unsigned long npages = pages_per_huge_page(h);
5307 struct mmu_notifier_range range;
5308 unsigned long last_addr_mask;
5309 int ret = 0;
5310
5311 if (cow) {
5312 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5313 src_vma->vm_start,
5314 src_vma->vm_end);
5315 mmu_notifier_invalidate_range_start(&range);
5316 vma_assert_write_locked(src_vma);
5317 raw_write_seqcount_begin(&src->write_protect_seq);
5318 } else {
5319 /*
5320 * For shared mappings the vma lock must be held before
5321 * calling hugetlb_walk() in the src vma. Otherwise, the
5322 * returned ptep could go away if part of a shared pmd and
5323 * another thread calls huge_pmd_unshare.
5324 */
5325 hugetlb_vma_lock_read(src_vma);
5326 }
5327
5328 last_addr_mask = hugetlb_mask_last_page(h);
5329 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5330 spinlock_t *src_ptl, *dst_ptl;
5331 src_pte = hugetlb_walk(src_vma, addr, sz);
5332 if (!src_pte) {
5333 addr |= last_addr_mask;
5334 continue;
5335 }
5336 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5337 if (!dst_pte) {
5338 ret = -ENOMEM;
5339 break;
5340 }
5341
5342 /*
5343		 * If the pagetables are shared, don't copy or take references.
5344 *
5345 * dst_pte == src_pte is the common case of src/dest sharing.
5346 * However, src could have 'unshared' and dst shares with
5347 * another vma. So page_count of ptep page is checked instead
5348 * to reliably determine whether pte is shared.
5349 */
5350 if (page_count(virt_to_page(dst_pte)) > 1) {
5351 addr |= last_addr_mask;
5352 continue;
5353 }
5354
5355 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5356 src_ptl = huge_pte_lockptr(h, src, src_pte);
5357 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5358 entry = huge_ptep_get(src_pte);
5359again:
5360 if (huge_pte_none(entry)) {
5361 /*
5362 * Skip if src entry none.
5363 */
5364 ;
5365 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5366 if (!userfaultfd_wp(dst_vma))
5367 entry = huge_pte_clear_uffd_wp(entry);
5368 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5369 } else if (unlikely(is_hugetlb_entry_migration(entry))) {
5370 swp_entry_t swp_entry = pte_to_swp_entry(entry);
5371 bool uffd_wp = pte_swp_uffd_wp(entry);
5372
5373 if (!is_readable_migration_entry(swp_entry) && cow) {
5374 /*
5375 * COW mappings require pages in both
5376 * parent and child to be set to read.
5377 */
5378 swp_entry = make_readable_migration_entry(
5379 swp_offset(swp_entry));
5380 entry = swp_entry_to_pte(swp_entry);
5381 if (userfaultfd_wp(src_vma) && uffd_wp)
5382 entry = pte_swp_mkuffd_wp(entry);
5383 set_huge_pte_at(src, addr, src_pte, entry, sz);
5384 }
5385 if (!userfaultfd_wp(dst_vma))
5386 entry = huge_pte_clear_uffd_wp(entry);
5387 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5388 } else if (unlikely(is_pte_marker(entry))) {
5389 pte_marker marker = copy_pte_marker(
5390 pte_to_swp_entry(entry), dst_vma);
5391
5392 if (marker)
5393 set_huge_pte_at(dst, addr, dst_pte,
5394 make_pte_marker(marker), sz);
5395 } else {
5396 entry = huge_ptep_get(src_pte);
5397 pte_folio = page_folio(pte_page(entry));
5398 folio_get(pte_folio);
5399
5400 /*
5401 * Failing to duplicate the anon rmap is a rare case
5402 * where we see pinned hugetlb pages while they're
5403 * prone to COW. We need to do the COW earlier during
5404 * fork.
5405 *
5406 * When pre-allocating the page or copying data, we
5407 * need to be without the pgtable locks since we could
5408 * sleep during the process.
5409 */
5410 if (!folio_test_anon(pte_folio)) {
5411 hugetlb_add_file_rmap(pte_folio);
5412 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
5413 pte_t src_pte_old = entry;
5414 struct folio *new_folio;
5415
5416 spin_unlock(src_ptl);
5417 spin_unlock(dst_ptl);
5418				/* Do not use the reserve as it's privately owned */
5419 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5420 if (IS_ERR(new_folio)) {
5421 folio_put(pte_folio);
5422 ret = PTR_ERR(new_folio);
5423 break;
5424 }
5425 ret = copy_user_large_folio(new_folio,
5426 pte_folio,
5427 addr, dst_vma);
5428 folio_put(pte_folio);
5429 if (ret) {
5430 folio_put(new_folio);
5431 break;
5432 }
5433
5434 /* Install the new hugetlb folio if src pte stable */
5435 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5436 src_ptl = huge_pte_lockptr(h, src, src_pte);
5437 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5438 entry = huge_ptep_get(src_pte);
5439 if (!pte_same(src_pte_old, entry)) {
5440 restore_reserve_on_error(h, dst_vma, addr,
5441 new_folio);
5442 folio_put(new_folio);
5443 /* huge_ptep of dst_pte won't change as in child */
5444 goto again;
5445 }
5446 hugetlb_install_folio(dst_vma, dst_pte, addr,
5447 new_folio, src_pte_old, sz);
5448 spin_unlock(src_ptl);
5449 spin_unlock(dst_ptl);
5450 continue;
5451 }
5452
5453 if (cow) {
5454 /*
5455 * No need to notify as we are downgrading page
5456 * table protection not changing it to point
5457 * to a new page.
5458 *
5459 * See Documentation/mm/mmu_notifier.rst
5460 */
5461 huge_ptep_set_wrprotect(src, addr, src_pte);
5462 entry = huge_pte_wrprotect(entry);
5463 }
5464
5465 if (!userfaultfd_wp(dst_vma))
5466 entry = huge_pte_clear_uffd_wp(entry);
5467
5468 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5469 hugetlb_count_add(npages, dst);
5470 }
5471 spin_unlock(src_ptl);
5472 spin_unlock(dst_ptl);
5473 }
5474
5475 if (cow) {
5476 raw_write_seqcount_end(&src->write_protect_seq);
5477 mmu_notifier_invalidate_range_end(&range);
5478 } else {
5479 hugetlb_vma_unlock_read(src_vma);
5480 }
5481
5482 return ret;
5483}
5484
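/*
 * Move a single huge pte from old_addr to new_addr, taking both page
 * table locks (nested) when source and destination differ.
 */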
5485static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5486 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5487 unsigned long sz)
5488{
5489 struct hstate *h = hstate_vma(vma);
5490 struct mm_struct *mm = vma->vm_mm;
5491 spinlock_t *src_ptl, *dst_ptl;
5492 pte_t pte;
5493
5494 dst_ptl = huge_pte_lock(h, mm, dst_pte);
5495 src_ptl = huge_pte_lockptr(h, mm, src_pte);
5496
5497 /*
5498 * We don't have to worry about the ordering of src and dst ptlocks
5499 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5500 */
5501 if (src_ptl != dst_ptl)
5502 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5503
5504 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
5505 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5506
5507 if (src_ptl != dst_ptl)
5508 spin_unlock(src_ptl);
5509 spin_unlock(dst_ptl);
5510}
5511
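/*
 * Relocate the huge page table entries of @vma for mremap(), unsharing
 * PMDs that are in the way. Returns the number of bytes moved.
 */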
5512int move_hugetlb_page_tables(struct vm_area_struct *vma,
5513 struct vm_area_struct *new_vma,
5514 unsigned long old_addr, unsigned long new_addr,
5515 unsigned long len)
5516{
5517 struct hstate *h = hstate_vma(vma);
5518 struct address_space *mapping = vma->vm_file->f_mapping;
5519 unsigned long sz = huge_page_size(h);
5520 struct mm_struct *mm = vma->vm_mm;
5521 unsigned long old_end = old_addr + len;
5522 unsigned long last_addr_mask;
5523 pte_t *src_pte, *dst_pte;
5524 struct mmu_notifier_range range;
5525 bool shared_pmd = false;
5526
5527 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5528 old_end);
5529 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5530 /*
5531 * In case of shared PMDs, we should cover the maximum possible
5532 * range.
5533 */
5534 flush_cache_range(vma, range.start, range.end);
5535
5536 mmu_notifier_invalidate_range_start(&range);
5537 last_addr_mask = hugetlb_mask_last_page(h);
5538 /* Prevent race with file truncation */
5539 hugetlb_vma_lock_write(vma);
5540 i_mmap_lock_write(mapping);
5541 for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5542 src_pte = hugetlb_walk(vma, old_addr, sz);
5543 if (!src_pte) {
5544 old_addr |= last_addr_mask;
5545 new_addr |= last_addr_mask;
5546 continue;
5547 }
5548 if (huge_pte_none(huge_ptep_get(src_pte)))
5549 continue;
5550
5551 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5552 shared_pmd = true;
5553 old_addr |= last_addr_mask;
5554 new_addr |= last_addr_mask;
5555 continue;
5556 }
5557
5558 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5559 if (!dst_pte)
5560 break;
5561
5562 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5563 }
5564
5565 if (shared_pmd)
5566 flush_hugetlb_tlb_range(vma, range.start, range.end);
5567 else
5568 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5569 mmu_notifier_invalidate_range_end(&range);
5570 i_mmap_unlock_write(mapping);
5571 hugetlb_vma_unlock_write(vma);
5572
5573 return len + old_addr - old_end;
5574}
5575
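/*
 * Core huge page zap loop: clear all ptes in [start, end), unsharing
 * shared PMDs along the way. If @ref_page is supplied, only that page is
 * unmapped and the walk stops once it has been found.
 */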
5576void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5577 unsigned long start, unsigned long end,
5578 struct page *ref_page, zap_flags_t zap_flags)
5579{
5580 struct mm_struct *mm = vma->vm_mm;
5581 unsigned long address;
5582 pte_t *ptep;
5583 pte_t pte;
5584 spinlock_t *ptl;
5585 struct page *page;
5586 struct hstate *h = hstate_vma(vma);
5587 unsigned long sz = huge_page_size(h);
5588 unsigned long last_addr_mask;
5589 bool force_flush = false;
5590
5591 WARN_ON(!is_vm_hugetlb_page(vma));
5592 BUG_ON(start & ~huge_page_mask(h));
5593 BUG_ON(end & ~huge_page_mask(h));
5594
5595 /*
5596	 * This is a hugetlb vma; all the pte entries should point
5597	 * to huge pages.
5598 */
5599 tlb_change_page_size(tlb, sz);
5600 tlb_start_vma(tlb, vma);
5601
5602 last_addr_mask = hugetlb_mask_last_page(h);
5603 address = start;
5604 for (; address < end; address += sz) {
5605 ptep = hugetlb_walk(vma, address, sz);
5606 if (!ptep) {
5607 address |= last_addr_mask;
5608 continue;
5609 }
5610
5611 ptl = huge_pte_lock(h, mm, ptep);
5612 if (huge_pmd_unshare(mm, vma, address, ptep)) {
5613 spin_unlock(ptl);
5614 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5615 force_flush = true;
5616 address |= last_addr_mask;
5617 continue;
5618 }
5619
5620 pte = huge_ptep_get(ptep);
5621 if (huge_pte_none(pte)) {
5622 spin_unlock(ptl);
5623 continue;
5624 }
5625
5626 /*
5627 * Migrating hugepage or HWPoisoned hugepage is already
5628 * unmapped and its refcount is dropped, so just clear pte here.
5629 */
5630 if (unlikely(!pte_present(pte))) {
5631 /*
5632 * If the pte was wr-protected by uffd-wp in any of the
5633			 * swap forms, and the caller does not want to
5634 * drop the uffd-wp bit in this zap, then replace the
5635 * pte with a marker.
5636 */
5637 if (pte_swp_uffd_wp_any(pte) &&
5638 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5639 set_huge_pte_at(mm, address, ptep,
5640 make_pte_marker(PTE_MARKER_UFFD_WP),
5641 sz);
5642 else
5643 huge_pte_clear(mm, address, ptep, sz);
5644 spin_unlock(ptl);
5645 continue;
5646 }
5647
5648 page = pte_page(pte);
5649 /*
5650 * If a reference page is supplied, it is because a specific
5651 * page is being unmapped, not a range. Ensure the page we
5652 * are about to unmap is the actual page of interest.
5653 */
5654 if (ref_page) {
5655 if (page != ref_page) {
5656 spin_unlock(ptl);
5657 continue;
5658 }
5659 /*
5660 * Mark the VMA as having unmapped its page so that
5661 * future faults in this VMA will fail rather than
5662			 * looking like data was lost.
5663 */
5664 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5665 }
5666
5667 pte = huge_ptep_get_and_clear(mm, address, ptep);
5668 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5669 if (huge_pte_dirty(pte))
5670 set_page_dirty(page);
5671 /* Leave a uffd-wp pte marker if needed */
5672 if (huge_pte_uffd_wp(pte) &&
5673 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5674 set_huge_pte_at(mm, address, ptep,
5675 make_pte_marker(PTE_MARKER_UFFD_WP),
5676 sz);
5677 hugetlb_count_sub(pages_per_huge_page(h), mm);
5678 hugetlb_remove_rmap(page_folio(page));
5679
5680 spin_unlock(ptl);
5681 tlb_remove_page_size(tlb, page, huge_page_size(h));
5682 /*
5683 * Bail out after unmapping reference page if supplied
5684 */
5685 if (ref_page)
5686 break;
5687 }
5688 tlb_end_vma(tlb, vma);
5689
5690 /*
5691 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5692 * could defer the flush until now, since by holding i_mmap_rwsem we
5693	 * guaranteed that the last reference would not be dropped. But we must
5694 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5695 * dropped and the last reference to the shared PMDs page might be
5696 * dropped as well.
5697 *
5698 * In theory we could defer the freeing of the PMD pages as well, but
5699 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5700 * detect sharing, so we cannot defer the release of the page either.
5701 * Instead, do flush now.
5702 */
5703 if (force_flush)
5704 tlb_flush_mmu_tlbonly(tlb);
5705}
5706
5707void __hugetlb_zap_begin(struct vm_area_struct *vma,
5708 unsigned long *start, unsigned long *end)
5709{
5710 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5711 return;
5712
5713 adjust_range_if_pmd_sharing_possible(vma, start, end);
5714 hugetlb_vma_lock_write(vma);
5715 if (vma->vm_file)
5716 i_mmap_lock_write(vma->vm_file->f_mapping);
5717}
5718
5719void __hugetlb_zap_end(struct vm_area_struct *vma,
5720 struct zap_details *details)
5721{
5722 zap_flags_t zap_flags = details ? details->zap_flags : 0;
5723
5724 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5725 return;
5726
5727 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5728 /*
5729 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5730 * When the vma_lock is freed, this makes the vma ineligible
5731 * for pmd sharing. And, i_mmap_rwsem is required to set up
5732 * pmd sharing. This is important as page tables for this
5733		 * unmapped range will be asynchronously deleted. If the page
5734 * tables are shared, there will be issues when accessed by
5735 * someone else.
5736 */
5737 __hugetlb_vma_unlock_write_free(vma);
5738 } else {
5739 hugetlb_vma_unlock_write(vma);
5740 }
5741
5742 if (vma->vm_file)
5743 i_mmap_unlock_write(vma->vm_file->f_mapping);
5744}
5745
5746void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5747 unsigned long end, struct page *ref_page,
5748 zap_flags_t zap_flags)
5749{
5750 struct mmu_notifier_range range;
5751 struct mmu_gather tlb;
5752
5753 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5754 start, end);
5755 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5756 mmu_notifier_invalidate_range_start(&range);
5757 tlb_gather_mmu(&tlb, vma->vm_mm);
5758
5759 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5760
5761 mmu_notifier_invalidate_range_end(&range);
5762 tlb_finish_mmu(&tlb);
5763}
5764
5765/*
5766 * This is called when the original mapper fails to COW a MAP_PRIVATE
5767 * mapping for which it owns the reserve page. The intention is to unmap the page
5768 * from other VMAs and let the children be SIGKILLed if they are faulting the
5769 * same region.
5770 */
5771static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5772 struct page *page, unsigned long address)
5773{
5774 struct hstate *h = hstate_vma(vma);
5775 struct vm_area_struct *iter_vma;
5776 struct address_space *mapping;
5777 pgoff_t pgoff;
5778
5779 /*
5780 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5781 * from page cache lookup which is in HPAGE_SIZE units.
5782 */
5783 address = address & huge_page_mask(h);
5784 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5785 vma->vm_pgoff;
5786 mapping = vma->vm_file->f_mapping;
5787
5788 /*
5789 * Take the mapping lock for the duration of the table walk. As
5790 * this mapping should be shared between all the VMAs,
5791	 * __unmap_hugepage_range() is called with the lock already held.
5792 */
5793 i_mmap_lock_write(mapping);
5794 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5795 /* Do not unmap the current VMA */
5796 if (iter_vma == vma)
5797 continue;
5798
5799 /*
5800 * Shared VMAs have their own reserves and do not affect
5801 * MAP_PRIVATE accounting but it is possible that a shared
5802 * VMA is using the same page so check and skip such VMAs.
5803 */
5804 if (iter_vma->vm_flags & VM_MAYSHARE)
5805 continue;
5806
5807 /*
5808 * Unmap the page from other VMAs without their own reserves.
5809 * They get marked to be SIGKILLed if they fault in these
5810 * areas. This is because a future no-page fault on this VMA
5811 * could insert a zeroed page instead of the data existing
5812		 * from the time of fork. This would look like data corruption.
5813 */
5814 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5815 unmap_hugepage_range(iter_vma, address,
5816 address + huge_page_size(h), page, 0);
5817 }
5818 i_mmap_unlock_write(mapping);
5819}

/*
 * hugetlb_wp() should be called with page lock of the original hugepage held.
 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
		       unsigned long address, pte_t *ptep, unsigned int flags,
		       struct folio *pagecache_folio, spinlock_t *ptl)
{
	const bool unshare = flags & FAULT_FLAG_UNSHARE;
	pte_t pte = huge_ptep_get(ptep);
	struct hstate *h = hstate_vma(vma);
	struct folio *old_folio;
	struct folio *new_folio;
	int outside_reserve = 0;
	vm_fault_t ret = 0;
	unsigned long haddr = address & huge_page_mask(h);
	struct mmu_notifier_range range;

	/*
	 * Never handle CoW for uffd-wp protected pages. It should be only
	 * handled when the uffd-wp protection is removed.
	 *
	 * Note that only the CoW optimization path (in hugetlb_no_page())
	 * can trigger this, because hugetlb_fault() will always resolve
	 * uffd-wp bit first.
	 */
	if (!unshare && huge_pte_uffd_wp(pte))
		return 0;

	/*
	 * hugetlb does not support FOLL_FORCE-style write faults that keep the
	 * PTE mapped R/O such as maybe_mkwrite() would do.
	 */
	if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
		return VM_FAULT_SIGSEGV;

	/* Let's take out MAP_SHARED mappings first. */
	if (vma->vm_flags & VM_MAYSHARE) {
		set_huge_ptep_writable(vma, haddr, ptep);
		return 0;
	}

	old_folio = page_folio(pte_page(pte));

	delayacct_wpcopy_start();

retry_avoidcopy:
	/*
	 * If no-one else is actually using this page, we're the exclusive
	 * owner and can reuse this page.
	 */
	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
		if (!PageAnonExclusive(&old_folio->page)) {
			folio_move_anon_rmap(old_folio, vma);
			SetPageAnonExclusive(&old_folio->page);
		}
		if (likely(!unshare))
			set_huge_ptep_writable(vma, haddr, ptep);

		delayacct_wpcopy_end();
		return 0;
	}
	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
		       PageAnonExclusive(&old_folio->page), &old_folio->page);

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_folio != pagecache_folio)
		outside_reserve = 1;

	folio_get(old_folio);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);

	if (IS_ERR(new_folio)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			struct address_space *mapping = vma->vm_file->f_mapping;
			pgoff_t idx;
			u32 hash;

			folio_put(old_folio);
			/*
			 * Drop hugetlb_fault_mutex and vma_lock before
			 * unmapping. unmapping needs to hold vma_lock
			 * in write mode. Dropping vma_lock in read mode
			 * here is OK as COW mappings do not interact with
			 * PMD sharing.
			 *
			 * Reacquire both after unmap operation.
			 */
			idx = vma_hugecache_offset(h, vma, haddr);
			hash = hugetlb_fault_mutex_hash(mapping, idx);
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);

			unmap_ref_private(mm, vma, &old_folio->page, haddr);

			mutex_lock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_vma_lock_read(vma);
			spin_lock(ptl);
			ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * A race occurred while re-acquiring the page table
			 * lock, and our job is done.
			 */
			delayacct_wpcopy_end();
			return 0;
		}

		ret = vmf_error(PTR_ERR(new_folio));
		goto out_release_old;
	}

	/*
	 * When the original hugepage is a shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
		ret = VM_FAULT_HWPOISON_LARGE;
		goto out_release_all;
	}
	__folio_mark_uptodate(new_folio);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
				haddr + huge_page_size(h));
	mmu_notifier_invalidate_range_start(&range);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered.
	 */
	spin_lock(ptl);
	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);

		/* Break COW or unshare */
		huge_ptep_clear_flush(vma, haddr, ptep);
		hugetlb_remove_rmap(old_folio);
		hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
		if (huge_pte_uffd_wp(pte))
			newpte = huge_pte_mkuffd_wp(newpte);
		set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
		folio_set_hugetlb_migratable(new_folio);
		/* Make the old page be freed below */
		new_folio = old_folio;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(&range);
out_release_all:
	/*
	 * No restore in case of successful pagetable update (Break COW or
	 * unshare)
	 */
	if (new_folio != old_folio)
		restore_reserve_on_error(h, vma, haddr, new_folio);
	folio_put(new_folio);
out_release_old:
	folio_put(old_folio);

	spin_lock(ptl);	/* Caller expects lock to be held */

	delayacct_wpcopy_end();
	return ret;
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = linear_page_index(vma, address);
	struct folio *folio;

	folio = filemap_get_folio(mapping, idx);
	if (IS_ERR(folio))
		return false;
	folio_put(folio);
	return true;
}

int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err;

	idx <<= huge_page_order(h);
	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);

	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}
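
/*
 * Illustrative sketch, not part of the original flow: the hugetlbfs page
 * cache is indexed in base-page units, which is why the hugetlb index
 * above is scaled by huge_page_order(h).  Assuming a 2 MiB hstate on a
 * 4 KiB base-page system (huge_page_order(h) == 9), the third huge page
 * of a file is stored at:
 *
 *	pgoff_t idx = 3;
 *	idx <<= huge_page_order(h);	// 3 << 9 == 1536
 *
 * which keeps hugetlb folios consistent with linear_page_index() as used
 * by hugetlbfs_pagecache_present() above.
 */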

static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
						  struct address_space *mapping,
						  pgoff_t idx,
						  unsigned int flags,
						  unsigned long haddr,
						  unsigned long addr,
						  unsigned long reason)
{
	u32 hash;
	struct vm_fault vmf = {
		.vma = vma,
		.address = haddr,
		.real_address = addr,
		.flags = flags,

		/*
		 * Hard to debug if it ends up being
		 * used by a callee that assumes
		 * something about the other
		 * uninitialized fields... same as in
		 * memory.c
		 */
	};

	/*
	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
	 * userfault. Also, mmap_lock could be dropped due to handling
	 * userfault, so any vma operation from here on must be careful.
	 */
	hugetlb_vma_unlock_read(vma);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return handle_userfault(&vmf, reason);
}

/*
 * Recheck pte with pgtable lock. Returns true if pte didn't change, or
 * false if pte changed or is changing.
 */
static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
			       pte_t *ptep, pte_t old_pte)
{
	spinlock_t *ptl;
	bool same;

	ptl = huge_pte_lock(h, mm, ptep);
	same = pte_same(huge_ptep_get(ptep), old_pte);
	spin_unlock(ptl);

	return same;
}
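
/*
 * Illustrative sketch, not part of the original flow: the pattern this
 * helper supports is "sample the pte locklessly, do slow work, then
 * revalidate under the lock before acting on the stale value":
 *
 *	pte_t old_pte = huge_ptep_get(ptep);	// sampled without ptl
 *	...potentially sleeping slow path (allocation, userfault)...
 *	if (!hugetlb_pte_stable(h, mm, ptep, old_pte))
 *		return 0;	// pte changed under us; let the next fault retry
 *
 * hugetlb_no_page() below uses exactly this shape before handing off to
 * userfaultfd or reporting an allocation failure.
 */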

static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			struct address_space *mapping, pgoff_t idx,
			unsigned long address, pte_t *ptep,
			pte_t old_pte, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct folio *folio;
	pte_t new_pte;
	spinlock_t *ptl;
	unsigned long haddr = address & huge_page_mask(h);
	bool new_folio, new_pagecache_folio = false;
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW/unsharing. Warn that such a situation has occurred as it may not
	 * be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		goto out;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	new_folio = false;
	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
	if (IS_ERR(folio)) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		/* Check for page in userfault range */
		if (userfaultfd_missing(vma)) {
			/*
			 * Since hugetlb_no_page() was examining the pte
			 * without the pgtable lock, we need to re-test under
			 * the lock because the pte may not be stable and could
			 * have changed from under us. Try to detect
			 * either changed or during-changing ptes and retry
			 * properly when needed.
			 *
			 * Note that userfaultfd is actually fine with
			 * false positives (e.g. caused by the pte changing),
			 * but not wrong logical events (e.g. caused by
			 * reading a pte during a change). The latter can
			 * confuse userspace, so strictness is very
			 * much preferred. E.g., a MISSING event should
			 * never happen on a page after UFFDIO_COPY has
			 * correctly installed the page and returned.
			 */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}

			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MISSING);
		}

		folio = alloc_hugetlb_folio(vma, haddr, 0);
		if (IS_ERR(folio)) {
			/*
			 * Returning an error will result in the faulting task
			 * being sent SIGBUS. The hugetlb fault mutex prevents
			 * two tasks from racing to fault in the same page,
			 * which could result in spurious "unable to allocate"
			 * errors. Page migration does not take the fault
			 * mutex, but does a clear then write of ptes under the
			 * page table lock. Page fault code could race with
			 * migration, notice the cleared pte and try to
			 * allocate a page here. Before returning an error,
			 * take ptl and make sure there really is no pte entry.
			 */
			if (hugetlb_pte_stable(h, mm, ptep, old_pte))
				ret = vmf_error(PTR_ERR(folio));
			else
				ret = 0;
			goto out;
		}
		clear_huge_page(&folio->page, address, pages_per_huge_page(h));
		__folio_mark_uptodate(folio);
		new_folio = true;

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = hugetlb_add_to_page_cache(folio, mapping, idx);
			if (err) {
				/*
				 * err can't be -EEXIST, which would imply
				 * someone else consumed the reservation, since
				 * the hugetlb fault mutex is held while adding
				 * a hugetlb page to the page cache. So it's
				 * safe to call restore_reserve_on_error() here.
				 */
				restore_reserve_on_error(h, vma, haddr, folio);
				folio_put(folio);
				goto out;
			}
			new_pagecache_folio = true;
		} else {
			folio_lock(folio);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurred between mmap() and fault time,
		 * some processes may not have a hwpoisoned swap entry for the
		 * errored virtual address. So we need to block hugepage
		 * faults with a PG_hwpoison bit check.
		 */
		if (unlikely(folio_test_hwpoison(folio))) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}

		/* Check for page in userfault range. */
		if (userfaultfd_minor(vma)) {
			folio_unlock(folio);
			folio_put(folio);
			/* See comment in userfaultfd_missing() block above */
			if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
				ret = 0;
				goto out;
			}
			return hugetlb_handle_userfault(vma, mapping, idx, flags,
							haddr, address,
							VM_UFFD_MINOR);
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);
	}

	ptl = huge_pte_lock(h, mm, ptep);
	ret = 0;
	/* If pte changed from under us, retry */
	if (!pte_same(huge_ptep_get(ptep), old_pte))
		goto backout;

	if (anon_rmap)
		hugetlb_add_new_anon_rmap(folio, vma, haddr);
	else
		hugetlb_add_file_rmap(folio);
	new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	/*
	 * If this pte was previously wr-protected, keep it wr-protected even
	 * if populated.
	 */
	if (unlikely(pte_marker_uffd_wp(old_pte)))
		new_pte = huge_pte_mkuffd_wp(new_pte);
	set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
	}

	spin_unlock(ptl);

	/*
	 * Only set hugetlb_migratable in newly allocated pages. Existing
	 * pages found in the pagecache may not have hugetlb_migratable if
	 * they have been isolated for migration.
	 */
	if (new_folio)
		folio_set_hugetlb_migratable(folio);

	folio_unlock(folio);
out:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	if (new_folio && !new_pagecache_folio)
		restore_reserve_on_error(h, vma, haddr, folio);

	folio_unlock(folio);
	folio_put(folio);
	goto out;
}

#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif
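
/*
 * Illustrative sketch, not part of the original flow: callers serialize
 * faults on a logical page by hashing (mapping, idx) to one of the fault
 * mutexes.  num_fault_mutexes is a power of two, so masking with
 * (num_fault_mutexes - 1) selects a slot uniformly:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...fault in or truncate the page at idx...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Two unrelated pages may hash to the same mutex; that only costs some
 * extra serialization, never correctness.
 */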

vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	vm_fault_t ret;
	u32 hash;
	pgoff_t idx;
	struct folio *folio = NULL;
	struct folio *pagecache_folio = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;
	unsigned long haddr = address & huge_page_mask(h);

	/* TODO: Handle faults under the VMA lock */
	if (flags & FAULT_FLAG_VMA_LOCK) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, haddr);
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/*
	 * Acquire vma lock before calling huge_pte_alloc and hold
	 * until finished with ptep. This prevents huge_pmd_unshare from
	 * being called elsewhere and making the ptep no longer valid.
	 */
	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
	if (!ptep) {
		hugetlb_vma_unlock_read(vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		return VM_FAULT_OOM;
	}

	entry = huge_ptep_get(ptep);
	if (huge_pte_none_mostly(entry)) {
		if (is_pte_marker(entry)) {
			pte_marker marker =
				pte_marker_get(pte_to_swp_entry(entry));

			if (marker & PTE_MARKER_POISONED) {
				ret = VM_FAULT_HWPOISON_LARGE;
				goto out_mutex;
			}
		}

		/*
		 * Other PTE markers should be handled the same way as none
		 * PTEs.
		 *
		 * hugetlb_no_page will drop the vma lock and hugetlb fault
		 * mutex internally, which is why we return its result
		 * directly.
		 */
		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
				      entry, flags);
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the code below from assuming that we have an
	 * active hugepage in the pagecache. This goto defers to the second
	 * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
	 * checks will handle it properly.
	 */
	if (!pte_present(entry)) {
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			/*
			 * Release the hugetlb fault lock now, but retain
			 * the vma lock, because it is needed to guard the
			 * huge_pte_lockptr() later in
			 * migration_entry_wait_huge(). The vma lock will
			 * be released there.
			 */
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			migration_entry_wait_huge(vma, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
		goto out_mutex;
	}

	/*
	 * If we are going to COW/unshare the mapping later, we examine the
	 * pending reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. Also lookup the pagecache page now as it is used to
	 * determine if a reservation has been consumed.
	 */
	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, haddr) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, haddr);

		pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
		if (IS_ERR(pagecache_folio))
			pagecache_folio = NULL;
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_wp() */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/* Handle userfault-wp first, before trying to lock more pages */
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (!userfaultfd_wp_async(vma)) {
			struct vm_fault vmf = {
				.vma = vma,
				.address = haddr,
				.real_address = address,
				.flags = flags,
			};

			spin_unlock(ptl);
			if (pagecache_folio) {
				folio_unlock(pagecache_folio);
				folio_put(pagecache_folio);
			}
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			return handle_userfault(&vmf, VM_UFFD_WP);
		}

		entry = huge_pte_clear_uffd_wp(entry);
		set_huge_pte_at(mm, haddr, ptep, entry,
				huge_page_size(hstate_vma(vma)));
		/* Fallthrough to CoW */
	}

	/*
	 * hugetlb_wp() requires the page locks of pte_page(entry) and
	 * pagecache_folio, so here we need to take the former one
	 * when folio != pagecache_folio or pagecache_folio is NULL.
	 */
	folio = page_folio(pte_page(entry));
	if (folio != pagecache_folio)
		if (!folio_trylock(folio)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	folio_get(folio);

	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_wp(mm, vma, address, ptep, flags,
					 pagecache_folio, ptl);
			goto out_put_page;
		} else if (likely(flags & FAULT_FLAG_WRITE)) {
			entry = huge_pte_mkdirty(entry);
		}
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, haddr, ptep);
out_put_page:
	if (folio != pagecache_folio)
		folio_unlock(folio);
	folio_put(folio);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_folio) {
		folio_unlock(pagecache_folio);
		folio_put(pagecache_folio);
	}
out_mutex:
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold a refcount while waiting for the page
	 * lock. Here, however, we only wait to defer the next page fault and
	 * avoid a busy loop; the page is not used after it is unlocked before
	 * the current page fault returns. So we are safe from accessing a
	 * freed page even though we wait here without taking a refcount.
	 */
	if (need_wait_lock)
		folio_wait_locked(folio);
	return ret;
}

#ifdef CONFIG_USERFAULTFD
/*
 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
 */
static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
	mpol_cond_put(mpol);

	return folio;
}

/*
 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
 * with modifications for hugetlb pages.
 */
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop)
{
	struct mm_struct *dst_mm = dst_vma->vm_mm;
	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
	struct hstate *h = hstate_vma(dst_vma);
	struct address_space *mapping = dst_vma->vm_file->f_mapping;
	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
	unsigned long size;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	pte_t _dst_pte;
	spinlock_t *ptl;
	int ret = -ENOMEM;
	struct folio *folio;
	int writable;
	bool folio_in_pagecache = false;

	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
		ptl = huge_pte_lock(h, dst_mm, dst_pte);

		/* Don't overwrite any existing PTEs (even markers) */
		if (!huge_pte_none(huge_ptep_get(dst_pte))) {
			spin_unlock(ptl);
			return -EEXIST;
		}

		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
				huge_page_size(h));

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(dst_vma, dst_addr, dst_pte);

		spin_unlock(ptl);
		return 0;
	}

	if (is_continue) {
		ret = -EFAULT;
		folio = filemap_lock_hugetlb_folio(h, mapping, idx);
		if (IS_ERR(folio))
			goto out;
		folio_in_pagecache = true;
	} else if (!*foliop) {
		/* If a folio already exists, then it's UFFDIO_COPY for
		 * a non-missing case. Return -EEXIST.
		 */
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			ret = -EEXIST;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
					   false);

		/* fallback to copy_from_user outside mmap_lock */
		if (unlikely(ret)) {
			ret = -ENOENT;
			/* Free the allocated folio which may have
			 * consumed a reservation.
			 */
			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
			folio_put(folio);

			/* Allocate a temporary folio to hold the copied
			 * contents.
			 */
			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
			if (!folio) {
				ret = -ENOMEM;
				goto out;
			}
			*foliop = folio;
			/* Set the outparam foliop and return to the caller to
			 * copy the contents outside the lock. Don't free the
			 * folio.
			 */
			goto out;
		}
	} else {
		if (vm_shared &&
		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
			folio_put(*foliop);
			ret = -EEXIST;
			*foliop = NULL;
			goto out;
		}

		folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
		if (IS_ERR(folio)) {
			folio_put(*foliop);
			ret = -ENOMEM;
			*foliop = NULL;
			goto out;
		}
		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
		folio_put(*foliop);
		*foliop = NULL;
		if (ret) {
			folio_put(folio);
			goto out;
		}
	}

	/*
	 * The memory barrier inside __folio_mark_uptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__folio_mark_uptodate(folio);

	/* Add shared, newly allocated pages to the page cache. */
	if (vm_shared && !is_continue) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		ret = -EFAULT;
		if (idx >= size)
			goto out_release_nounlock;

		/*
		 * Serialization between remove_inode_hugepages() and
		 * hugetlb_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table, which must be held by
		 * the caller here.
		 */
		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
		if (ret)
			goto out_release_nounlock;
		folio_in_pagecache = true;
	}

	ptl = huge_pte_lock(h, dst_mm, dst_pte);

	ret = -EIO;
	if (folio_test_hwpoison(folio))
		goto out_release_unlock;

	/*
	 * We allow overwriting a pte marker: consider the case where both
	 * MISSING and WP are registered.  We first wr-protect a none pte
	 * which has no page cache page backing it, then access the page.
	 */
	ret = -EEXIST;
	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
		goto out_release_unlock;

	if (folio_in_pagecache)
		hugetlb_add_file_rmap(folio);
	else
		hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);

	/*
	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
	 * with wp flag set, don't set pte write bit.
	 */
	if (wp_enabled || (is_continue && !vm_shared))
		writable = 0;
	else
		writable = dst_vma->vm_flags & VM_WRITE;

	_dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
	/*
	 * Always mark UFFDIO_COPY pages dirty.  This may not be terribly
	 * important for hugetlbfs for now, since swapping is not supported,
	 * but it makes clear that this page cannot be thrown away at will,
	 * even if the write bit is not set.
	 */
	_dst_pte = huge_pte_mkdirty(_dst_pte);
	_dst_pte = pte_mkyoung(_dst_pte);

	if (wp_enabled)
		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);

	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));

	hugetlb_count_add(pages_per_huge_page(h), dst_mm);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	spin_unlock(ptl);
	if (!is_continue)
		folio_set_hugetlb_migratable(folio);
	if (vm_shared || is_continue)
		folio_unlock(folio);
	ret = 0;
out:
	return ret;
out_release_unlock:
	spin_unlock(ptl);
	if (vm_shared || is_continue)
		folio_unlock(folio);
out_release_nounlock:
	if (!folio_in_pagecache)
		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
	folio_put(folio);
	goto out;
}
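
/*
 * Illustrative sketch, not part of the original flow: the -ENOENT path
 * above implements a two-step contract with the UFFDIO_COPY caller
 * (mfill_atomic() in mm/userfaultfd.c).  Roughly:
 *
 *	ret = hugetlb_mfill_atomic_pte(..., &folio);
 *	if (ret == -ENOENT) {
 *		// drop mmap_lock, copy the user data into the temporary
 *		// folio handed back via *foliop, then retry the call with
 *		// *foliop still set so no fresh allocation is needed
 *	}
 *
 * This keeps the potentially-faulting user copy outside mmap_lock while
 * the pte installation itself stays atomic.
 */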
#endif /* CONFIG_USERFAULTFD */

struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & huge_page_mask(h);
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t *pte, entry;
	int ret;

	hugetlb_vma_lock_read(vma);
	pte = hugetlb_walk(vma, haddr, huge_page_size(h));
	if (!pte)
		goto out_unlock;

	ptl = huge_pte_lock(h, mm, pte);
	entry = huge_ptep_get(pte);
	if (pte_present(entry)) {
		page = pte_page(entry);

		if (!huge_pte_write(entry)) {
			if (flags & FOLL_WRITE) {
				page = NULL;
				goto out;
			}

			if (gup_must_unshare(vma, flags, page)) {
				/* Tell the caller to do unsharing */
				page = ERR_PTR(-EMLINK);
				goto out;
			}
		}

		page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));

		/*
		 * Note that page may be a sub-page, and with vmemmap
		 * optimizations the page struct may be read only.
		 * try_grab_page() will increase the ref count on the
		 * head page, so this will be OK.
		 *
		 * try_grab_page() should always be able to get the page here,
		 * because we hold the ptl lock and have verified pte_present().
		 */
		ret = try_grab_page(page, flags);

		if (WARN_ON_ONCE(ret)) {
			page = ERR_PTR(ret);
			goto out;
		}

		*page_mask = (1U << huge_page_order(h)) - 1;
	}
out:
	spin_unlock(ptl);
out_unlock:
	hugetlb_vma_unlock_read(vma);

	/*
	 * Fixup retval for dump requests: if pagecache doesn't exist,
	 * don't try to allocate a new page but just skip it.
	 */
	if (!page && (flags & FOLL_DUMP) &&
	    !hugetlbfs_pagecache_present(h, vma, address))
		page = ERR_PTR(-EFAULT);

	return page;
}

long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	long pages = 0, psize = huge_page_size(h);
	bool shared_pmd = false;
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end. Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	last_addr_mask = hugetlb_mask_last_page(h);
	for (; address < end; address += psize) {
		spinlock_t *ptl;
		ptep = hugetlb_walk(vma, address, psize);
		if (!ptep) {
			if (!uffd_wp) {
				address |= last_addr_mask;
				continue;
			}
			/*
			 * Userfaultfd wr-protect requires pgtable
			 * pre-allocations to install pte markers.
			 */
			ptep = huge_pte_alloc(mm, vma, address, psize);
			if (!ptep) {
				pages = -ENOMEM;
				break;
			}
		}
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/*
			 * When uffd-wp is enabled on the vma, unshare
			 * shouldn't happen at all. Warn about it if it
			 * somehow happens anyway.
			 */
			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			address |= last_addr_mask;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			/* Nothing to do. */
		} else if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);
			struct page *page = pfn_swap_entry_to_page(entry);
			pte_t newpte = pte;

			if (is_writable_migration_entry(entry)) {
				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
								swp_offset(entry));
				else
					entry = make_readable_migration_entry(
								swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				pages++;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);
			if (!pte_same(pte, newpte))
				set_huge_pte_at(mm, address, ptep, newpte, psize);
		} else if (unlikely(is_pte_marker(pte))) {
			/* No other markers apply for now. */
			WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
			if (uffd_wp_resolve)
				/* Safe to modify directly (non-present->none). */
				huge_pte_clear(mm, address, ptep, psize);
		} else if (!huge_pte_none(pte)) {
			pte_t old_pte;
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = huge_pte_modify(old_pte, newprot);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (uffd_wp)
				pte = huge_pte_mkuffd_wp(pte);
			else if (uffd_wp_resolve)
				pte = huge_pte_clear_uffd_wp(pte);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
		} else {
			/* None pte */
			if (unlikely(uffd_wp))
				/* Safe to modify directly (none->non-present). */
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP),
						psize);
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table can then be reused and filled with junk. If we
	 * actually did unshare a page of pmds, flush the range corresponding
	 * to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() since
	 * we are downgrading page table protection, not changing it to point
	 * to a new page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	mmu_notifier_invalidate_range_end(&range);

	return pages > 0 ? (pages << h->order) : pages;
}
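
/*
 * Illustrative sketch, not part of the original flow: the return value is
 * in base pages, not huge pages.  Assuming a 2 MiB hstate (h->order == 9
 * with 4 KiB base pages), changing the protection of three huge PTEs
 * yields:
 *
 *	pages = 3;
 *	return pages << h->order;	// 3 << 9 == 1536 base pages
 *
 * which keeps the count commensurate with what generic change_protection()
 * callers report for normal pages.
 */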

/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg = -1, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * vma specific semaphore used for pmd sharing and fault/truncation
	 * synchronization
	 */
	hugetlb_vma_lock_alloc(vma);

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map cannot be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);
	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			goto out_err;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * off the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add. This
			 * indicates a race with alloc_hugetlb_folio. Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return true;

out_put_pages:
	/* put back original number of pages, chg */
	(void)hugepage_subpool_put_pages(spool, chg);
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	hugetlb_vma_lock_free(vma);
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		kref_put(&resv_map->refs, resv_map_release);
		set_vma_resv_map(vma, NULL);
	}
	return false;
}
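
/*
 * Illustrative worked example, with made-up numbers: suppose a shared
 * mapping reserves the range [0, 10) so region_chg() returns chg == 10,
 * but a racing alloc_hugetlb_folio() adds two pages to the reserve map
 * before region_add() runs, so region_add() returns add == 8.  The code
 * above then gives back the over-counted difference:
 *
 *	rsv_adjust = hugepage_subpool_put_pages(spool, 10 - 8);  // 2 pages
 *	hugetlb_acct_memory(h, -rsv_adjust);
 *
 * together with the matching (chg - add) * pages_per_huge_page(h) cgroup
 * reservation uncharge.
 */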

long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor cannot be
		 * allocated. If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
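
/*
 * Illustrative worked example, with made-up numbers: if region_del()
 * removes 5 reserved pages from the map (chg == 5) while only 3 pages in
 * the range were actually faulted in and freed from the file (freed == 3),
 * the two never-consumed reservations are what get returned:
 *
 *	gbl_reserve = hugepage_subpool_put_pages(spool, 5 - 3);  // 2 pages
 *	hugetlb_acct_memory(h, -gbl_reserve);
 *
 * gbl_reserve may be smaller than 2 if the subpool retains pages to
 * satisfy its configured minimum size.
 */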

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 *
	 * Also, vma_lock (vm_private_data) is required for sharing.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end) ||
	    !svma->vm_private_data)
		return 0;

	return saddr;
}
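
/*
 * Illustrative worked example, assuming x86-64 geometry (PUD_SIZE == 1 GiB,
 * PMD_SIZE == 2 MiB): two processes map the same hugetlbfs file with
 * identical permissions, one via vma at vm_start == 2 GiB and the candidate
 * svma at vm_start == 4 GiB, both with vm_pgoff == 0.  For addr ==
 * 2 GiB + 4 MiB, the same file offset maps to saddr == 4 GiB + 4 MiB,
 * which has the same pmd_index(), and [4 GiB, 5 GiB) lies fully inside
 * svma, so page_table_shareable() returns saddr and the caller may share
 * the PMD page backing that PUD entry.
 */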

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr & PUD_MASK;
	unsigned long end = start + PUD_SIZE;

#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return false;
	if (!vma->vm_private_data)	/* vma lock required for sharing */
		return false;
	if (!range_in_vma(vma, start, end))
		return false;
	return true;
}

/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}
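
/*
 * Illustrative worked example, assuming PUD_SIZE == 1 GiB: for a
 * VM_MAYSHARE vma spanning exactly [1 GiB, 2 GiB), a flush range of
 * [1 GiB + 2 MiB, 1 GiB + 4 MiB) is widened to the whole PUD, because an
 * unshare of the PMD page backing that PUD entry affects every mapping
 * under it:
 *
 *	*start = ALIGN_DOWN(0x40200000UL, PUD_SIZE);	// 0x40000000
 *	*end   = ALIGN(0x40400000UL, PUD_SIZE);		// 0x80000000
 */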

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = hugetlb_walk(svma, saddr,
					    vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_read(mapping);
	return pte;
}
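
/*
 * Illustrative sketch, not part of the original flow: after a successful
 * share, both processes' PUD entries point at the same PMD page, and that
 * page's refcount tracks the number of sharers:
 *
 *	process A: pud_A ----\
 *	                      +--> [ shared PMD page ] --> huge PTEs
 *	process B: pud_B ----/
 *
 *	page_count(virt_to_page(spte)) == 2
 *
 * huge_pmd_unshare() below undoes one arm of this diagram by clearing the
 * caller's pud and dropping one reference.
 */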

/*
 * Unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If the pte is
 * shared, as indicated by page_count > 1, unmap is achieved by clearing the
 * pud and decrementing the ref count. If count == 1, the pte page is not
 * shared.
 *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
	}

	return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}

/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size. Used to skip non-present
 * page table entries when linearly scanning address ranges. Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}
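
/*
 * Illustrative sketch, not part of the original flow: callers use this
 * mask to jump to the last huge page covered by the current page table
 * page when a walk finds no pte, e.g. in hugetlb_change_protection():
 *
 *	ptep = hugetlb_walk(vma, address, psize);
 *	if (!ptep) {
 *		address |= last_addr_mask;	// snap to end of this table
 *		continue;			// loop's += psize -> next table
 *	}
 *
 * Assuming 2 MiB pages with 1 GiB PUDs, the mask is PUD_SIZE - PMD_SIZE,
 * so one missing PMD table skips up to 511 iterations at once.
 */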

#else

/* See description above. Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overridable if your architecture needs its own
 * behavior.
 */
bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	bool ret = true;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio) ||
	    !folio_test_hugetlb_migratable(folio) ||
	    !folio_try_get(folio)) {
		ret = false;
		goto unlock;
	}
	folio_clear_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

void folio_putback_active_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	folio_put(folio);
}

void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	set_page_owner_migrate_reason(&new_folio->page, reason);

	/*
	 * Transfer the temporary state of the new hugetlb folio. This is the
	 * reverse of other transitions because the new folio is going to be
	 * final while the old one will be freed, so the old folio takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}

static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}

/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if we have one, otherwise
			 * break the parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
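
/*
 * Illustrative usage, with made-up sizes: the parser above accepts either
 * a single global size or per-node "<nid>:<size>" pairs separated by
 * commas, e.g. on the kernel command line:
 *
 *	hugetlb_cma=2G		// one 2 GiB budget spread across online nodes
 *	hugetlb_cma=0:1G,2:2G	// 1 GiB on node 0 and 2 GiB on node 2
 *
 * Sizes go through memparse(), so the usual K/M/G suffixes apply.
 */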

void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes were specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If a 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on the first three nodes and ignore the
		 * last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on the smallest size that
		 * may be returned to the CMA allocator in the case of huge
		 * page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
					PAGE_SIZE << HUGETLB_PAGE_ORDER,
					 0, false, name,
					 &hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible. Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}

static void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */