/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it
 * applies to all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* by default, scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation, poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, just
 * as would have happened if the vma had been large enough during the
 * page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};


static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled())
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type: one for a migratetype to fall back to, and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu "
				"to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

static int start_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			printk(KERN_ERR
			       "khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}

static atomic_t huge_zero_refcount;
static struct page *huge_zero_page __read_mostly;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

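/*
 * Reference scheme for the huge zero page: get_huge_zero_page() leaves
 * huge_zero_refcount at 2, one reference for the caller and one held on
 * behalf of the shrinker, so a count of 1 means the page is unused and
 * may be reclaimed.
 */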
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return ACCESS_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_page(zero_page);
		goto retry;
	}

	/*
	 * We take an additional reference here. It will be put back
	 * by the shrinker.
	 */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return ACCESS_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker
	 * can put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

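/*
 * Shrinker interface: report HPAGE_PMD_NR reclaimable pages while only
 * the cached reference remains, and free the zero page from
 * ->scan_objects.
 */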
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free the zero page only if the last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_page(zero_page);
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only controls whether the allocation may wait
 * (__GFP_WAIT). A blind __GFP_REPEAT would be too aggressive: it's
 * never worth swapping tons of memory just to allocate one more
 * hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		printk(KERN_ERR "hugepage: failed to register transparent hugepage khugepaged group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	register_shrinker(&huge_zero_page_shrinker);

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		printk(KERN_WARNING
		       "transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

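/*
 * Make the pmd writable only if the vma permits writes; COW mappings
 * stay write-protected.
 */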
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	pgtable_t pgtable;
	spinlock_t *ptl;

	VM_BUG_ON_PAGE(!PageCompound(page), page);
	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable))
		return VM_FAULT_OOM;

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
	}

	return 0;
}

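/*
 * When defrag is disabled, drop __GFP_WAIT so the hugepage allocation
 * cannot enter direct reclaim or compaction.
 */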
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

static inline struct page *alloc_hugepage_vma(int defrag,
					      struct vm_area_struct *vma,
					      unsigned long haddr, int nd,
					      gfp_t extra_gfp)
{
	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
			       HPAGE_PMD_ORDER, vma, haddr, nd);
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_wrprotect(entry);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

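/*
 * Anonymous huge-pmd fault handler: map the huge zero page for read
 * faults when use_zero_page is enabled, otherwise allocate and map a
 * freshly cleared hugepage. Returns VM_FAULT_FALLBACK when the fault
 * should be retried with regular (small) pages.
 */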
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
				zero_page);
		spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return 0;
	}
	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
			vma, haddr, numa_node_id(), 0);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(mem_cgroup_charge_anon(page, mm, GFP_KERNEL))) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
		mem_cgroup_uncharge_page(page);
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	count_vm_event(THP_FAULT_ALLOC);
	return 0;
}

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * While the page table lock is held, the huge zero pmd cannot be
	 * under splitting, since we never split the page itself, only the
	 * pmd into a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		bool set;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
		ret = 0;
		goto out_unlock;
	}

	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}

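/*
 * Minor fault on a huge pmd: set the accessed (and possibly dirty) bit,
 * after re-checking that the pmd was not changed under us.
 */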
void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

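/*
 * COW fallback: copy the hugepage into HPAGE_PMD_NR freshly allocated
 * small pages and replace the huge pmd with a regular page table
 * mapping them.
 */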
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	spinlock_t *ptl;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_charge_anon(pages[i], mm,
						    GFP_KERNEL))) {
			if (pages[i])
				put_page(pages[i]);
			mem_cgroup_uncharge_start();
			while (--i >= 0) {
				mem_cgroup_uncharge_page(pages[i]);
				put_page(pages[i]);
			}
			mem_cgroup_uncharge_end();
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_clear_flush(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	mem_cgroup_uncharge_start();
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mem_cgroup_uncharge_page(pages[i]);
		put_page(pages[i]);
	}
	mem_cgroup_uncharge_end();
	kfree(pages);
	goto out;
}

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	spinlock_t *ptl;
	int ret = 0;
	struct page *page = NULL, *new_page;
	unsigned long haddr;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	ptl = pmd_lockptr(mm, pmd);
	VM_BUG_ON(!vma->anon_vma);
	haddr = address & HPAGE_PMD_MASK;
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(ptl);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache_pmd(vma, address, pmd);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_page(page);
	spin_unlock(ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow())
		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
					      vma, haddr, numa_node_id(), 0);
	else
		new_page = NULL;

	if (unlikely(!new_page)) {
		if (!page) {
			split_huge_page_pmd(vma, address, pmd);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
					pmd, orig_pmd, page, haddr);
			if (ret & VM_FAULT_OOM) {
				split_huge_page(page);
				ret |= VM_FAULT_FALLBACK;
			}
			put_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL))) {
		put_page(new_page);
		if (page) {
			split_huge_page(page);
			put_page(page);
		} else
			split_huge_page_pmd(vma, address, pmd);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);

	if (!page)
		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	if (page)
		put_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		spin_unlock(ptl);
		mem_cgroup_uncharge_page(new_page);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_clear_flush(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, address, pmd);
		if (!page) {
			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
			put_huge_zero_page();
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(ptl);
	return ret;
}

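/*
 * Resolve a trans huge pmd for follow_page(): returns the subpage of
 * the THP that maps @addr, honouring FOLL_WRITE, FOLL_DUMP, FOLL_NUMA,
 * FOLL_TOUCH, FOLL_MLOCK and FOLL_GET.
 */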
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page), page);
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE, but
		 * for now the dirty bit in the pmd is meaningless.
		 * If the dirty bit ever becomes meaningful and we only
		 * set it with FOLL_WRITE, an atomic set_bit will be
		 * required on the pmd to set the young bit, instead of
		 * the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
					  pmd, _pmd, 1))
			update_mmu_cache_pmd(vma, addr, pmd);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	spinlock_t *ptl;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	int page_nid = -1, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	int flags = 0;

	ptl = pmd_lock(mm, pmdp);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*pmdp))) {
		spin_unlock(ptl);
		wait_migrate_huge_page(vma->anon_vma, pmdp);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/*
	 * Avoid grouping on DSO/COW pages in specific and RO pages
	 * in general. RO pages shouldn't hurt as much anyway, since
	 * they can be in shared cache state.
	 */
	if (!pmd_write(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid
	 * dropping the page table lock if at all possible.
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == -1) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		spin_unlock(ptl);
		wait_on_page_locked(page);
		page_nid = -1;
		goto out;
	}

	/*
	 * Page is misplaced. The page lock serialises migrations. Acquire
	 * the anon_vma to serialise splits.
	 */
	get_page(page);
	spin_unlock(ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while the page table lock was released */
	spin_lock(ptl);
	if (unlikely(!pmd_same(pmd, *pmdp))) {
		unlock_page(page);
		put_page(page);
		page_nid = -1;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = -1;
		goto clear_pmdnuma;
	}

	/*
	 * Migrate the THP to the requested node; returns with the page
	 * unlocked and pmd_numa cleared.
	 */
	spin_unlock(ptl);
	migrated = migrate_misplaced_transhuge_page(mm, vma,
				pmdp, pmd, addr, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	}

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	pmd = pmd_mknonnuma(pmd);
	set_pmd_at(mm, haddr, pmdp, pmd);
	VM_BUG_ON(pmd_numa(*pmdp));
	update_mmu_cache_pmd(vma, addr, pmdp);
	unlock_page(page);
out_unlock:
	spin_unlock(ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != -1)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);

	return 0;
}

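/*
 * Tear down one huge pmd on unmap: clear the pmd, drop the page's rmap
 * and mm counters (or the zero-page reference), and free the deposited
 * page table. Returns 1 if a huge pmd was zapped.
 */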
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		struct page *page;
		pgtable_t pgtable;
		pmd_t orig_pmd;
		/*
		 * For architectures like ppc64 we look at the deposited
		 * pgtable when calling pmdp_get_and_clear. So do the
		 * pgtable_trans_huge_withdraw after finishing pmdp related
		 * operations.
		 */
		orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
		if (is_huge_zero_pmd(orig_pmd)) {
			atomic_long_dec(&tlb->mm->nr_ptes);
			spin_unlock(ptl);
			put_huge_zero_page();
		} else {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON_PAGE(!PageHead(page), page);
			atomic_long_dec(&tlb->mm->nr_ptes);
			spin_unlock(ptl);
			tlb_remove_page(tlb, page);
		}
		pte_free(tlb->mm, pgtable);
		ret = 1;
	}
	return ret;
}

int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		unsigned char *vec)
{
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/*
		 * All logical pages in the range are present
		 * if backed by a huge page.
		 */
		spin_unlock(ptl);
		memset(vec, 1, (end - addr) >> PAGE_SHIFT);
		ret = 1;
	}

	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
	if (ret == 1) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
	}
out:
	return ret;
}

/*
 * Returns
 *  - 0 if the PMD could not be locked
 *  - 1 if the PMD was locked but the protections are unchanged and a TLB
 *    flush is unnecessary
 *  - HPAGE_PMD_NR if the protections changed and a TLB flush is necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pmd_t entry;
		ret = 1;
		if (!prot_numa) {
			entry = pmdp_get_and_clear(mm, addr, pmd);
			if (pmd_numa(entry))
				entry = pmd_mknonnuma(entry);
			entry = pmd_modify(entry, newprot);
			ret = HPAGE_PMD_NR;
			set_pmd_at(mm, addr, pmd, entry);
			BUG_ON(pmd_write(entry));
		} else {
			struct page *page = pmd_page(*pmd);

			/*
			 * Do not trap faults against the zero page. The
			 * read-only data is likely to be read-cached on the
			 * local CPU cache and it is less useful to know about
			 * local vs remote hits on the zero page.
			 */
			if (!is_huge_zero_page(page) &&
			    !pmd_numa(*pmd)) {
				pmdp_set_numa(mm, addr, pmd);
				ret = HPAGE_PMD_NR;
			}
		}
		spin_unlock(ptl);
	}

	return ret;
}

/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking the
 * page table lock. So callers must unlock it.
 */
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	*ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(*ptl);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		} else {
			/* Thp mapped by 'pmd' is stable, so we can
			 * handle it as it is. */
			return 1;
		}
	}
	spin_unlock(*ptl);
	return 0;
}

/*
 * This function checks whether a given @page is mapped at @address in
 * the virtual space of @mm.
 *
 * If it is, this function returns the pmd with the page table lock held,
 * passing the lock back to the caller via @ptl.
 * If it is not, it returns NULL without holding the page table lock.
 */
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag,
			      spinlock_t **ptl)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (address & ~HPAGE_PMD_MASK)
		return NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);

	*ptl = pmd_lock(mm, pmd);
	if (!pmd_present(*pmd))
		goto unlock;
	if (pmd_page(*pmd) != page)
		goto unlock;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto unlock;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		return pmd;
	}
unlock:
	spin_unlock(*ptl);
	return NULL;
}

static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t *pmd;
	int ret = 0;
	/* For mmu_notifiers */
	const unsigned long mmun_start = address;
	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->rwsem to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush(vma, address, pmd);
		ret = 1;
		spin_unlock(ptl);
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	return ret;
}

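/*
 * Turn a compound trans huge page into HPAGE_PMD_NR independent pages,
 * transferring flags, mapcounts and refcounts from the head page to the
 * tails, and move the tails onto the LRU (or @list).
 */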
static void __split_huge_page_refcount(struct page *page,
				       struct list_head *list)
{
	int i;
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);

	compound_lock(page);
	/* complete memcg works before adding pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb();

		/*
		 * Retain the hwpoison flag of a poisoned tail page, so
		 * that memory-failure does not kill an unrelated process
		 * on a guest machine (KVM).
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate) |
				      (1L << PG_active) |
				      (1L << PG_unevictable)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * while we split. But that doesn't mean userland can't
		 * keep changing and reading the page contents while
		 * we transfer the mapcount, so the pmd splitting
		 * status is achieved by setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;
		page_cpupid_xchg_last(page_tail, page_cpupid_last(page));

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(page, page_tail, lruvec, list);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping,
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock, so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (now become a regular page) is required
	 * to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}

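/*
 * Rebuild the regular pte mappings for one vma: withdraw the deposited
 * page table, fill it with ptes for the subpages, then replace the huge
 * pmd with it.
 */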
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
	if (pmd) {
		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
		pmd_populate(mm, &_pmd, pgtable);

		haddr = address;
		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			else
				BUG_ON(page_mapcount(page) != 1);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			if (pmd_numa(*pmd))
				entry = pte_mknuma(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has the whole access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe, but it
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
		spin_unlock(ptl);
	}

	return ret;
}

/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma,
			      struct list_head *list)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page))
		printk(KERN_ERR "mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
	BUG_ON(mapcount != page_mapcount(page));

	__split_huge_page_refcount(page, list);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2)
		printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
	BUG_ON(mapcount != mapcount2);
}

/*
 * Split a hugepage into normal pages. This doesn't change the position of the
 * head page. If @list is null, tail pages will be added to the LRU list;
 * otherwise, to @list. Both the head page and the tail pages will inherit
 * mapping, flags, and so on from the hugepage.
 * Returns 0 if the hugepage was split successfully, otherwise returns 1.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(is_huge_zero_page(page));
	BUG_ON(!PageAnon(page));

	/*
	 * The caller does not necessarily hold an mmap_sem that would
	 * prevent the anon_vma from disappearing, so we first take a
	 * reference to it and then lock the anon_vma for write. This is
	 * similar to page_lock_anon_vma_read except the write lock is
	 * taken to serialise against parallel split or collapse
	 * operations.
	 */
	anon_vma = page_get_anon_vma(page);
	if (!anon_vma)
		goto out;
	anon_vma_lock_write(anon_vma);

	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma, list);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	anon_vma_unlock_write(anon_vma);
	put_anon_vma(anon_vma);
out:
	return ret;
}

1896#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
1897
1898int hugepage_madvise(struct vm_area_struct *vma,
1899 unsigned long *vm_flags, int advice)
1900{
1901 switch (advice) {
1902 case MADV_HUGEPAGE:
1903#ifdef CONFIG_S390
1904 /*
1905 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1906 * can't handle this properly after s390_enable_sie, so we simply
1907 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1908 */
1909 if (mm_has_pgste(vma->vm_mm))
1910 return 0;
1911#endif
1912 /*
1913 * Be somewhat over-protective like KSM for now!
1914 */
1915 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1916 return -EINVAL;
1917 *vm_flags &= ~VM_NOHUGEPAGE;
1918 *vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
1924 if (unlikely(khugepaged_enter_vma_merge(vma)))
1925 return -ENOMEM;
1926 break;
1927 case MADV_NOHUGEPAGE:
1928 /*
1929 * Be somewhat over-protective like KSM for now!
1930 */
1931 if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1932 return -EINVAL;
1933 *vm_flags &= ~VM_HUGEPAGE;
1934 *vm_flags |= VM_NOHUGEPAGE;
1935 /*
1936 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1937 * this vma even if we leave the mm registered in khugepaged if
1938 * it got registered before VM_NOHUGEPAGE was set.
1939 */
1940 break;
1941 }
1942
1943 return 0;
1944}
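/*
 * Userspace reaches hugepage_madvise() through madvise(2); a minimal
 * sketch, assuming an anonymous mapping large enough to hold at least
 * one pmd-aligned huge page:
 *
 *	void *p = mmap(NULL, 4 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, 4 << 20, MADV_HUGEPAGE);	// sets VM_HUGEPAGE above
 */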
1945
1946static int __init khugepaged_slab_init(void)
1947{
1948 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1949 sizeof(struct mm_slot),
1950 __alignof__(struct mm_slot), 0, NULL);
1951 if (!mm_slot_cache)
1952 return -ENOMEM;
1953
1954 return 0;
1955}
1956
1957static inline struct mm_slot *alloc_mm_slot(void)
1958{
1959 if (!mm_slot_cache) /* initialization failed */
1960 return NULL;
1961 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1962}
1963
1964static inline void free_mm_slot(struct mm_slot *mm_slot)
1965{
1966 kmem_cache_free(mm_slot_cache, mm_slot);
1967}
1968
1969static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1970{
1971 struct mm_slot *mm_slot;
1972
1973 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1974 if (mm == mm_slot->mm)
1975 return mm_slot;
1976
1977 return NULL;
1978}
1979
1980static void insert_to_mm_slots_hash(struct mm_struct *mm,
1981 struct mm_slot *mm_slot)
1982{
1983 mm_slot->mm = mm;
1984 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
1985}
1986
1987static inline int khugepaged_test_exit(struct mm_struct *mm)
1988{
1989 return atomic_read(&mm->mm_users) == 0;
1990}
1991
1992int __khugepaged_enter(struct mm_struct *mm)
1993{
1994 struct mm_slot *mm_slot;
1995 int wakeup;
1996
1997 mm_slot = alloc_mm_slot();
1998 if (!mm_slot)
1999 return -ENOMEM;
2000
2001 /* __khugepaged_exit() must not run from under us */
2002 VM_BUG_ON(khugepaged_test_exit(mm));
2003 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2004 free_mm_slot(mm_slot);
2005 return 0;
2006 }
2007
2008 spin_lock(&khugepaged_mm_lock);
2009 insert_to_mm_slots_hash(mm, mm_slot);
2010 /*
2011 * Insert just behind the scanning cursor, to let the area settle
2012 * down a little.
2013 */
2014 wakeup = list_empty(&khugepaged_scan.mm_head);
2015 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
2016 spin_unlock(&khugepaged_mm_lock);
2017
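	/*
	 * khugepaged pins the mm_struct itself (mm_count, not mm_users);
	 * the pin is dropped via mmdrop() when the mm_slot is released.
	 */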
2018 atomic_inc(&mm->mm_count);
2019 if (wakeup)
2020 wake_up_interruptible(&khugepaged_wait);
2021
2022 return 0;
2023}
2024
2025int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
2026{
2027 unsigned long hstart, hend;
2028 if (!vma->anon_vma)
2029 /*
2030 * Not yet faulted in so we will register later in the
2031 * page fault if needed.
2032 */
2033 return 0;
2034 if (vma->vm_ops)
2035 /* khugepaged not yet working on file or special mappings */
2036 return 0;
2037 VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2038 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2039 hend = vma->vm_end & HPAGE_PMD_MASK;
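	/*
	 * hstart rounds vm_start up and hend rounds vm_end down to a huge
	 * page boundary. Worked example, assuming 2MB huge pages (x86-64):
	 * a vma spanning [0x1ff000, 0x601000) gives hstart = 0x200000 and
	 * hend = 0x600000, so two pmd-aligned huge page ranges fit and the
	 * vma is worth registering.
	 */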
2040 if (hstart < hend)
2041 return khugepaged_enter(vma);
2042 return 0;
2043}
2044
2045void __khugepaged_exit(struct mm_struct *mm)
2046{
2047 struct mm_slot *mm_slot;
2048 int free = 0;
2049
2050 spin_lock(&khugepaged_mm_lock);
2051 mm_slot = get_mm_slot(mm);
2052 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
2053 hash_del(&mm_slot->hash);
2054 list_del(&mm_slot->mm_node);
2055 free = 1;
2056 }
2057 spin_unlock(&khugepaged_mm_lock);
2058
2059 if (free) {
2060 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2061 free_mm_slot(mm_slot);
2062 mmdrop(mm);
2063 } else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (all pagetables
		 * will be destroyed after we return) until khugepaged
		 * has finished working on the pagetables under the
		 * mmap_sem.
		 */
2072 down_write(&mm->mmap_sem);
2073 up_write(&mm->mmap_sem);
2074 }
2075}
2076
2077static void release_pte_page(struct page *page)
2078{
2079 /* 0 stands for page_is_file_cache(page) == false */
2080 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2081 unlock_page(page);
2082 putback_lru_page(page);
2083}
2084
2085static void release_pte_pages(pte_t *pte, pte_t *_pte)
2086{
2087 while (--_pte >= pte) {
2088 pte_t pteval = *_pte;
2089 if (!pte_none(pteval))
2090 release_pte_page(pte_page(pteval));
2091 }
2092}
2093
2094static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2095 unsigned long address,
2096 pte_t *pte)
2097{
2098 struct page *page;
2099 pte_t *_pte;
2100 int referenced = 0, none = 0;
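	/*
	 * "none" counts empty ptes: collapsing fills each such slot with a
	 * zeroed subpage, so khugepaged_max_ptes_none bounds how much extra
	 * memory a collapse may commit.
	 */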
2101 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2102 _pte++, address += PAGE_SIZE) {
2103 pte_t pteval = *_pte;
2104 if (pte_none(pteval)) {
2105 if (++none <= khugepaged_max_ptes_none)
2106 continue;
2107 else
2108 goto out;
2109 }
2110 if (!pte_present(pteval) || !pte_write(pteval))
2111 goto out;
2112 page = vm_normal_page(vma, address, pteval);
2113 if (unlikely(!page))
2114 goto out;
2115
2116 VM_BUG_ON_PAGE(PageCompound(page), page);
2117 VM_BUG_ON_PAGE(!PageAnon(page), page);
2118 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2119
2120 /* cannot use mapcount: can't collapse if there's a gup pin */
2121 if (page_count(page) != 1)
2122 goto out;
2123 /*
2124 * We can do it before isolate_lru_page because the
2125 * page can't be freed from under us. NOTE: PG_lock
2126 * is needed to serialize against split_huge_page
2127 * when invoked from the VM.
2128 */
2129 if (!trylock_page(page))
2130 goto out;
		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
2135 if (isolate_lru_page(page)) {
2136 unlock_page(page);
2137 goto out;
2138 }
2139 /* 0 stands for page_is_file_cache(page) == false */
2140 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2141 VM_BUG_ON_PAGE(!PageLocked(page), page);
2142 VM_BUG_ON_PAGE(PageLRU(page), page);
2143
		/* If no mapped pte is young, don't collapse the page */
2145 if (pte_young(pteval) || PageReferenced(page) ||
2146 mmu_notifier_test_young(vma->vm_mm, address))
2147 referenced = 1;
2148 }
2149 if (likely(referenced))
2150 return 1;
2151out:
2152 release_pte_pages(pte, _pte);
2153 return 0;
2154}
2155
2156static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2157 struct vm_area_struct *vma,
2158 unsigned long address,
2159 spinlock_t *ptl)
2160{
2161 pte_t *_pte;
2162 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2163 pte_t pteval = *_pte;
2164 struct page *src_page;
2165
2166 if (pte_none(pteval)) {
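			/* empty pte: fill the matching subpage with zeroes */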
2167 clear_user_highpage(page, address);
2168 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2169 } else {
2170 src_page = pte_page(pteval);
2171 copy_user_highpage(page, src_page, address, vma);
2172 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2173 release_pte_page(src_page);
2174 /*
2175 * ptl mostly unnecessary, but preempt has to
2176 * be disabled to update the per-cpu stats
2177 * inside page_remove_rmap().
2178 */
2179 spin_lock(ptl);
2180 /*
2181 * paravirt calls inside pte_clear here are
2182 * superfluous.
2183 */
2184 pte_clear(vma->vm_mm, address, _pte);
2185 page_remove_rmap(src_page);
2186 spin_unlock(ptl);
2187 free_page_and_swap_cache(src_page);
2188 }
2189
2190 address += PAGE_SIZE;
2191 page++;
2192 }
2193}
2194
2195static void khugepaged_alloc_sleep(void)
2196{
2197 wait_event_freezable_timeout(khugepaged_wait, false,
2198 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2199}
2200
2201static int khugepaged_node_load[MAX_NUMNODES];
2202
2203#ifdef CONFIG_NUMA
2204static int khugepaged_find_target_node(void)
2205{
2206 static int last_khugepaged_target_node = NUMA_NO_NODE;
2207 int nid, target_node = 0, max_value = 0;
2208
	/* find the first node with the most normal-page hits */
2210 for (nid = 0; nid < MAX_NUMNODES; nid++)
2211 if (khugepaged_node_load[nid] > max_value) {
2212 max_value = khugepaged_node_load[nid];
2213 target_node = nid;
2214 }
2215
	/* balance among nodes if several share the same maximum hit count */
2217 if (target_node <= last_khugepaged_target_node)
2218 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2219 nid++)
2220 if (max_value == khugepaged_node_load[nid]) {
2221 target_node = nid;
2222 break;
2223 }
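	/*
	 * Example: if nodes 0 and 2 tie for the maximum hit count,
	 * successive calls alternate between them rather than always
	 * returning node 0, spreading allocations across the tied nodes.
	 */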
2224
2225 last_khugepaged_target_node = target_node;
2226 return target_node;
2227}
2228
2229static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2230{
2231 if (IS_ERR(*hpage)) {
2232 if (!*wait)
2233 return false;
2234
2235 *wait = false;
2236 *hpage = NULL;
2237 khugepaged_alloc_sleep();
2238 } else if (*hpage) {
2239 put_page(*hpage);
2240 *hpage = NULL;
2241 }
2242
2243 return true;
2244}
2245
2246static struct page
2247*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2248 struct vm_area_struct *vma, unsigned long address,
2249 int node)
2250{
2251 VM_BUG_ON_PAGE(*hpage, *hpage);
	/*
	 * Allocate the page while the vma is still valid and under
	 * the mmap_sem read mode, so there is no memory allocation
	 * later when we take the mmap_sem in write mode. This is
	 * friendlier behavior towards filesystems in userland with
	 * daemons allocating memory in the userland I/O paths (OTOH
	 * it may actually hide bugs). Allocating memory with the
	 * mmap_sem in read mode is also a good idea to allow greater
	 * scalability.
	 */
2262 *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
2263 khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
2264 /*
2265 * After allocating the hugepage, release the mmap_sem read lock in
2266 * preparation for taking it in write mode.
2267 */
2268 up_read(&mm->mmap_sem);
2269 if (unlikely(!*hpage)) {
2270 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2271 *hpage = ERR_PTR(-ENOMEM);
2272 return NULL;
2273 }
2274
2275 count_vm_event(THP_COLLAPSE_ALLOC);
2276 return *hpage;
2277}
2278#else
2279static int khugepaged_find_target_node(void)
2280{
2281 return 0;
2282}
2283
2284static inline struct page *alloc_hugepage(int defrag)
2285{
2286 return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
2287 HPAGE_PMD_ORDER);
2288}
2289
2290static struct page *khugepaged_alloc_hugepage(bool *wait)
2291{
2292 struct page *hpage;
2293
2294 do {
2295 hpage = alloc_hugepage(khugepaged_defrag());
2296 if (!hpage) {
2297 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2298 if (!*wait)
2299 return NULL;
2300
2301 *wait = false;
2302 khugepaged_alloc_sleep();
2303 } else
2304 count_vm_event(THP_COLLAPSE_ALLOC);
2305 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2306
2307 return hpage;
2308}
2309
2310static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2311{
2312 if (!*hpage)
2313 *hpage = khugepaged_alloc_hugepage(wait);
2314
2315 if (unlikely(!*hpage))
2316 return false;
2317
2318 return true;
2319}
2320
2321static struct page
2322*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2323 struct vm_area_struct *vma, unsigned long address,
2324 int node)
2325{
2326 up_read(&mm->mmap_sem);
2327 VM_BUG_ON(!*hpage);
2328 return *hpage;
2329}
2330#endif
2331
2332static bool hugepage_vma_check(struct vm_area_struct *vma)
2333{
2334 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2335 (vma->vm_flags & VM_NOHUGEPAGE))
2336 return false;
2337
2338 if (!vma->anon_vma || vma->vm_ops)
2339 return false;
2340 if (is_vma_temporary_stack(vma))
2341 return false;
2342 VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2343 return true;
2344}
2345
2346static void collapse_huge_page(struct mm_struct *mm,
2347 unsigned long address,
2348 struct page **hpage,
2349 struct vm_area_struct *vma,
2350 int node)
2351{
2352 pmd_t *pmd, _pmd;
2353 pte_t *pte;
2354 pgtable_t pgtable;
2355 struct page *new_page;
2356 spinlock_t *pmd_ptl, *pte_ptl;
2357 int isolated;
2358 unsigned long hstart, hend;
2359 unsigned long mmun_start; /* For mmu_notifiers */
2360 unsigned long mmun_end; /* For mmu_notifiers */
2361
2362 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2363
2364 /* release the mmap_sem read lock. */
2365 new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
2366 if (!new_page)
2367 return;
2368
2369 if (unlikely(mem_cgroup_charge_anon(new_page, mm, GFP_KERNEL)))
2370 return;
2371
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast, later handled by the ptep_clear_flush, and the VM,
	 * handled by the anon_vma lock + PG_lock.
	 */
2377 down_write(&mm->mmap_sem);
2378 if (unlikely(khugepaged_test_exit(mm)))
2379 goto out;
2380
2381 vma = find_vma(mm, address);
2382 if (!vma)
2383 goto out;
2384 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2385 hend = vma->vm_end & HPAGE_PMD_MASK;
2386 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2387 goto out;
2388 if (!hugepage_vma_check(vma))
2389 goto out;
2390 pmd = mm_find_pmd(mm, address);
2391 if (!pmd)
2392 goto out;
2393 if (pmd_trans_huge(*pmd))
2394 goto out;
2395
2396 anon_vma_lock_write(vma->anon_vma);
2397
2398 pte = pte_offset_map(pmd, address);
2399 pte_ptl = pte_lockptr(mm, pmd);
2400
2401 mmun_start = address;
2402 mmun_end = address + HPAGE_PMD_SIZE;
2403 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2404 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address,
	 * to avoid the risk of CPU bugs in that area.
	 */
2411 _pmd = pmdp_clear_flush(vma, address, pmd);
2412 spin_unlock(pmd_ptl);
2413 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2414
2415 spin_lock(pte_ptl);
2416 isolated = __collapse_huge_page_isolate(vma, address, pte);
2417 spin_unlock(pte_ptl);
2418
2419 if (unlikely(!isolated)) {
2420 pte_unmap(pte);
2421 spin_lock(pmd_ptl);
2422 BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
2428 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2429 spin_unlock(pmd_ptl);
2430 anon_vma_unlock_write(vma->anon_vma);
2431 goto out;
2432 }
2433
2434 /*
2435 * All pages are isolated and locked so anon_vma rmap
2436 * can't run anymore.
2437 */
2438 anon_vma_unlock_write(vma->anon_vma);
2439
2440 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2441 pte_unmap(pte);
2442 __SetPageUptodate(new_page);
2443 pgtable = pmd_pgtable(_pmd);
2444
2445 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2446 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2447
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
2453 smp_wmb();
2454
2455 spin_lock(pmd_ptl);
2456 BUG_ON(!pmd_none(*pmd));
2457 page_add_new_anon_rmap(new_page, vma, address);
2458 pgtable_trans_huge_deposit(mm, pmd, pgtable);
2459 set_pmd_at(mm, address, pmd, _pmd);
2460 update_mmu_cache_pmd(vma, address, pmd);
2461 spin_unlock(pmd_ptl);
2462
2463 *hpage = NULL;
2464
2465 khugepaged_pages_collapsed++;
2466out_up_write:
2467 up_write(&mm->mmap_sem);
2468 return;
2469
2470out:
2471 mem_cgroup_uncharge_page(new_page);
2472 goto out_up_write;
2473}
2474
2475static int khugepaged_scan_pmd(struct mm_struct *mm,
2476 struct vm_area_struct *vma,
2477 unsigned long address,
2478 struct page **hpage)
2479{
2480 pmd_t *pmd;
2481 pte_t *pte, *_pte;
2482 int ret = 0, referenced = 0, none = 0;
2483 struct page *page;
2484 unsigned long _address;
2485 spinlock_t *ptl;
2486 int node = NUMA_NO_NODE;
2487
2488 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2489
2490 pmd = mm_find_pmd(mm, address);
2491 if (!pmd)
2492 goto out;
2493 if (pmd_trans_huge(*pmd))
2494 goto out;
2495
2496 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2497 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2498 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2499 _pte++, _address += PAGE_SIZE) {
2500 pte_t pteval = *_pte;
2501 if (pte_none(pteval)) {
2502 if (++none <= khugepaged_max_ptes_none)
2503 continue;
2504 else
2505 goto out_unmap;
2506 }
2507 if (!pte_present(pteval) || !pte_write(pteval))
2508 goto out_unmap;
2509 page = vm_normal_page(vma, _address, pteval);
2510 if (unlikely(!page))
2511 goto out_unmap;
		/*
		 * Record which node the original page is from and save this
		 * information in khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node with
		 * the maximum hit count.
		 */
2518 node = page_to_nid(page);
2519 khugepaged_node_load[node]++;
2520 VM_BUG_ON_PAGE(PageCompound(page), page);
2521 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2522 goto out_unmap;
2523 /* cannot use mapcount: can't collapse if there's a gup pin */
2524 if (page_count(page) != 1)
2525 goto out_unmap;
2526 if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, _address))
2528 referenced = 1;
2529 }
2530 if (referenced)
2531 ret = 1;
2532out_unmap:
2533 pte_unmap_unlock(pte, ptl);
2534 if (ret) {
2535 node = khugepaged_find_target_node();
2536 /* collapse_huge_page will return with the mmap_sem released */
2537 collapse_huge_page(mm, address, hpage, vma, node);
2538 }
2539out:
2540 return ret;
2541}
2542
2543static void collect_mm_slot(struct mm_slot *mm_slot)
2544{
2545 struct mm_struct *mm = mm_slot->mm;
2546
2547 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2548
2549 if (khugepaged_test_exit(mm)) {
2550 /* free mm_slot */
2551 hash_del(&mm_slot->hash);
2552 list_del(&mm_slot->mm_node);
2553
		/*
		 * Not strictly needed because the mm has already exited.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */
2559
2560 /* khugepaged_mm_lock actually not necessary for the below */
2561 free_mm_slot(mm_slot);
2562 mmdrop(mm);
2563 }
2564}
2565
2566static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2567 struct page **hpage)
2568 __releases(&khugepaged_mm_lock)
2569 __acquires(&khugepaged_mm_lock)
2570{
2571 struct mm_slot *mm_slot;
2572 struct mm_struct *mm;
2573 struct vm_area_struct *vma;
2574 int progress = 0;
2575
2576 VM_BUG_ON(!pages);
2577 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2578
2579 if (khugepaged_scan.mm_slot)
2580 mm_slot = khugepaged_scan.mm_slot;
2581 else {
2582 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2583 struct mm_slot, mm_node);
2584 khugepaged_scan.address = 0;
2585 khugepaged_scan.mm_slot = mm_slot;
2586 }
2587 spin_unlock(&khugepaged_mm_lock);
2588
2589 mm = mm_slot->mm;
2590 down_read(&mm->mmap_sem);
2591 if (unlikely(khugepaged_test_exit(mm)))
2592 vma = NULL;
2593 else
2594 vma = find_vma(mm, khugepaged_scan.address);
2595
2596 progress++;
2597 for (; vma; vma = vma->vm_next) {
2598 unsigned long hstart, hend;
2599
2600 cond_resched();
2601 if (unlikely(khugepaged_test_exit(mm))) {
2602 progress++;
2603 break;
2604 }
2605 if (!hugepage_vma_check(vma)) {
2606skip:
2607 progress++;
2608 continue;
2609 }
2610 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2611 hend = vma->vm_end & HPAGE_PMD_MASK;
2612 if (hstart >= hend)
2613 goto skip;
2614 if (khugepaged_scan.address > hend)
2615 goto skip;
2616 if (khugepaged_scan.address < hstart)
2617 khugepaged_scan.address = hstart;
2618 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2619
2620 while (khugepaged_scan.address < hend) {
2621 int ret;
2622 cond_resched();
2623 if (unlikely(khugepaged_test_exit(mm)))
2624 goto breakouterloop;
2625
2626 VM_BUG_ON(khugepaged_scan.address < hstart ||
2627 khugepaged_scan.address + HPAGE_PMD_SIZE >
2628 hend);
2629 ret = khugepaged_scan_pmd(mm, vma,
2630 khugepaged_scan.address,
2631 hpage);
2632 /* move to next address */
2633 khugepaged_scan.address += HPAGE_PMD_SIZE;
2634 progress += HPAGE_PMD_NR;
2635 if (ret)
2636 /* we released mmap_sem so break loop */
2637 goto breakouterloop_mmap_sem;
2638 if (progress >= pages)
2639 goto breakouterloop;
2640 }
2641 }
2642breakouterloop:
2643 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2644breakouterloop_mmap_sem:
2645
2646 spin_lock(&khugepaged_mm_lock);
2647 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2648 /*
2649 * Release the current mm_slot if this mm is about to die, or
2650 * if we scanned all vmas of this mm.
2651 */
2652 if (khugepaged_test_exit(mm) || !vma) {
2653 /*
2654 * Make sure that if mm_users is reaching zero while
2655 * khugepaged runs here, khugepaged_exit will find
2656 * mm_slot not pointing to the exiting mm.
2657 */
2658 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2659 khugepaged_scan.mm_slot = list_entry(
2660 mm_slot->mm_node.next,
2661 struct mm_slot, mm_node);
2662 khugepaged_scan.address = 0;
2663 } else {
2664 khugepaged_scan.mm_slot = NULL;
2665 khugepaged_full_scans++;
2666 }
2667
2668 collect_mm_slot(mm_slot);
2669 }
2670
2671 return progress;
2672}
2673
2674static int khugepaged_has_work(void)
2675{
2676 return !list_empty(&khugepaged_scan.mm_head) &&
2677 khugepaged_enabled();
2678}
2679
2680static int khugepaged_wait_event(void)
2681{
2682 return !list_empty(&khugepaged_scan.mm_head) ||
2683 kthread_should_stop();
2684}
2685
2686static void khugepaged_do_scan(void)
2687{
2688 struct page *hpage = NULL;
2689 unsigned int progress = 0, pass_through_head = 0;
2690 unsigned int pages = khugepaged_pages_to_scan;
2691 bool wait = true;
2692
	barrier(); /* make sure khugepaged_pages_to_scan is read only once into 'pages' */
2694
2695 while (progress < pages) {
2696 if (!khugepaged_prealloc_page(&hpage, &wait))
2697 break;
2698
2699 cond_resched();
2700
2701 if (unlikely(kthread_should_stop() || freezing(current)))
2702 break;
2703
2704 spin_lock(&khugepaged_mm_lock);
2705 if (!khugepaged_scan.mm_slot)
2706 pass_through_head++;
2707 if (khugepaged_has_work() &&
2708 pass_through_head < 2)
2709 progress += khugepaged_scan_mm_slot(pages - progress,
2710 &hpage);
2711 else
2712 progress = pages;
2713 spin_unlock(&khugepaged_mm_lock);
2714 }
2715
2716 if (!IS_ERR_OR_NULL(hpage))
2717 put_page(hpage);
2718}
2719
2720static void khugepaged_wait_work(void)
2721{
2722 try_to_freeze();
2723
2724 if (khugepaged_has_work()) {
2725 if (!khugepaged_scan_sleep_millisecs)
2726 return;
2727
2728 wait_event_freezable_timeout(khugepaged_wait,
2729 kthread_should_stop(),
2730 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2731 return;
2732 }
2733
2734 if (khugepaged_enabled())
2735 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2736}
2737
2738static int khugepaged(void *none)
2739{
2740 struct mm_slot *mm_slot;
2741
2742 set_freezable();
2743 set_user_nice(current, 19);
2744
2745 while (!kthread_should_stop()) {
2746 khugepaged_do_scan();
2747 khugepaged_wait_work();
2748 }
2749
2750 spin_lock(&khugepaged_mm_lock);
2751 mm_slot = khugepaged_scan.mm_slot;
2752 khugepaged_scan.mm_slot = NULL;
2753 if (mm_slot)
2754 collect_mm_slot(mm_slot);
2755 spin_unlock(&khugepaged_mm_lock);
2756 return 0;
2757}
2758
2759static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2760 unsigned long haddr, pmd_t *pmd)
2761{
2762 struct mm_struct *mm = vma->vm_mm;
2763 pgtable_t pgtable;
2764 pmd_t _pmd;
2765 int i;
2766
2767 pmdp_clear_flush(vma, haddr, pmd);
2768 /* leave pmd empty until pte is filled */
2769
2770 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2771 pmd_populate(mm, &_pmd, pgtable);
2772
2773 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2774 pte_t *pte, entry;
2775 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2776 entry = pte_mkspecial(entry);
2777 pte = pte_offset_map(&_pmd, haddr);
2778 VM_BUG_ON(!pte_none(*pte));
2779 set_pte_at(mm, haddr, pte, entry);
2780 pte_unmap(pte);
2781 }
2782 smp_wmb(); /* make pte visible before pmd */
2783 pmd_populate(mm, pmd, pgtable);
2784 put_huge_zero_page();
2785}
2786
2787void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2788 pmd_t *pmd)
2789{
2790 spinlock_t *ptl;
2791 struct page *page;
2792 struct mm_struct *mm = vma->vm_mm;
2793 unsigned long haddr = address & HPAGE_PMD_MASK;
2794 unsigned long mmun_start; /* For mmu_notifiers */
2795 unsigned long mmun_end; /* For mmu_notifiers */
2796
2797 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
2798
2799 mmun_start = haddr;
2800 mmun_end = haddr + HPAGE_PMD_SIZE;
2801again:
2802 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2803 ptl = pmd_lock(mm, pmd);
2804 if (unlikely(!pmd_trans_huge(*pmd))) {
2805 spin_unlock(ptl);
2806 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2807 return;
2808 }
2809 if (is_huge_zero_pmd(*pmd)) {
2810 __split_huge_zero_page_pmd(vma, haddr, pmd);
2811 spin_unlock(ptl);
2812 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2813 return;
2814 }
2815 page = pmd_page(*pmd);
2816 VM_BUG_ON_PAGE(!page_count(page), page);
2817 get_page(page);
2818 spin_unlock(ptl);
2819 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2820
2821 split_huge_page(page);
2822
2823 put_page(page);
2824
2825 /*
2826 * We don't always have down_write of mmap_sem here: a racing
2827 * do_huge_pmd_wp_page() might have copied-on-write to another
2828 * huge page before our split_huge_page() got the anon_vma lock.
2829 */
2830 if (unlikely(pmd_trans_huge(*pmd)))
2831 goto again;
2832}
2833
2834void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2835 pmd_t *pmd)
2836{
2837 struct vm_area_struct *vma;
2838
2839 vma = find_vma(mm, address);
2840 BUG_ON(vma == NULL);
2841 split_huge_page_pmd(vma, address, pmd);
2842}
2843
2844static void split_huge_page_address(struct mm_struct *mm,
2845 unsigned long address)
2846{
2847 pmd_t *pmd;
2848
2849 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2850
2851 pmd = mm_find_pmd(mm, address);
2852 if (!pmd)
2853 return;
2854 /*
2855 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2856 * materialize from under us.
2857 */
2858 split_huge_page_pmd_mm(mm, address, pmd);
2859}
2860
2861void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2862 unsigned long start,
2863 unsigned long end,
2864 long adjust_next)
2865{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
2871 if (start & ~HPAGE_PMD_MASK &&
2872 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2873 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2874 split_huge_page_address(vma->vm_mm, start);
2875
	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
2881 if (end & ~HPAGE_PMD_MASK &&
2882 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2883 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2884 split_huge_page_address(vma->vm_mm, end);
2885
	/*
	 * If we're also updating vma->vm_next->vm_start: if the new
	 * vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage, check if we need to split a huge pmd.
	 */
2891 if (adjust_next > 0) {
2892 struct vm_area_struct *next = vma->vm_next;
2893 unsigned long nstart = next->vm_start;
2894 nstart += adjust_next << PAGE_SHIFT;
2895 if (nstart & ~HPAGE_PMD_MASK &&
2896 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2897 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2898 split_huge_page_address(next->vm_mm, nstart);
2899 }
2900}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2009 Red Hat, Inc.
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/mm.h>
9#include <linux/sched.h>
10#include <linux/sched/mm.h>
11#include <linux/sched/coredump.h>
12#include <linux/sched/numa_balancing.h>
13#include <linux/highmem.h>
14#include <linux/hugetlb.h>
15#include <linux/mmu_notifier.h>
16#include <linux/rmap.h>
17#include <linux/swap.h>
18#include <linux/shrinker.h>
19#include <linux/mm_inline.h>
20#include <linux/swapops.h>
21#include <linux/backing-dev.h>
22#include <linux/dax.h>
23#include <linux/khugepaged.h>
24#include <linux/freezer.h>
25#include <linux/pfn_t.h>
26#include <linux/mman.h>
27#include <linux/memremap.h>
28#include <linux/pagemap.h>
29#include <linux/debugfs.h>
30#include <linux/migrate.h>
31#include <linux/hashtable.h>
32#include <linux/userfaultfd_k.h>
33#include <linux/page_idle.h>
34#include <linux/shmem_fs.h>
35#include <linux/oom.h>
36#include <linux/numa.h>
37#include <linux/page_owner.h>
38#include <linux/sched/sysctl.h>
39#include <linux/memory-tiers.h>
40
41#include <asm/tlb.h>
42#include <asm/pgalloc.h>
43#include "internal.h"
44#include "swap.h"
45
46#define CREATE_TRACE_POINTS
47#include <trace/events/thp.h>
48
49/*
50 * By default, transparent hugepage support is disabled in order to avoid
51 * risking an increased memory footprint for applications that are not
52 * guaranteed to benefit from it. When transparent hugepage support is
53 * enabled, it is for all mappings, and khugepaged scans all mappings.
54 * Defrag is invoked by khugepaged hugepage allocations and by page faults
55 * for all hugepage allocations.
56 */
57unsigned long transparent_hugepage_flags __read_mostly =
58#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
59 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
60#endif
61#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
62 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
63#endif
64 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
65 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
66 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
67
68static struct shrinker deferred_split_shrinker;
69
70static atomic_t huge_zero_refcount;
71struct page *huge_zero_page __read_mostly;
72unsigned long huge_zero_pfn __read_mostly = ~0UL;
73
74bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
75 bool smaps, bool in_pf, bool enforce_sysfs)
76{
77 if (!vma->vm_mm) /* vdso */
78 return false;
79
	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
85 if ((vm_flags & VM_NOHUGEPAGE) ||
86 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
87 return false;
88 /*
89 * If the hardware/firmware marked hugepage support disabled.
90 */
91 if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
92 return false;
93
94 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
95 if (vma_is_dax(vma))
96 return in_pf;
97
98 /*
99 * Special VMA and hugetlb VMA.
100 * Must be checked after dax since some dax mappings may have
101 * VM_MIXEDMAP set.
102 */
103 if (vm_flags & VM_NO_KHUGEPAGED)
104 return false;
105
106 /*
107 * Check alignment for file vma and size for both file and anon vma.
108 *
109 * Skip the check for page fault. Huge fault does the check in fault
110 * handlers. And this check is not suitable for huge PUD fault.
111 */
112 if (!in_pf &&
113 !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
114 return false;
115
116 /*
117 * Enabled via shmem mount options or sysfs settings.
118 * Must be done before hugepage flags check since shmem has its
119 * own flags.
120 */
121 if (!in_pf && shmem_file(vma->vm_file))
122 return shmem_huge_enabled(vma, !enforce_sysfs);
123
124 /* Enforce sysfs THP requirements as necessary */
125 if (enforce_sysfs &&
126 (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
127 !hugepage_flags_always())))
128 return false;
129
130 /* Only regular file is valid */
131 if (!in_pf && file_thp_enabled(vma))
132 return true;
133
134 if (!vma_is_anonymous(vma))
135 return false;
136
137 if (vma_is_temporary_stack(vma))
138 return false;
139
140 /*
141 * THPeligible bit of smaps should show 1 for proper VMAs even
142 * though anon_vma is not initialized yet.
143 *
	 * Allow page fault since anon_vma may not be initialized until
145 * the first page fault.
146 */
147 if (!vma->anon_vma)
148 return (smaps || in_pf);
149
150 return true;
151}
152
153static bool get_huge_zero_page(void)
154{
155 struct page *zero_page;
156retry:
157 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
158 return true;
159
160 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
161 HPAGE_PMD_ORDER);
162 if (!zero_page) {
163 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
164 return false;
165 }
166 preempt_disable();
167 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
168 preempt_enable();
169 __free_pages(zero_page, compound_order(zero_page));
170 goto retry;
171 }
172 WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
173
	/* We take an additional reference here. It will be put back by the shrinker */
175 atomic_set(&huge_zero_refcount, 2);
176 preempt_enable();
177 count_vm_event(THP_ZERO_PAGE_ALLOC);
178 return true;
179}
180
181static void put_huge_zero_page(void)
182{
183 /*
184 * Counter should never go to zero here. Only shrinker can put
185 * last reference.
186 */
187 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
188}
189
190struct page *mm_get_huge_zero_page(struct mm_struct *mm)
191{
192 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
193 return READ_ONCE(huge_zero_page);
194
195 if (!get_huge_zero_page())
196 return NULL;
197
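	/* Another thread set the bit first; drop our extra reference. */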
198 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
199 put_huge_zero_page();
200
201 return READ_ONCE(huge_zero_page);
202}
203
204void mm_put_huge_zero_page(struct mm_struct *mm)
205{
206 if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
207 put_huge_zero_page();
208}
209
210static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
211 struct shrink_control *sc)
212{
	/* we can free the zero page only if the last reference remains */
214 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
215}
216
217static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
218 struct shrink_control *sc)
219{
220 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
221 struct page *zero_page = xchg(&huge_zero_page, NULL);
222 BUG_ON(zero_page == NULL);
223 WRITE_ONCE(huge_zero_pfn, ~0UL);
224 __free_pages(zero_page, compound_order(zero_page));
225 return HPAGE_PMD_NR;
226 }
227
228 return 0;
229}
230
231static struct shrinker huge_zero_page_shrinker = {
232 .count_objects = shrink_huge_zero_page_count,
233 .scan_objects = shrink_huge_zero_page_scan,
234 .seeks = DEFAULT_SEEKS,
235};
236
237#ifdef CONFIG_SYSFS
238static ssize_t enabled_show(struct kobject *kobj,
239 struct kobj_attribute *attr, char *buf)
240{
241 const char *output;
242
243 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
244 output = "[always] madvise never";
245 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
246 &transparent_hugepage_flags))
247 output = "always [madvise] never";
248 else
249 output = "always madvise [never]";
250
251 return sysfs_emit(buf, "%s\n", output);
252}
253
254static ssize_t enabled_store(struct kobject *kobj,
255 struct kobj_attribute *attr,
256 const char *buf, size_t count)
257{
258 ssize_t ret = count;
259
260 if (sysfs_streq(buf, "always")) {
261 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
262 set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
263 } else if (sysfs_streq(buf, "madvise")) {
264 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
265 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
266 } else if (sysfs_streq(buf, "never")) {
267 clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
268 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
269 } else
270 ret = -EINVAL;
271
272 if (ret > 0) {
273 int err = start_stop_khugepaged();
274 if (err)
275 ret = err;
276 }
277 return ret;
278}
279
280static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
281
282ssize_t single_hugepage_flag_show(struct kobject *kobj,
283 struct kobj_attribute *attr, char *buf,
284 enum transparent_hugepage_flag flag)
285{
286 return sysfs_emit(buf, "%d\n",
287 !!test_bit(flag, &transparent_hugepage_flags));
288}
289
290ssize_t single_hugepage_flag_store(struct kobject *kobj,
291 struct kobj_attribute *attr,
292 const char *buf, size_t count,
293 enum transparent_hugepage_flag flag)
294{
295 unsigned long value;
296 int ret;
297
298 ret = kstrtoul(buf, 10, &value);
299 if (ret < 0)
300 return ret;
301 if (value > 1)
302 return -EINVAL;
303
304 if (value)
305 set_bit(flag, &transparent_hugepage_flags);
306 else
307 clear_bit(flag, &transparent_hugepage_flags);
308
309 return count;
310}
311
312static ssize_t defrag_show(struct kobject *kobj,
313 struct kobj_attribute *attr, char *buf)
314{
315 const char *output;
316
317 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
318 &transparent_hugepage_flags))
319 output = "[always] defer defer+madvise madvise never";
320 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
321 &transparent_hugepage_flags))
322 output = "always [defer] defer+madvise madvise never";
323 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
324 &transparent_hugepage_flags))
325 output = "always defer [defer+madvise] madvise never";
326 else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
327 &transparent_hugepage_flags))
328 output = "always defer defer+madvise [madvise] never";
329 else
330 output = "always defer defer+madvise madvise [never]";
331
332 return sysfs_emit(buf, "%s\n", output);
333}
334
335static ssize_t defrag_store(struct kobject *kobj,
336 struct kobj_attribute *attr,
337 const char *buf, size_t count)
338{
339 if (sysfs_streq(buf, "always")) {
340 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
341 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
342 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
343 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
344 } else if (sysfs_streq(buf, "defer+madvise")) {
345 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
346 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
347 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
348 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
349 } else if (sysfs_streq(buf, "defer")) {
350 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
351 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
352 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
353 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
354 } else if (sysfs_streq(buf, "madvise")) {
355 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
356 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
357 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
358 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
359 } else if (sysfs_streq(buf, "never")) {
360 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
361 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
362 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
363 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
364 } else
365 return -EINVAL;
366
367 return count;
368}
369static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
370
371static ssize_t use_zero_page_show(struct kobject *kobj,
372 struct kobj_attribute *attr, char *buf)
373{
374 return single_hugepage_flag_show(kobj, attr, buf,
375 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
376}
377static ssize_t use_zero_page_store(struct kobject *kobj,
378 struct kobj_attribute *attr, const char *buf, size_t count)
379{
380 return single_hugepage_flag_store(kobj, attr, buf, count,
381 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
382}
383static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
384
385static ssize_t hpage_pmd_size_show(struct kobject *kobj,
386 struct kobj_attribute *attr, char *buf)
387{
388 return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
389}
390static struct kobj_attribute hpage_pmd_size_attr =
391 __ATTR_RO(hpage_pmd_size);
392
393static struct attribute *hugepage_attr[] = {
394 &enabled_attr.attr,
395 &defrag_attr.attr,
396 &use_zero_page_attr.attr,
397 &hpage_pmd_size_attr.attr,
398#ifdef CONFIG_SHMEM
399 &shmem_enabled_attr.attr,
400#endif
401 NULL,
402};
403
404static const struct attribute_group hugepage_attr_group = {
405 .attrs = hugepage_attr,
406};
407
408static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
409{
410 int err;
411
412 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
413 if (unlikely(!*hugepage_kobj)) {
414 pr_err("failed to create transparent hugepage kobject\n");
415 return -ENOMEM;
416 }
417
418 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
419 if (err) {
420 pr_err("failed to register transparent hugepage group\n");
421 goto delete_obj;
422 }
423
424 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
425 if (err) {
426 pr_err("failed to register transparent hugepage group\n");
427 goto remove_hp_group;
428 }
429
430 return 0;
431
432remove_hp_group:
433 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
434delete_obj:
435 kobject_put(*hugepage_kobj);
436 return err;
437}
438
439static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
440{
441 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
442 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
443 kobject_put(hugepage_kobj);
444}
445#else
446static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
447{
448 return 0;
449}
450
451static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
452{
453}
454#endif /* CONFIG_SYSFS */
455
456static int __init hugepage_init(void)
457{
458 int err;
459 struct kobject *hugepage_kobj;
460
461 if (!has_transparent_hugepage()) {
462 /*
463 * Hardware doesn't support hugepages, hence disable
464 * DAX PMD support.
465 */
466 transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
467 return -EINVAL;
468 }
469
470 /*
471 * hugepages can't be allocated by the buddy allocator
472 */
473 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
474 /*
475 * we use page->mapping and page->index in second tail page
476 * as list_head: assuming THP order >= 2
477 */
478 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
479
480 err = hugepage_init_sysfs(&hugepage_kobj);
481 if (err)
482 goto err_sysfs;
483
484 err = khugepaged_init();
485 if (err)
486 goto err_slab;
487
488 err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
489 if (err)
490 goto err_hzp_shrinker;
491 err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
492 if (err)
493 goto err_split_shrinker;
494
495 /*
496 * By default disable transparent hugepages on smaller systems,
497 * where the extra memory used could hurt more than TLB overhead
498 * is likely to save. The admin can still enable it through /sys.
499 */
500 if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
501 transparent_hugepage_flags = 0;
502 return 0;
503 }
504
505 err = start_stop_khugepaged();
506 if (err)
507 goto err_khugepaged;
508
509 return 0;
510err_khugepaged:
511 unregister_shrinker(&deferred_split_shrinker);
512err_split_shrinker:
513 unregister_shrinker(&huge_zero_page_shrinker);
514err_hzp_shrinker:
515 khugepaged_destroy();
516err_slab:
517 hugepage_exit_sysfs(hugepage_kobj);
518err_sysfs:
519 return err;
520}
521subsys_initcall(hugepage_init);
522
523static int __init setup_transparent_hugepage(char *str)
524{
525 int ret = 0;
526 if (!str)
527 goto out;
528 if (!strcmp(str, "always")) {
529 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
530 &transparent_hugepage_flags);
531 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
532 &transparent_hugepage_flags);
533 ret = 1;
534 } else if (!strcmp(str, "madvise")) {
535 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
536 &transparent_hugepage_flags);
537 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
538 &transparent_hugepage_flags);
539 ret = 1;
540 } else if (!strcmp(str, "never")) {
541 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
542 &transparent_hugepage_flags);
543 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
544 &transparent_hugepage_flags);
545 ret = 1;
546 }
547out:
548 if (!ret)
549 pr_warn("transparent_hugepage= cannot parse, ignored\n");
550 return ret;
551}
552__setup("transparent_hugepage=", setup_transparent_hugepage);
553
554pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
555{
556 if (likely(vma->vm_flags & VM_WRITE))
557 pmd = pmd_mkwrite(pmd);
558 return pmd;
559}
560
561#ifdef CONFIG_MEMCG
562static inline struct deferred_split *get_deferred_split_queue(struct page *page)
563{
564 struct mem_cgroup *memcg = page_memcg(compound_head(page));
565 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
566
567 if (memcg)
568 return &memcg->deferred_split_queue;
569 else
570 return &pgdat->deferred_split_queue;
571}
572#else
573static inline struct deferred_split *get_deferred_split_queue(struct page *page)
574{
575 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
576
577 return &pgdat->deferred_split_queue;
578}
579#endif
580
581void prep_transhuge_page(struct page *page)
582{
583 /*
584 * we use page->mapping and page->index in second tail page
585 * as list_head: assuming THP order >= 2
586 */
587
588 INIT_LIST_HEAD(page_deferred_list(page));
589 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
590}
591
592static inline bool is_transparent_hugepage(struct page *page)
593{
594 if (!PageCompound(page))
595 return false;
596
597 page = compound_head(page);
598 return is_huge_zero_page(page) ||
599 page[1].compound_dtor == TRANSHUGE_PAGE_DTOR;
600}
601
602static unsigned long __thp_get_unmapped_area(struct file *filp,
603 unsigned long addr, unsigned long len,
604 loff_t off, unsigned long flags, unsigned long size)
605{
606 loff_t off_end = off + len;
607 loff_t off_align = round_up(off, size);
608 unsigned long len_pad, ret;
609
610 if (off_end <= off_align || (off_end - off_align) < size)
611 return 0;
612
613 len_pad = len + size;
614 if (len_pad < len || (off + len_pad) < off)
615 return 0;
616
617 ret = current->mm->get_unmapped_area(filp, addr, len_pad,
618 off >> PAGE_SHIFT, flags);
619
620 /*
621 * The failure might be due to length padding. The caller will retry
622 * without the padding.
623 */
624 if (IS_ERR_VALUE(ret))
625 return 0;
626
627 /*
628 * Do not try to align to THP boundary if allocation at the address
629 * hint succeeds.
630 */
631 if (ret == addr)
632 return addr;
633
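	/*
	 * Shift ret up so that (ret - off) is a multiple of size, i.e. the
	 * mapping is huge-page aligned relative to the file offset. Worked
	 * example with size = 2MB: off = 0x201000 and a 2MB-aligned ret of
	 * 0x7f0000000000 become ret = 0x7f0000001000, so the aligned file
	 * offset 0x200000 maps at the pmd-aligned address 0x7f0000000000.
	 */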
634 ret += (off - ret) & (size - 1);
635 return ret;
636}
637
638unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
639 unsigned long len, unsigned long pgoff, unsigned long flags)
640{
641 unsigned long ret;
642 loff_t off = (loff_t)pgoff << PAGE_SHIFT;
643
644 ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
645 if (ret)
646 return ret;
647
648 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
649}
650EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
651
652static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
653 struct page *page, gfp_t gfp)
654{
655 struct vm_area_struct *vma = vmf->vma;
656 pgtable_t pgtable;
657 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
658 vm_fault_t ret = 0;
659
660 VM_BUG_ON_PAGE(!PageCompound(page), page);
661
662 if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
663 put_page(page);
664 count_vm_event(THP_FAULT_FALLBACK);
665 count_vm_event(THP_FAULT_FALLBACK_CHARGE);
666 return VM_FAULT_FALLBACK;
667 }
668 cgroup_throttle_swaprate(page, gfp);
669
670 pgtable = pte_alloc_one(vma->vm_mm);
671 if (unlikely(!pgtable)) {
672 ret = VM_FAULT_OOM;
673 goto release;
674 }
675
676 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
677 /*
678 * The memory barrier inside __SetPageUptodate makes sure that
679 * clear_huge_page writes become visible before the set_pmd_at()
680 * write.
681 */
682 __SetPageUptodate(page);
683
684 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
685 if (unlikely(!pmd_none(*vmf->pmd))) {
686 goto unlock_release;
687 } else {
688 pmd_t entry;
689
690 ret = check_stable_address_space(vma->vm_mm);
691 if (ret)
692 goto unlock_release;
693
694 /* Deliver the page fault to userland */
695 if (userfaultfd_missing(vma)) {
696 spin_unlock(vmf->ptl);
697 put_page(page);
698 pte_free(vma->vm_mm, pgtable);
699 ret = handle_userfault(vmf, VM_UFFD_MISSING);
700 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
701 return ret;
702 }
703
704 entry = mk_huge_pmd(page, vma->vm_page_prot);
705 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
706 page_add_new_anon_rmap(page, vma, haddr);
707 lru_cache_add_inactive_or_unevictable(page, vma);
708 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
709 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
710 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
711 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
712 mm_inc_nr_ptes(vma->vm_mm);
713 spin_unlock(vmf->ptl);
714 count_vm_event(THP_FAULT_ALLOC);
715 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
716 }
717
718 return 0;
719unlock_release:
720 spin_unlock(vmf->ptl);
721release:
722 if (pgtable)
723 pte_free(vma->vm_mm, pgtable);
724 put_page(page);
725 return ret;
726
727}
728
729/*
730 * always: directly stall for all thp allocations
731 * defer: wake kswapd and fail if not immediately available
732 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
733 * fail if not immediately available
734 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
735 * available
736 * never: never stall for any thp allocation
737 */
738gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
739{
740 const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);
741
742 /* Always do synchronous compaction */
743 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
744 return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
745
746 /* Kick kcompactd and fail quickly */
747 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
748 return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
749
750 /* Synchronous compaction if madvised, otherwise kick kcompactd */
751 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
752 return GFP_TRANSHUGE_LIGHT |
753 (vma_madvised ? __GFP_DIRECT_RECLAIM :
754 __GFP_KSWAPD_RECLAIM);
755
756 /* Only do synchronous compaction if madvised */
757 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
758 return GFP_TRANSHUGE_LIGHT |
759 (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
760
761 return GFP_TRANSHUGE_LIGHT;
762}
763
764/* Caller must hold page table lock. */
765static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
766 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
767 struct page *zero_page)
768{
769 pmd_t entry;
770 if (!pmd_none(*pmd))
771 return;
772 entry = mk_pmd(zero_page, vma->vm_page_prot);
773 entry = pmd_mkhuge(entry);
774 pgtable_trans_huge_deposit(mm, pmd, pgtable);
775 set_pmd_at(mm, haddr, pmd, entry);
776 mm_inc_nr_ptes(mm);
777}
778
779vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
780{
781 struct vm_area_struct *vma = vmf->vma;
782 gfp_t gfp;
783 struct folio *folio;
784 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
785
786 if (!transhuge_vma_suitable(vma, haddr))
787 return VM_FAULT_FALLBACK;
788 if (unlikely(anon_vma_prepare(vma)))
789 return VM_FAULT_OOM;
790 khugepaged_enter_vma(vma, vma->vm_flags);
791
792 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
793 !mm_forbids_zeropage(vma->vm_mm) &&
794 transparent_hugepage_use_zero_page()) {
795 pgtable_t pgtable;
796 struct page *zero_page;
797 vm_fault_t ret;
798 pgtable = pte_alloc_one(vma->vm_mm);
799 if (unlikely(!pgtable))
800 return VM_FAULT_OOM;
801 zero_page = mm_get_huge_zero_page(vma->vm_mm);
802 if (unlikely(!zero_page)) {
803 pte_free(vma->vm_mm, pgtable);
804 count_vm_event(THP_FAULT_FALLBACK);
805 return VM_FAULT_FALLBACK;
806 }
807 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
808 ret = 0;
809 if (pmd_none(*vmf->pmd)) {
810 ret = check_stable_address_space(vma->vm_mm);
811 if (ret) {
812 spin_unlock(vmf->ptl);
813 pte_free(vma->vm_mm, pgtable);
814 } else if (userfaultfd_missing(vma)) {
815 spin_unlock(vmf->ptl);
816 pte_free(vma->vm_mm, pgtable);
817 ret = handle_userfault(vmf, VM_UFFD_MISSING);
818 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
819 } else {
820 set_huge_zero_page(pgtable, vma->vm_mm, vma,
821 haddr, vmf->pmd, zero_page);
822 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
823 spin_unlock(vmf->ptl);
824 }
825 } else {
826 spin_unlock(vmf->ptl);
827 pte_free(vma->vm_mm, pgtable);
828 }
829 return ret;
830 }
831 gfp = vma_thp_gfp_mask(vma);
832 folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
833 if (unlikely(!folio)) {
834 count_vm_event(THP_FAULT_FALLBACK);
835 return VM_FAULT_FALLBACK;
836 }
837 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
838}
839
840static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
841 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
842 pgtable_t pgtable)
843{
844 struct mm_struct *mm = vma->vm_mm;
845 pmd_t entry;
846 spinlock_t *ptl;
847
848 ptl = pmd_lock(mm, pmd);
849 if (!pmd_none(*pmd)) {
850 if (write) {
851 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
852 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
853 goto out_unlock;
854 }
855 entry = pmd_mkyoung(*pmd);
856 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
857 if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
858 update_mmu_cache_pmd(vma, addr, pmd);
859 }
860
861 goto out_unlock;
862 }
863
864 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
865 if (pfn_t_devmap(pfn))
866 entry = pmd_mkdevmap(entry);
867 if (write) {
868 entry = pmd_mkyoung(pmd_mkdirty(entry));
869 entry = maybe_pmd_mkwrite(entry, vma);
870 }
871
872 if (pgtable) {
873 pgtable_trans_huge_deposit(mm, pmd, pgtable);
874 mm_inc_nr_ptes(mm);
875 pgtable = NULL;
876 }
877
878 set_pmd_at(mm, addr, pmd, entry);
879 update_mmu_cache_pmd(vma, addr, pmd);
880
881out_unlock:
882 spin_unlock(ptl);
883 if (pgtable)
884 pte_free(mm, pgtable);
885}
886
887/**
888 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
889 * @vmf: Structure describing the fault
890 * @pfn: pfn to insert
891 * @pgprot: page protection to use
892 * @write: whether it's a write fault
893 *
894 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
895 * also consult the vmf_insert_mixed_prot() documentation when
896 * @pgprot != @vmf->vma->vm_page_prot.
897 *
898 * Return: vm_fault_t value.
899 */
900vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
901 pgprot_t pgprot, bool write)
902{
903 unsigned long addr = vmf->address & PMD_MASK;
904 struct vm_area_struct *vma = vmf->vma;
905 pgtable_t pgtable = NULL;
906
907 /*
908 * If we had pmd_special, we could avoid all these restrictions,
909 * but we need to be consistent with PTEs and architectures that
910 * can't support a 'special' bit.
911 */
912 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
913 !pfn_t_devmap(pfn));
914 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
915 (VM_PFNMAP|VM_MIXEDMAP));
916 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
917
918 if (addr < vma->vm_start || addr >= vma->vm_end)
919 return VM_FAULT_SIGBUS;
920
921 if (arch_needs_pgtable_deposit()) {
922 pgtable = pte_alloc_one(vma->vm_mm);
923 if (!pgtable)
924 return VM_FAULT_OOM;
925 }
926
927 track_pfn_insert(vma, &pgprot, pfn);
928
929 insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
930 return VM_FAULT_NOPAGE;
931}
932EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
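
/*
 * Usage sketch (hypothetical driver, for illustration only): a DAX-style
 * driver's ->huge_fault() handler typically resolves the pfn backing the
 * PMD-sized range and hands it to the helper above:
 *
 *	static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf,
 *					   enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *
 *		if (pe_size != PE_SIZE_PMD)
 *			return VM_FAULT_FALLBACK;
 *		pfn = phys_to_pfn_t(mydrv_phys_for(vmf), PFN_DEV | PFN_MAP);
 *		return vmf_insert_pfn_pmd_prot(vmf, pfn,
 *					       vmf->vma->vm_page_prot,
 *					       vmf->flags & FAULT_FLAG_WRITE);
 *	}
 *
 * mydrv_phys_for() is a made-up helper; the common vmf_insert_pfn_pmd()
 * wrapper is exactly this call with vma->vm_page_prot.
 */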
933
934#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
935static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
936{
937 if (likely(vma->vm_flags & VM_WRITE))
938 pud = pud_mkwrite(pud);
939 return pud;
940}
941
942static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
943 pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
944{
945 struct mm_struct *mm = vma->vm_mm;
946 pud_t entry;
947 spinlock_t *ptl;
948
949 ptl = pud_lock(mm, pud);
950 if (!pud_none(*pud)) {
951 if (write) {
952 if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
953 WARN_ON_ONCE(!is_huge_zero_pud(*pud));
954 goto out_unlock;
955 }
956 entry = pud_mkyoung(*pud);
957 entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
958 if (pudp_set_access_flags(vma, addr, pud, entry, 1))
959 update_mmu_cache_pud(vma, addr, pud);
960 }
961 goto out_unlock;
962 }
963
964 entry = pud_mkhuge(pfn_t_pud(pfn, prot));
965 if (pfn_t_devmap(pfn))
966 entry = pud_mkdevmap(entry);
967 if (write) {
968 entry = pud_mkyoung(pud_mkdirty(entry));
969 entry = maybe_pud_mkwrite(entry, vma);
970 }
971 set_pud_at(mm, addr, pud, entry);
972 update_mmu_cache_pud(vma, addr, pud);
973
974out_unlock:
975 spin_unlock(ptl);
976}
977
978/**
979 * vmf_insert_pfn_pud_prot - insert a pud size pfn
980 * @vmf: Structure describing the fault
981 * @pfn: pfn to insert
982 * @pgprot: page protection to use
983 * @write: whether it's a write fault
984 *
985 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
986 * also consult the vmf_insert_mixed_prot() documentation when
987 * @pgprot != @vmf->vma->vm_page_prot.
988 *
989 * Return: vm_fault_t value.
990 */
991vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
992 pgprot_t pgprot, bool write)
993{
994 unsigned long addr = vmf->address & PUD_MASK;
995 struct vm_area_struct *vma = vmf->vma;
996
997 /*
998 * If we had pud_special, we could avoid all these restrictions,
999 * but we need to be consistent with PTEs and architectures that
1000 * can't support a 'special' bit.
1001 */
1002 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
1003 !pfn_t_devmap(pfn));
1004 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1005 (VM_PFNMAP|VM_MIXEDMAP));
1006 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1007
1008 if (addr < vma->vm_start || addr >= vma->vm_end)
1009 return VM_FAULT_SIGBUS;
1010
1011 track_pfn_insert(vma, &pgprot, pfn);
1012
1013 insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
1014 return VM_FAULT_NOPAGE;
1015}
1016EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
1017#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1018
1019static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
1020 pmd_t *pmd, bool write)
1021{
1022 pmd_t _pmd;
1023
1024 _pmd = pmd_mkyoung(*pmd);
1025 if (write)
1026 _pmd = pmd_mkdirty(_pmd);
1027 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1028 pmd, _pmd, write))
1029 update_mmu_cache_pmd(vma, addr, pmd);
1030}
1031
1032struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1033 pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
1034{
1035 unsigned long pfn = pmd_pfn(*pmd);
1036 struct mm_struct *mm = vma->vm_mm;
1037 struct page *page;
1038 int ret;
1039
1040 assert_spin_locked(pmd_lockptr(mm, pmd));
1041
1042 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
1043 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
1044 (FOLL_PIN | FOLL_GET)))
1045 return NULL;
1046
1047 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1048 return NULL;
1049
1050 if (pmd_present(*pmd) && pmd_devmap(*pmd))
1051 /* pass */;
1052 else
1053 return NULL;
1054
1055 if (flags & FOLL_TOUCH)
1056 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1057
1058 /*
1059 * device mapped pages can only be returned if the
1060 * caller will manage the page reference count.
1061 */
1062 if (!(flags & (FOLL_GET | FOLL_PIN)))
1063 return ERR_PTR(-EEXIST);
1064
1065 pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
1066 *pgmap = get_dev_pagemap(pfn, *pgmap);
1067 if (!*pgmap)
1068 return ERR_PTR(-EFAULT);
1069 page = pfn_to_page(pfn);
1070 ret = try_grab_page(page, flags);
1071 if (ret)
1072 page = ERR_PTR(ret);
1073
1074 return page;
1075}
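
/*
 * Informal note on the ERR_PTR(-EEXIST) above: a caller that passes
 * neither FOLL_GET nor FOLL_PIN is told "there is a mapping here, but
 * you may not look at the page without taking a reference", because
 * device pages must not be used beyond the lifetime of their
 * dev_pagemap.
 */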
1076
1077int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1078 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
1079 struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1080{
1081 spinlock_t *dst_ptl, *src_ptl;
1082 struct page *src_page;
1083 pmd_t pmd;
1084 pgtable_t pgtable = NULL;
1085 int ret = -ENOMEM;
1086
1087	/* Skip if it can be re-filled on fault */
1088 if (!vma_is_anonymous(dst_vma))
1089 return 0;
1090
1091 pgtable = pte_alloc_one(dst_mm);
1092 if (unlikely(!pgtable))
1093 goto out;
1094
1095 dst_ptl = pmd_lock(dst_mm, dst_pmd);
1096 src_ptl = pmd_lockptr(src_mm, src_pmd);
1097 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1098
1099 ret = -EAGAIN;
1100 pmd = *src_pmd;
1101
1102#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1103 if (unlikely(is_swap_pmd(pmd))) {
1104 swp_entry_t entry = pmd_to_swp_entry(pmd);
1105
1106 VM_BUG_ON(!is_pmd_migration_entry(pmd));
1107 if (!is_readable_migration_entry(entry)) {
1108 entry = make_readable_migration_entry(
1109 swp_offset(entry));
1110 pmd = swp_entry_to_pmd(entry);
1111 if (pmd_swp_soft_dirty(*src_pmd))
1112 pmd = pmd_swp_mksoft_dirty(pmd);
1113 if (pmd_swp_uffd_wp(*src_pmd))
1114 pmd = pmd_swp_mkuffd_wp(pmd);
1115 set_pmd_at(src_mm, addr, src_pmd, pmd);
1116 }
1117 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1118 mm_inc_nr_ptes(dst_mm);
1119 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1120 if (!userfaultfd_wp(dst_vma))
1121 pmd = pmd_swp_clear_uffd_wp(pmd);
1122 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1123 ret = 0;
1124 goto out_unlock;
1125 }
1126#endif
1127
1128 if (unlikely(!pmd_trans_huge(pmd))) {
1129 pte_free(dst_mm, pgtable);
1130 goto out_unlock;
1131 }
1132 /*
1133	 * When the page table lock is held, the huge zero pmd should not be
1134	 * under splitting, since we don't split the page itself, only the pmd
1135	 * into a page table.
1136 */
1137 if (is_huge_zero_pmd(pmd)) {
1138 /*
1139 * get_huge_zero_page() will never allocate a new page here,
1140 * since we already have a zero page to copy. It just takes a
1141 * reference.
1142 */
1143 mm_get_huge_zero_page(dst_mm);
1144 goto out_zero_page;
1145 }
1146
1147 src_page = pmd_page(pmd);
1148 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
1149
1150 get_page(src_page);
1151 if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
1152		/* Page may be pinned: split and retry the fault on PTEs. */
1153 put_page(src_page);
1154 pte_free(dst_mm, pgtable);
1155 spin_unlock(src_ptl);
1156 spin_unlock(dst_ptl);
1157 __split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
1158 return -EAGAIN;
1159 }
1160 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1161out_zero_page:
1162 mm_inc_nr_ptes(dst_mm);
1163 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
1164 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1165 if (!userfaultfd_wp(dst_vma))
1166 pmd = pmd_clear_uffd_wp(pmd);
1167 pmd = pmd_mkold(pmd_wrprotect(pmd));
1168 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
1169
1170 ret = 0;
1171out_unlock:
1172 spin_unlock(src_ptl);
1173 spin_unlock(dst_ptl);
1174out:
1175 return ret;
1176}
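
/*
 * Informal summary of the -EAGAIN path above: when the source THP may be
 * pinned, fork cannot share it copy-on-write, so copy_huge_pmd() splits
 * the source pmd and asks the caller to retry; the copy then proceeds
 * pte by pte, where pinned pages are copied immediately instead of being
 * COW-shared.
 */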
1177
1178#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1179static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
1180 pud_t *pud, bool write)
1181{
1182 pud_t _pud;
1183
1184 _pud = pud_mkyoung(*pud);
1185 if (write)
1186 _pud = pud_mkdirty(_pud);
1187 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
1188 pud, _pud, write))
1189 update_mmu_cache_pud(vma, addr, pud);
1190}
1191
1192struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1193 pud_t *pud, int flags, struct dev_pagemap **pgmap)
1194{
1195 unsigned long pfn = pud_pfn(*pud);
1196 struct mm_struct *mm = vma->vm_mm;
1197 struct page *page;
1198 int ret;
1199
1200 assert_spin_locked(pud_lockptr(mm, pud));
1201
1202 if (flags & FOLL_WRITE && !pud_write(*pud))
1203 return NULL;
1204
1205 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
1206 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
1207 (FOLL_PIN | FOLL_GET)))
1208 return NULL;
1209
1210 if (pud_present(*pud) && pud_devmap(*pud))
1211 /* pass */;
1212 else
1213 return NULL;
1214
1215 if (flags & FOLL_TOUCH)
1216 touch_pud(vma, addr, pud, flags & FOLL_WRITE);
1217
1218 /*
1219 * device mapped pages can only be returned if the
1220 * caller will manage the page reference count.
1221 *
1222 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1223 */
1224 if (!(flags & (FOLL_GET | FOLL_PIN)))
1225 return ERR_PTR(-EEXIST);
1226
1227 pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
1228 *pgmap = get_dev_pagemap(pfn, *pgmap);
1229 if (!*pgmap)
1230 return ERR_PTR(-EFAULT);
1231 page = pfn_to_page(pfn);
1232
1233 ret = try_grab_page(page, flags);
1234 if (ret)
1235 page = ERR_PTR(ret);
1236
1237 return page;
1238}
1239
1240int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
1241 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1242 struct vm_area_struct *vma)
1243{
1244 spinlock_t *dst_ptl, *src_ptl;
1245 pud_t pud;
1246 int ret;
1247
1248 dst_ptl = pud_lock(dst_mm, dst_pud);
1249 src_ptl = pud_lockptr(src_mm, src_pud);
1250 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
1251
1252 ret = -EAGAIN;
1253 pud = *src_pud;
1254 if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
1255 goto out_unlock;
1256
1257 /*
1258	 * When the page table lock is held, the huge zero pud should not be
1259	 * under splitting, since we don't split the page itself, only the pud
1260	 * into a page table.
1261 */
1262 if (is_huge_zero_pud(pud)) {
1263 /* No huge zero pud yet */
1264 }
1265
1266 /*
1267 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
1268 * and split if duplicating fails.
1269 */
1270 pudp_set_wrprotect(src_mm, addr, src_pud);
1271 pud = pud_mkold(pud_wrprotect(pud));
1272 set_pud_at(dst_mm, addr, dst_pud, pud);
1273
1274 ret = 0;
1275out_unlock:
1276 spin_unlock(src_ptl);
1277 spin_unlock(dst_ptl);
1278 return ret;
1279}
1280
1281void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
1282{
1283 bool write = vmf->flags & FAULT_FLAG_WRITE;
1284
1285 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
1286 if (unlikely(!pud_same(*vmf->pud, orig_pud)))
1287 goto unlock;
1288
1289 touch_pud(vmf->vma, vmf->address, vmf->pud, write);
1290unlock:
1291 spin_unlock(vmf->ptl);
1292}
1293#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1294
1295void huge_pmd_set_accessed(struct vm_fault *vmf)
1296{
1297 bool write = vmf->flags & FAULT_FLAG_WRITE;
1298
1299 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1300 if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
1301 goto unlock;
1302
1303 touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
1304
1305unlock:
1306 spin_unlock(vmf->ptl);
1307}
1308
1309vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
1310{
1311 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
1312 struct vm_area_struct *vma = vmf->vma;
1313 struct folio *folio;
1314 struct page *page;
1315 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1316 pmd_t orig_pmd = vmf->orig_pmd;
1317
1318 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
1319 VM_BUG_ON_VMA(!vma->anon_vma, vma);
1320
1321 if (is_huge_zero_pmd(orig_pmd))
1322 goto fallback;
1323
1324 spin_lock(vmf->ptl);
1325
1326 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1327 spin_unlock(vmf->ptl);
1328 return 0;
1329 }
1330
1331 page = pmd_page(orig_pmd);
1332 folio = page_folio(page);
1333 VM_BUG_ON_PAGE(!PageHead(page), page);
1334
1335 /* Early check when only holding the PT lock. */
1336 if (PageAnonExclusive(page))
1337 goto reuse;
1338
1339 if (!folio_trylock(folio)) {
1340 folio_get(folio);
1341 spin_unlock(vmf->ptl);
1342 folio_lock(folio);
1343 spin_lock(vmf->ptl);
1344 if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
1345 spin_unlock(vmf->ptl);
1346 folio_unlock(folio);
1347 folio_put(folio);
1348 return 0;
1349 }
1350 folio_put(folio);
1351 }
1352
1353 /* Recheck after temporarily dropping the PT lock. */
1354 if (PageAnonExclusive(page)) {
1355 folio_unlock(folio);
1356 goto reuse;
1357 }
1358
1359 /*
1360 * See do_wp_page(): we can only reuse the folio exclusively if
1361 * there are no additional references. Note that we always drain
1362 * the LRU pagevecs immediately after adding a THP.
1363 */
1364 if (folio_ref_count(folio) >
1365 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
1366 goto unlock_fallback;
1367 if (folio_test_swapcache(folio))
1368 folio_free_swap(folio);
1369 if (folio_ref_count(folio) == 1) {
1370 pmd_t entry;
1371
1372 page_move_anon_rmap(page, vma);
1373 folio_unlock(folio);
1374reuse:
1375 if (unlikely(unshare)) {
1376 spin_unlock(vmf->ptl);
1377 return 0;
1378 }
1379 entry = pmd_mkyoung(orig_pmd);
1380 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1381 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
1382 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1383 spin_unlock(vmf->ptl);
1384 return 0;
1385 }
1386
1387unlock_fallback:
1388 folio_unlock(folio);
1389 spin_unlock(vmf->ptl);
1390fallback:
1391 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1392 return VM_FAULT_FALLBACK;
1393}
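
/*
 * Worked numbers for the reuse test above (informal): the PMD mapping
 * itself holds one folio reference, and the swap cache, if the folio is
 * in it, holds one more per subpage (512 on x86-64 with 4K pages). So
 * "1 + swapcache * nr_pages" is the highest reference count at which no
 * third party (GUP, isolation, ...) can hold the folio; anything above
 * that forces the split-and-fallback path.
 */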
1394
1395static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
1396 unsigned long addr, pmd_t pmd)
1397{
1398 struct page *page;
1399
1400 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
1401 return false;
1402
1403 /* Don't touch entries that are not even readable (NUMA hinting). */
1404 if (pmd_protnone(pmd))
1405 return false;
1406
1407 /* Do we need write faults for softdirty tracking? */
1408 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1409 return false;
1410
1411 /* Do we need write faults for uffd-wp tracking? */
1412 if (userfaultfd_huge_pmd_wp(vma, pmd))
1413 return false;
1414
1415 if (!(vma->vm_flags & VM_SHARED)) {
1416 /* See can_change_pte_writable(). */
1417 page = vm_normal_page_pmd(vma, addr, pmd);
1418 return page && PageAnon(page) && PageAnonExclusive(page);
1419 }
1420
1421 /* See can_change_pte_writable(). */
1422 return pmd_dirty(pmd);
1423}
1424
1425/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
1426static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1427 struct vm_area_struct *vma,
1428 unsigned int flags)
1429{
1430 /* If the pmd is writable, we can write to the page. */
1431 if (pmd_write(pmd))
1432 return true;
1433
1434 /* Maybe FOLL_FORCE is set to override it? */
1435 if (!(flags & FOLL_FORCE))
1436 return false;
1437
1438 /* But FOLL_FORCE has no effect on shared mappings */
1439 if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
1440 return false;
1441
1442 /* ... or read-only private ones */
1443 if (!(vma->vm_flags & VM_MAYWRITE))
1444 return false;
1445
1446 /* ... or already writable ones that just need to take a write fault */
1447 if (vma->vm_flags & VM_WRITE)
1448 return false;
1449
1450 /*
1451 * See can_change_pte_writable(): we broke COW and could map the page
1452 * writable if we have an exclusive anonymous page ...
1453 */
1454 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
1455 return false;
1456
1457 /* ... and a write-fault isn't required for other reasons. */
1458 if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
1459 return false;
1460 return !userfaultfd_huge_pmd_wp(vma, pmd);
1461}
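
/*
 * Informal example of the rules above: GUP with FOLL_FORCE | FOLL_WRITE
 * on a read-only private anonymous mapping first breaks COW via a write
 * fault; only once the pmd maps an exclusive anonymous page
 * (PageAnonExclusive) and no softdirty/uffd-wp write fault is still
 * required do we let the write go through the unwritable pmd.
 */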
1462
1463struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1464 unsigned long addr,
1465 pmd_t *pmd,
1466 unsigned int flags)
1467{
1468 struct mm_struct *mm = vma->vm_mm;
1469 struct page *page;
1470 int ret;
1471
1472 assert_spin_locked(pmd_lockptr(mm, pmd));
1473
1474 page = pmd_page(*pmd);
1475 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1476
1477 if ((flags & FOLL_WRITE) &&
1478 !can_follow_write_pmd(*pmd, page, vma, flags))
1479 return NULL;
1480
1481 /* Avoid dumping huge zero page */
1482 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1483 return ERR_PTR(-EFAULT);
1484
1485 /* Full NUMA hinting faults to serialise migration in fault paths */
1486 if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
1487 return NULL;
1488
1489 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1490 return ERR_PTR(-EMLINK);
1491
1492 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1493 !PageAnonExclusive(page), page);
1494
1495 ret = try_grab_page(page, flags);
1496 if (ret)
1497 return ERR_PTR(ret);
1498
1499 if (flags & FOLL_TOUCH)
1500 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
1501
1502 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1503 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1504
1505 return page;
1506}
1507
1508/* NUMA hinting page fault entry point for trans huge pmds */
1509vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
1510{
1511 struct vm_area_struct *vma = vmf->vma;
1512 pmd_t oldpmd = vmf->orig_pmd;
1513 pmd_t pmd;
1514 struct page *page;
1515 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1516 int page_nid = NUMA_NO_NODE;
1517 int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
1518 bool migrated = false, writable = false;
1519 int flags = 0;
1520
1521 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1522 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1523 spin_unlock(vmf->ptl);
1524 goto out;
1525 }
1526
1527 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1528
1529 /*
1530 * Detect now whether the PMD could be writable; this information
1531 * is only valid while holding the PT lock.
1532 */
1533 writable = pmd_write(pmd);
1534 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
1535 can_change_pmd_writable(vma, vmf->address, pmd))
1536 writable = true;
1537
1538 page = vm_normal_page_pmd(vma, haddr, pmd);
1539 if (!page)
1540 goto out_map;
1541
1542 /* See similar comment in do_numa_page for explanation */
1543 if (!writable)
1544 flags |= TNF_NO_GROUP;
1545
1546 page_nid = page_to_nid(page);
1547 /*
1548	 * For memory tiering mode, the cpupid of a slow memory page is used
1549	 * to record the page access time, so use the default value.
1550 */
1551 if (node_is_toptier(page_nid))
1552 last_cpupid = page_cpupid_last(page);
1553 target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
1554 &flags);
1555
1556 if (target_nid == NUMA_NO_NODE) {
1557 put_page(page);
1558 goto out_map;
1559 }
1560
1561 spin_unlock(vmf->ptl);
1562 writable = false;
1563
1564 migrated = migrate_misplaced_page(page, vma, target_nid);
1565 if (migrated) {
1566 flags |= TNF_MIGRATED;
1567 page_nid = target_nid;
1568 } else {
1569 flags |= TNF_MIGRATE_FAIL;
1570 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
1571 if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
1572 spin_unlock(vmf->ptl);
1573 goto out;
1574 }
1575 goto out_map;
1576 }
1577
1578out:
1579 if (page_nid != NUMA_NO_NODE)
1580 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
1581 flags);
1582
1583 return 0;
1584
1585out_map:
1586 /* Restore the PMD */
1587 pmd = pmd_modify(oldpmd, vma->vm_page_prot);
1588 pmd = pmd_mkyoung(pmd);
1589 if (writable)
1590 pmd = pmd_mkwrite(pmd);
1591 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
1592 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1593 spin_unlock(vmf->ptl);
1594 goto out;
1595}
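
/*
 * Flow sketch (informal): a NUMA-hinting fault on a huge pmd either
 * migrates the whole 2M page towards the faulting CPU's preferred node
 * via migrate_misplaced_page(), or, if no better node exists or the
 * migration fails, restores the pmd with its original protection
 * (possibly upgraded to writable) and records the event through
 * task_numa_fault() for the balancing heuristics.
 */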
1596
1597/*
1598 * Return true if we do MADV_FREE successfully on entire pmd page.
1599 * Otherwise, return false.
1600 */
1601bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1602 pmd_t *pmd, unsigned long addr, unsigned long next)
1603{
1604 spinlock_t *ptl;
1605 pmd_t orig_pmd;
1606 struct page *page;
1607 struct mm_struct *mm = tlb->mm;
1608 bool ret = false;
1609
1610 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1611
1612 ptl = pmd_trans_huge_lock(pmd, vma);
1613 if (!ptl)
1614 goto out_unlocked;
1615
1616 orig_pmd = *pmd;
1617 if (is_huge_zero_pmd(orig_pmd))
1618 goto out;
1619
1620 if (unlikely(!pmd_present(orig_pmd))) {
1621 VM_BUG_ON(thp_migration_supported() &&
1622 !is_pmd_migration_entry(orig_pmd));
1623 goto out;
1624 }
1625
1626 page = pmd_page(orig_pmd);
1627 /*
1628	 * If other processes are mapping this page, we can't discard
1629	 * it unless they all do MADV_FREE, so let's skip the page.
1630 */
1631 if (total_mapcount(page) != 1)
1632 goto out;
1633
1634 if (!trylock_page(page))
1635 goto out;
1636
1637 /*
1638	 * If the user wants to discard only part of the THP's pages, split it
1639	 * so that MADV_FREE will deactivate just those pages.
1640 */
1641 if (next - addr != HPAGE_PMD_SIZE) {
1642 get_page(page);
1643 spin_unlock(ptl);
1644 split_huge_page(page);
1645 unlock_page(page);
1646 put_page(page);
1647 goto out_unlocked;
1648 }
1649
1650 if (PageDirty(page))
1651 ClearPageDirty(page);
1652 unlock_page(page);
1653
1654 if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
1655 pmdp_invalidate(vma, addr, pmd);
1656 orig_pmd = pmd_mkold(orig_pmd);
1657 orig_pmd = pmd_mkclean(orig_pmd);
1658
1659 set_pmd_at(mm, addr, pmd, orig_pmd);
1660 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1661 }
1662
1663 mark_page_lazyfree(page);
1664 ret = true;
1665out:
1666 spin_unlock(ptl);
1667out_unlocked:
1668 return ret;
1669}
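
/*
 * Userspace view (illustrative only): after
 *
 *	madvise(buf, 2UL << 20, MADV_FREE);
 *
 * on a PMD-mapped anonymous range, the code above clears the dirty and
 * young bits on the pmd, so the whole 2M chunk can be reclaimed lazily
 * unless it is written to again. A range that covers only part of the
 * pmd instead splits the huge page, so just the covered subpages are
 * deactivated.
 */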
1670
1671static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
1672{
1673 pgtable_t pgtable;
1674
1675 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1676 pte_free(mm, pgtable);
1677 mm_dec_nr_ptes(mm);
1678}
1679
1680int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1681 pmd_t *pmd, unsigned long addr)
1682{
1683 pmd_t orig_pmd;
1684 spinlock_t *ptl;
1685
1686 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1687
1688 ptl = __pmd_trans_huge_lock(pmd, vma);
1689 if (!ptl)
1690 return 0;
1691 /*
1692 * For architectures like ppc64 we look at deposited pgtable
1693 * when calling pmdp_huge_get_and_clear. So do the
1694 * pgtable_trans_huge_withdraw after finishing pmdp related
1695 * operations.
1696 */
1697 orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
1698 tlb->fullmm);
1699 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1700 if (vma_is_special_huge(vma)) {
1701 if (arch_needs_pgtable_deposit())
1702 zap_deposited_table(tlb->mm, pmd);
1703 spin_unlock(ptl);
1704 } else if (is_huge_zero_pmd(orig_pmd)) {
1705 zap_deposited_table(tlb->mm, pmd);
1706 spin_unlock(ptl);
1707 } else {
1708 struct page *page = NULL;
1709 int flush_needed = 1;
1710
1711 if (pmd_present(orig_pmd)) {
1712 page = pmd_page(orig_pmd);
1713 page_remove_rmap(page, vma, true);
1714 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1715 VM_BUG_ON_PAGE(!PageHead(page), page);
1716 } else if (thp_migration_supported()) {
1717 swp_entry_t entry;
1718
1719 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
1720 entry = pmd_to_swp_entry(orig_pmd);
1721 page = pfn_swap_entry_to_page(entry);
1722 flush_needed = 0;
1723 } else
1724 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1725
1726 if (PageAnon(page)) {
1727 zap_deposited_table(tlb->mm, pmd);
1728 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1729 } else {
1730 if (arch_needs_pgtable_deposit())
1731 zap_deposited_table(tlb->mm, pmd);
1732 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1733 }
1734
1735 spin_unlock(ptl);
1736 if (flush_needed)
1737 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1738 }
1739 return 1;
1740}
1741
1742#ifndef pmd_move_must_withdraw
1743static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
1744 spinlock_t *old_pmd_ptl,
1745 struct vm_area_struct *vma)
1746{
1747 /*
1748 * With split pmd lock we also need to move preallocated
1749 * PTE page table if new_pmd is on different PMD page table.
1750 *
1751 * We also don't deposit and withdraw tables for file pages.
1752 */
1753 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
1754}
1755#endif
1756
1757static pmd_t move_soft_dirty_pmd(pmd_t pmd)
1758{
1759#ifdef CONFIG_MEM_SOFT_DIRTY
1760 if (unlikely(is_pmd_migration_entry(pmd)))
1761 pmd = pmd_swp_mksoft_dirty(pmd);
1762 else if (pmd_present(pmd))
1763 pmd = pmd_mksoft_dirty(pmd);
1764#endif
1765 return pmd;
1766}
1767
1768bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
1769 unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
1770{
1771 spinlock_t *old_ptl, *new_ptl;
1772 pmd_t pmd;
1773 struct mm_struct *mm = vma->vm_mm;
1774 bool force_flush = false;
1775
1776 /*
1777 * The destination pmd shouldn't be established, free_pgtables()
1778	 * should have released it.
1779 */
1780 if (WARN_ON(!pmd_none(*new_pmd))) {
1781 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1782 return false;
1783 }
1784
1785 /*
1786 * We don't have to worry about the ordering of src and dst
1787 * ptlocks because exclusive mmap_lock prevents deadlock.
1788 */
1789 old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
1790 if (old_ptl) {
1791 new_ptl = pmd_lockptr(mm, new_pmd);
1792 if (new_ptl != old_ptl)
1793 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1794 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1795 if (pmd_present(pmd))
1796 force_flush = true;
1797 VM_BUG_ON(!pmd_none(*new_pmd));
1798
1799 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
1800 pgtable_t pgtable;
1801 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1802 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1803 }
1804 pmd = move_soft_dirty_pmd(pmd);
1805 set_pmd_at(mm, new_addr, new_pmd, pmd);
1806 if (force_flush)
1807 flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
1808 if (new_ptl != old_ptl)
1809 spin_unlock(new_ptl);
1810 spin_unlock(old_ptl);
1811 return true;
1812 }
1813 return false;
1814}
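
/*
 * Context sketch (informal): mremap() ends up here when a whole,
 * suitably aligned pmd can be moved in one go, e.g.
 *
 *	new = mremap(old, 2UL << 20, 2UL << 20,
 *		     MREMAP_MAYMOVE | MREMAP_FIXED, new_hint);
 *
 * with both addresses HPAGE_PMD_SIZE-aligned: the pmd entry is moved as
 * a whole, together with the deposited pte table when required, instead
 * of being split and re-faulted.
 */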
1815
1816/*
1817 * Returns
1818 * - 0 if PMD could not be locked
1819 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1820 * or if prot_numa but THP migration is not supported
1821 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1822 */
1823int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1824 pmd_t *pmd, unsigned long addr, pgprot_t newprot,
1825 unsigned long cp_flags)
1826{
1827 struct mm_struct *mm = vma->vm_mm;
1828 spinlock_t *ptl;
1829 pmd_t oldpmd, entry;
1830 bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
1831 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
1832 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
1833 int ret = 1;
1834
1835 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
1836
1837 if (prot_numa && !thp_migration_supported())
1838 return 1;
1839
1840 ptl = __pmd_trans_huge_lock(pmd, vma);
1841 if (!ptl)
1842 return 0;
1843
1844#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1845 if (is_swap_pmd(*pmd)) {
1846 swp_entry_t entry = pmd_to_swp_entry(*pmd);
1847 struct page *page = pfn_swap_entry_to_page(entry);
1848
1849 VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1850 if (is_writable_migration_entry(entry)) {
1851 pmd_t newpmd;
1852 /*
1853			 * A protection check is difficult, so
1854			 * just be safe and disable write access
1855 */
1856 if (PageAnon(page))
1857 entry = make_readable_exclusive_migration_entry(swp_offset(entry));
1858 else
1859 entry = make_readable_migration_entry(swp_offset(entry));
1860 newpmd = swp_entry_to_pmd(entry);
1861 if (pmd_swp_soft_dirty(*pmd))
1862 newpmd = pmd_swp_mksoft_dirty(newpmd);
1863 if (pmd_swp_uffd_wp(*pmd))
1864 newpmd = pmd_swp_mkuffd_wp(newpmd);
1865 set_pmd_at(mm, addr, pmd, newpmd);
1866 }
1867 goto unlock;
1868 }
1869#endif
1870
1871 if (prot_numa) {
1872 struct page *page;
1873 bool toptier;
1874 /*
1875 * Avoid trapping faults against the zero page. The read-only
1876 * data is likely to be read-cached on the local CPU and
1877 * local/remote hits to the zero page are not interesting.
1878 */
1879 if (is_huge_zero_pmd(*pmd))
1880 goto unlock;
1881
1882 if (pmd_protnone(*pmd))
1883 goto unlock;
1884
1885 page = pmd_page(*pmd);
1886 toptier = node_is_toptier(page_to_nid(page));
1887 /*
1888 * Skip scanning top tier node if normal numa
1889 * balancing is disabled
1890 */
1891 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
1892 toptier)
1893 goto unlock;
1894
1895 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
1896 !toptier)
1897 xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1898 }
1899 /*
1900	 * In the prot_numa case we are under mmap_read_lock(mm). It's critical
1901	 * not to clear the pmd intermittently, to avoid racing with MADV_DONTNEED,
1902	 * which is also under mmap_read_lock(mm):
1903 *
1904 * CPU0: CPU1:
1905 * change_huge_pmd(prot_numa=1)
1906 * pmdp_huge_get_and_clear_notify()
1907 * madvise_dontneed()
1908 * zap_pmd_range()
1909 * pmd_trans_huge(*pmd) == 0 (without ptl)
1910 * // skip the pmd
1911 * set_pmd_at();
1912 * // pmd is re-established
1913 *
1914	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
1915	 * which may break userspace.
1916 *
1917 * pmdp_invalidate_ad() is required to make sure we don't miss
1918 * dirty/young flags set by hardware.
1919 */
1920 oldpmd = pmdp_invalidate_ad(vma, addr, pmd);
1921
1922 entry = pmd_modify(oldpmd, newprot);
1923 if (uffd_wp) {
1924 entry = pmd_wrprotect(entry);
1925 entry = pmd_mkuffd_wp(entry);
1926 } else if (uffd_wp_resolve) {
1927 /*
1928		 * Leave the write bit to be handled by the page
1929		 * fault handler, so that things like COW can be
1930		 * handled properly.
1931 */
1932 entry = pmd_clear_uffd_wp(entry);
1933 }
1934
1935 /* See change_pte_range(). */
1936 if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
1937 can_change_pmd_writable(vma, addr, entry))
1938 entry = pmd_mkwrite(entry);
1939
1940 ret = HPAGE_PMD_NR;
1941 set_pmd_at(mm, addr, pmd, entry);
1942
1943 if (huge_pmd_needs_flush(oldpmd, entry))
1944 tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
1945unlock:
1946 spin_unlock(ptl);
1947 return ret;
1948}
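
/*
 * Caller-side sketch (informal) of the return contract documented above,
 * roughly how mprotect's change_pmd_range() consumes it:
 *
 *	ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags);
 *	if (ret) {
 *		if (ret == HPAGE_PMD_NR)
 *			pages += HPAGE_PMD_NR;	// protections changed
 *		continue;	// huge pmd fully handled, skip pte loop
 *	}
 *	// ret == 0: not a huge pmd any more, fall through to the ptes
 */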
1949
1950/*
1951 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1952 *
1953 * Note that if it returns the page table lock pointer, this routine
1954 * returns without unlocking it; the caller must unlock it.
1955 */
1956spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1957{
1958 spinlock_t *ptl;
1959 ptl = pmd_lock(vma->vm_mm, pmd);
1960 if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1961 pmd_devmap(*pmd)))
1962 return ptl;
1963 spin_unlock(ptl);
1964 return NULL;
1965}
1966
1967/*
1968 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1969 *
1970 * Note that if it returns the page table lock pointer, this routine
1971 * returns without unlocking it; the caller must unlock it.
1972 */
1973spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
1974{
1975 spinlock_t *ptl;
1976
1977 ptl = pud_lock(vma->vm_mm, pud);
1978 if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
1979 return ptl;
1980 spin_unlock(ptl);
1981 return NULL;
1982}
1983
1984#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1985int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
1986 pud_t *pud, unsigned long addr)
1987{
1988 spinlock_t *ptl;
1989
1990 ptl = __pud_trans_huge_lock(pud, vma);
1991 if (!ptl)
1992 return 0;
1993
1994 pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
1995 tlb_remove_pud_tlb_entry(tlb, pud, addr);
1996 if (vma_is_special_huge(vma)) {
1997 spin_unlock(ptl);
1998 /* No zero page support yet */
1999 } else {
2000 /* No support for anonymous PUD pages yet */
2001 BUG();
2002 }
2003 return 1;
2004}
2005
2006static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
2007 unsigned long haddr)
2008{
2009 VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
2010 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2011 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
2012 VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));
2013
2014 count_vm_event(THP_SPLIT_PUD);
2015
2016 pudp_huge_clear_flush_notify(vma, haddr, pud);
2017}
2018
2019void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
2020 unsigned long address)
2021{
2022 spinlock_t *ptl;
2023 struct mmu_notifier_range range;
2024
2025 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2026 address & HPAGE_PUD_MASK,
2027 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2028 mmu_notifier_invalidate_range_start(&range);
2029 ptl = pud_lock(vma->vm_mm, pud);
2030 if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
2031 goto out;
2032 __split_huge_pud_locked(vma, pud, range.start);
2033
2034out:
2035 spin_unlock(ptl);
2036 /*
2037	 * No need to call the mmu_notifier->invalidate_range() callback twice,
2038	 * as the pudp_huge_clear_flush_notify() above already called it.
2039 */
2040 mmu_notifier_invalidate_range_only_end(&range);
2041}
2042#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2043
2044static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2045 unsigned long haddr, pmd_t *pmd)
2046{
2047 struct mm_struct *mm = vma->vm_mm;
2048 pgtable_t pgtable;
2049 pmd_t _pmd;
2050 int i;
2051
2052	 * Leave the pmd empty until the ptes are filled. Note that it is fine to
2053	 * delay notification until mmu_notifier_invalidate_range_end() as we are
2054 * notification until mmu_notifier_invalidate_range_end() as we are
2055 * replacing a zero pmd write protected page with a zero pte write
2056 * protected page.
2057 *
2058 * See Documentation/mm/mmu_notifier.rst
2059 */
2060 pmdp_huge_clear_flush(vma, haddr, pmd);
2061
2062 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2063 pmd_populate(mm, &_pmd, pgtable);
2064
2065 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2066 pte_t *pte, entry;
2067 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2068 entry = pte_mkspecial(entry);
2069 pte = pte_offset_map(&_pmd, haddr);
2070 VM_BUG_ON(!pte_none(*pte));
2071 set_pte_at(mm, haddr, pte, entry);
2072 pte_unmap(pte);
2073 }
2074 smp_wmb(); /* make pte visible before pmd */
2075 pmd_populate(mm, pmd, pgtable);
2076}
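
/*
 * Worked numbers for the loop above (x86-64 defaults, for illustration):
 * HPAGE_PMD_SIZE is 2M and PAGE_SIZE is 4K, so HPAGE_PMD_NR = 512 and
 * the loop installs 512 read-only special zero-page ptes into the
 * pgtable that was deposited when the huge zero pmd was set up, then
 * points the pmd at that table.
 */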
2077
2078static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2079 unsigned long haddr, bool freeze)
2080{
2081 struct mm_struct *mm = vma->vm_mm;
2082 struct page *page;
2083 pgtable_t pgtable;
2084 pmd_t old_pmd, _pmd;
2085 bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
2086 bool anon_exclusive = false, dirty = false;
2087 unsigned long addr;
2088 int i;
2089
2090 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2091 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2092 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2093 VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2094 && !pmd_devmap(*pmd));
2095
2096 count_vm_event(THP_SPLIT_PMD);
2097
2098 if (!vma_is_anonymous(vma)) {
2099 old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2100 /*
2101		 * We are going to unmap this huge page, so
2102		 * just go ahead and zap it.
2103 */
2104 if (arch_needs_pgtable_deposit())
2105 zap_deposited_table(mm, pmd);
2106 if (vma_is_special_huge(vma))
2107 return;
2108 if (unlikely(is_pmd_migration_entry(old_pmd))) {
2109 swp_entry_t entry;
2110
2111 entry = pmd_to_swp_entry(old_pmd);
2112 page = pfn_swap_entry_to_page(entry);
2113 } else {
2114 page = pmd_page(old_pmd);
2115 if (!PageDirty(page) && pmd_dirty(old_pmd))
2116 set_page_dirty(page);
2117 if (!PageReferenced(page) && pmd_young(old_pmd))
2118 SetPageReferenced(page);
2119 page_remove_rmap(page, vma, true);
2120 put_page(page);
2121 }
2122 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2123 return;
2124 }
2125
2126 if (is_huge_zero_pmd(*pmd)) {
2127 /*
2128 * FIXME: Do we want to invalidate secondary mmu by calling
2129 * mmu_notifier_invalidate_range() see comments below inside
2130 * __split_huge_pmd() ?
2131 *
2132		 * We are going from a write-protected huge zero page to write-
2133		 * protected small zero pages, so it does not seem useful to
2134		 * invalidate the secondary mmu at this time.
2135 */
2136 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2137 }
2138
2139 /*
2140 * Up to this point the pmd is present and huge and userland has the
2141 * whole access to the hugepage during the split (which happens in
2142 * place). If we overwrite the pmd with the not-huge version pointing
2143 * to the pte here (which of course we could if all CPUs were bug
2144 * free), userland could trigger a small page size TLB miss on the
2145 * small sized TLB while the hugepage TLB entry is still established in
2146	 * the huge TLB. Some CPUs don't like that.
2147	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
2148	 * 383 on page 105. Intel should be safe but it also warns that it's
2149	 * only safe if the permission and cache attributes of the two entries
2150	 * loaded in the two TLBs are identical (which should be the case here).
2151 * But it is generally safer to never allow small and huge TLB entries
2152 * for the same virtual address to be loaded simultaneously. So instead
2153 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2154 * current pmd notpresent (atomically because here the pmd_trans_huge
2155 * must remain set at all times on the pmd until the split is complete
2156 * for this pmd), then we flush the SMP TLB and finally we write the
2157 * non-huge version of the pmd entry with pmd_populate.
2158 */
2159 old_pmd = pmdp_invalidate(vma, haddr, pmd);
2160
2161 pmd_migration = is_pmd_migration_entry(old_pmd);
2162 if (unlikely(pmd_migration)) {
2163 swp_entry_t entry;
2164
2165 entry = pmd_to_swp_entry(old_pmd);
2166 page = pfn_swap_entry_to_page(entry);
2167 write = is_writable_migration_entry(entry);
2168 if (PageAnon(page))
2169 anon_exclusive = is_readable_exclusive_migration_entry(entry);
2170 young = is_migration_entry_young(entry);
2171 dirty = is_migration_entry_dirty(entry);
2172 soft_dirty = pmd_swp_soft_dirty(old_pmd);
2173 uffd_wp = pmd_swp_uffd_wp(old_pmd);
2174 } else {
2175 page = pmd_page(old_pmd);
2176 if (pmd_dirty(old_pmd)) {
2177 dirty = true;
2178 SetPageDirty(page);
2179 }
2180 write = pmd_write(old_pmd);
2181 young = pmd_young(old_pmd);
2182 soft_dirty = pmd_soft_dirty(old_pmd);
2183 uffd_wp = pmd_uffd_wp(old_pmd);
2184
2185 VM_BUG_ON_PAGE(!page_count(page), page);
2186
2187 /*
2188 * Without "freeze", we'll simply split the PMD, propagating the
2189 * PageAnonExclusive() flag for each PTE by setting it for
2190 * each subpage -- no need to (temporarily) clear.
2191 *
2192 * With "freeze" we want to replace mapped pages by
2193 * migration entries right away. This is only possible if we
2194 * managed to clear PageAnonExclusive() -- see
2195 * set_pmd_migration_entry().
2196 *
2197 * In case we cannot clear PageAnonExclusive(), split the PMD
2198 * only and let try_to_migrate_one() fail later.
2199 *
2200 * See page_try_share_anon_rmap(): invalidate PMD first.
2201 */
2202 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2203 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2204 freeze = false;
2205 if (!freeze)
2206 page_ref_add(page, HPAGE_PMD_NR - 1);
2207 }
2208
2209 /*
2210 * Withdraw the table only after we mark the pmd entry invalid.
2211	 * This is critical for some architectures (Power).
2212 */
2213 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2214 pmd_populate(mm, &_pmd, pgtable);
2215
2216 for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
2217 pte_t entry, *pte;
2218 /*
2219 * Note that NUMA hinting access restrictions are not
2220 * transferred to avoid any possibility of altering
2221 * permissions across VMAs.
2222 */
2223 if (freeze || pmd_migration) {
2224 swp_entry_t swp_entry;
2225 if (write)
2226 swp_entry = make_writable_migration_entry(
2227 page_to_pfn(page + i));
2228 else if (anon_exclusive)
2229 swp_entry = make_readable_exclusive_migration_entry(
2230 page_to_pfn(page + i));
2231 else
2232 swp_entry = make_readable_migration_entry(
2233 page_to_pfn(page + i));
2234 if (young)
2235 swp_entry = make_migration_entry_young(swp_entry);
2236 if (dirty)
2237 swp_entry = make_migration_entry_dirty(swp_entry);
2238 entry = swp_entry_to_pte(swp_entry);
2239 if (soft_dirty)
2240 entry = pte_swp_mksoft_dirty(entry);
2241 if (uffd_wp)
2242 entry = pte_swp_mkuffd_wp(entry);
2243 } else {
2244 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2245 entry = maybe_mkwrite(entry, vma);
2246 if (anon_exclusive)
2247 SetPageAnonExclusive(page + i);
2248 if (!young)
2249 entry = pte_mkold(entry);
2250 /* NOTE: this may set soft-dirty too on some archs */
2251 if (dirty)
2252 entry = pte_mkdirty(entry);
2253 /*
2254 * NOTE: this needs to happen after pte_mkdirty,
2255 * because some archs (sparc64, loongarch) could
2256 * set hw write bit when mkdirty.
2257 */
2258 if (!write)
2259 entry = pte_wrprotect(entry);
2260 if (soft_dirty)
2261 entry = pte_mksoft_dirty(entry);
2262 if (uffd_wp)
2263 entry = pte_mkuffd_wp(entry);
2264 page_add_anon_rmap(page + i, vma, addr, false);
2265 }
2266 pte = pte_offset_map(&_pmd, addr);
2267 BUG_ON(!pte_none(*pte));
2268 set_pte_at(mm, addr, pte, entry);
2269 pte_unmap(pte);
2270 }
2271
2272 if (!pmd_migration)
2273 page_remove_rmap(page, vma, true);
2274 if (freeze)
2275 put_page(page);
2276
2277 smp_wmb(); /* make pte visible before pmd */
2278 pmd_populate(mm, pmd, pgtable);
2279}
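
/*
 * Informal summary of the function above: for each of the HPAGE_PMD_NR
 * subpages we manufacture either a migration swap pte (freeze/migration
 * case) or a regular present pte, carrying over the
 * write/exclusive/young/dirty/soft-dirty/uffd-wp state of the old pmd,
 * so that apart from the page table shape the split is invisible to
 * userspace.
 */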
2280
2281void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2282 unsigned long address, bool freeze, struct folio *folio)
2283{
2284 spinlock_t *ptl;
2285 struct mmu_notifier_range range;
2286
2287 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2288 address & HPAGE_PMD_MASK,
2289 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2290 mmu_notifier_invalidate_range_start(&range);
2291 ptl = pmd_lock(vma->vm_mm, pmd);
2292
2293 /*
2294	 * If the caller asks to set up a migration entry, we need a folio to
2295	 * check the pmd against; otherwise we can end up replacing the wrong folio.
2296 */
2297 VM_BUG_ON(freeze && !folio);
2298 VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
2299
2300 if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
2301 is_pmd_migration_entry(*pmd)) {
2302 /*
2303		 * It's safe to call pmd_page() when the folio is set, because the
2304		 * pmd is then guaranteed to be present.
2305 */
2306 if (folio && folio != page_folio(pmd_page(*pmd)))
2307 goto out;
2308 __split_huge_pmd_locked(vma, pmd, range.start, freeze);
2309 }
2310
2311out:
2312 spin_unlock(ptl);
2313 /*
2314	 * No need to call the mmu_notifier->invalidate_range() callback twice.
2315	 * There are 3 cases to consider inside __split_huge_pmd_locked():
2316	 * 1) pmdp_huge_clear_flush_notify() obviously calls invalidate_range()
2317	 * 2) __split_huge_zero_page_pmd() maps the read-only zero page, and any
2318	 *    write fault will trigger a flush_notify before pointing to a new
2319	 *    page (it is fine if the secondary mmu keeps pointing to the old
2320	 *    zero page in the meantime)
2321	 * 3) a huge pmd is split into ptes pointing to the same page. No need
2322	 *    to invalidate the secondary tlb entries, they are all still valid,
2323	 *    and any further change to an individual pte will notify. So no
2324	 *    need to call mmu_notifier->invalidate_range()
2325 */
2326 mmu_notifier_invalidate_range_only_end(&range);
2327}
2328
2329void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2330 bool freeze, struct folio *folio)
2331{
2332 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
2333
2334 if (!pmd)
2335 return;
2336
2337 __split_huge_pmd(vma, pmd, address, freeze, folio);
2338}
2339
2340static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
2341{
2342 /*
2343 * If the new address isn't hpage aligned and it could previously
2344	 * contain a hugepage: check if we need to split a huge pmd.
2345 */
2346 if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
2347 range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
2348 ALIGN(address, HPAGE_PMD_SIZE)))
2349 split_huge_pmd_address(vma, address, false, NULL);
2350}
2351
2352void vma_adjust_trans_huge(struct vm_area_struct *vma,
2353 unsigned long start,
2354 unsigned long end,
2355 long adjust_next)
2356{
2357 /* Check if we need to split start first. */
2358 split_huge_pmd_if_needed(vma, start);
2359
2360 /* Check if we need to split end next. */
2361 split_huge_pmd_if_needed(vma, end);
2362
2363 /*
2364 * If we're also updating the next vma vm_start,
2365 * check if we need to split it.
2366 */
2367 if (adjust_next > 0) {
2368 struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
2369 unsigned long nstart = next->vm_start;
2370 nstart += adjust_next;
2371 split_huge_pmd_if_needed(next, nstart);
2372 }
2373}
2374
2375static void unmap_folio(struct folio *folio)
2376{
2377 enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
2378 TTU_SYNC;
2379
2380 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2381
2382 /*
2383 * Anon pages need migration entries to preserve them, but file
2384 * pages can simply be left unmapped, then faulted back on demand.
2385 * If that is ever changed (perhaps for mlock), update remap_page().
2386 */
2387 if (folio_test_anon(folio))
2388 try_to_migrate(folio, ttu_flags);
2389 else
2390 try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
2391}
2392
2393static void remap_page(struct folio *folio, unsigned long nr)
2394{
2395 int i = 0;
2396
2397 /* If unmap_folio() uses try_to_migrate() on file, remove this check */
2398 if (!folio_test_anon(folio))
2399 return;
2400 for (;;) {
2401 remove_migration_ptes(folio, folio, true);
2402 i += folio_nr_pages(folio);
2403 if (i >= nr)
2404 break;
2405 folio = folio_next(folio);
2406 }
2407}
2408
2409static void lru_add_page_tail(struct page *head, struct page *tail,
2410 struct lruvec *lruvec, struct list_head *list)
2411{
2412 VM_BUG_ON_PAGE(!PageHead(head), head);
2413 VM_BUG_ON_PAGE(PageCompound(tail), head);
2414 VM_BUG_ON_PAGE(PageLRU(tail), head);
2415 lockdep_assert_held(&lruvec->lru_lock);
2416
2417 if (list) {
2418 /* page reclaim is reclaiming a huge page */
2419 VM_WARN_ON(PageLRU(head));
2420 get_page(tail);
2421 list_add_tail(&tail->lru, list);
2422 } else {
2423 /* head is still on lru (and we have it frozen) */
2424 VM_WARN_ON(!PageLRU(head));
2425 if (PageUnevictable(tail))
2426 tail->mlock_count = 0;
2427 else
2428 list_add_tail(&tail->lru, &head->lru);
2429 SetPageLRU(tail);
2430 }
2431}
2432
2433static void __split_huge_page_tail(struct page *head, int tail,
2434 struct lruvec *lruvec, struct list_head *list)
2435{
2436 struct page *page_tail = head + tail;
2437
2438 VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
2439
2440 /*
2441 * Clone page flags before unfreezing refcount.
2442 *
2443	 * A successful get_page_unless_zero() might be followed by a flags
2444	 * change, for example lock_page() setting PG_waiters.
2445 *
2446 * Note that for mapped sub-pages of an anonymous THP,
2447 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
2448 * the migration entry instead from where remap_page() will restore it.
2449 * We can still have PG_anon_exclusive set on effectively unmapped and
2450 * unreferenced sub-pages of an anonymous THP: we can simply drop
2451 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
2452 */
2453 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
2454 page_tail->flags |= (head->flags &
2455 ((1L << PG_referenced) |
2456 (1L << PG_swapbacked) |
2457 (1L << PG_swapcache) |
2458 (1L << PG_mlocked) |
2459 (1L << PG_uptodate) |
2460 (1L << PG_active) |
2461 (1L << PG_workingset) |
2462 (1L << PG_locked) |
2463 (1L << PG_unevictable) |
2464#ifdef CONFIG_ARCH_USES_PG_ARCH_X
2465 (1L << PG_arch_2) |
2466 (1L << PG_arch_3) |
2467#endif
2468 (1L << PG_dirty) |
2469 LRU_GEN_MASK | LRU_REFS_MASK));
2470
2471 /* ->mapping in first and second tail page is replaced by other uses */
2472 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
2473 page_tail);
2474 page_tail->mapping = head->mapping;
2475 page_tail->index = head->index + tail;
2476
2477 /*
2478 * page->private should not be set in tail pages with the exception
2479 * of swap cache pages that store the swp_entry_t in tail pages.
2480 * Fix up and warn once if private is unexpectedly set.
2481 *
2482 * What of 32-bit systems, on which head[1].compound_pincount overlays
2483 * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and
2484 * compound_pincount must be 0 for folio_ref_freeze() to have succeeded.
2485 */
2486 if (!folio_test_swapcache(page_folio(head))) {
2487 VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
2488 page_tail->private = 0;
2489 }
2490
2491 /* Page flags must be visible before we make the page non-compound. */
2492 smp_wmb();
2493
2494 /*
2495 * Clear PageTail before unfreezing page refcount.
2496 *
2497	 * A successful get_page_unless_zero() might be followed by put_page(),
2498	 * which needs a correct compound_head().
2499 */
2500 clear_compound_head(page_tail);
2501
2502 /* Finally unfreeze refcount. Additional reference from page cache. */
2503 page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
2504 PageSwapCache(head)));
2505
2506 if (page_is_young(head))
2507 set_page_young(page_tail);
2508 if (page_is_idle(head))
2509 set_page_idle(page_tail);
2510
2511 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
2512
2513 /*
2514	 * Always add to the tail, because some iterators expect new
2515	 * pages to show up after the currently processed elements, e.g.
2516	 * migrate_pages().
2517 */
2518 lru_add_page_tail(head, page_tail, lruvec, list);
2519}
2520
2521static void __split_huge_page(struct page *page, struct list_head *list,
2522 pgoff_t end)
2523{
2524 struct folio *folio = page_folio(page);
2525 struct page *head = &folio->page;
2526 struct lruvec *lruvec;
2527 struct address_space *swap_cache = NULL;
2528 unsigned long offset = 0;
2529 unsigned int nr = thp_nr_pages(head);
2530 int i;
2531
2532	/* complete memcg work before adding pages to the LRU */
2533 split_page_memcg(head, nr);
2534
2535 if (PageAnon(head) && PageSwapCache(head)) {
2536 swp_entry_t entry = { .val = page_private(head) };
2537
2538 offset = swp_offset(entry);
2539 swap_cache = swap_address_space(entry);
2540 xa_lock(&swap_cache->i_pages);
2541 }
2542
2543 /* lock lru list/PageCompound, ref frozen by page_ref_freeze */
2544 lruvec = folio_lruvec_lock(folio);
2545
2546 ClearPageHasHWPoisoned(head);
2547
2548 for (i = nr - 1; i >= 1; i--) {
2549 __split_huge_page_tail(head, i, lruvec, list);
2550 /* Some pages can be beyond EOF: drop them from page cache */
2551 if (head[i].index >= end) {
2552 struct folio *tail = page_folio(head + i);
2553
2554 if (shmem_mapping(head->mapping))
2555 shmem_uncharge(head->mapping->host, 1);
2556 else if (folio_test_clear_dirty(tail))
2557 folio_account_cleaned(tail,
2558 inode_to_wb(folio->mapping->host));
2559 __filemap_remove_folio(tail, NULL);
2560 folio_put(tail);
2561 } else if (!PageAnon(page)) {
2562 __xa_store(&head->mapping->i_pages, head[i].index,
2563 head + i, 0);
2564 } else if (swap_cache) {
2565 __xa_store(&swap_cache->i_pages, offset + i,
2566 head + i, 0);
2567 }
2568 }
2569
2570 ClearPageCompound(head);
2571 unlock_page_lruvec(lruvec);
2572 /* Caller disabled irqs, so they are still disabled here */
2573
2574 split_page_owner(head, nr);
2575
2576 /* See comment in __split_huge_page_tail() */
2577 if (PageAnon(head)) {
2578 /* Additional pin to swap cache */
2579 if (PageSwapCache(head)) {
2580 page_ref_add(head, 2);
2581 xa_unlock(&swap_cache->i_pages);
2582 } else {
2583 page_ref_inc(head);
2584 }
2585 } else {
2586 /* Additional pin to page cache */
2587 page_ref_add(head, 2);
2588 xa_unlock(&head->mapping->i_pages);
2589 }
2590 local_irq_enable();
2591
2592 remap_page(folio, nr);
2593
2594 if (PageSwapCache(head)) {
2595 swp_entry_t entry = { .val = page_private(head) };
2596
2597 split_swap_cluster(entry);
2598 }
2599
2600 for (i = 0; i < nr; i++) {
2601 struct page *subpage = head + i;
2602 if (subpage == page)
2603 continue;
2604 unlock_page(subpage);
2605
2606 /*
2607		 * Subpages may be freed if there wasn't any mapping,
2608		 * e.g. if add_to_swap() is running on an lru page that
2609		 * had its mapping zapped. Freeing these pages requires
2610		 * taking the lru_lock, so we do the put_page of the
2611		 * tail pages after the split is complete.
2612 */
2613 free_page_and_swap_cache(subpage);
2614 }
2615}
2616
2617/* Racy check whether the huge page can be split */
2618bool can_split_folio(struct folio *folio, int *pextra_pins)
2619{
2620 int extra_pins;
2621
2622 /* Additional pins from page cache */
2623 if (folio_test_anon(folio))
2624 extra_pins = folio_test_swapcache(folio) ?
2625 folio_nr_pages(folio) : 0;
2626 else
2627 extra_pins = folio_nr_pages(folio);
2628 if (pextra_pins)
2629 *pextra_pins = extra_pins;
2630 return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
2631}
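
/*
 * Worked example of the pin accounting above (informal): for an
 * anonymous THP outside the swap cache, extra_pins is 0 and each mapping
 * contributes one to both folio_mapcount() and folio_ref_count(); the
 * "- 1" is the reference our caller is required to hold. The equality
 * therefore holds exactly when nobody else has a reference, which is the
 * only case in which the split can later freeze the folio.
 */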
2632
2633/*
2634 * This function splits a huge page into normal pages. @page can point to any
2635 * subpage of the huge page to split. The split doesn't change the position of @page.
2636 *
2637 * The caller must hold a pin on the @page, otherwise the split fails with -EBUSY.
2638 * The huge page must be locked.
2639 *
2640 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
2641 *
2642 * Both head page and tail pages will inherit mapping, flags, and so on from
2643 * the hugepage.
2644 *
2645 * The GUP pin and PG_locked are transferred to @page. The rest of the
2646 * subpages can be freed if they are not mapped.
2647 *
2648 * Returns 0 if the hugepage is split successfully.
2649 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2650 * us.
2651 */
2652int split_huge_page_to_list(struct page *page, struct list_head *list)
2653{
2654 struct folio *folio = page_folio(page);
2655 struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
2656 XA_STATE(xas, &folio->mapping->i_pages, folio->index);
2657 struct anon_vma *anon_vma = NULL;
2658 struct address_space *mapping = NULL;
2659 int extra_pins, ret;
2660 pgoff_t end;
2661 bool is_hzp;
2662
2663 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
2664 VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
2665
2666 is_hzp = is_huge_zero_page(&folio->page);
2667 VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
2668 if (is_hzp)
2669 return -EBUSY;
2670
2671 if (folio_test_writeback(folio))
2672 return -EBUSY;
2673
2674 if (folio_test_anon(folio)) {
2675 /*
2676 * The caller does not necessarily hold an mmap_lock that would
2677		 * prevent the anon_vma from disappearing, so we first take a
2678 * reference to it and then lock the anon_vma for write. This
2679 * is similar to folio_lock_anon_vma_read except the write lock
2680 * is taken to serialise against parallel split or collapse
2681 * operations.
2682 */
2683 anon_vma = folio_get_anon_vma(folio);
2684 if (!anon_vma) {
2685 ret = -EBUSY;
2686 goto out;
2687 }
2688 end = -1;
2689 mapping = NULL;
2690 anon_vma_lock_write(anon_vma);
2691 } else {
2692 gfp_t gfp;
2693
2694 mapping = folio->mapping;
2695
2696 /* Truncated ? */
2697 if (!mapping) {
2698 ret = -EBUSY;
2699 goto out;
2700 }
2701
2702 gfp = current_gfp_context(mapping_gfp_mask(mapping) &
2703 GFP_RECLAIM_MASK);
2704
2705 if (folio_test_private(folio) &&
2706 !filemap_release_folio(folio, gfp)) {
2707 ret = -EBUSY;
2708 goto out;
2709 }
2710
2711 xas_split_alloc(&xas, folio, folio_order(folio), gfp);
2712 if (xas_error(&xas)) {
2713 ret = xas_error(&xas);
2714 goto out;
2715 }
2716
2717 anon_vma = NULL;
2718 i_mmap_lock_read(mapping);
2719
2720 /*
2721		 * __split_huge_page() may need to trim off pages beyond EOF:
2722 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
2723 * which cannot be nested inside the page tree lock. So note
2724 * end now: i_size itself may be changed at any moment, but
2725 * folio lock is good enough to serialize the trimming.
2726 */
2727 end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2728 if (shmem_mapping(mapping))
2729 end = shmem_fallocend(mapping->host, end);
2730 }
2731
2732 /*
2733	 * Racy check whether we can split the page, done before unmap_folio()
2734	 * splits the PMDs
2735 */
2736 if (!can_split_folio(folio, &extra_pins)) {
2737 ret = -EAGAIN;
2738 goto out_unlock;
2739 }
2740
2741 unmap_folio(folio);
2742
2743 /* block interrupt reentry in xa_lock and spinlock */
2744 local_irq_disable();
2745 if (mapping) {
2746 /*
2747 * Check if the folio is present in page cache.
2748		 * We assume all tails are present too, if the folio is there.
2749 */
2750 xas_lock(&xas);
2751 xas_reset(&xas);
2752 if (xas_load(&xas) != folio)
2753 goto fail;
2754 }
2755
2756 /* Prevent deferred_split_scan() touching ->_refcount */
2757 spin_lock(&ds_queue->split_queue_lock);
2758 if (folio_ref_freeze(folio, 1 + extra_pins)) {
2759 if (!list_empty(page_deferred_list(&folio->page))) {
2760 ds_queue->split_queue_len--;
2761 list_del(page_deferred_list(&folio->page));
2762 }
2763 spin_unlock(&ds_queue->split_queue_lock);
2764 if (mapping) {
2765 int nr = folio_nr_pages(folio);
2766
2767 xas_split(&xas, folio, folio_order(folio));
2768 if (folio_test_swapbacked(folio)) {
2769 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
2770 -nr);
2771 } else {
2772 __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
2773 -nr);
2774 filemap_nr_thps_dec(mapping);
2775 }
2776 }
2777
2778 __split_huge_page(page, list, end);
2779 ret = 0;
2780 } else {
2781 spin_unlock(&ds_queue->split_queue_lock);
2782fail:
2783 if (mapping)
2784 xas_unlock(&xas);
2785 local_irq_enable();
2786 remap_page(folio, folio_nr_pages(folio));
2787 ret = -EAGAIN;
2788 }
2789
2790out_unlock:
2791 if (anon_vma) {
2792 anon_vma_unlock_write(anon_vma);
2793 put_anon_vma(anon_vma);
2794 }
2795 if (mapping)
2796 i_mmap_unlock_read(mapping);
2797out:
2798 xas_destroy(&xas);
2799 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
2800 return ret;
2801}
2802
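/*
 * Illustrative sketch, not part of the original file: the caller pattern
 * that split_huge_page() expects, mirroring what deferred_split_scan()
 * below does -- pin the page, take the page lock, then attempt the split.
 * The helper name try_split_one_page() is hypothetical.
 */
#if 0
static int try_split_one_page(struct page *page)
{
	int ret = -EBUSY;

	/* Hold our own reference so the page cannot be freed under us. */
	if (!get_page_unless_zero(page))
		return ret;
	if (trylock_page(page)) {
		/* split_huge_page() requires a locked, pinned page. */
		ret = split_huge_page(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}
#endif
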
/*
 * Compound-page destructor for THPs: make sure a page that is about to be
 * freed is no longer queued for deferred splitting.
 */
void free_transhuge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		ds_queue->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	free_compound_page(page);
}

void deferred_split_huge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
#endif
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	/*
	 * try_to_unmap() in the page reclaim path might reach here too;
	 * that could corrupt the deferred split queue through a race.
	 * Besides, if page reclaim is already handling the same page, it
	 * is unnecessary to handle it again in the shrinker.
	 *
	 * Check PageSwapCache to determine if the page is being handled by
	 * page reclaim, since THP swap adds the page to the swap cache
	 * before calling try_to_unmap().
	 */
	if (PageSwapCache(page))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			set_shrinker_bit(memcg, page_to_nid(page),
					 deferred_split_shrinker.id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}

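/*
 * Descriptive note, not in the original: queueing is driven from the rmap
 * code -- when part of a THP is unmapped while the rest of it remains
 * mapped, page_remove_rmap() calls deferred_split_huge_page() so that the
 * shrinker below can split the page (freeing the unused subpages) under
 * memory pressure instead of splitting synchronously at unmap time.
 */
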
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take a pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &ds_queue->split_queue) {
		page = list_entry((void *)pos, struct page, deferred_list);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost the race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, deferred_list);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes the page from the list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any page and the queue is
	 * empty. This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
		 SHRINKER_NONSLAB,
};

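/*
 * Illustrative sketch, not part of the original file: the shrinker above
 * is registered once during THP initialisation (hugepage_init() earlier in
 * this file). A minimal sketch, assuming the register_shrinker() variant
 * that takes a debugfs name:
 */
#if 0
static int __init deferred_split_shrinker_register(void)
{
	return register_shrinker(&deferred_split_shrinker,
				 "thp-deferred_split");
}
#endif
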
#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_zone(zone) {
		if (!managed_zone(zone))
			continue;
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			int nr_pages;

			page = pfn_to_online_page(pfn);
			if (!page || !get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			nr_pages = thp_nr_pages(page);
			if (!split_huge_page(page))
				split++;
			pfn += nr_pages - 1;
			unlock_page(page);
next:
			put_page(page);
			cond_resched();
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}

static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
	       is_vm_hugetlb_page(vma);
}

static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from the pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * Always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = vma_lookup(mm, addr);
		struct page *page;

		if (!vma)
			break;

		/* skip special VMAs and hugetlb VMAs */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		if (IS_ERR_OR_NULL(page))
			continue;

		if (!is_transparent_hugepage(page))
			goto next;

		total++;
		if (!can_split_folio(page_folio(page), NULL))
			goto next;

		if (!trylock_page(page))
			goto next;

		if (!split_huge_page(page))
			split++;

		unlock_page(page);
next:
		put_page(page);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}

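/*
 * Usage example, not in the original (pid and addresses are made up): to
 * split the THPs mapped in a virtual address range of a process, write
 * "<pid>,<vaddr_start>,<vaddr_end>" to the debugfs file created below,
 * with the addresses in hex as required by the sscanf() format in
 * split_huge_pages_write():
 *
 *	echo "1234,0x7f0000000000,0x7f0000400000" > \
 *		/sys/kernel/debug/split_huge_pages
 */
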
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct folio *folio = __filemap_get_folio(mapping, index,
						FGP_ENTRY, 0);

		nr_pages = 1;
		if (xa_is_value(folio) || !folio)
			continue;

		if (!folio_test_large(folio))
			goto next;

		total++;
		nr_pages = folio_nr_pages(folio);

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}

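/*
 * Usage example, not in the original (path and offsets are made up): to
 * split the file-backed THPs caching a range of page offsets of a file,
 * write "<path>,<off_start>,<off_end>" to the debugfs file; the leading
 * '/' is what routes the input here from split_huge_pages_write() below:
 *
 *	echo "/mnt/data/bigfile,0x0,0x200" > \
 *		/sys/kernel/debug/split_huge_pages
 */
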
#define MAX_INPUT_BUF_SZ 255

static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* holds pid, start_vaddr, end_vaddr, or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}

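/*
 * Usage note, not in the original: per the "ret == 1 && pid == 1" branch
 * above, writing the single token "1" splits every THP in the system via
 * split_huge_pages_all():
 *
 *	echo 1 > /sys/kernel/debug/split_huge_pages
 */
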
static const struct file_operations split_huge_pages_fops = {
	.owner = THIS_MODULE,
	.write = split_huge_pages_write,
	.llseek = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	bool anon_exclusive;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return 0;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

	/* See page_try_share_anon_rmap(): invalidate PMD first. */
	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
	if (anon_exclusive && page_try_share_anon_rmap(page)) {
		set_pmd_at(mm, address, pvmw->pmd, pmdval);
		return -EBUSY;
	}

	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	if (pmd_young(pmdval))
		entry = make_migration_entry_young(entry);
	if (pmd_dirty(pmdval))
		entry = make_migration_entry_dirty(entry);
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, vma, true);
	put_page(page);
	trace_set_migration_pmd(address, pmd_val(pmdswp));

	return 0;
}

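/*
 * Descriptive note, not in the original: the function above encodes the
 * old PMD's state into the migration entry -- a writable entry preserves
 * write permission, a readable-exclusive entry preserves anon-exclusivity
 * without write permission, and a plain readable entry carries neither;
 * young, dirty, and soft-dirty state is folded into the swap PMD so that
 * remove_migration_pmd() below can restore it on the new page.
 */
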
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
	if (!is_migration_entry_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: pmd_mkdirty() may also set the soft-dirty bit on some archs */
	if (PageDirty(new) && is_migration_entry_dirty(entry))
		pmde = pmd_mkdirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);
	else
		pmde = pmd_wrprotect(pmde);

	if (PageAnon(new)) {
		rmap_t rmap_flags = RMAP_COMPOUND;

		if (!is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		page_add_anon_rmap(new, vma, haddr, rmap_flags);
	} else {
		page_add_file_rmap(new, vma, true);
	}
	VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
	trace_remove_migration_pmd(address, pmd_val(pmde));
}
#endif