1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16#include <linux/migrate.h>
17#include <linux/export.h>
18#include <linux/swap.h>
19#include <linux/swapops.h>
20#include <linux/pagemap.h>
21#include <linux/buffer_head.h>
22#include <linux/mm_inline.h>
23#include <linux/nsproxy.h>
24#include <linux/pagevec.h>
25#include <linux/ksm.h>
26#include <linux/rmap.h>
27#include <linux/topology.h>
28#include <linux/cpu.h>
29#include <linux/cpuset.h>
30#include <linux/writeback.h>
31#include <linux/mempolicy.h>
32#include <linux/vmalloc.h>
33#include <linux/security.h>
34#include <linux/backing-dev.h>
35#include <linux/compaction.h>
36#include <linux/syscalls.h>
37#include <linux/compat.h>
38#include <linux/hugetlb.h>
39#include <linux/hugetlb_cgroup.h>
40#include <linux/gfp.h>
41#include <linux/pagewalk.h>
42#include <linux/pfn_t.h>
43#include <linux/memremap.h>
44#include <linux/userfaultfd_k.h>
45#include <linux/balloon_compaction.h>
46#include <linux/mmu_notifier.h>
47#include <linux/page_idle.h>
48#include <linux/page_owner.h>
49#include <linux/sched/mm.h>
50#include <linux/ptrace.h>
51#include <linux/oom.h>
52
53#include <asm/tlbflush.h>
54
55#define CREATE_TRACE_POINTS
56#include <trace/events/migrate.h>
57
58#include "internal.h"
59
60/*
61 * migrate_prep() needs to be called before we start compiling a list of pages
62 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
63 * undesirable, use migrate_prep_local()
64 */
65int migrate_prep(void)
66{
67 /*
68 * Clear the LRU lists so pages can be isolated.
69 * Note that pages may be moved off the LRU after we have
70 * drained them. Those pages will fail to migrate like other
71 * pages that may be busy.
72 */
73 lru_add_drain_all();
74
75 return 0;
76}
77
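/*
 * Typical calling sequence, roughly (see do_pages_move() below for a real
 * user): migrate_prep(); collect pages with isolate_lru_page() or
 * isolate_huge_page() onto a private list; pass the list to migrate_pages();
 * and finally putback_movable_pages() for anything that could not be moved.
 */
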
78/* Do the necessary work of migrate_prep but not if it involves other CPUs */
79int migrate_prep_local(void)
80{
81 lru_add_drain();
82
83 return 0;
84}
85
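/*
 * Summary of the logic below: isolate a non-LRU movable page in preparation
 * for migration. On success the page is left with an elevated refcount and
 * PG_isolated set, and the driver's ->isolate_page() callback has been run;
 * returns 0 on success, or -EBUSY if the page is being freed, is not
 * movable, is already isolated, or could not be locked.
 */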
86int isolate_movable_page(struct page *page, isolate_mode_t mode)
87{
88 struct address_space *mapping;
89
90 /*
91 * Avoid burning cycles with pages that are still under __free_pages(),
92 * or just got freed under us.
93 *
94 * In case we 'win' a race for a movable page being freed under us and
95 * raise its refcount, preventing __free_pages() from doing its job,
96 * the put_page() at the end of this block will take care of
97 * releasing this page, thus avoiding a nasty leak.
98 */
99 if (unlikely(!get_page_unless_zero(page)))
100 goto out;
101
102 /*
103 * Check PageMovable before taking PG_lock, because the page's owner
104 * assumes that nobody touches the PG_lock of a newly allocated page,
105 * so unconditionally grabbing the lock would break the owner's side.
106 */
107 if (unlikely(!__PageMovable(page)))
108 goto out_putpage;
109 /*
110 * As movable pages are not isolated from LRU lists, concurrent
111 * compaction threads can race against page migration functions
112 * as well as race against the release of a page.
113 *
114 * In order to avoid having an already isolated movable page
115 * being (wrongly) re-isolated while it is under migration,
116 * or to avoid attempting to isolate pages being released,
117 * let's be sure we have the page lock
118 * before proceeding with the movable page isolation steps.
119 */
120 if (unlikely(!trylock_page(page)))
121 goto out_putpage;
122
123 if (!PageMovable(page) || PageIsolated(page))
124 goto out_no_isolated;
125
126 mapping = page_mapping(page);
127 VM_BUG_ON_PAGE(!mapping, page);
128
129 if (!mapping->a_ops->isolate_page(page, mode))
130 goto out_no_isolated;
131
132 /* Driver shouldn't use PG_isolated bit of page->flags */
133 WARN_ON_ONCE(PageIsolated(page));
134 __SetPageIsolated(page);
135 unlock_page(page);
136
137 return 0;
138
139out_no_isolated:
140 unlock_page(page);
141out_putpage:
142 put_page(page);
143out:
144 return -EBUSY;
145}
146
147/* It should be called on a page which is PG_movable */
148void putback_movable_page(struct page *page)
149{
150 struct address_space *mapping;
151
152 VM_BUG_ON_PAGE(!PageLocked(page), page);
153 VM_BUG_ON_PAGE(!PageMovable(page), page);
154 VM_BUG_ON_PAGE(!PageIsolated(page), page);
155
156 mapping = page_mapping(page);
157 mapping->a_ops->putback_page(page);
158 __ClearPageIsolated(page);
159}
160
161/*
162 * Put previously isolated pages back onto the appropriate lists
163 * from where they were once taken off for compaction/migration.
164 *
165 * This function shall be used whenever the isolated pageset has been
166 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
167 * and isolate_huge_page().
168 */
169void putback_movable_pages(struct list_head *l)
170{
171 struct page *page;
172 struct page *page2;
173
174 list_for_each_entry_safe(page, page2, l, lru) {
175 if (unlikely(PageHuge(page))) {
176 putback_active_hugepage(page);
177 continue;
178 }
179 list_del(&page->lru);
180 /*
181 * We isolated a non-LRU movable page, so here we can use
182 * __PageMovable because LRU page's mapping cannot have
183 * PAGE_MAPPING_MOVABLE.
184 */
185 if (unlikely(__PageMovable(page))) {
186 VM_BUG_ON_PAGE(!PageIsolated(page), page);
187 lock_page(page);
188 if (PageMovable(page))
189 putback_movable_page(page);
190 else
191 __ClearPageIsolated(page);
192 unlock_page(page);
193 put_page(page);
194 } else {
195 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
196 page_is_file_lru(page), -thp_nr_pages(page));
197 putback_lru_page(page);
198 }
199 }
200}
201
202/*
203 * Restore a potential migration pte to a working pte entry
204 */
205static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
206 unsigned long addr, void *old)
207{
208 struct page_vma_mapped_walk pvmw = {
209 .page = old,
210 .vma = vma,
211 .address = addr,
212 .flags = PVMW_SYNC | PVMW_MIGRATION,
213 };
214 struct page *new;
215 pte_t pte;
216 swp_entry_t entry;
217
218 VM_BUG_ON_PAGE(PageTail(page), page);
219 while (page_vma_mapped_walk(&pvmw)) {
220 if (PageKsm(page))
221 new = page;
222 else
223 new = page - pvmw.page->index +
224 linear_page_index(vma, pvmw.address);
225
226#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
227 /* PMD-mapped THP migration entry */
228 if (!pvmw.pte) {
229 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
230 remove_migration_pmd(&pvmw, new);
231 continue;
232 }
233#endif
234
235 get_page(new);
236 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
237 if (pte_swp_soft_dirty(*pvmw.pte))
238 pte = pte_mksoft_dirty(pte);
239
240 /*
241 * Recheck VMA as permissions can change since migration started
242 */
243 entry = pte_to_swp_entry(*pvmw.pte);
244 if (is_write_migration_entry(entry))
245 pte = maybe_mkwrite(pte, vma);
246 else if (pte_swp_uffd_wp(*pvmw.pte))
247 pte = pte_mkuffd_wp(pte);
248
249 if (unlikely(is_device_private_page(new))) {
250 entry = make_device_private_entry(new, pte_write(pte));
251 pte = swp_entry_to_pte(entry);
252 if (pte_swp_soft_dirty(*pvmw.pte))
253 pte = pte_swp_mksoft_dirty(pte);
254 if (pte_swp_uffd_wp(*pvmw.pte))
255 pte = pte_swp_mkuffd_wp(pte);
256 }
257
258#ifdef CONFIG_HUGETLB_PAGE
259 if (PageHuge(new)) {
260 pte = pte_mkhuge(pte);
261 pte = arch_make_huge_pte(pte, vma, new, 0);
262 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
263 if (PageAnon(new))
264 hugepage_add_anon_rmap(new, vma, pvmw.address);
265 else
266 page_dup_rmap(new, true);
267 } else
268#endif
269 {
270 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
271
272 if (PageAnon(new))
273 page_add_anon_rmap(new, vma, pvmw.address, false);
274 else
275 page_add_file_rmap(new, false);
276 }
277 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
278 mlock_vma_page(new);
279
280 if (PageTransHuge(page) && PageMlocked(page))
281 clear_page_mlock(page);
282
283 /* No need to invalidate - it was non-present before */
284 update_mmu_cache(vma, pvmw.address, pvmw.pte);
285 }
286
287 return true;
288}
289
290/*
291 * Get rid of all migration entries and replace them by
292 * references to the indicated page.
293 */
294void remove_migration_ptes(struct page *old, struct page *new, bool locked)
295{
296 struct rmap_walk_control rwc = {
297 .rmap_one = remove_migration_pte,
298 .arg = old,
299 };
300
301 if (locked)
302 rmap_walk_locked(new, &rwc);
303 else
304 rmap_walk(new, &rwc);
305}
306
307/*
308 * Something used the pte of a page under migration. We need to
309 * get to the page and wait until migration is finished.
310 * When we return from this function the fault will be retried.
311 */
312void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
313 spinlock_t *ptl)
314{
315 pte_t pte;
316 swp_entry_t entry;
317 struct page *page;
318
319 spin_lock(ptl);
320 pte = *ptep;
321 if (!is_swap_pte(pte))
322 goto out;
323
324 entry = pte_to_swp_entry(pte);
325 if (!is_migration_entry(entry))
326 goto out;
327
328 page = migration_entry_to_page(entry);
329
330 /*
331 * Once the page cache replacement step of migration has started,
332 * page_count is zero; but we must not call put_and_wait_on_page_locked()
333 * without a ref. Use get_page_unless_zero(); just fault again if it fails.
334 */
335 if (!get_page_unless_zero(page))
336 goto out;
337 pte_unmap_unlock(ptep, ptl);
338 put_and_wait_on_page_locked(page);
339 return;
340out:
341 pte_unmap_unlock(ptep, ptl);
342}
343
344void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
345 unsigned long address)
346{
347 spinlock_t *ptl = pte_lockptr(mm, pmd);
348 pte_t *ptep = pte_offset_map(pmd, address);
349 __migration_entry_wait(mm, ptep, ptl);
350}
351
352void migration_entry_wait_huge(struct vm_area_struct *vma,
353 struct mm_struct *mm, pte_t *pte)
354{
355 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
356 __migration_entry_wait(mm, pte, ptl);
357}
358
359#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
360void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
361{
362 spinlock_t *ptl;
363 struct page *page;
364
365 ptl = pmd_lock(mm, pmd);
366 if (!is_pmd_migration_entry(*pmd))
367 goto unlock;
368 page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
369 if (!get_page_unless_zero(page))
370 goto unlock;
371 spin_unlock(ptl);
372 put_and_wait_on_page_locked(page);
373 return;
374unlock:
375 spin_unlock(ptl);
376}
377#endif
378
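/*
 * Worked example (for orientation): an order-0 page-cache page with buffer
 * heads is expected to have 1 (the reference held by the migration caller) +
 * 1 (page cache) + 1 (PagePrivate) = 3 references, matching the counts
 * listed in the comment above migrate_page_move_mapping(). An anonymous page
 * that is not in the swap cache has no mapping and is expected to hold only
 * the single caller reference.
 */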
379static int expected_page_refs(struct address_space *mapping, struct page *page)
380{
381 int expected_count = 1;
382
383 /*
384 * Device private pages have an extra refcount as they are
385 * ZONE_DEVICE pages.
386 */
387 expected_count += is_device_private_page(page);
388 if (mapping)
389 expected_count += thp_nr_pages(page) + page_has_private(page);
390
391 return expected_count;
392}
393
394/*
395 * Replace the page in the mapping.
396 *
397 * The number of remaining references must be:
398 * 1 for anonymous pages without a mapping
399 * 2 for pages with a mapping
400 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
401 */
402int migrate_page_move_mapping(struct address_space *mapping,
403 struct page *newpage, struct page *page, int extra_count)
404{
405 XA_STATE(xas, &mapping->i_pages, page_index(page));
406 struct zone *oldzone, *newzone;
407 int dirty;
408 int expected_count = expected_page_refs(mapping, page) + extra_count;
409
410 if (!mapping) {
411 /* Anonymous page without mapping */
412 if (page_count(page) != expected_count)
413 return -EAGAIN;
414
415 /* No turning back from here */
416 newpage->index = page->index;
417 newpage->mapping = page->mapping;
418 if (PageSwapBacked(page))
419 __SetPageSwapBacked(newpage);
420
421 return MIGRATEPAGE_SUCCESS;
422 }
423
424 oldzone = page_zone(page);
425 newzone = page_zone(newpage);
426
427 xas_lock_irq(&xas);
428 if (page_count(page) != expected_count || xas_load(&xas) != page) {
429 xas_unlock_irq(&xas);
430 return -EAGAIN;
431 }
432
433 if (!page_ref_freeze(page, expected_count)) {
434 xas_unlock_irq(&xas);
435 return -EAGAIN;
436 }
437
438 /*
439 * Now we know that no one else is looking at the page:
440 * no turning back from here.
441 */
442 newpage->index = page->index;
443 newpage->mapping = page->mapping;
444 page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
445 if (PageSwapBacked(page)) {
446 __SetPageSwapBacked(newpage);
447 if (PageSwapCache(page)) {
448 SetPageSwapCache(newpage);
449 set_page_private(newpage, page_private(page));
450 }
451 } else {
452 VM_BUG_ON_PAGE(PageSwapCache(page), page);
453 }
454
455 /* Move dirty while page refs frozen and newpage not yet exposed */
456 dirty = PageDirty(page);
457 if (dirty) {
458 ClearPageDirty(page);
459 SetPageDirty(newpage);
460 }
461
462 xas_store(&xas, newpage);
463 if (PageTransHuge(page)) {
464 int i;
465
466 for (i = 1; i < HPAGE_PMD_NR; i++) {
467 xas_next(&xas);
468 xas_store(&xas, newpage);
469 }
470 }
471
472 /*
473 * Drop cache reference from old page by unfreezing
474 * to one less reference.
475 * We know this isn't the last reference.
476 */
477 page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
478
479 xas_unlock(&xas);
480 /* Leave irq disabled to prevent preemption while updating stats */
481
482 /*
483 * If moved to a different zone then also account
484 * the page for that zone. Other VM counters will be
485 * taken care of when we establish references to the
486 * new page and drop references to the old page.
487 *
488 * Note that anonymous pages are accounted for
489 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
490 * are mapped to swap space.
491 */
492 if (newzone != oldzone) {
493 struct lruvec *old_lruvec, *new_lruvec;
494 struct mem_cgroup *memcg;
495
496 memcg = page_memcg(page);
497 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
498 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
499
500 __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
501 __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
502 if (PageSwapBacked(page) && !PageSwapCache(page)) {
503 __dec_lruvec_state(old_lruvec, NR_SHMEM);
504 __inc_lruvec_state(new_lruvec, NR_SHMEM);
505 }
506 if (dirty && mapping_cap_account_dirty(mapping)) {
507 __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
508 __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
509 __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
510 __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
511 }
512 }
513 local_irq_enable();
514
515 return MIGRATEPAGE_SUCCESS;
516}
517EXPORT_SYMBOL(migrate_page_move_mapping);
518
519/*
520 * The expected number of remaining references is the same as that
521 * of migrate_page_move_mapping().
522 */
523int migrate_huge_page_move_mapping(struct address_space *mapping,
524 struct page *newpage, struct page *page)
525{
526 XA_STATE(xas, &mapping->i_pages, page_index(page));
527 int expected_count;
528
529 xas_lock_irq(&xas);
530 expected_count = 2 + page_has_private(page);
531 if (page_count(page) != expected_count || xas_load(&xas) != page) {
532 xas_unlock_irq(&xas);
533 return -EAGAIN;
534 }
535
536 if (!page_ref_freeze(page, expected_count)) {
537 xas_unlock_irq(&xas);
538 return -EAGAIN;
539 }
540
541 newpage->index = page->index;
542 newpage->mapping = page->mapping;
543
544 get_page(newpage);
545
546 xas_store(&xas, newpage);
547
548 page_ref_unfreeze(page, expected_count - 1);
549
550 xas_unlock_irq(&xas);
551
552 return MIGRATEPAGE_SUCCESS;
553}
554
555/*
556 * Gigantic pages are so large that we do not guarantee that page++ pointer
557 * arithmetic will work across the entire page. We need something more
558 * specialized.
559 */
560static void __copy_gigantic_page(struct page *dst, struct page *src,
561 int nr_pages)
562{
563 int i;
564 struct page *dst_base = dst;
565 struct page *src_base = src;
566
567 for (i = 0; i < nr_pages; ) {
568 cond_resched();
569 copy_highpage(dst, src);
570
571 i++;
572 dst = mem_map_next(dst, dst_base, i);
573 src = mem_map_next(src, src_base, i);
574 }
575}
576
577static void copy_huge_page(struct page *dst, struct page *src)
578{
579 int i;
580 int nr_pages;
581
582 if (PageHuge(src)) {
583 /* hugetlbfs page */
584 struct hstate *h = page_hstate(src);
585 nr_pages = pages_per_huge_page(h);
586
587 if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
588 __copy_gigantic_page(dst, src, nr_pages);
589 return;
590 }
591 } else {
592 /* thp page */
593 BUG_ON(!PageTransHuge(src));
594 nr_pages = thp_nr_pages(src);
595 }
596
597 for (i = 0; i < nr_pages; i++) {
598 cond_resched();
599 copy_highpage(dst + i, src + i);
600 }
601}
602
603/*
604 * Copy the page to its new location
605 */
606void migrate_page_states(struct page *newpage, struct page *page)
607{
608 int cpupid;
609
610 if (PageError(page))
611 SetPageError(newpage);
612 if (PageReferenced(page))
613 SetPageReferenced(newpage);
614 if (PageUptodate(page))
615 SetPageUptodate(newpage);
616 if (TestClearPageActive(page)) {
617 VM_BUG_ON_PAGE(PageUnevictable(page), page);
618 SetPageActive(newpage);
619 } else if (TestClearPageUnevictable(page))
620 SetPageUnevictable(newpage);
621 if (PageWorkingset(page))
622 SetPageWorkingset(newpage);
623 if (PageChecked(page))
624 SetPageChecked(newpage);
625 if (PageMappedToDisk(page))
626 SetPageMappedToDisk(newpage);
627
628 /* Move the dirty bit for pages not handled by migrate_page_move_mapping() */
629 if (PageDirty(page))
630 SetPageDirty(newpage);
631
632 if (page_is_young(page))
633 set_page_young(newpage);
634 if (page_is_idle(page))
635 set_page_idle(newpage);
636
637 /*
638 * Copy NUMA information to the new page, to prevent over-eager
639 * future migrations of this same page.
640 */
641 cpupid = page_cpupid_xchg_last(page, -1);
642 page_cpupid_xchg_last(newpage, cpupid);
643
644 ksm_migrate_page(newpage, page);
645 /*
646 * Please do not reorder this without considering how mm/ksm.c's
647 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
648 */
649 if (PageSwapCache(page))
650 ClearPageSwapCache(page);
651 ClearPagePrivate(page);
652 set_page_private(page, 0);
653
654 /*
655 * If any waiters have accumulated on the new page then
656 * wake them up.
657 */
658 if (PageWriteback(newpage))
659 end_page_writeback(newpage);
660
661 /*
662 * PG_readahead shares the same bit with PG_reclaim. The above
663 * end_page_writeback() may clear PG_readahead mistakenly, so set the
664 * bit after that.
665 */
666 if (PageReadahead(page))
667 SetPageReadahead(newpage);
668
669 copy_page_owner(page, newpage);
670
671 if (!PageHuge(page))
672 mem_cgroup_migrate(page, newpage);
673}
674EXPORT_SYMBOL(migrate_page_states);
675
676void migrate_page_copy(struct page *newpage, struct page *page)
677{
678 if (PageHuge(page) || PageTransHuge(page))
679 copy_huge_page(newpage, page);
680 else
681 copy_highpage(newpage, page);
682
683 migrate_page_states(newpage, page);
684}
685EXPORT_SYMBOL(migrate_page_copy);
686
687/************************************************************
688 * Migration functions
689 ***********************************************************/
690
691/*
692 * Common logic to directly migrate a single LRU page suitable for
693 * pages that do not use PagePrivate/PagePrivate2.
694 *
695 * Pages are locked upon entry and exit.
696 */
697int migrate_page(struct address_space *mapping,
698 struct page *newpage, struct page *page,
699 enum migrate_mode mode)
700{
701 int rc;
702
703 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
704
705 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
706
707 if (rc != MIGRATEPAGE_SUCCESS)
708 return rc;
709
710 if (mode != MIGRATE_SYNC_NO_COPY)
711 migrate_page_copy(newpage, page);
712 else
713 migrate_page_states(newpage, page);
714 return MIGRATEPAGE_SUCCESS;
715}
716EXPORT_SYMBOL(migrate_page);
717
718#ifdef CONFIG_BLOCK
719/* Returns true if all buffers are successfully locked */
720static bool buffer_migrate_lock_buffers(struct buffer_head *head,
721 enum migrate_mode mode)
722{
723 struct buffer_head *bh = head;
724
725 /* Simple case, sync compaction */
726 if (mode != MIGRATE_ASYNC) {
727 do {
728 lock_buffer(bh);
729 bh = bh->b_this_page;
730
731 } while (bh != head);
732
733 return true;
734 }
735
736 /* async case, we cannot block on lock_buffer so use trylock_buffer */
737 do {
738 if (!trylock_buffer(bh)) {
739 /*
740 * We failed to lock the buffer and cannot stall in
741 * async migration. Release the locks taken so far.
742 */
743 struct buffer_head *failed_bh = bh;
744 bh = head;
745 while (bh != failed_bh) {
746 unlock_buffer(bh);
747 bh = bh->b_this_page;
748 }
749 return false;
750 }
751
752 bh = bh->b_this_page;
753 } while (bh != head);
754 return true;
755}
756
757static int __buffer_migrate_page(struct address_space *mapping,
758 struct page *newpage, struct page *page, enum migrate_mode mode,
759 bool check_refs)
760{
761 struct buffer_head *bh, *head;
762 int rc;
763 int expected_count;
764
765 if (!page_has_buffers(page))
766 return migrate_page(mapping, newpage, page, mode);
767
768 /* Check whether page does not have extra refs before we do more work */
769 expected_count = expected_page_refs(mapping, page);
770 if (page_count(page) != expected_count)
771 return -EAGAIN;
772
773 head = page_buffers(page);
774 if (!buffer_migrate_lock_buffers(head, mode))
775 return -EAGAIN;
776
777 if (check_refs) {
778 bool busy;
779 bool invalidated = false;
780
781recheck_buffers:
782 busy = false;
783 spin_lock(&mapping->private_lock);
784 bh = head;
785 do {
786 if (atomic_read(&bh->b_count)) {
787 busy = true;
788 break;
789 }
790 bh = bh->b_this_page;
791 } while (bh != head);
792 if (busy) {
793 if (invalidated) {
794 rc = -EAGAIN;
795 goto unlock_buffers;
796 }
797 spin_unlock(&mapping->private_lock);
798 invalidate_bh_lrus();
799 invalidated = true;
800 goto recheck_buffers;
801 }
802 }
803
804 rc = migrate_page_move_mapping(mapping, newpage, page, 0);
805 if (rc != MIGRATEPAGE_SUCCESS)
806 goto unlock_buffers;
807
808 attach_page_private(newpage, detach_page_private(page));
809
810 bh = head;
811 do {
812 set_bh_page(bh, newpage, bh_offset(bh));
813 bh = bh->b_this_page;
814
815 } while (bh != head);
816
817 if (mode != MIGRATE_SYNC_NO_COPY)
818 migrate_page_copy(newpage, page);
819 else
820 migrate_page_states(newpage, page);
821
822 rc = MIGRATEPAGE_SUCCESS;
823unlock_buffers:
824 if (check_refs)
825 spin_unlock(&mapping->private_lock);
826 bh = head;
827 do {
828 unlock_buffer(bh);
829 bh = bh->b_this_page;
830
831 } while (bh != head);
832
833 return rc;
834}
835
836/*
837 * Migration function for pages with buffers. This function can only be used
838 * if the underlying filesystem guarantees that no other references to "page"
839 * exist. For example attached buffer heads are accessed only under page lock.
840 */
841int buffer_migrate_page(struct address_space *mapping,
842 struct page *newpage, struct page *page, enum migrate_mode mode)
843{
844 return __buffer_migrate_page(mapping, newpage, page, mode, false);
845}
846EXPORT_SYMBOL(buffer_migrate_page);
847
848/*
849 * Same as above except that this variant is more careful and checks that there
850 * are also no buffer head references. This function is the right one for
851 * mappings where buffer heads are directly looked up and referenced (such as
852 * block device mappings).
853 */
854int buffer_migrate_page_norefs(struct address_space *mapping,
855 struct page *newpage, struct page *page, enum migrate_mode mode)
856{
857 return __buffer_migrate_page(mapping, newpage, page, mode, true);
858}
859#endif
860
861/*
862 * Writeback a page to clean the dirty state
863 */
864static int writeout(struct address_space *mapping, struct page *page)
865{
866 struct writeback_control wbc = {
867 .sync_mode = WB_SYNC_NONE,
868 .nr_to_write = 1,
869 .range_start = 0,
870 .range_end = LLONG_MAX,
871 .for_reclaim = 1
872 };
873 int rc;
874
875 if (!mapping->a_ops->writepage)
876 /* No write method for the address space */
877 return -EINVAL;
878
879 if (!clear_page_dirty_for_io(page))
880 /* Someone else already triggered a write */
881 return -EAGAIN;
882
883 /*
884 * A dirty page may imply that the underlying filesystem has
885 * the page on some queue. So the page must be clean for
886 * migration. Writeout may mean we lose the lock and the
887 * page state is no longer what we checked for earlier.
888 * At this point we know that the migration attempt cannot
889 * be successful.
890 */
891 remove_migration_ptes(page, page, false);
892
893 rc = mapping->a_ops->writepage(page, &wbc);
894
895 if (rc != AOP_WRITEPAGE_ACTIVATE)
896 /* unlocked. Relock */
897 lock_page(page);
898
899 return (rc < 0) ? -EIO : -EAGAIN;
900}
901
902/*
903 * Default handling if a filesystem does not provide a migration function.
904 */
905static int fallback_migrate_page(struct address_space *mapping,
906 struct page *newpage, struct page *page, enum migrate_mode mode)
907{
908 if (PageDirty(page)) {
909 /* Only writeback pages in full synchronous migration */
910 switch (mode) {
911 case MIGRATE_SYNC:
912 case MIGRATE_SYNC_NO_COPY:
913 break;
914 default:
915 return -EBUSY;
916 }
917 return writeout(mapping, page);
918 }
919
920 /*
921 * Buffers may be managed in a filesystem specific way.
922 * We must have no buffers or drop them.
923 */
924 if (page_has_private(page) &&
925 !try_to_release_page(page, GFP_KERNEL))
926 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
927
928 return migrate_page(mapping, newpage, page, mode);
929}
930
931/*
932 * Move a page to a newly allocated page.
933 * The page is locked and all ptes have been successfully removed.
934 *
935 * The new page will have replaced the old page if this function
936 * is successful.
937 *
938 * Return value:
939 * < 0 - error code
940 * MIGRATEPAGE_SUCCESS - success
941 */
942static int move_to_new_page(struct page *newpage, struct page *page,
943 enum migrate_mode mode)
944{
945 struct address_space *mapping;
946 int rc = -EAGAIN;
947 bool is_lru = !__PageMovable(page);
948
949 VM_BUG_ON_PAGE(!PageLocked(page), page);
950 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
951
952 mapping = page_mapping(page);
953
954 if (likely(is_lru)) {
955 if (!mapping)
956 rc = migrate_page(mapping, newpage, page, mode);
957 else if (mapping->a_ops->migratepage)
958 /*
959 * Most pages have a mapping and most filesystems
960 * provide a migratepage callback. Anonymous pages
961 * are part of swap space which also has its own
962 * migratepage callback. This is the most common path
963 * for page migration.
964 */
965 rc = mapping->a_ops->migratepage(mapping, newpage,
966 page, mode);
967 else
968 rc = fallback_migrate_page(mapping, newpage,
969 page, mode);
970 } else {
971 /*
972 * In case of non-lru page, it could be released after
973 * isolation step. In that case, we shouldn't try migration.
974 */
975 VM_BUG_ON_PAGE(!PageIsolated(page), page);
976 if (!PageMovable(page)) {
977 rc = MIGRATEPAGE_SUCCESS;
978 __ClearPageIsolated(page);
979 goto out;
980 }
981
982 rc = mapping->a_ops->migratepage(mapping, newpage,
983 page, mode);
984 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
985 !PageIsolated(page));
986 }
987
988 /*
989 * When successful, old pagecache page->mapping must be cleared before
990 * page is freed; but stats require that PageAnon be left as PageAnon.
991 */
992 if (rc == MIGRATEPAGE_SUCCESS) {
993 if (__PageMovable(page)) {
994 VM_BUG_ON_PAGE(!PageIsolated(page), page);
995
996 /*
997 * We clear PG_movable under page_lock so any compactor
998 * cannot try to migrate this page.
999 */
1000 __ClearPageIsolated(page);
1001 }
1002
1003 /*
1004 * An anonymous or movable page->mapping will be cleared by
1005 * free_pages_prepare(), so don't reset it here; keeping it lets
1006 * type checks such as PageAnon() continue to work.
1007 */
1008 if (!PageMappingFlags(page))
1009 page->mapping = NULL;
1010
1011 if (likely(!is_zone_device_page(newpage)))
1012 flush_dcache_page(newpage);
1013
1014 }
1015out:
1016 return rc;
1017}
1018
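/*
 * Core of the per-page migration sequence (a summary of the code below):
 * lock the source page and newpage, replace the source page's mapped ptes
 * with migration entries via try_to_unmap(), move the data and page state
 * with move_to_new_page(), then remove the migration entries with
 * remove_migration_ptes(), pointing them at newpage on success or back at
 * the source page on failure. Non-LRU movable pages skip the unmap step and
 * go straight to move_to_new_page().
 */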
1019static int __unmap_and_move(struct page *page, struct page *newpage,
1020 int force, enum migrate_mode mode)
1021{
1022 int rc = -EAGAIN;
1023 int page_was_mapped = 0;
1024 struct anon_vma *anon_vma = NULL;
1025 bool is_lru = !__PageMovable(page);
1026
1027 if (!trylock_page(page)) {
1028 if (!force || mode == MIGRATE_ASYNC)
1029 goto out;
1030
1031 /*
1032 * It's not safe for direct compaction to call lock_page.
1033 * For example, during page readahead pages are added locked
1034 * to the LRU. Later, when the IO completes the pages are
1035 * marked uptodate and unlocked. However, the queueing
1036 * could be merging multiple pages for one bio (e.g.
1037 * mpage_readahead). If an allocation happens for the
1038 * second or third page, the process can end up locking
1039 * the same page twice and deadlocking. Rather than
1040 * trying to be clever about what pages can be locked,
1041 * avoid the use of lock_page for direct compaction
1042 * altogether.
1043 */
1044 if (current->flags & PF_MEMALLOC)
1045 goto out;
1046
1047 lock_page(page);
1048 }
1049
1050 if (PageWriteback(page)) {
1051 /*
1052 * Only in the case of a full synchronous migration is it
1053 * necessary to wait for PageWriteback. In the async case,
1054 * the retry loop is too short and in the sync-light case,
1055 * the overhead of stalling is too much
1056 */
1057 switch (mode) {
1058 case MIGRATE_SYNC:
1059 case MIGRATE_SYNC_NO_COPY:
1060 break;
1061 default:
1062 rc = -EBUSY;
1063 goto out_unlock;
1064 }
1065 if (!force)
1066 goto out_unlock;
1067 wait_on_page_writeback(page);
1068 }
1069
1070 /*
1071 * By try_to_unmap(), page->mapcount goes down to 0 here. In that case
1072 * we cannot notice if the anon_vma is freed while we migrate the page.
1073 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1074 * of migration. File cache pages are no problem because of page_lock():
1075 * file caches may use writepage() or lock_page() during migration, so
1076 * we only need to care about anon pages here.
1077 *
1078 * Only page_get_anon_vma() understands the subtleties of
1079 * getting a hold on an anon_vma from outside one of its mms.
1080 * But if we cannot get anon_vma, then we won't need it anyway,
1081 * because that implies that the anon page is no longer mapped
1082 * (and cannot be remapped so long as we hold the page lock).
1083 */
1084 if (PageAnon(page) && !PageKsm(page))
1085 anon_vma = page_get_anon_vma(page);
1086
1087 /*
1088 * Block others from accessing the new page when we get around to
1089 * establishing additional references. We are usually the only one
1090 * holding a reference to newpage at this point. We used to have a BUG
1091 * here if trylock_page(newpage) fails, but would like to allow for
1092 * cases where there might be a race with the previous use of newpage.
1093 * This is much like races on refcount of oldpage: just don't BUG().
1094 */
1095 if (unlikely(!trylock_page(newpage)))
1096 goto out_unlock;
1097
1098 if (unlikely(!is_lru)) {
1099 rc = move_to_new_page(newpage, page, mode);
1100 goto out_unlock_both;
1101 }
1102
1103 /*
1104 * Corner case handling:
1105 * 1. When a new swap-cache page is read in, it is added to the LRU
1106 * and treated as swapcache but it has no rmap yet.
1107 * Calling try_to_unmap() against a page->mapping==NULL page will
1108 * trigger a BUG. So handle it here.
1109 * 2. An orphaned page (see truncate_complete_page) might have
1110 * fs-private metadata. The page can be picked up due to memory
1111 * offlining. Everywhere else except page reclaim, the page is
1112 * invisible to the vm, so the page cannot be migrated. So try to
1113 * free the metadata, so the page can be freed.
1114 */
1115 if (!page->mapping) {
1116 VM_BUG_ON_PAGE(PageAnon(page), page);
1117 if (page_has_private(page)) {
1118 try_to_free_buffers(page);
1119 goto out_unlock_both;
1120 }
1121 } else if (page_mapped(page)) {
1122 /* Establish migration ptes */
1123 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1124 page);
1125 try_to_unmap(page,
1126 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
1127 page_was_mapped = 1;
1128 }
1129
1130 if (!page_mapped(page))
1131 rc = move_to_new_page(newpage, page, mode);
1132
1133 if (page_was_mapped)
1134 remove_migration_ptes(page,
1135 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1136
1137out_unlock_both:
1138 unlock_page(newpage);
1139out_unlock:
1140 /* Drop an anon_vma reference if we took one */
1141 if (anon_vma)
1142 put_anon_vma(anon_vma);
1143 unlock_page(page);
1144out:
1145 /*
1146 * If migration was successful, drop our refcount on newpage; this
1147 * will not free the page because the new page's owner has taken its
1148 * own reference. If it is an LRU page, also add it back to the LRU
1149 * list here. Use the old state of the isolated source page to
1150 * determine whether we migrated an LRU page; newpage was already
1151 * unlocked and possibly modified by its owner, so don't rely on its
1152 * state.
1153 */
1154 if (rc == MIGRATEPAGE_SUCCESS) {
1155 if (unlikely(!is_lru))
1156 put_page(newpage);
1157 else
1158 putback_lru_page(newpage);
1159 }
1160
1161 return rc;
1162}
1163
1164/*
1165 * Obtain the lock on page, remove all ptes and migrate the page
1166 * to the newly allocated page in newpage.
1167 */
1168static int unmap_and_move(new_page_t get_new_page,
1169 free_page_t put_new_page,
1170 unsigned long private, struct page *page,
1171 int force, enum migrate_mode mode,
1172 enum migrate_reason reason)
1173{
1174 int rc = MIGRATEPAGE_SUCCESS;
1175 struct page *newpage = NULL;
1176
1177 if (!thp_migration_supported() && PageTransHuge(page))
1178 return -ENOMEM;
1179
1180 if (page_count(page) == 1) {
1181 /* page was freed from under us. So we are done. */
1182 ClearPageActive(page);
1183 ClearPageUnevictable(page);
1184 if (unlikely(__PageMovable(page))) {
1185 lock_page(page);
1186 if (!PageMovable(page))
1187 __ClearPageIsolated(page);
1188 unlock_page(page);
1189 }
1190 goto out;
1191 }
1192
1193 newpage = get_new_page(page, private);
1194 if (!newpage)
1195 return -ENOMEM;
1196
1197 rc = __unmap_and_move(page, newpage, force, mode);
1198 if (rc == MIGRATEPAGE_SUCCESS)
1199 set_page_owner_migrate_reason(newpage, reason);
1200
1201out:
1202 if (rc != -EAGAIN) {
1203 /*
1204 * A page that has been migrated has all references
1205 * removed and will be freed. A page that has not been
1206 * migrated will have kept its references and be restored.
1207 */
1208 list_del(&page->lru);
1209
1210 /*
1211 * Compaction can migrate also non-LRU pages which are
1212 * not accounted to NR_ISOLATED_*. They can be recognized
1213 * as __PageMovable
1214 */
1215 if (likely(!__PageMovable(page)))
1216 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1217 page_is_file_lru(page), -thp_nr_pages(page));
1218 }
1219
1220 /*
1221 * If migration was successful, release the reference grabbed during
1222 * isolation. Otherwise, restore the page to the right list unless
1223 * we want to retry.
1224 */
1225 if (rc == MIGRATEPAGE_SUCCESS) {
1226 put_page(page);
1227 if (reason == MR_MEMORY_FAILURE) {
1228 /*
1229 * Set PG_HWPoison on the just-freed page
1230 * intentionally. Although it's rather weird,
1231 * it's how the HWPoison flag works at the moment.
1232 */
1233 if (set_hwpoison_free_buddy_page(page))
1234 num_poisoned_pages_inc();
1235 }
1236 } else {
1237 if (rc != -EAGAIN) {
1238 if (likely(!__PageMovable(page))) {
1239 putback_lru_page(page);
1240 goto put_new;
1241 }
1242
1243 lock_page(page);
1244 if (PageMovable(page))
1245 putback_movable_page(page);
1246 else
1247 __ClearPageIsolated(page);
1248 unlock_page(page);
1249 put_page(page);
1250 }
1251put_new:
1252 if (put_new_page)
1253 put_new_page(newpage, private);
1254 else
1255 put_page(newpage);
1256 }
1257
1258 return rc;
1259}
1260
1261/*
1262 * Counterpart of unmap_and_move() for hugepage migration.
1263 *
1264 * This function doesn't wait for the completion of hugepage I/O
1265 * because there is no race between I/O and migration for hugepages.
1266 * Note that currently hugepage I/O occurs only in direct I/O
1267 * where no lock is held and PG_writeback is irrelevant, and the
1268 * writeback status of all subpages is counted in the reference
1269 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1270 * under direct I/O, the reference count of the head page is 512 and a bit more.)
1271 * This means that when we try to migrate a hugepage whose subpages are
1272 * doing direct I/O, some references remain after try_to_unmap() and
1273 * hugepage migration fails without data corruption.
1274 *
1275 * There is also no race when direct I/O is issued on a page under migration,
1276 * because then the pte is replaced with a migration swap entry and the direct
1277 * I/O code will wait in the page fault for migration to complete.
1278 */
1279static int unmap_and_move_huge_page(new_page_t get_new_page,
1280 free_page_t put_new_page, unsigned long private,
1281 struct page *hpage, int force,
1282 enum migrate_mode mode, int reason)
1283{
1284 int rc = -EAGAIN;
1285 int page_was_mapped = 0;
1286 struct page *new_hpage;
1287 struct anon_vma *anon_vma = NULL;
1288 struct address_space *mapping = NULL;
1289
1290 /*
1291 * Migratability of hugepages depends on the architecture and the hugepage size.
1292 * This check is necessary because some callers of hugepage migration
1293 * like soft offline and memory hotremove don't walk through page
1294 * tables or check whether the hugepage is pmd-based or not before
1295 * kicking migration.
1296 */
1297 if (!hugepage_migration_supported(page_hstate(hpage))) {
1298 putback_active_hugepage(hpage);
1299 return -ENOSYS;
1300 }
1301
1302 new_hpage = get_new_page(hpage, private);
1303 if (!new_hpage)
1304 return -ENOMEM;
1305
1306 if (!trylock_page(hpage)) {
1307 if (!force)
1308 goto out;
1309 switch (mode) {
1310 case MIGRATE_SYNC:
1311 case MIGRATE_SYNC_NO_COPY:
1312 break;
1313 default:
1314 goto out;
1315 }
1316 lock_page(hpage);
1317 }
1318
1319 /*
1320 * Check for pages which are in the process of being freed. Without
1321 * page_mapping() set, hugetlbfs specific move page routine will not
1322 * be called and we could leak usage counts for subpools.
1323 */
1324 if (page_private(hpage) && !page_mapping(hpage)) {
1325 rc = -EBUSY;
1326 goto out_unlock;
1327 }
1328
1329 if (PageAnon(hpage))
1330 anon_vma = page_get_anon_vma(hpage);
1331
1332 if (unlikely(!trylock_page(new_hpage)))
1333 goto put_anon;
1334
1335 if (page_mapped(hpage)) {
1336 /*
1337 * try_to_unmap could potentially call huge_pmd_unshare.
1338 * Because of this, take semaphore in write mode here and
1339 * set TTU_RMAP_LOCKED to let lower levels know we have
1340 * taken the lock.
1341 */
1342 mapping = hugetlb_page_mapping_lock_write(hpage);
1343 if (unlikely(!mapping))
1344 goto unlock_put_anon;
1345
1346 try_to_unmap(hpage,
1347 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
1348 TTU_RMAP_LOCKED);
1349 page_was_mapped = 1;
1350 /*
1351 * Leave mapping locked until after subsequent call to
1352 * remove_migration_ptes()
1353 */
1354 }
1355
1356 if (!page_mapped(hpage))
1357 rc = move_to_new_page(new_hpage, hpage, mode);
1358
1359 if (page_was_mapped) {
1360 remove_migration_ptes(hpage,
1361 rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, true);
1362 i_mmap_unlock_write(mapping);
1363 }
1364
1365unlock_put_anon:
1366 unlock_page(new_hpage);
1367
1368put_anon:
1369 if (anon_vma)
1370 put_anon_vma(anon_vma);
1371
1372 if (rc == MIGRATEPAGE_SUCCESS) {
1373 move_hugetlb_state(hpage, new_hpage, reason);
1374 put_new_page = NULL;
1375 }
1376
1377out_unlock:
1378 unlock_page(hpage);
1379out:
1380 if (rc != -EAGAIN)
1381 putback_active_hugepage(hpage);
1382
1383 /*
1384 * If migration was not successful and there's a freeing callback, use
1385 * it. Otherwise, put_page() will drop the reference grabbed during
1386 * isolation.
1387 */
1388 if (put_new_page)
1389 put_new_page(new_hpage, private);
1390 else
1391 putback_active_hugepage(new_hpage);
1392
1393 return rc;
1394}
1395
1396/*
1397 * migrate_pages - migrate the pages specified in a list, to the free pages
1398 * supplied as the target for the page migration
1399 *
1400 * @from: The list of pages to be migrated.
1401 * @get_new_page: The function used to allocate free pages to be used
1402 * as the target of the page migration.
1403 * @put_new_page: The function used to free target pages if migration
1404 * fails, or NULL if no special handling is necessary.
1405 * @private: Private data to be passed on to get_new_page()
1406 * @mode: The migration mode that specifies the constraints for
1407 * page migration, if any.
1408 * @reason: The reason for page migration.
1409 *
1410 * The function returns after 10 attempts or if no pages are movable any more
1411 * because the list has become empty or no retryable pages remain on it.
1412 * The caller should call putback_movable_pages() to return pages to the LRU
1413 * or free list only if ret != 0.
1414 *
1415 * Returns the number of pages that were not migrated, or an error code.
1416 */
1417int migrate_pages(struct list_head *from, new_page_t get_new_page,
1418 free_page_t put_new_page, unsigned long private,
1419 enum migrate_mode mode, int reason)
1420{
1421 int retry = 1;
1422 int thp_retry = 1;
1423 int nr_failed = 0;
1424 int nr_succeeded = 0;
1425 int nr_thp_succeeded = 0;
1426 int nr_thp_failed = 0;
1427 int nr_thp_split = 0;
1428 int pass = 0;
1429 bool is_thp = false;
1430 struct page *page;
1431 struct page *page2;
1432 int swapwrite = current->flags & PF_SWAPWRITE;
1433 int rc, nr_subpages;
1434
1435 if (!swapwrite)
1436 current->flags |= PF_SWAPWRITE;
1437
1438 for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1439 retry = 0;
1440 thp_retry = 0;
1441
1442 list_for_each_entry_safe(page, page2, from, lru) {
1443retry:
1444 /*
1445 * THP statistics are based on the source huge page.
1446 * Capture required information that might get lost
1447 * during migration.
1448 */
1449 is_thp = PageTransHuge(page) && !PageHuge(page);
1450 nr_subpages = thp_nr_pages(page);
1451 cond_resched();
1452
1453 if (PageHuge(page))
1454 rc = unmap_and_move_huge_page(get_new_page,
1455 put_new_page, private, page,
1456 pass > 2, mode, reason);
1457 else
1458 rc = unmap_and_move(get_new_page, put_new_page,
1459 private, page, pass > 2, mode,
1460 reason);
1461
1462 switch(rc) {
1463 case -ENOMEM:
1464 /*
1465 * THP migration might be unsupported or the
1466 * allocation could've failed so we should
1467 * retry on the same page with the THP split
1468 * to base pages.
1469 *
1470 * Head page is retried immediately and tail
1471 * pages are added to the tail of the list so
1472 * we encounter them after the rest of the list
1473 * is processed.
1474 */
1475 if (is_thp) {
1476 lock_page(page);
1477 rc = split_huge_page_to_list(page, from);
1478 unlock_page(page);
1479 if (!rc) {
1480 list_safe_reset_next(page, page2, lru);
1481 nr_thp_split++;
1482 goto retry;
1483 }
1484
1485 nr_thp_failed++;
1486 nr_failed += nr_subpages;
1487 goto out;
1488 }
1489 nr_failed++;
1490 goto out;
1491 case -EAGAIN:
1492 if (is_thp) {
1493 thp_retry++;
1494 break;
1495 }
1496 retry++;
1497 break;
1498 case MIGRATEPAGE_SUCCESS:
1499 if (is_thp) {
1500 nr_thp_succeeded++;
1501 nr_succeeded += nr_subpages;
1502 break;
1503 }
1504 nr_succeeded++;
1505 break;
1506 default:
1507 /*
1508 * Permanent failure (-EBUSY, -ENOSYS, etc.):
1509 * unlike the -EAGAIN case, the failed page is
1510 * removed from the migration page list and not
1511 * retried in the next outer loop.
1512 */
1513 if (is_thp) {
1514 nr_thp_failed++;
1515 nr_failed += nr_subpages;
1516 break;
1517 }
1518 nr_failed++;
1519 break;
1520 }
1521 }
1522 }
1523 nr_failed += retry + thp_retry;
1524 nr_thp_failed += thp_retry;
1525 rc = nr_failed;
1526out:
1527 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1528 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1529 count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1530 count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1531 count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1532 trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1533 nr_thp_failed, nr_thp_split, mode, reason);
1534
1535 if (!swapwrite)
1536 current->flags &= ~PF_SWAPWRITE;
1537
1538 return rc;
1539}
1540
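/*
 * Generic get_new_page callback for migrate_pages(). @private points to a
 * struct migration_target_control describing the target node, nodemask and
 * gfp mask. The allocation mirrors the source page: a hugetlb page, a THP
 * or an order-0 page, allocated on mtc->nid (or on the source page's node
 * when NUMA_NO_NODE is passed).
 */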
1541struct page *alloc_migration_target(struct page *page, unsigned long private)
1542{
1543 struct migration_target_control *mtc;
1544 gfp_t gfp_mask;
1545 unsigned int order = 0;
1546 struct page *new_page = NULL;
1547 int nid;
1548 int zidx;
1549
1550 mtc = (struct migration_target_control *)private;
1551 gfp_mask = mtc->gfp_mask;
1552 nid = mtc->nid;
1553 if (nid == NUMA_NO_NODE)
1554 nid = page_to_nid(page);
1555
1556 if (PageHuge(page)) {
1557 struct hstate *h = page_hstate(compound_head(page));
1558
1559 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1560 return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1561 }
1562
1563 if (PageTransHuge(page)) {
1564 /*
1565 * clear __GFP_RECLAIM to make the migration callback
1566 * consistent with regular THP allocations.
1567 */
1568 gfp_mask &= ~__GFP_RECLAIM;
1569 gfp_mask |= GFP_TRANSHUGE;
1570 order = HPAGE_PMD_ORDER;
1571 }
1572 zidx = zone_idx(page_zone(page));
1573 if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1574 gfp_mask |= __GFP_HIGHMEM;
1575
1576 new_page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
1577
1578 if (new_page && PageTransHuge(new_page))
1579 prep_transhuge_page(new_page);
1580
1581 return new_page;
1582}
1583
1584#ifdef CONFIG_NUMA
1585
1586static int store_status(int __user *status, int start, int value, int nr)
1587{
1588 while (nr-- > 0) {
1589 if (put_user(value, status + start))
1590 return -EFAULT;
1591 start++;
1592 }
1593
1594 return 0;
1595}
1596
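/*
 * Migrate the already isolated pages on @pagelist to @node. On failure the
 * remaining pages are put back onto their original lists; the return value
 * is 0 on success, or the error / number of remaining pages reported by
 * migrate_pages().
 */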
1597static int do_move_pages_to_node(struct mm_struct *mm,
1598 struct list_head *pagelist, int node)
1599{
1600 int err;
1601 struct migration_target_control mtc = {
1602 .nid = node,
1603 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1604 };
1605
1606 err = migrate_pages(pagelist, alloc_migration_target, NULL,
1607 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1608 if (err)
1609 putback_movable_pages(pagelist);
1610 return err;
1611}
1612
1613/*
1614 * Resolves the given address to a struct page, isolates it from the LRU and
1615 * puts it on the given pagelist.
1616 * Returns:
1617 * errno - if the page cannot be found/isolated
1618 * 0 - when it doesn't have to be migrated because it is already on the
1619 * target node
1620 * 1 - when it has been queued
1621 */
1622static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1623 int node, struct list_head *pagelist, bool migrate_all)
1624{
1625 struct vm_area_struct *vma;
1626 struct page *page;
1627 unsigned int follflags;
1628 int err;
1629
1630 mmap_read_lock(mm);
1631 err = -EFAULT;
1632 vma = find_vma(mm, addr);
1633 if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1634 goto out;
1635
1636 /* FOLL_DUMP to ignore special (like zero) pages */
1637 follflags = FOLL_GET | FOLL_DUMP;
1638 page = follow_page(vma, addr, follflags);
1639
1640 err = PTR_ERR(page);
1641 if (IS_ERR(page))
1642 goto out;
1643
1644 err = -ENOENT;
1645 if (!page)
1646 goto out;
1647
1648 err = 0;
1649 if (page_to_nid(page) == node)
1650 goto out_putpage;
1651
1652 err = -EACCES;
1653 if (page_mapcount(page) > 1 && !migrate_all)
1654 goto out_putpage;
1655
1656 if (PageHuge(page)) {
1657 if (PageHead(page)) {
1658 isolate_huge_page(page, pagelist);
1659 err = 1;
1660 }
1661 } else {
1662 struct page *head;
1663
1664 head = compound_head(page);
1665 err = isolate_lru_page(head);
1666 if (err)
1667 goto out_putpage;
1668
1669 err = 1;
1670 list_add_tail(&head->lru, pagelist);
1671 mod_node_page_state(page_pgdat(head),
1672 NR_ISOLATED_ANON + page_is_file_lru(head),
1673 thp_nr_pages(head));
1674 }
1675out_putpage:
1676 /*
1677 * Either remove the duplicate refcount from
1678 * isolate_lru_page() or drop the page ref if it was
1679 * not isolated.
1680 */
1681 put_page(page);
1682out:
1683 mmap_read_unlock(mm);
1684 return err;
1685}
1686
1687static int move_pages_and_store_status(struct mm_struct *mm, int node,
1688 struct list_head *pagelist, int __user *status,
1689 int start, int i, unsigned long nr_pages)
1690{
1691 int err;
1692
1693 if (list_empty(pagelist))
1694 return 0;
1695
1696 err = do_move_pages_to_node(mm, pagelist, node);
1697 if (err) {
1698 /*
1699 * A positive err means the number of pages that
1700 * failed to migrate. Since we are going to
1701 * abort and return the number of non-migrated
1702 * pages, we need to include the rest of the
1703 * nr_pages that have not been attempted as
1704 * well.
1705 */
1706 if (err > 0)
1707 err += nr_pages - i - 1;
1708 return err;
1709 }
1710 return store_status(status, start, node, i - start);
1711}
1712
1713/*
1714 * Migrate an array of page addresses onto an array of nodes and fill
1715 * the corresponding status array.
1716 */
1717static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1718 unsigned long nr_pages,
1719 const void __user * __user *pages,
1720 const int __user *nodes,
1721 int __user *status, int flags)
1722{
1723 int current_node = NUMA_NO_NODE;
1724 LIST_HEAD(pagelist);
1725 int start, i;
1726 int err = 0, err1;
1727
1728 migrate_prep();
1729
1730 for (i = start = 0; i < nr_pages; i++) {
1731 const void __user *p;
1732 unsigned long addr;
1733 int node;
1734
1735 err = -EFAULT;
1736 if (get_user(p, pages + i))
1737 goto out_flush;
1738 if (get_user(node, nodes + i))
1739 goto out_flush;
1740 addr = (unsigned long)untagged_addr(p);
1741
1742 err = -ENODEV;
1743 if (node < 0 || node >= MAX_NUMNODES)
1744 goto out_flush;
1745 if (!node_state(node, N_MEMORY))
1746 goto out_flush;
1747
1748 err = -EACCES;
1749 if (!node_isset(node, task_nodes))
1750 goto out_flush;
1751
1752 if (current_node == NUMA_NO_NODE) {
1753 current_node = node;
1754 start = i;
1755 } else if (node != current_node) {
1756 err = move_pages_and_store_status(mm, current_node,
1757 &pagelist, status, start, i, nr_pages);
1758 if (err)
1759 goto out;
1760 start = i;
1761 current_node = node;
1762 }
1763
1764 /*
1765 * Errors in the page lookup or isolation are not fatal and we simply
1766 * report them via status.
1767 */
1768 err = add_page_for_migration(mm, addr, current_node,
1769 &pagelist, flags & MPOL_MF_MOVE_ALL);
1770
1771 if (err > 0) {
1772 /* The page is successfully queued for migration */
1773 continue;
1774 }
1775
1776 /*
1777 * If the page is already on the target node (!err), store the
1778 * node, otherwise, store the err.
1779 */
1780 err = store_status(status, i, err ? : current_node, 1);
1781 if (err)
1782 goto out_flush;
1783
1784 err = move_pages_and_store_status(mm, current_node, &pagelist,
1785 status, start, i, nr_pages);
1786 if (err)
1787 goto out;
1788 current_node = NUMA_NO_NODE;
1789 }
1790out_flush:
1791 /* Make sure we do not overwrite the existing error */
1792 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1793 status, start, i, nr_pages);
1794 if (err >= 0)
1795 err = err1;
1796out:
1797 return err;
1798}
1799
1800/*
1801 * Determine the nodes of an array of pages and store them in a status array.
1802 */
1803static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1804 const void __user **pages, int *status)
1805{
1806 unsigned long i;
1807
1808 mmap_read_lock(mm);
1809
1810 for (i = 0; i < nr_pages; i++) {
1811 unsigned long addr = (unsigned long)(*pages);
1812 struct vm_area_struct *vma;
1813 struct page *page;
1814 int err = -EFAULT;
1815
1816 vma = find_vma(mm, addr);
1817 if (!vma || addr < vma->vm_start)
1818 goto set_status;
1819
1820 /* FOLL_DUMP to ignore special (like zero) pages */
1821 page = follow_page(vma, addr, FOLL_DUMP);
1822
1823 err = PTR_ERR(page);
1824 if (IS_ERR(page))
1825 goto set_status;
1826
1827 err = page ? page_to_nid(page) : -ENOENT;
1828set_status:
1829 *status = err;
1830
1831 pages++;
1832 status++;
1833 }
1834
1835 mmap_read_unlock(mm);
1836}
1837
1838/*
1839 * Determine the nodes of a user array of pages and store them in
1840 * a user status array.
1841 */
1842static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1843 const void __user * __user *pages,
1844 int __user *status)
1845{
1846#define DO_PAGES_STAT_CHUNK_NR 16
1847 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1848 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1849
1850 while (nr_pages) {
1851 unsigned long chunk_nr;
1852
1853 chunk_nr = nr_pages;
1854 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1855 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1856
1857 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1858 break;
1859
1860 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1861
1862 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1863 break;
1864
1865 pages += chunk_nr;
1866 status += chunk_nr;
1867 nr_pages -= chunk_nr;
1868 }
1869 return nr_pages ? -EFAULT : 0;
1870}
1871
1872/*
1873 * Move a list of pages in the address space of the currently executing
1874 * process.
1875 */
1876static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1877 const void __user * __user *pages,
1878 const int __user *nodes,
1879 int __user *status, int flags)
1880{
1881 struct task_struct *task;
1882 struct mm_struct *mm;
1883 int err;
1884 nodemask_t task_nodes;
1885
1886 /* Check flags */
1887 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1888 return -EINVAL;
1889
1890 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1891 return -EPERM;
1892
1893 /* Find the mm_struct */
1894 rcu_read_lock();
1895 task = pid ? find_task_by_vpid(pid) : current;
1896 if (!task) {
1897 rcu_read_unlock();
1898 return -ESRCH;
1899 }
1900 get_task_struct(task);
1901
1902 /*
1903 * Check if this process has the right to modify the specified
1904 * process. Use the regular "ptrace_may_access()" checks.
1905 */
1906 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1907 rcu_read_unlock();
1908 err = -EPERM;
1909 goto out;
1910 }
1911 rcu_read_unlock();
1912
1913 err = security_task_movememory(task);
1914 if (err)
1915 goto out;
1916
1917 task_nodes = cpuset_mems_allowed(task);
1918 mm = get_task_mm(task);
1919 put_task_struct(task);
1920
1921 if (!mm)
1922 return -EINVAL;
1923
1924 if (nodes)
1925 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1926 nodes, status, flags);
1927 else
1928 err = do_pages_stat(mm, nr_pages, pages, status);
1929
1930 mmput(mm);
1931 return err;
1932
1933out:
1934 put_task_struct(task);
1935 return err;
1936}
1937
1938SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1939 const void __user * __user *, pages,
1940 const int __user *, nodes,
1941 int __user *, status, int, flags)
1942{
1943 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1944}
1945
1946#ifdef CONFIG_COMPAT
1947COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1948 compat_uptr_t __user *, pages32,
1949 const int __user *, nodes,
1950 int __user *, status,
1951 int, flags)
1952{
1953 const void __user * __user *pages;
1954 int i;
1955
1956 pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1957 for (i = 0; i < nr_pages; i++) {
1958 compat_uptr_t p;
1959
1960 if (get_user(p, pages32 + i) ||
1961 put_user(compat_ptr(p), pages + i))
1962 return -EFAULT;
1963 }
1964 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1965}
1966#endif /* CONFIG_COMPAT */
1967
1968#ifdef CONFIG_NUMA_BALANCING
1969/*
1970 * Returns true if this is a safe migration target node for misplaced NUMA
1971 * pages. Currently it only checks the watermarks, which is crude.
1972 */
1973static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1974 unsigned long nr_migrate_pages)
1975{
1976 int z;
1977
1978 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1979 struct zone *zone = pgdat->node_zones + z;
1980
1981 if (!populated_zone(zone))
1982 continue;
1983
1984 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
1985 if (!zone_watermark_ok(zone, 0,
1986 high_wmark_pages(zone) +
1987 nr_migrate_pages,
1988 ZONE_MOVABLE, 0))
1989 continue;
1990 return true;
1991 }
1992 return false;
1993}
1994
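/*
 * get_new_page callback for NUMA balancing migration: allocate a movable
 * highuser page strictly on the target node (__GFP_THISNODE), with
 * __GFP_NORETRY/__GFP_NOMEMALLOC and no direct reclaim, so that a busy
 * target node tends to make the migration fail quickly rather than stall
 * the faulting task.
 */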
1995static struct page *alloc_misplaced_dst_page(struct page *page,
1996 unsigned long data)
1997{
1998 int nid = (int) data;
1999 struct page *newpage;
2000
2001 newpage = __alloc_pages_node(nid,
2002 (GFP_HIGHUSER_MOVABLE |
2003 __GFP_THISNODE | __GFP_NOMEMALLOC |
2004 __GFP_NORETRY | __GFP_NOWARN) &
2005 ~__GFP_RECLAIM, 0);
2006
2007 return newpage;
2008}
2009
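/*
 * Try to isolate @page for NUMA balancing migration. The page is skipped if
 * the target node is close to its high watermark or if a THP carries
 * unexpected extra references. On success the page is accounted under
 * NR_ISOLATED_* and the caller's reference is dropped (isolation holds its
 * own reference); returns 1 if the page was isolated, 0 otherwise.
 */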
2010static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2011{
2012 int page_lru;
2013
2014 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2015
2016 /* Avoid migrating to a node that is nearly full */
2017 if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
2018 return 0;
2019
2020 if (isolate_lru_page(page))
2021 return 0;
2022
2023 /*
2024 * migrate_misplaced_transhuge_page() skips page migration's usual
2025 * check on page_count(), so we must do it here, now that the page
2026 * has been isolated: a GUP pin, or any other pin, prevents migration.
2027 * The expected page count is 3: 1 for page's mapcount and 1 for the
2028 * caller's pin and 1 for the reference taken by isolate_lru_page().
2029 */
2030 if (PageTransHuge(page) && page_count(page) != 3) {
2031 putback_lru_page(page);
2032 return 0;
2033 }
2034
2035 page_lru = page_is_file_lru(page);
2036 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2037 thp_nr_pages(page));
2038
2039 /*
2040 * Isolating the page has taken another reference, so the
2041 * caller's reference can be safely dropped without the page
2042 * disappearing underneath us during migration.
2043 */
2044 put_page(page);
2045 return 1;
2046}
2047
2048bool pmd_trans_migrating(pmd_t pmd)
2049{
2050 struct page *page = pmd_page(pmd);
2051 return PageLocked(page);
2052}
2053
2054/*
2055 * Attempt to migrate a misplaced page to the specified destination
2056 * node. Caller is expected to have an elevated reference count on
2057 * the page that will be dropped by this function before returning.
2058 */
2059int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2060 int node)
2061{
2062 pg_data_t *pgdat = NODE_DATA(node);
2063 int isolated;
2064 int nr_remaining;
2065 LIST_HEAD(migratepages);
2066
2067 /*
2068 * Don't migrate file pages that are mapped in multiple processes
2069 * with execute permissions as they are probably shared libraries.
2070 */
2071 if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2072 (vma->vm_flags & VM_EXEC))
2073 goto out;
2074
2075 /*
2076 * Also do not migrate dirty pages as not all filesystems can move
2077 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
2078 */
2079 if (page_is_file_lru(page) && PageDirty(page))
2080 goto out;
2081
2082 isolated = numamigrate_isolate_page(pgdat, page);
2083 if (!isolated)
2084 goto out;
2085
2086 list_add(&page->lru, &migratepages);
2087 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
2088 NULL, node, MIGRATE_ASYNC,
2089 MR_NUMA_MISPLACED);
2090 if (nr_remaining) {
2091 if (!list_empty(&migratepages)) {
2092 list_del(&page->lru);
2093 dec_node_page_state(page, NR_ISOLATED_ANON +
2094 page_is_file_lru(page));
2095 putback_lru_page(page);
2096 }
2097 isolated = 0;
2098 } else
2099 count_vm_numa_event(NUMA_PAGE_MIGRATE);
2100 BUG_ON(!list_empty(&migratepages));
2101 return isolated;
2102
2103out:
2104 put_page(page);
2105 return 0;
2106}
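/*
 * An illustrative sketch of the expected calling pattern (the real caller is
 * the NUMA hinting fault path, do_numa_page() in mm/memory.c; the helper name
 * below is a simplified stand-in, not exact kernel code):
 *
 *	page = vm_normal_page(vma, addr, pte);
 *	get_page(page);				// the elevated ref this function drops
 *	target_nid = choose_target_node(page);	// hypothetical placement decision
 *	pte_unmap_unlock(ptep, ptl);		// drop the PTL; migration can sleep
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		flags |= TNF_MIGRATED;
 *	// whether it migrated or not, the page reference is gone at this point
 */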
2107#endif /* CONFIG_NUMA_BALANCING */
2108
2109#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2110/*
2111 * Migrates a THP to a given target node. page must be locked and is unlocked
2112 * before returning.
2113 */
2114int migrate_misplaced_transhuge_page(struct mm_struct *mm,
2115 struct vm_area_struct *vma,
2116 pmd_t *pmd, pmd_t entry,
2117 unsigned long address,
2118 struct page *page, int node)
2119{
2120 spinlock_t *ptl;
2121 pg_data_t *pgdat = NODE_DATA(node);
2122 int isolated = 0;
2123 struct page *new_page = NULL;
2124 int page_lru = page_is_file_lru(page);
2125 unsigned long start = address & HPAGE_PMD_MASK;
2126
2127 new_page = alloc_pages_node(node,
2128 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2129 HPAGE_PMD_ORDER);
2130 if (!new_page)
2131 goto out_fail;
2132 prep_transhuge_page(new_page);
2133
2134 isolated = numamigrate_isolate_page(pgdat, page);
2135 if (!isolated) {
2136 put_page(new_page);
2137 goto out_fail;
2138 }
2139
2140 /* Prepare a page as a migration target */
2141 __SetPageLocked(new_page);
2142 if (PageSwapBacked(page))
2143 __SetPageSwapBacked(new_page);
2144
2145 /* anon mapping, we can simply copy page->mapping to the new page: */
2146 new_page->mapping = page->mapping;
2147 new_page->index = page->index;
2148 /* flush the cache before copying using the kernel virtual address */
2149 flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2150 migrate_page_copy(new_page, page);
2151 WARN_ON(PageLRU(new_page));
2152
2153 /* Recheck the target PMD */
2154 ptl = pmd_lock(mm, pmd);
2155 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2156 spin_unlock(ptl);
2157
2158 /* Reverse changes made by migrate_page_copy() */
2159 if (TestClearPageActive(new_page))
2160 SetPageActive(page);
2161 if (TestClearPageUnevictable(new_page))
2162 SetPageUnevictable(page);
2163
2164 unlock_page(new_page);
2165 put_page(new_page); /* Free it */
2166
2167		/* Retake the caller's reference and put the page back on the LRU */
2168 get_page(page);
2169 putback_lru_page(page);
2170 mod_node_page_state(page_pgdat(page),
2171 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2172
2173 goto out_unlock;
2174 }
2175
2176 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2177 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2178
2179 /*
2180 * Overwrite the old entry under pagetable lock and establish
2181 * the new PTE. Any parallel GUP will either observe the old
2182 * page blocking on the page lock, block on the page table
2183 * lock or observe the new page. The SetPageUptodate on the
2184 * new page and page_add_new_anon_rmap guarantee the copy is
2185 * visible before the pagetable update.
2186 */
2187 page_add_anon_rmap(new_page, vma, start, true);
2188 /*
2189 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2190 * has already been flushed globally. So no TLB can be currently
2191 * caching this non present pmd mapping. There's no need to clear the
2192 * pmd before doing set_pmd_at(), nor to flush the TLB after
2193 * set_pmd_at(). Clearing the pmd here would introduce a race
2194 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2195 * mmap_lock for reading. If the pmd is set to NULL at any given time,
2196 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2197 * pmd.
2198 */
2199 set_pmd_at(mm, start, pmd, entry);
2200 update_mmu_cache_pmd(vma, address, &entry);
2201
2202 page_ref_unfreeze(page, 2);
2203 mlock_migrate_page(new_page, page);
2204 page_remove_rmap(page, true);
2205 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2206
2207 spin_unlock(ptl);
2208
2209 /* Take an "isolate" reference and put new page on the LRU. */
2210 get_page(new_page);
2211 putback_lru_page(new_page);
2212
2213 unlock_page(new_page);
2214 unlock_page(page);
2215 put_page(page); /* Drop the rmap reference */
2216 put_page(page); /* Drop the LRU isolation reference */
2217
2218 count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2219 count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2220
2221 mod_node_page_state(page_pgdat(page),
2222 NR_ISOLATED_ANON + page_lru,
2223 -HPAGE_PMD_NR);
2224 return isolated;
2225
2226out_fail:
2227 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2228 ptl = pmd_lock(mm, pmd);
2229 if (pmd_same(*pmd, entry)) {
2230 entry = pmd_modify(entry, vma->vm_page_prot);
2231 set_pmd_at(mm, start, pmd, entry);
2232 update_mmu_cache_pmd(vma, address, &entry);
2233 }
2234 spin_unlock(ptl);
2235
2236out_unlock:
2237 unlock_page(page);
2238 put_page(page);
2239 return 0;
2240}
2241#endif /* CONFIG_NUMA_BALANCING */
2242
2243#endif /* CONFIG_NUMA */
2244
2245#ifdef CONFIG_DEVICE_PRIVATE
2246static int migrate_vma_collect_hole(unsigned long start,
2247 unsigned long end,
2248 __always_unused int depth,
2249 struct mm_walk *walk)
2250{
2251 struct migrate_vma *migrate = walk->private;
2252 unsigned long addr;
2253
2254 /* Only allow populating anonymous memory. */
2255 if (!vma_is_anonymous(walk->vma)) {
2256 for (addr = start; addr < end; addr += PAGE_SIZE) {
2257 migrate->src[migrate->npages] = 0;
2258 migrate->dst[migrate->npages] = 0;
2259 migrate->npages++;
2260 }
2261 return 0;
2262 }
2263
2264 for (addr = start; addr < end; addr += PAGE_SIZE) {
2265 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2266 migrate->dst[migrate->npages] = 0;
2267 migrate->npages++;
2268 migrate->cpages++;
2269 }
2270
2271 return 0;
2272}
2273
2274static int migrate_vma_collect_skip(unsigned long start,
2275 unsigned long end,
2276 struct mm_walk *walk)
2277{
2278 struct migrate_vma *migrate = walk->private;
2279 unsigned long addr;
2280
2281 for (addr = start; addr < end; addr += PAGE_SIZE) {
2282 migrate->dst[migrate->npages] = 0;
2283 migrate->src[migrate->npages++] = 0;
2284 }
2285
2286 return 0;
2287}
2288
2289static int migrate_vma_collect_pmd(pmd_t *pmdp,
2290 unsigned long start,
2291 unsigned long end,
2292 struct mm_walk *walk)
2293{
2294 struct migrate_vma *migrate = walk->private;
2295 struct vm_area_struct *vma = walk->vma;
2296 struct mm_struct *mm = vma->vm_mm;
2297 unsigned long addr = start, unmapped = 0;
2298 spinlock_t *ptl;
2299 pte_t *ptep;
2300
2301again:
2302 if (pmd_none(*pmdp))
2303 return migrate_vma_collect_hole(start, end, -1, walk);
2304
2305 if (pmd_trans_huge(*pmdp)) {
2306 struct page *page;
2307
2308 ptl = pmd_lock(mm, pmdp);
2309 if (unlikely(!pmd_trans_huge(*pmdp))) {
2310 spin_unlock(ptl);
2311 goto again;
2312 }
2313
2314 page = pmd_page(*pmdp);
2315 if (is_huge_zero_page(page)) {
2316 spin_unlock(ptl);
2317 split_huge_pmd(vma, pmdp, addr);
2318 if (pmd_trans_unstable(pmdp))
2319 return migrate_vma_collect_skip(start, end,
2320 walk);
2321 } else {
2322 int ret;
2323
2324 get_page(page);
2325 spin_unlock(ptl);
2326 if (unlikely(!trylock_page(page)))
2327 return migrate_vma_collect_skip(start, end,
2328 walk);
2329 ret = split_huge_page(page);
2330 unlock_page(page);
2331 put_page(page);
2332 if (ret)
2333 return migrate_vma_collect_skip(start, end,
2334 walk);
2335 if (pmd_none(*pmdp))
2336 return migrate_vma_collect_hole(start, end, -1,
2337 walk);
2338 }
2339 }
2340
2341 if (unlikely(pmd_bad(*pmdp)))
2342 return migrate_vma_collect_skip(start, end, walk);
2343
2344 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2345 arch_enter_lazy_mmu_mode();
2346
2347 for (; addr < end; addr += PAGE_SIZE, ptep++) {
2348 unsigned long mpfn = 0, pfn;
2349 struct page *page;
2350 swp_entry_t entry;
2351 pte_t pte;
2352
2353 pte = *ptep;
2354
2355 if (pte_none(pte)) {
2356 if (vma_is_anonymous(vma)) {
2357 mpfn = MIGRATE_PFN_MIGRATE;
2358 migrate->cpages++;
2359 }
2360 goto next;
2361 }
2362
2363 if (!pte_present(pte)) {
2364 /*
2365			 * We only care about the special page table entry of an
2366			 * unaddressable device page. Other special swap entries are
2367			 * not migratable, and we ignore regular swapped-out pages.
2368 */
2369 entry = pte_to_swp_entry(pte);
2370 if (!is_device_private_entry(entry))
2371 goto next;
2372
2373 page = device_private_entry_to_page(entry);
2374 if (!(migrate->flags &
2375 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2376 page->pgmap->owner != migrate->pgmap_owner)
2377 goto next;
2378
2379 mpfn = migrate_pfn(page_to_pfn(page)) |
2380 MIGRATE_PFN_MIGRATE;
2381 if (is_write_device_private_entry(entry))
2382 mpfn |= MIGRATE_PFN_WRITE;
2383 } else {
2384 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2385 goto next;
2386 pfn = pte_pfn(pte);
2387 if (is_zero_pfn(pfn)) {
2388 mpfn = MIGRATE_PFN_MIGRATE;
2389 migrate->cpages++;
2390 goto next;
2391 }
2392 page = vm_normal_page(migrate->vma, addr, pte);
2393 mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2394 mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2395 }
2396
2397 /* FIXME support THP */
2398 if (!page || !page->mapping || PageTransCompound(page)) {
2399 mpfn = 0;
2400 goto next;
2401 }
2402
2403 /*
2404 * By getting a reference on the page we pin it and that blocks
2405		 * any kind of migration. A side effect is that it "freezes" the
2406		 * pte.
2407		 *
2408		 * We drop this reference after isolating the page from the lru
2409		 * for non-device pages (device pages are not on the lru and thus
2410		 * can't be dropped from it).
2411 */
2412 get_page(page);
2413 migrate->cpages++;
2414
2415 /*
2416 * Optimize for the common case where page is only mapped once
2417 * in one process. If we can lock the page, then we can safely
2418 * set up a special migration page table entry now.
2419 */
2420 if (trylock_page(page)) {
2421 pte_t swp_pte;
2422
2423 mpfn |= MIGRATE_PFN_LOCKED;
2424 ptep_get_and_clear(mm, addr, ptep);
2425
2426 /* Setup special migration page table entry */
2427 entry = make_migration_entry(page, mpfn &
2428 MIGRATE_PFN_WRITE);
2429 swp_pte = swp_entry_to_pte(entry);
2430 if (pte_present(pte)) {
2431 if (pte_soft_dirty(pte))
2432 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2433 if (pte_uffd_wp(pte))
2434 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2435 } else {
2436 if (pte_swp_soft_dirty(pte))
2437 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2438 if (pte_swp_uffd_wp(pte))
2439 swp_pte = pte_swp_mkuffd_wp(swp_pte);
2440 }
2441 set_pte_at(mm, addr, ptep, swp_pte);
2442
2443 /*
2444 * This is like regular unmap: we remove the rmap and
2445 * drop page refcount. Page won't be freed, as we took
2446 * a reference just above.
2447 */
2448 page_remove_rmap(page, false);
2449 put_page(page);
2450
2451 if (pte_present(pte))
2452 unmapped++;
2453 }
2454
2455next:
2456 migrate->dst[migrate->npages] = 0;
2457 migrate->src[migrate->npages++] = mpfn;
2458 }
2459 arch_leave_lazy_mmu_mode();
2460 pte_unmap_unlock(ptep - 1, ptl);
2461
2462 /* Only flush the TLB if we actually modified any entries */
2463 if (unmapped)
2464 flush_tlb_range(walk->vma, start, end);
2465
2466 return 0;
2467}
2468
2469static const struct mm_walk_ops migrate_vma_walk_ops = {
2470 .pmd_entry = migrate_vma_collect_pmd,
2471 .pte_hole = migrate_vma_collect_hole,
2472};
2473
2474/*
2475 * migrate_vma_collect() - collect pages over a range of virtual addresses
2476 * @migrate: migrate struct containing all migration information
2477 *
2478 * This will walk the CPU page table. For each virtual address backed by a
2479 * valid page, it updates the src array and takes a reference on the page, in
2480 * order to pin the page until we lock it and unmap it.
2481 */
2482static void migrate_vma_collect(struct migrate_vma *migrate)
2483{
2484 struct mmu_notifier_range range;
2485
2486 /*
2487 * Note that the pgmap_owner is passed to the mmu notifier callback so
2488 * that the registered device driver can skip invalidating device
2489 * private page mappings that won't be migrated.
2490 */
2491 mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
2492 migrate->vma->vm_mm, migrate->start, migrate->end,
2493 migrate->pgmap_owner);
2494 mmu_notifier_invalidate_range_start(&range);
2495
2496 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2497 &migrate_vma_walk_ops, migrate);
2498
2499 mmu_notifier_invalidate_range_end(&range);
2500 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2501}
2502
2503/*
2504 * migrate_vma_check_page() - check if page is pinned or not
2505 * @page: struct page to check
2506 *
2507 * Pinned pages cannot be migrated. This is the same test as in
2508 * migrate_page_move_mapping(), except that here we allow migration of a
2509 * ZONE_DEVICE page.
2510 */
2511static bool migrate_vma_check_page(struct page *page)
2512{
2513 /*
2514 * One extra ref because caller holds an extra reference, either from
2515 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2516 * a device page.
2517 */
2518 int extra = 1;
2519
2520 /*
2521 * FIXME support THP (transparent huge page), it is bit more complex to
2522 * check them than regular pages, because they can be mapped with a pmd
2523 * or with a pte (split pte mapping).
2524 */
2525 if (PageCompound(page))
2526 return false;
2527
2528	/* Pages from ZONE_DEVICE have one extra reference */
2529 if (is_zone_device_page(page)) {
2530 /*
2531		 * Private pages can never be pinned as they have no valid pte and
2532		 * GUP will fail for them. Yet if there is a pending migration, a
2533		 * thread might try to wait on the pte migration entry and will bump
2534		 * the page reference count. Sadly there is no way to differentiate
2535		 * a regular pin from a migration wait. Hence, to avoid two racing
2536		 * threads trying to migrate back to the CPU and entering an infinite
2537		 * loop (one stopping migration because the other is waiting on the
2538		 * pte migration entry), we always return true here.
2539 *
2540 * FIXME proper solution is to rework migration_entry_wait() so
2541 * it does not need to take a reference on page.
2542 */
2543 return is_device_private_page(page);
2544 }
2545
2546	/* For a file-backed page */
2547 if (page_mapping(page))
2548 extra += 1 + page_has_private(page);
2549
2550 if ((page_count(page) - extra) > page_mapcount(page))
2551 return false;
2552
2553 return true;
2554}
2555
2556/*
2557 * migrate_vma_prepare() - lock pages and isolate them from the lru
2558 * @migrate: migrate struct containing all migration information
2559 *
2560 * This locks pages that have been collected by migrate_vma_collect(). Once each
2561 * page is locked it is isolated from the lru (for non-device pages). Finally,
2562 * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2563 * migrated by concurrent kernel threads.
2564 */
2565static void migrate_vma_prepare(struct migrate_vma *migrate)
2566{
2567 const unsigned long npages = migrate->npages;
2568 const unsigned long start = migrate->start;
2569 unsigned long addr, i, restore = 0;
2570 bool allow_drain = true;
2571
2572 lru_add_drain();
2573
2574 for (i = 0; (i < npages) && migrate->cpages; i++) {
2575 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2576 bool remap = true;
2577
2578 if (!page)
2579 continue;
2580
2581 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2582 /*
2583			 * Because we are migrating several pages there can be a
2584			 * deadlock between two concurrent migrations where each is
2585			 * waiting on the other's page lock.
2586			 *
2587			 * Make migrate_vma() a best-effort thing and back off
2588			 * for any page we cannot lock right away.
2589 */
2590 if (!trylock_page(page)) {
2591 migrate->src[i] = 0;
2592 migrate->cpages--;
2593 put_page(page);
2594 continue;
2595 }
2596 remap = false;
2597 migrate->src[i] |= MIGRATE_PFN_LOCKED;
2598 }
2599
2600 /* ZONE_DEVICE pages are not on LRU */
2601 if (!is_zone_device_page(page)) {
2602 if (!PageLRU(page) && allow_drain) {
2603 /* Drain CPU's pagevec */
2604 lru_add_drain_all();
2605 allow_drain = false;
2606 }
2607
2608 if (isolate_lru_page(page)) {
2609 if (remap) {
2610 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2611 migrate->cpages--;
2612 restore++;
2613 } else {
2614 migrate->src[i] = 0;
2615 unlock_page(page);
2616 migrate->cpages--;
2617 put_page(page);
2618 }
2619 continue;
2620 }
2621
2622 /* Drop the reference we took in collect */
2623 put_page(page);
2624 }
2625
2626 if (!migrate_vma_check_page(page)) {
2627 if (remap) {
2628 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2629 migrate->cpages--;
2630 restore++;
2631
2632 if (!is_zone_device_page(page)) {
2633 get_page(page);
2634 putback_lru_page(page);
2635 }
2636 } else {
2637 migrate->src[i] = 0;
2638 unlock_page(page);
2639 migrate->cpages--;
2640
2641 if (!is_zone_device_page(page))
2642 putback_lru_page(page);
2643 else
2644 put_page(page);
2645 }
2646 }
2647 }
2648
2649 for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2650 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2651
2652 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2653 continue;
2654
2655 remove_migration_pte(page, migrate->vma, addr, page);
2656
2657 migrate->src[i] = 0;
2658 unlock_page(page);
2659 put_page(page);
2660 restore--;
2661 }
2662}
2663
2664/*
2665 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2666 * @migrate: migrate struct containing all migration information
2667 *
2668 * Replace page mapping (CPU page table pte) with a special migration pte entry
2669 * and check again if it has been pinned. Pinned pages are restored because we
2670 * cannot migrate them.
2671 *
2672 * This is the last step before we call the device driver callback to allocate
2673 * destination memory and copy contents of original page over to new page.
2674 */
2675static void migrate_vma_unmap(struct migrate_vma *migrate)
2676{
2677 int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2678 const unsigned long npages = migrate->npages;
2679 const unsigned long start = migrate->start;
2680 unsigned long addr, i, restore = 0;
2681
2682 for (i = 0; i < npages; i++) {
2683 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2684
2685 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2686 continue;
2687
2688 if (page_mapped(page)) {
2689 try_to_unmap(page, flags);
2690 if (page_mapped(page))
2691 goto restore;
2692 }
2693
2694 if (migrate_vma_check_page(page))
2695 continue;
2696
2697restore:
2698 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2699 migrate->cpages--;
2700 restore++;
2701 }
2702
2703 for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2704 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2705
2706 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2707 continue;
2708
2709 remove_migration_ptes(page, page, false);
2710
2711 migrate->src[i] = 0;
2712 unlock_page(page);
2713 restore--;
2714
2715 if (is_zone_device_page(page))
2716 put_page(page);
2717 else
2718 putback_lru_page(page);
2719 }
2720}
2721
2722/**
2723 * migrate_vma_setup() - prepare to migrate a range of memory
2724 * @args: contains the vma, start, and pfns arrays for the migration
2725 *
2726 * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2727 * without an error.
2728 *
2729 * Prepare to migrate a range of memory virtual address range by collecting all
2730 * the pages backing each virtual address in the range, saving them inside the
2731 * src array. Then lock those pages and unmap them. Once the pages are locked
2732 * and unmapped, check whether each page is pinned or not. Pages that aren't
2733 * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2734 * corresponding src array entry. Then restores any pages that are pinned, by
2735 * remapping and unlocking those pages.
2736 *
2737 * The caller should then allocate destination memory and copy source memory to
2738 * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2739 * flag set). Once these are allocated and copied, the caller must update each
2740 * corresponding entry in the dst array with the pfn value of the destination
2741 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2742 * (destination pages must have their struct pages locked, via lock_page()).
2743 *
2744 * Note that the caller does not have to migrate all the pages that are marked
2745 * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2746 * device memory to system memory. If the caller cannot migrate a device page
2747 * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2748 * consequences for the userspace process, so it must be avoided if at all
2749 * possible.
2750 *
2751 * For empty entries inside the CPU page table (pte_none() or pmd_none() is
2752 * true) we do set the MIGRATE_PFN_MIGRATE flag inside the corresponding source
2753 * array, thus allowing the caller to allocate device memory for those unbacked
2754 * virtual addresses. For this the caller simply has to allocate device memory
2755 * and properly set the destination entry like for regular migration. Note that
2756 * this can still fail, and thus the device driver must check whether the
2757 * migration was successful for those entries after calling migrate_vma_pages(),
2758 * just like for regular migration.
2759 *
2760 * After that, the caller must call migrate_vma_pages() to go over each entry
2761 * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags
2762 * set. If the corresponding entry in the dst array has the MIGRATE_PFN_VALID
2763 * flag set, then migrate_vma_pages() migrates struct page information from the
2764 * source struct page to the destination struct page. If it fails to migrate the
2765 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2766 * src array.
2767 *
2768 * At this point all successfully migrated pages have an entry in the src
2769 * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2770 * array entry with MIGRATE_PFN_VALID flag set.
2771 *
2772 * Once migrate_vma_pages() returns the caller may inspect which pages were
2773 * successfully migrated, and which were not. Successfully migrated pages will
2774 * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2775 *
2776 * It is safe to update device page table after migrate_vma_pages() because
2777 * both destination and source page are still locked, and the mmap_lock is held
2778 * in read mode (hence no one can unmap the range being migrated).
2779 *
2780 * Once the caller is done cleaning up things and updating its page table (if it
2781 * chose to do so, this is not an obligation) it finally calls
2782 * migrate_vma_finalize() to update the CPU page table to point to new pages
2783 * for successfully migrated pages or otherwise restore the CPU page table to
2784 * point to the original source pages.
2785 */
2786int migrate_vma_setup(struct migrate_vma *args)
2787{
2788 long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2789
2790 args->start &= PAGE_MASK;
2791 args->end &= PAGE_MASK;
2792 if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2793 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2794 return -EINVAL;
2795 if (nr_pages <= 0)
2796 return -EINVAL;
2797 if (args->start < args->vma->vm_start ||
2798 args->start >= args->vma->vm_end)
2799 return -EINVAL;
2800 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2801 return -EINVAL;
2802 if (!args->src || !args->dst)
2803 return -EINVAL;
2804
2805 memset(args->src, 0, sizeof(*args->src) * nr_pages);
2806 args->cpages = 0;
2807 args->npages = 0;
2808
2809 migrate_vma_collect(args);
2810
2811 if (args->cpages)
2812 migrate_vma_prepare(args);
2813 if (args->cpages)
2814 migrate_vma_unmap(args);
2815
2816 /*
2817 * At this point pages are locked and unmapped, and thus they have
2818 * stable content and can safely be copied to destination memory that
2819 * is allocated by the drivers.
2820 */
2821 return 0;
2822
2823}
2824EXPORT_SYMBOL(migrate_vma_setup);
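/*
 * A condensed, illustrative sketch of the flow described above, as a device
 * driver might implement it when pulling system memory into device-private
 * memory (not part of the kernel build; the drv_*() helpers and NPAGES are
 * hypothetical driver code, everything else is the API exported here):
 *
 *	unsigned long src[NPAGES] = {}, dst[NPAGES] = {};
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.start		= start,
 *		.end		= start + NPAGES * PAGE_SIZE,
 *		.src		= src,
 *		.dst		= dst,
 *		.pgmap_owner	= drv_pgmap_owner,	// hypothetical owner token
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *	unsigned long i;
 *	int ret;
 *
 *	ret = migrate_vma_setup(&args);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = drv_alloc_device_page();	// hypothetical
 *		lock_page(dpage);			// dst pages must be locked
 *		drv_copy_to_device(dpage, args.src[i]);	// hypothetical
 *		// migrate_pfn() sets MIGRATE_PFN_VALID for us
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *			      MIGRATE_PFN_LOCKED;
 *	}
 *
 *	migrate_vma_pages(&args);
 *	// entries that still have MIGRATE_PFN_MIGRATE set in args.src[]
 *	// were migrated; update device page tables for those, then:
 *	migrate_vma_finalize(&args);
 */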
2825
2826/*
2827 * This code closely matches the code in:
2828 * __handle_mm_fault()
2829 * handle_pte_fault()
2830 * do_anonymous_page()
2831 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2832 * private page.
2833 */
2834static void migrate_vma_insert_page(struct migrate_vma *migrate,
2835 unsigned long addr,
2836 struct page *page,
2837 unsigned long *src,
2838 unsigned long *dst)
2839{
2840 struct vm_area_struct *vma = migrate->vma;
2841 struct mm_struct *mm = vma->vm_mm;
2842 bool flush = false;
2843 spinlock_t *ptl;
2844 pte_t entry;
2845 pgd_t *pgdp;
2846 p4d_t *p4dp;
2847 pud_t *pudp;
2848 pmd_t *pmdp;
2849 pte_t *ptep;
2850
2851 /* Only allow populating anonymous memory */
2852 if (!vma_is_anonymous(vma))
2853 goto abort;
2854
2855 pgdp = pgd_offset(mm, addr);
2856 p4dp = p4d_alloc(mm, pgdp, addr);
2857 if (!p4dp)
2858 goto abort;
2859 pudp = pud_alloc(mm, p4dp, addr);
2860 if (!pudp)
2861 goto abort;
2862 pmdp = pmd_alloc(mm, pudp, addr);
2863 if (!pmdp)
2864 goto abort;
2865
2866 if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2867 goto abort;
2868
2869 /*
2870 * Use pte_alloc() instead of pte_alloc_map(). We can't run
2871 * pte_offset_map() on pmds where a huge pmd might be created
2872 * from a different thread.
2873 *
2874 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2875 * parallel threads are excluded by other means.
2876 *
2877 * Here we only have mmap_read_lock(mm).
2878 */
2879 if (pte_alloc(mm, pmdp))
2880 goto abort;
2881
2882 /* See the comment in pte_alloc_one_map() */
2883 if (unlikely(pmd_trans_unstable(pmdp)))
2884 goto abort;
2885
2886 if (unlikely(anon_vma_prepare(vma)))
2887 goto abort;
2888 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2889 goto abort;
2890
2891 /*
2892 * The memory barrier inside __SetPageUptodate makes sure that
2893 * preceding stores to the page contents become visible before
2894 * the set_pte_at() write.
2895 */
2896 __SetPageUptodate(page);
2897
2898 if (is_zone_device_page(page)) {
2899 if (is_device_private_page(page)) {
2900 swp_entry_t swp_entry;
2901
2902 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2903 entry = swp_entry_to_pte(swp_entry);
2904 }
2905 } else {
2906 entry = mk_pte(page, vma->vm_page_prot);
2907 if (vma->vm_flags & VM_WRITE)
2908 entry = pte_mkwrite(pte_mkdirty(entry));
2909 }
2910
2911 ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2912
2913 if (check_stable_address_space(mm))
2914 goto unlock_abort;
2915
2916 if (pte_present(*ptep)) {
2917 unsigned long pfn = pte_pfn(*ptep);
2918
2919 if (!is_zero_pfn(pfn))
2920 goto unlock_abort;
2921 flush = true;
2922 } else if (!pte_none(*ptep))
2923 goto unlock_abort;
2924
2925 /*
2926 * Check for userfaultfd but do not deliver the fault. Instead,
2927 * just back off.
2928 */
2929 if (userfaultfd_missing(vma))
2930 goto unlock_abort;
2931
2932 inc_mm_counter(mm, MM_ANONPAGES);
2933 page_add_new_anon_rmap(page, vma, addr, false);
2934 if (!is_zone_device_page(page))
2935 lru_cache_add_inactive_or_unevictable(page, vma);
2936 get_page(page);
2937
2938 if (flush) {
2939 flush_cache_page(vma, addr, pte_pfn(*ptep));
2940 ptep_clear_flush_notify(vma, addr, ptep);
2941 set_pte_at_notify(mm, addr, ptep, entry);
2942 update_mmu_cache(vma, addr, ptep);
2943 } else {
2944 /* No need to invalidate - it was non-present before */
2945 set_pte_at(mm, addr, ptep, entry);
2946 update_mmu_cache(vma, addr, ptep);
2947 }
2948
2949 pte_unmap_unlock(ptep, ptl);
2950 *src = MIGRATE_PFN_MIGRATE;
2951 return;
2952
2953unlock_abort:
2954 pte_unmap_unlock(ptep, ptl);
2955abort:
2956 *src &= ~MIGRATE_PFN_MIGRATE;
2957}
2958
2959/**
2960 * migrate_vma_pages() - migrate meta-data from src page to dst page
2961 * @migrate: migrate struct containing all migration information
2962 *
2963 * This migrates struct page meta-data from source struct page to destination
2964 * struct page. This effectively finishes the migration from source page to the
2965 * destination page.
2966 */
2967void migrate_vma_pages(struct migrate_vma *migrate)
2968{
2969 const unsigned long npages = migrate->npages;
2970 const unsigned long start = migrate->start;
2971 struct mmu_notifier_range range;
2972 unsigned long addr, i;
2973 bool notified = false;
2974
2975 for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2976 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2977 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2978 struct address_space *mapping;
2979 int r;
2980
2981 if (!newpage) {
2982 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2983 continue;
2984 }
2985
2986 if (!page) {
2987 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2988 continue;
2989 if (!notified) {
2990 notified = true;
2991
2992 mmu_notifier_range_init(&range,
2993 MMU_NOTIFY_CLEAR, 0,
2994 NULL,
2995 migrate->vma->vm_mm,
2996 addr, migrate->end);
2997 mmu_notifier_invalidate_range_start(&range);
2998 }
2999 migrate_vma_insert_page(migrate, addr, newpage,
3000 &migrate->src[i],
3001 &migrate->dst[i]);
3002 continue;
3003 }
3004
3005 mapping = page_mapping(page);
3006
3007 if (is_zone_device_page(newpage)) {
3008 if (is_device_private_page(newpage)) {
3009 /*
3010				 * For now we only support private anonymous memory
3011				 * when migrating to un-addressable device memory.
3012 */
3013 if (mapping) {
3014 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3015 continue;
3016 }
3017 } else {
3018 /*
3019 * Other types of ZONE_DEVICE page are not
3020 * supported.
3021 */
3022 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3023 continue;
3024 }
3025 }
3026
3027 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
3028 if (r != MIGRATEPAGE_SUCCESS)
3029 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3030 }
3031
3032 /*
3033 * No need to double call mmu_notifier->invalidate_range() callback as
3034 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
3035	 * already called it.
3036 */
3037 if (notified)
3038 mmu_notifier_invalidate_range_only_end(&range);
3039}
3040EXPORT_SYMBOL(migrate_vma_pages);
3041
3042/**
3043 * migrate_vma_finalize() - restore CPU page table entry
3044 * @migrate: migrate struct containing all migration information
3045 *
3046 * This replaces the special migration pte entry with either a mapping to the
3047 * new page if migration was successful for that page, or to the original page
3048 * otherwise.
3049 *
3050 * This also unlocks the pages and puts them back on the lru, or drops the extra
3051 * refcount, for device pages.
3052 */
3053void migrate_vma_finalize(struct migrate_vma *migrate)
3054{
3055 const unsigned long npages = migrate->npages;
3056 unsigned long i;
3057
3058 for (i = 0; i < npages; i++) {
3059 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3060 struct page *page = migrate_pfn_to_page(migrate->src[i]);
3061
3062 if (!page) {
3063 if (newpage) {
3064 unlock_page(newpage);
3065 put_page(newpage);
3066 }
3067 continue;
3068 }
3069
3070 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
3071 if (newpage) {
3072 unlock_page(newpage);
3073 put_page(newpage);
3074 }
3075 newpage = page;
3076 }
3077
3078 remove_migration_ptes(page, newpage, false);
3079 unlock_page(page);
3080 migrate->cpages--;
3081
3082 if (is_zone_device_page(page))
3083 put_page(page);
3084 else
3085 putback_lru_page(page);
3086
3087 if (newpage != page) {
3088 unlock_page(newpage);
3089 if (is_zone_device_page(newpage))
3090 put_page(newpage);
3091 else
3092 putback_lru_page(newpage);
3093 }
3094 }
3095}
3096EXPORT_SYMBOL(migrate_vma_finalize);
3097#endif /* CONFIG_DEVICE_PRIVATE */
67
68/*
69 * Add isolated pages on the list back to the LRU under page lock
70 * to avoid leaking evictable pages back onto unevictable list.
71 */
72void putback_lru_pages(struct list_head *l)
73{
74 struct page *page;
75 struct page *page2;
76
77 list_for_each_entry_safe(page, page2, l, lru) {
78 list_del(&page->lru);
79 dec_zone_page_state(page, NR_ISOLATED_ANON +
80 page_is_file_cache(page));
81 putback_lru_page(page);
82 }
83}
84
85/*
86 * Restore a potential migration pte to a working pte entry
87 */
88static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
89 unsigned long addr, void *old)
90{
91 struct mm_struct *mm = vma->vm_mm;
92 swp_entry_t entry;
93 pgd_t *pgd;
94 pud_t *pud;
95 pmd_t *pmd;
96 pte_t *ptep, pte;
97 spinlock_t *ptl;
98
99 if (unlikely(PageHuge(new))) {
100 ptep = huge_pte_offset(mm, addr);
101 if (!ptep)
102 goto out;
103 ptl = &mm->page_table_lock;
104 } else {
105 pgd = pgd_offset(mm, addr);
106 if (!pgd_present(*pgd))
107 goto out;
108
109 pud = pud_offset(pgd, addr);
110 if (!pud_present(*pud))
111 goto out;
112
113 pmd = pmd_offset(pud, addr);
114 if (pmd_trans_huge(*pmd))
115 goto out;
116 if (!pmd_present(*pmd))
117 goto out;
118
119 ptep = pte_offset_map(pmd, addr);
120
121 /*
122 * Peek to check is_swap_pte() before taking ptlock? No, we
123 * can race mremap's move_ptes(), which skips anon_vma lock.
124 */
125
126 ptl = pte_lockptr(mm, pmd);
127 }
128
129 spin_lock(ptl);
130 pte = *ptep;
131 if (!is_swap_pte(pte))
132 goto unlock;
133
134 entry = pte_to_swp_entry(pte);
135
136 if (!is_migration_entry(entry) ||
137 migration_entry_to_page(entry) != old)
138 goto unlock;
139
140 get_page(new);
141 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
142 if (is_write_migration_entry(entry))
143 pte = pte_mkwrite(pte);
144#ifdef CONFIG_HUGETLB_PAGE
145 if (PageHuge(new))
146 pte = pte_mkhuge(pte);
147#endif
148 flush_cache_page(vma, addr, pte_pfn(pte));
149 set_pte_at(mm, addr, ptep, pte);
150
151 if (PageHuge(new)) {
152 if (PageAnon(new))
153 hugepage_add_anon_rmap(new, vma, addr);
154 else
155 page_dup_rmap(new);
156 } else if (PageAnon(new))
157 page_add_anon_rmap(new, vma, addr);
158 else
159 page_add_file_rmap(new);
160
161 /* No need to invalidate - it was non-present before */
162 update_mmu_cache(vma, addr, ptep);
163unlock:
164 pte_unmap_unlock(ptep, ptl);
165out:
166 return SWAP_AGAIN;
167}
168
169/*
170 * Get rid of all migration entries and replace them by
171 * references to the indicated page.
172 */
173static void remove_migration_ptes(struct page *old, struct page *new)
174{
175 rmap_walk(new, remove_migration_pte, old);
176}
177
178/*
179 * Something used the pte of a page under migration. We need to
180 * get to the page and wait until migration is finished.
181 * When we return from this function the fault will be retried.
182 */
183void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
184 unsigned long address)
185{
186 pte_t *ptep, pte;
187 spinlock_t *ptl;
188 swp_entry_t entry;
189 struct page *page;
190
191 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
192 pte = *ptep;
193 if (!is_swap_pte(pte))
194 goto out;
195
196 entry = pte_to_swp_entry(pte);
197 if (!is_migration_entry(entry))
198 goto out;
199
200 page = migration_entry_to_page(entry);
201
202 /*
203 * Once radix-tree replacement of page migration started, page_count
204 * *must* be zero. And, we don't want to call wait_on_page_locked()
205 * against a page without get_page().
206 * So, we use get_page_unless_zero(), here. Even failed, page fault
207 * will occur again.
208 */
209 if (!get_page_unless_zero(page))
210 goto out;
211 pte_unmap_unlock(ptep, ptl);
212 wait_on_page_locked(page);
213 put_page(page);
214 return;
215out:
216 pte_unmap_unlock(ptep, ptl);
217}
218
219#ifdef CONFIG_BLOCK
220/* Returns true if all buffers are successfully locked */
221static bool buffer_migrate_lock_buffers(struct buffer_head *head,
222 enum migrate_mode mode)
223{
224 struct buffer_head *bh = head;
225
226 /* Simple case, sync compaction */
227 if (mode != MIGRATE_ASYNC) {
228 do {
229 get_bh(bh);
230 lock_buffer(bh);
231 bh = bh->b_this_page;
232
233 } while (bh != head);
234
235 return true;
236 }
237
238 /* async case, we cannot block on lock_buffer so use trylock_buffer */
239 do {
240 get_bh(bh);
241 if (!trylock_buffer(bh)) {
242 /*
243 * We failed to lock the buffer and cannot stall in
244 * async migration. Release the taken locks
245 */
246 struct buffer_head *failed_bh = bh;
247 put_bh(failed_bh);
248 bh = head;
249 while (bh != failed_bh) {
250 unlock_buffer(bh);
251 put_bh(bh);
252 bh = bh->b_this_page;
253 }
254 return false;
255 }
256
257 bh = bh->b_this_page;
258 } while (bh != head);
259 return true;
260}
261#else
262static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
263 enum migrate_mode mode)
264{
265 return true;
266}
267#endif /* CONFIG_BLOCK */
268
269/*
270 * Replace the page in the mapping.
271 *
272 * The number of remaining references must be:
273 * 1 for anonymous pages without a mapping
274 * 2 for pages with a mapping
275 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
276 */
277static int migrate_page_move_mapping(struct address_space *mapping,
278 struct page *newpage, struct page *page,
279 struct buffer_head *head, enum migrate_mode mode)
280{
281 int expected_count;
282 void **pslot;
283
284 if (!mapping) {
285 /* Anonymous page without mapping */
286 if (page_count(page) != 1)
287 return -EAGAIN;
288 return 0;
289 }
290
291 spin_lock_irq(&mapping->tree_lock);
292
293 pslot = radix_tree_lookup_slot(&mapping->page_tree,
294 page_index(page));
295
296 expected_count = 2 + page_has_private(page);
297 if (page_count(page) != expected_count ||
298 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
299 spin_unlock_irq(&mapping->tree_lock);
300 return -EAGAIN;
301 }
302
303 if (!page_freeze_refs(page, expected_count)) {
304 spin_unlock_irq(&mapping->tree_lock);
305 return -EAGAIN;
306 }
307
308 /*
309 * In the async migration case of moving a page with buffers, lock the
310 * buffers using trylock before the mapping is moved. If the mapping
311 * was moved, we later failed to lock the buffers and could not move
312 * the mapping back due to an elevated page count, we would have to
313 * block waiting on other references to be dropped.
314 */
315 if (mode == MIGRATE_ASYNC && head &&
316 !buffer_migrate_lock_buffers(head, mode)) {
317 page_unfreeze_refs(page, expected_count);
318 spin_unlock_irq(&mapping->tree_lock);
319 return -EAGAIN;
320 }
321
322 /*
323 * Now we know that no one else is looking at the page.
324 */
325 get_page(newpage); /* add cache reference */
326 if (PageSwapCache(page)) {
327 SetPageSwapCache(newpage);
328 set_page_private(newpage, page_private(page));
329 }
330
331 radix_tree_replace_slot(pslot, newpage);
332
333 /*
334 * Drop cache reference from old page by unfreezing
335 * to one less reference.
336 * We know this isn't the last reference.
337 */
338 page_unfreeze_refs(page, expected_count - 1);
339
340 /*
341 * If moved to a different zone then also account
342 * the page for that zone. Other VM counters will be
343 * taken care of when we establish references to the
344 * new page and drop references to the old page.
345 *
346 * Note that anonymous pages are accounted for
347 * via NR_FILE_PAGES and NR_ANON_PAGES if they
348 * are mapped to swap space.
349 */
350 __dec_zone_page_state(page, NR_FILE_PAGES);
351 __inc_zone_page_state(newpage, NR_FILE_PAGES);
352 if (!PageSwapCache(page) && PageSwapBacked(page)) {
353 __dec_zone_page_state(page, NR_SHMEM);
354 __inc_zone_page_state(newpage, NR_SHMEM);
355 }
356 spin_unlock_irq(&mapping->tree_lock);
357
358 return 0;
359}
360
361/*
362 * The expected number of remaining references is the same as that
363 * of migrate_page_move_mapping().
364 */
365int migrate_huge_page_move_mapping(struct address_space *mapping,
366 struct page *newpage, struct page *page)
367{
368 int expected_count;
369 void **pslot;
370
371 if (!mapping) {
372 if (page_count(page) != 1)
373 return -EAGAIN;
374 return 0;
375 }
376
377 spin_lock_irq(&mapping->tree_lock);
378
379 pslot = radix_tree_lookup_slot(&mapping->page_tree,
380 page_index(page));
381
382 expected_count = 2 + page_has_private(page);
383 if (page_count(page) != expected_count ||
384 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
385 spin_unlock_irq(&mapping->tree_lock);
386 return -EAGAIN;
387 }
388
389 if (!page_freeze_refs(page, expected_count)) {
390 spin_unlock_irq(&mapping->tree_lock);
391 return -EAGAIN;
392 }
393
394 get_page(newpage);
395
396 radix_tree_replace_slot(pslot, newpage);
397
398 page_unfreeze_refs(page, expected_count - 1);
399
400 spin_unlock_irq(&mapping->tree_lock);
401 return 0;
402}
403
404/*
405 * Copy the page to its new location
406 */
407void migrate_page_copy(struct page *newpage, struct page *page)
408{
409 if (PageHuge(page))
410 copy_huge_page(newpage, page);
411 else
412 copy_highpage(newpage, page);
413
414 if (PageError(page))
415 SetPageError(newpage);
416 if (PageReferenced(page))
417 SetPageReferenced(newpage);
418 if (PageUptodate(page))
419 SetPageUptodate(newpage);
420 if (TestClearPageActive(page)) {
421 VM_BUG_ON(PageUnevictable(page));
422 SetPageActive(newpage);
423 } else if (TestClearPageUnevictable(page))
424 SetPageUnevictable(newpage);
425 if (PageChecked(page))
426 SetPageChecked(newpage);
427 if (PageMappedToDisk(page))
428 SetPageMappedToDisk(newpage);
429
430 if (PageDirty(page)) {
431 clear_page_dirty_for_io(page);
432 /*
433 * Want to mark the page and the radix tree as dirty, and
434 * redo the accounting that clear_page_dirty_for_io undid,
435 * but we can't use set_page_dirty because that function
436 * is actually a signal that all of the page has become dirty.
437 * Whereas only part of our page may be dirty.
438 */
439 if (PageSwapBacked(page))
440 SetPageDirty(newpage);
441 else
442 __set_page_dirty_nobuffers(newpage);
443 }
444
445 mlock_migrate_page(newpage, page);
446 ksm_migrate_page(newpage, page);
447
448 ClearPageSwapCache(page);
449 ClearPagePrivate(page);
450 set_page_private(page, 0);
451
452 /*
453 * If any waiters have accumulated on the new page then
454 * wake them up.
455 */
456 if (PageWriteback(newpage))
457 end_page_writeback(newpage);
458}
459
460/************************************************************
461 * Migration functions
462 ***********************************************************/
463
464/* Always fail migration. Used for mappings that are not movable */
465int fail_migrate_page(struct address_space *mapping,
466 struct page *newpage, struct page *page)
467{
468 return -EIO;
469}
470EXPORT_SYMBOL(fail_migrate_page);
471
472/*
473 * Common logic to directly migrate a single page suitable for
474 * pages that do not use PagePrivate/PagePrivate2.
475 *
476 * Pages are locked upon entry and exit.
477 */
478int migrate_page(struct address_space *mapping,
479 struct page *newpage, struct page *page,
480 enum migrate_mode mode)
481{
482 int rc;
483
484 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
485
486 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
487
488 if (rc)
489 return rc;
490
491 migrate_page_copy(newpage, page);
492 return 0;
493}
494EXPORT_SYMBOL(migrate_page);
495
496#ifdef CONFIG_BLOCK
497/*
498 * Migration function for pages with buffers. This function can only be used
499 * if the underlying filesystem guarantees that no other references to "page"
500 * exist.
501 */
502int buffer_migrate_page(struct address_space *mapping,
503 struct page *newpage, struct page *page, enum migrate_mode mode)
504{
505 struct buffer_head *bh, *head;
506 int rc;
507
508 if (!page_has_buffers(page))
509 return migrate_page(mapping, newpage, page, mode);
510
511 head = page_buffers(page);
512
513 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
514
515 if (rc)
516 return rc;
517
518 /*
519 * In the async case, migrate_page_move_mapping locked the buffers
520 * with an IRQ-safe spinlock held. In the sync case, the buffers
521 * need to be locked now
522 */
523 if (mode != MIGRATE_ASYNC)
524 BUG_ON(!buffer_migrate_lock_buffers(head, mode));
525
526 ClearPagePrivate(page);
527 set_page_private(newpage, page_private(page));
528 set_page_private(page, 0);
529 put_page(page);
530 get_page(newpage);
531
532 bh = head;
533 do {
534 set_bh_page(bh, newpage, bh_offset(bh));
535 bh = bh->b_this_page;
536
537 } while (bh != head);
538
539 SetPagePrivate(newpage);
540
541 migrate_page_copy(newpage, page);
542
543 bh = head;
544 do {
545 unlock_buffer(bh);
546 put_bh(bh);
547 bh = bh->b_this_page;
548
549 } while (bh != head);
550
551 return 0;
552}
553EXPORT_SYMBOL(buffer_migrate_page);
554#endif
555
556/*
557 * Writeback a page to clean the dirty state
558 */
559static int writeout(struct address_space *mapping, struct page *page)
560{
561 struct writeback_control wbc = {
562 .sync_mode = WB_SYNC_NONE,
563 .nr_to_write = 1,
564 .range_start = 0,
565 .range_end = LLONG_MAX,
566 .for_reclaim = 1
567 };
568 int rc;
569
570 if (!mapping->a_ops->writepage)
571 /* No write method for the address space */
572 return -EINVAL;
573
574 if (!clear_page_dirty_for_io(page))
575 /* Someone else already triggered a write */
576 return -EAGAIN;
577
578 /*
579 * A dirty page may imply that the underlying filesystem has
580 * the page on some queue. So the page must be clean for
581 * migration. Writeout may mean we loose the lock and the
582 * page state is no longer what we checked for earlier.
583 * At this point we know that the migration attempt cannot
584 * be successful.
585 */
586 remove_migration_ptes(page, page);
587
588 rc = mapping->a_ops->writepage(page, &wbc);
589
590 if (rc != AOP_WRITEPAGE_ACTIVATE)
591 /* unlocked. Relock */
592 lock_page(page);
593
594 return (rc < 0) ? -EIO : -EAGAIN;
595}
596
597/*
598 * Default handling if a filesystem does not provide a migration function.
599 */
600static int fallback_migrate_page(struct address_space *mapping,
601 struct page *newpage, struct page *page, enum migrate_mode mode)
602{
603 if (PageDirty(page)) {
604 /* Only writeback pages in full synchronous migration */
605 if (mode != MIGRATE_SYNC)
606 return -EBUSY;
607 return writeout(mapping, page);
608 }
609
610 /*
611 * Buffers may be managed in a filesystem specific way.
612 * We must have no buffers or drop them.
613 */
614 if (page_has_private(page) &&
615 !try_to_release_page(page, GFP_KERNEL))
616 return -EAGAIN;
617
618 return migrate_page(mapping, newpage, page, mode);
619}
620
621/*
622 * Move a page to a newly allocated page
623 * The page is locked and all ptes have been successfully removed.
624 *
625 * The new page will have replaced the old page if this function
626 * is successful.
627 *
628 * Return value:
629 * < 0 - error code
630 * == 0 - success
631 */
632static int move_to_new_page(struct page *newpage, struct page *page,
633 int remap_swapcache, enum migrate_mode mode)
634{
635 struct address_space *mapping;
636 int rc;
637
638 /*
639 * Block others from accessing the page when we get around to
640 * establishing additional references. We are the only one
641 * holding a reference to the new page at this point.
642 */
643 if (!trylock_page(newpage))
644 BUG();
645
646 /* Prepare mapping for the new page.*/
647 newpage->index = page->index;
648 newpage->mapping = page->mapping;
649 if (PageSwapBacked(page))
650 SetPageSwapBacked(newpage);
651
652 mapping = page_mapping(page);
653 if (!mapping)
654 rc = migrate_page(mapping, newpage, page, mode);
655 else if (mapping->a_ops->migratepage)
656 /*
657 * Most pages have a mapping and most filesystems provide a
658 * migratepage callback. Anonymous pages are part of swap
659 * space which also has its own migratepage callback. This
660 * is the most common path for page migration.
661 */
662 rc = mapping->a_ops->migratepage(mapping,
663 newpage, page, mode);
664 else
665 rc = fallback_migrate_page(mapping, newpage, page, mode);
666
667 if (rc) {
668 newpage->mapping = NULL;
669 } else {
670 if (remap_swapcache)
671 remove_migration_ptes(page, newpage);
672 page->mapping = NULL;
673 }
674
675 unlock_page(newpage);
676
677 return rc;
678}
679
680static int __unmap_and_move(struct page *page, struct page *newpage,
681 int force, bool offlining, enum migrate_mode mode)
682{
683 int rc = -EAGAIN;
684 int remap_swapcache = 1;
685 int charge = 0;
686 struct mem_cgroup *mem;
687 struct anon_vma *anon_vma = NULL;
688
689 if (!trylock_page(page)) {
690 if (!force || mode == MIGRATE_ASYNC)
691 goto out;
692
693 /*
694 * It's not safe for direct compaction to call lock_page.
695 * For example, during page readahead pages are added locked
696 * to the LRU. Later, when the IO completes the pages are
697 * marked uptodate and unlocked. However, the queueing
698 * could be merging multiple pages for one bio (e.g.
699 * mpage_readpages). If an allocation happens for the
700 * second or third page, the process can end up locking
701 * the same page twice and deadlocking. Rather than
702 * trying to be clever about what pages can be locked,
703 * avoid the use of lock_page for direct compaction
704 * altogether.
705 */
706 if (current->flags & PF_MEMALLOC)
707 goto out;
708
709 lock_page(page);
710 }
711
712 /*
713 * Only memory hotplug's offline_pages() caller has locked out KSM,
714 * and can safely migrate a KSM page. The other cases have skipped
715 * PageKsm along with PageReserved - but it is only now when we have
716 * the page lock that we can be certain it will not go KSM beneath us
717 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
718 * its pagecount raised, but only here do we take the page lock which
719 * serializes that).
720 */
721 if (PageKsm(page) && !offlining) {
722 rc = -EBUSY;
723 goto unlock;
724 }
725
726 /* charge against new page */
727 charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
728 if (charge == -ENOMEM) {
729 rc = -ENOMEM;
730 goto unlock;
731 }
732 BUG_ON(charge);
733
734 if (PageWriteback(page)) {
735 /*
736 * Only in the case of a full syncronous migration is it
737 * necessary to wait for PageWriteback. In the async case,
738 * the retry loop is too short and in the sync-light case,
739 * the overhead of stalling is too much
740 */
741 if (mode != MIGRATE_SYNC) {
742 rc = -EBUSY;
743 goto uncharge;
744 }
745 if (!force)
746 goto uncharge;
747 wait_on_page_writeback(page);
748 }
749 /*
750 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
751 * we cannot notice that anon_vma is freed while we migrates a page.
752 * This get_anon_vma() delays freeing anon_vma pointer until the end
753 * of migration. File cache pages are no problem because of page_lock()
754 * File Caches may use write_page() or lock_page() in migration, then,
755 * just care Anon page here.
756 */
757 if (PageAnon(page)) {
758 /*
759 * Only page_lock_anon_vma() understands the subtleties of
760 * getting a hold on an anon_vma from outside one of its mms.
761 */
762 anon_vma = page_get_anon_vma(page);
763 if (anon_vma) {
764 /*
765 * Anon page
766 */
767 } else if (PageSwapCache(page)) {
768 /*
769 * We cannot be sure that the anon_vma of an unmapped
770 * swapcache page is safe to use because we don't
771 * know in advance if the VMA that this page belonged
772 * to still exists. If the VMA and others sharing the
773 * data have been freed, then the anon_vma could
774 * already be invalid.
775 *
776 * To avoid this possibility, swapcache pages get
777 * migrated but are not remapped when migration
778 * completes
779 */
780 remap_swapcache = 0;
781 } else {
782 goto uncharge;
783 }
784 }
785
786 /*
787 * Corner case handling:
788 * 1. When a new swap-cache page is read into, it is added to the LRU
789 * and treated as swapcache but it has no rmap yet.
790 * Calling try_to_unmap() against a page->mapping==NULL page will
791 * trigger a BUG. So handle it here.
792 * 2. An orphaned page (see truncate_complete_page) might have
793 * fs-private metadata. The page can be picked up due to memory
794 * offlining. Everywhere else except page reclaim, the page is
795 * invisible to the vm, so the page can not be migrated. So try to
796 * free the metadata, so the page can be freed.
797 */
798 if (!page->mapping) {
799 VM_BUG_ON(PageAnon(page));
800 if (page_has_private(page)) {
801 try_to_free_buffers(page);
802 goto uncharge;
803 }
804 goto skip_unmap;
805 }
806
807 /* Establish migration ptes or remove ptes */
808 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
809
810skip_unmap:
811 if (!page_mapped(page))
812 rc = move_to_new_page(newpage, page, remap_swapcache, mode);
813
814 if (rc && remap_swapcache)
815 remove_migration_ptes(page, page);
816
817 /* Drop an anon_vma reference if we took one */
818 if (anon_vma)
819 put_anon_vma(anon_vma);
820
821uncharge:
822 if (!charge)
823 mem_cgroup_end_migration(mem, page, newpage, rc == 0);
824unlock:
825 unlock_page(page);
826out:
827 return rc;
828}
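
/*
 * Illustrative sketch, not part of the original file: how the three migrate
 * modes map onto the PageWriteback handling in __unmap_and_move() above.
 * MIGRATE_ASYNC and MIGRATE_SYNC_LIGHT give up with -EBUSY; only a forced
 * MIGRATE_SYNC migration blocks in wait_on_page_writeback().
 */
#if 0	/* example only, not built */
static bool example_may_wait_writeback(enum migrate_mode mode, int force)
{
	/* Mirrors the checks under "if (PageWriteback(page))" above. */
	return mode == MIGRATE_SYNC && force;
}
#endif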
829
830/*
831 * Obtain the lock on page, remove all ptes and migrate the page
832 * to the newly allocated page in newpage.
833 */
834static int unmap_and_move(new_page_t get_new_page, unsigned long private,
835 struct page *page, int force, bool offlining,
836 enum migrate_mode mode)
837{
838 int rc = 0;
839 int *result = NULL;
840 struct page *newpage = get_new_page(page, private, &result);
841
842 if (!newpage)
843 return -ENOMEM;
844
845 if (page_count(page) == 1) {
846 /* page was freed from under us. So we are done. */
847 goto out;
848 }
849
850 if (unlikely(PageTransHuge(page)))
851 if (unlikely(split_huge_page(page)))
852 goto out;
853
854 rc = __unmap_and_move(page, newpage, force, offlining, mode);
855out:
856 if (rc != -EAGAIN) {
857 /*
858 * A page that has been migrated has all references
859 * removed and will be freed. A page that has not been
860		 * migrated will have kept its references and be
861 * restored.
862 */
863 list_del(&page->lru);
864 dec_zone_page_state(page, NR_ISOLATED_ANON +
865 page_is_file_cache(page));
866 putback_lru_page(page);
867 }
868 /*
869 * Move the new page to the LRU. If migration was not successful
870 * then this will free the page.
871 */
872 putback_lru_page(newpage);
873 if (result) {
874 if (rc)
875 *result = rc;
876 else
877 *result = page_to_nid(newpage);
878 }
879 return rc;
880}
881
882/*
883 * Counterpart of unmap_and_move() for hugepage migration.
884 *
885 * This function doesn't wait for the completion of hugepage I/O
886 * because there is no race between I/O and migration for hugepages.
887 * Note that currently hugepage I/O occurs only in direct I/O
888 * where no lock is held and PG_writeback is irrelevant,
889 * and the writeback status of all subpages is counted in the reference
890 * count of the head page (i.e. if all subpages of a 2MB hugepage are
891 * under direct I/O, the reference count of the head page is 512 and a bit more.)
892 * This means that when we try to migrate a hugepage whose subpages are
893 * doing direct I/O, some references remain after try_to_unmap() and
894 * hugepage migration fails without data corruption.
895 *
896 * There is also no race when direct I/O is issued on a page under migration,
897 * because the pte is then replaced with a migration swap entry and the
898 * direct I/O code will wait in the page fault for migration to complete.
899 */
900static int unmap_and_move_huge_page(new_page_t get_new_page,
901 unsigned long private, struct page *hpage,
902 int force, bool offlining,
903 enum migrate_mode mode)
904{
905 int rc = 0;
906 int *result = NULL;
907 struct page *new_hpage = get_new_page(hpage, private, &result);
908 struct anon_vma *anon_vma = NULL;
909
910 if (!new_hpage)
911 return -ENOMEM;
912
913 rc = -EAGAIN;
914
915 if (!trylock_page(hpage)) {
916 if (!force || mode != MIGRATE_SYNC)
917 goto out;
918 lock_page(hpage);
919 }
920
921 if (PageAnon(hpage))
922 anon_vma = page_get_anon_vma(hpage);
923
924 try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
925
926 if (!page_mapped(hpage))
927 rc = move_to_new_page(new_hpage, hpage, 1, mode);
928
929 if (rc)
930 remove_migration_ptes(hpage, hpage);
931
932 if (anon_vma)
933 put_anon_vma(anon_vma);
934 unlock_page(hpage);
935
936out:
937 if (rc != -EAGAIN) {
938 list_del(&hpage->lru);
939 put_page(hpage);
940 }
941
942 put_page(new_hpage);
943
944 if (result) {
945 if (rc)
946 *result = rc;
947 else
948 *result = page_to_nid(new_hpage);
949 }
950 return rc;
951}
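
/*
 * Worked example of the reference-count reasoning above (an illustration,
 * not code from this file): for a 2MB hugepage made of 512 subpages that are
 * all under direct I/O, the head page carries roughly 512 I/O references on
 * top of the usual mapping and isolation references.  try_to_unmap() only
 * removes the mapping references, so even once page_mapped() is clear the
 * copy path still sees the extra pins, returns an error, and the hugepage
 * stays where it is instead of corrupting the in-flight I/O.
 */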
952
953/*
954 * migrate_pages
955 *
956 * The function takes one list of pages to migrate and a function
957 * that, given the page to be migrated and the private data, determines
958 * the target of the move and allocates the new page.
959 *
960 * The function returns after 10 attempts or when no pages
961 * are movable any more because the list has become empty
962 * or no retryable pages exist any more.
963 * The caller should call putback_lru_pages() to return the pages to the
964 * LRU or free list only if ret != 0.
965 *
966 * Return: Number of pages not migrated or an error code.
967 */
968int migrate_pages(struct list_head *from,
969 new_page_t get_new_page, unsigned long private, bool offlining,
970 enum migrate_mode mode)
971{
972 int retry = 1;
973 int nr_failed = 0;
974 int pass = 0;
975 struct page *page;
976 struct page *page2;
977 int swapwrite = current->flags & PF_SWAPWRITE;
978 int rc;
979
980 if (!swapwrite)
981 current->flags |= PF_SWAPWRITE;
982
983 for(pass = 0; pass < 10 && retry; pass++) {
984 retry = 0;
985
986 list_for_each_entry_safe(page, page2, from, lru) {
987 cond_resched();
988
989 rc = unmap_and_move(get_new_page, private,
990 page, pass > 2, offlining,
991 mode);
992
993 switch(rc) {
994 case -ENOMEM:
995 goto out;
996 case -EAGAIN:
997 retry++;
998 break;
999 case 0:
1000 break;
1001 default:
1002 /* Permanent failure */
1003 nr_failed++;
1004 break;
1005 }
1006 }
1007 }
1008 rc = 0;
1009out:
1010 if (!swapwrite)
1011 current->flags &= ~PF_SWAPWRITE;
1012
1013 if (rc)
1014 return rc;
1015
1016 return nr_failed + retry;
1017}
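
/*
 * Illustrative sketch, not part of the original file: the typical calling
 * pattern for migrate_pages().  A caller isolates candidate pages onto a
 * private list, supplies a new_page_t allocator, and puts back whatever
 * could not be migrated.  The "example_" names are hypothetical.
 */
#if 0	/* example only, not built */
static struct page *example_alloc_target(struct page *page,
					 unsigned long private, int **result)
{
	/* Allocate the destination page; real callers pick a node/zone. */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

static int example_migrate_list(struct list_head *pagelist)
{
	int ret;

	migrate_prep();			/* drain per-CPU LRU pagevecs */
	ret = migrate_pages(pagelist, example_alloc_target, 0,
			    false, MIGRATE_SYNC);
	if (ret)			/* error, or pages left unmigrated */
		putback_lru_pages(pagelist);
	return ret;
}
#endif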
1018
1019int migrate_huge_pages(struct list_head *from,
1020 new_page_t get_new_page, unsigned long private, bool offlining,
1021 enum migrate_mode mode)
1022{
1023 int retry = 1;
1024 int nr_failed = 0;
1025 int pass = 0;
1026 struct page *page;
1027 struct page *page2;
1028 int rc;
1029
1030 for (pass = 0; pass < 10 && retry; pass++) {
1031 retry = 0;
1032
1033 list_for_each_entry_safe(page, page2, from, lru) {
1034 cond_resched();
1035
1036 rc = unmap_and_move_huge_page(get_new_page,
1037 private, page, pass > 2, offlining,
1038 mode);
1039
1040 switch(rc) {
1041 case -ENOMEM:
1042 goto out;
1043 case -EAGAIN:
1044 retry++;
1045 break;
1046 case 0:
1047 break;
1048 default:
1049 /* Permanent failure */
1050 nr_failed++;
1051 break;
1052 }
1053 }
1054 }
1055 rc = 0;
1056out:
1057 if (rc)
1058 return rc;
1059
1060 return nr_failed + retry;
1061}
1062
1063#ifdef CONFIG_NUMA
1064/*
1065 * Move a list of individual pages
1066 */
1067struct page_to_node {
1068 unsigned long addr;
1069 struct page *page;
1070 int node;
1071 int status;
1072};
1073
1074static struct page *new_page_node(struct page *p, unsigned long private,
1075 int **result)
1076{
1077 struct page_to_node *pm = (struct page_to_node *)private;
1078
1079 while (pm->node != MAX_NUMNODES && pm->page != p)
1080 pm++;
1081
1082 if (pm->node == MAX_NUMNODES)
1083 return NULL;
1084
1085 *result = &pm->status;
1086
1087 return alloc_pages_exact_node(pm->node,
1088 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
1089}
1090
1091/*
1092 * Move a set of pages as indicated in the pm array. The addr
1093 * field must be set to the virtual address of the page to be moved
1094 * and the node number must contain a valid target node.
1095 * The pm array ends with node = MAX_NUMNODES.
1096 */
1097static int do_move_page_to_node_array(struct mm_struct *mm,
1098 struct page_to_node *pm,
1099 int migrate_all)
1100{
1101 int err;
1102 struct page_to_node *pp;
1103 LIST_HEAD(pagelist);
1104
1105 down_read(&mm->mmap_sem);
1106
1107 /*
1108 * Build a list of pages to migrate
1109 */
1110 for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1111 struct vm_area_struct *vma;
1112 struct page *page;
1113
1114 err = -EFAULT;
1115 vma = find_vma(mm, pp->addr);
1116 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1117 goto set_status;
1118
1119 page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
1120
1121 err = PTR_ERR(page);
1122 if (IS_ERR(page))
1123 goto set_status;
1124
1125 err = -ENOENT;
1126 if (!page)
1127 goto set_status;
1128
1129 /* Use PageReserved to check for zero page */
1130 if (PageReserved(page) || PageKsm(page))
1131 goto put_and_set;
1132
1133 pp->page = page;
1134 err = page_to_nid(page);
1135
1136 if (err == pp->node)
1137 /*
1138				 * Page is already on the target node
1139 */
1140 goto put_and_set;
1141
1142 err = -EACCES;
1143 if (page_mapcount(page) > 1 &&
1144 !migrate_all)
1145 goto put_and_set;
1146
1147 err = isolate_lru_page(page);
1148 if (!err) {
1149 list_add_tail(&page->lru, &pagelist);
1150 inc_zone_page_state(page, NR_ISOLATED_ANON +
1151 page_is_file_cache(page));
1152 }
1153put_and_set:
1154 /*
1155 * Either remove the duplicate refcount from
1156 * isolate_lru_page() or drop the page ref if it was
1157 * not isolated.
1158 */
1159 put_page(page);
1160set_status:
1161 pp->status = err;
1162 }
1163
1164 err = 0;
1165 if (!list_empty(&pagelist)) {
1166 err = migrate_pages(&pagelist, new_page_node,
1167 (unsigned long)pm, 0, MIGRATE_SYNC);
1168 if (err)
1169 putback_lru_pages(&pagelist);
1170 }
1171
1172 up_read(&mm->mmap_sem);
1173 return err;
1174}
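
/*
 * Illustrative sketch, not part of the original file: the pm array layout
 * that do_move_page_to_node_array() expects - one entry per page, with a
 * final entry whose node is MAX_NUMNODES acting as the end marker (the same
 * terminator new_page_node() scans for).  The function name is hypothetical.
 */
#if 0	/* example only, not built */
static void example_fill_pm(struct page_to_node *pm,
			    unsigned long addr0, unsigned long addr1)
{
	pm[0].addr = addr0;
	pm[0].node = 0;			/* move the first page to node 0 */
	pm[1].addr = addr1;
	pm[1].node = 1;			/* move the second page to node 1 */
	pm[2].node = MAX_NUMNODES;	/* end marker */
}
#endif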
1175
1176/*
1177 * Migrate an array of page addresses to an array of nodes and fill in
1178 * the corresponding array of status.
1179 */
1180static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1181 unsigned long nr_pages,
1182 const void __user * __user *pages,
1183 const int __user *nodes,
1184 int __user *status, int flags)
1185{
1186 struct page_to_node *pm;
1187 unsigned long chunk_nr_pages;
1188 unsigned long chunk_start;
1189 int err;
1190
1191 err = -ENOMEM;
1192 pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1193 if (!pm)
1194 goto out;
1195
1196 migrate_prep();
1197
1198 /*
1199	 * Store a chunk of the page_to_node array in one page,
1200	 * but keep the last slot free as an end marker
1201 */
1202 chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1203
1204 for (chunk_start = 0;
1205 chunk_start < nr_pages;
1206 chunk_start += chunk_nr_pages) {
1207 int j;
1208
1209 if (chunk_start + chunk_nr_pages > nr_pages)
1210 chunk_nr_pages = nr_pages - chunk_start;
1211
1212 /* fill the chunk pm with addrs and nodes from user-space */
1213 for (j = 0; j < chunk_nr_pages; j++) {
1214 const void __user *p;
1215 int node;
1216
1217 err = -EFAULT;
1218 if (get_user(p, pages + j + chunk_start))
1219 goto out_pm;
1220 pm[j].addr = (unsigned long) p;
1221
1222 if (get_user(node, nodes + j + chunk_start))
1223 goto out_pm;
1224
1225 err = -ENODEV;
1226 if (node < 0 || node >= MAX_NUMNODES)
1227 goto out_pm;
1228
1229 if (!node_state(node, N_HIGH_MEMORY))
1230 goto out_pm;
1231
1232 err = -EACCES;
1233 if (!node_isset(node, task_nodes))
1234 goto out_pm;
1235
1236 pm[j].node = node;
1237 }
1238
1239 /* End marker for this chunk */
1240 pm[chunk_nr_pages].node = MAX_NUMNODES;
1241
1242 /* Migrate this chunk */
1243 err = do_move_page_to_node_array(mm, pm,
1244 flags & MPOL_MF_MOVE_ALL);
1245 if (err < 0)
1246 goto out_pm;
1247
1248 /* Return status information */
1249 for (j = 0; j < chunk_nr_pages; j++)
1250 if (put_user(pm[j].status, status + j + chunk_start)) {
1251 err = -EFAULT;
1252 goto out_pm;
1253 }
1254 }
1255 err = 0;
1256
1257out_pm:
1258 free_page((unsigned long)pm);
1259out:
1260 return err;
1261}
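
/*
 * Worked example of the chunking above (an illustration, assuming 4KB pages
 * and a typical 64-bit build where sizeof(struct page_to_node) is 24 bytes):
 * PAGE_SIZE / 24 = 170 slots fit in the scratch page, so chunk_nr_pages is
 * 169 and the remaining slot holds the MAX_NUMNODES end marker.
 * do_pages_move() therefore walks the user request 169 pages at a time.
 */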
1262
1263/*
1264 * Determine the nodes of an array of pages and store them in an array of status.
1265 */
1266static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1267 const void __user **pages, int *status)
1268{
1269 unsigned long i;
1270
1271 down_read(&mm->mmap_sem);
1272
1273 for (i = 0; i < nr_pages; i++) {
1274 unsigned long addr = (unsigned long)(*pages);
1275 struct vm_area_struct *vma;
1276 struct page *page;
1277 int err = -EFAULT;
1278
1279 vma = find_vma(mm, addr);
1280 if (!vma || addr < vma->vm_start)
1281 goto set_status;
1282
1283 page = follow_page(vma, addr, 0);
1284
1285 err = PTR_ERR(page);
1286 if (IS_ERR(page))
1287 goto set_status;
1288
1289 err = -ENOENT;
1290 /* Use PageReserved to check for zero page */
1291 if (!page || PageReserved(page) || PageKsm(page))
1292 goto set_status;
1293
1294 err = page_to_nid(page);
1295set_status:
1296 *status = err;
1297
1298 pages++;
1299 status++;
1300 }
1301
1302 up_read(&mm->mmap_sem);
1303}
1304
1305/*
1306 * Determine the nodes of a user array of pages and store it in
1307 * a user array of status.
1308 */
1309static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1310 const void __user * __user *pages,
1311 int __user *status)
1312{
1313#define DO_PAGES_STAT_CHUNK_NR 16
1314 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1315 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1316
1317 while (nr_pages) {
1318 unsigned long chunk_nr;
1319
1320 chunk_nr = nr_pages;
1321 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1322 chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1323
1324 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1325 break;
1326
1327 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1328
1329 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1330 break;
1331
1332 pages += chunk_nr;
1333 status += chunk_nr;
1334 nr_pages -= chunk_nr;
1335 }
1336 return nr_pages ? -EFAULT : 0;
1337}
1338
1339/*
1340 * Move a list of pages in the address space of the process identified by
1341 * pid (or of the current process when pid is 0).
1342 */
1343SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1344 const void __user * __user *, pages,
1345 const int __user *, nodes,
1346 int __user *, status, int, flags)
1347{
1348 const struct cred *cred = current_cred(), *tcred;
1349 struct task_struct *task;
1350 struct mm_struct *mm;
1351 int err;
1352 nodemask_t task_nodes;
1353
1354 /* Check flags */
1355 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1356 return -EINVAL;
1357
1358 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1359 return -EPERM;
1360
1361 /* Find the mm_struct */
1362 rcu_read_lock();
1363 task = pid ? find_task_by_vpid(pid) : current;
1364 if (!task) {
1365 rcu_read_unlock();
1366 return -ESRCH;
1367 }
1368 get_task_struct(task);
1369
1370 /*
1371 * Check if this process has the right to modify the specified
1372 * process. The right exists if the process has administrative
1373 * capabilities, superuser privileges or the same
1374 * userid as the target process.
1375 */
1376 tcred = __task_cred(task);
1377 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1378 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1379 !capable(CAP_SYS_NICE)) {
1380 rcu_read_unlock();
1381 err = -EPERM;
1382 goto out;
1383 }
1384 rcu_read_unlock();
1385
1386 err = security_task_movememory(task);
1387 if (err)
1388 goto out;
1389
1390 task_nodes = cpuset_mems_allowed(task);
1391 mm = get_task_mm(task);
1392 put_task_struct(task);
1393
1394 if (!mm)
1395 return -EINVAL;
1396
1397 if (nodes)
1398 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1399 nodes, status, flags);
1400 else
1401 err = do_pages_stat(mm, nr_pages, pages, status);
1402
1403 mmput(mm);
1404 return err;
1405
1406out:
1407 put_task_struct(task);
1408 return err;
1409}
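
/*
 * Illustrative userspace sketch (an assumption, not kernel code): driving
 * the move_pages(2) system call defined above through libnuma's wrapper
 * from <numaif.h>.  Passing a NULL "nodes" array turns the call into a pure
 * status query serviced by do_pages_stat().  The helper name is hypothetical.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	static void example_move_one_page(void *addr)
 *	{
 *		void *pages[1] = { addr };
 *		int nodes[1] = { 0 };		// requested target: node 0
 *		int status[1];
 *
 *		// pid 0 == the calling process; MPOL_MF_MOVE moves only
 *		// pages mapped exclusively by this process.
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *			printf("page is now on node %d\n", status[0]);
 *
 *		// Query-only form: with nodes == NULL, status[] receives the
 *		// current node of each page (or a negative errno).
 *		move_pages(0, 1, pages, NULL, status, 0);
 *	}
 */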
1410
1411/*
1412 * Call the migration functions in the vm_ops that may prepare
1413 * memory in an mm for migration. Migration functions may perform
1414 * the migration for VMAs that do not have an underlying page struct.
1415 */
1416int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1417 const nodemask_t *from, unsigned long flags)
1418{
1419 struct vm_area_struct *vma;
1420 int err = 0;
1421
1422 for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
1423 if (vma->vm_ops && vma->vm_ops->migrate) {
1424 err = vma->vm_ops->migrate(vma, to, from, flags);
1425 if (err)
1426 break;
1427 }
1428 }
1429 return err;
1430}
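
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-side ->migrate hook shaped after the way migrate_vmas() invokes it
 * above, i.e. vma->vm_ops->migrate(vma, to, from, flags).  Such a hook lets
 * a driver relocate backing memory that has no page structs of its own; the
 * name below is an example, not a real kernel symbol.
 */
#if 0	/* example only, not built */
static int example_vma_migrate(struct vm_area_struct *vma,
			       const nodemask_t *to, const nodemask_t *from,
			       unsigned long flags)
{
	/* Move the driver's backing store toward the nodes in "to". */
	return 0;
}
#endif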
1431#endif