mm/mremap.c (v6.8)
// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>

#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
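
/*
 * Aside (not part of the kernel source): the soft-dirty bit is what lets
 * change trackers such as CRIU notice modified pages. Userspace reads it
 * back as bit 55 of each /proc/<pid>/pagemap entry and resets it by
 * writing "4" to /proc/<pid>/clear_refs; re-marking moved ptes here keeps
 * an mremap()ed page from escaping that tracking.
 */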

static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;
	int err = 0;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte) {
		err = -EAGAIN;
		goto out;
	}
	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		err = -EAGAIN;
		goto out;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(ptep_get(old_pte)))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
out:
	if (need_rmap_locks)
		drop_rmap_locks(vma);
	return err;
}
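
/*
 * Aside (not part of the kernel source): the -EAGAIN paths above fire
 * when pte_offset_map_lock()/pte_offset_map_nolock() return NULL, i.e.
 * when the pmd stopped being a plain page table under us (for instance
 * after a concurrent THP collapse). move_page_tables() treats the error
 * as transient and retries the same extent via its "again" label.
 */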

#ifndef arch_supports_page_table_move
#define arch_supports_page_table_move arch_supports_page_table_move
static inline bool arch_supports_page_table_move(void)
{
	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
}
#endif

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif
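
/*
 * Aside (not part of the kernel source): this is the big win of
 * CONFIG_HAVE_MOVE_PMD - with 4K pages a pmd maps 512 ptes (2MB on
 * x86-64), so relinking a single pmd entry under the two pmd locks
 * replaces up to 512 per-pte copies in move_ptes(). move_normal_pud()
 * below applies the same trick one level up.
 */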

#if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	if (!arch_supports_page_table_move())
		return false;
	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	pud_populate(mm, new_pud, pud_pgtable(pud));
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	/* mark soft_dirty when we add pud-level soft dirty support */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	WARN_ON_ONCE(1);
	return false;

}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
	HPAGE_PUD,
};

/*
 * Returns an extent of the corresponding size for the pgt_entry specified if
 * valid. Else returns a smaller extent bounded by the end of the source and
 * destination pgt_entry.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case HPAGE_PUD:
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
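
/*
 * Worked example (illustrative, not in the source): with 2MB pmds,
 * old_addr = 0x2ff000, old_end = 0x600000 and new_addr = 0x4ff000,
 * get_extent(NORMAL_PMD, ...) computes next = 0x400000, so extent is
 * 0x101000; the destination bound gives 0x600000 - 0x4ff000 = 0x101000
 * as well, so both cursors advance exactly to their next pmd boundary.
 */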

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	case HPAGE_PUD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pud(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

/*
 * A helper to check if aligning down is OK. The aligned address should fall
 * on *no mapping*. For the stack moving down, that's a special move within
 * the VMA that is created to span the source and destination of the move,
 * so we make an exception for it.
 */
static bool can_align_down(struct vm_area_struct *vma, unsigned long addr_to_align,
			    unsigned long mask, bool for_stack)
{
	unsigned long addr_masked = addr_to_align & mask;

	/*
	 * If @addr_to_align of either source or destination is not the beginning
	 * of the corresponding VMA, we can't align down or we will destroy part
	 * of the current mapping.
	 */
	if (!for_stack && vma->vm_start != addr_to_align)
		return false;

	/* In the stack case we explicitly permit in-VMA alignment. */
	if (for_stack && addr_masked >= vma->vm_start)
		return true;

	/*
	 * Make sure the realignment doesn't cause the address to fall on an
	 * existing mapping.
	 */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;
}

/* Opportunistically realign to specified boundary for faster copy. */
static void try_realign_addr(unsigned long *old_addr, struct vm_area_struct *old_vma,
			     unsigned long *new_addr, struct vm_area_struct *new_vma,
			     unsigned long mask, bool for_stack)
{
	/* Skip if the addresses are already aligned. */
	if ((*old_addr & ~mask) == 0)
		return;

	/* Only realign if the new and old addresses are mutually aligned. */
	if ((*old_addr & ~mask) != (*new_addr & ~mask))
		return;

	/* Ensure realignment doesn't cause overlap with existing mappings. */
	if (!can_align_down(old_vma, *old_addr, mask, for_stack) ||
	    !can_align_down(new_vma, *new_addr, mask, for_stack))
		return;

	*old_addr = *old_addr & mask;
	*new_addr = *new_addr & mask;
}
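
/*
 * Example (illustrative, not in the source): moving 0x2ff000 -> 0x4ff000
 * with mask = PMD_MASK realigns the pair to 0x200000 -> 0x400000, because
 * both offsets within a pmd are 0xff000; a pair like 0x2ff000 -> 0x500000
 * is left alone since the offsets differ and the copy could never proceed
 * pmd-by-pmd anyway.
 */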

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks, bool for_stack)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;
	pud_t *old_pud, *new_pud;

	if (!len)
		return 0;

	old_end = old_addr + len;

	if (is_vm_hugetlb_page(vma))
		return move_hugetlb_page_tables(vma, new_vma, old_addr,
						new_addr, len);

	/*
	 * If possible, realign addresses to PMD boundary for faster copy.
	 * Only realign if the mremap copying hits a PMD boundary.
	 */
	if (len >= PMD_SIZE - (old_addr & ~PMD_MASK))
		try_realign_addr(&old_addr, vma, &new_addr, new_vma, PMD_MASK,
				 for_stack);

	flush_cache_range(vma, old_addr, old_end);
	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);

		old_pud = get_old_pud(vma->vm_mm, old_addr);
		if (!old_pud)
			continue;
		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
		if (!new_pud)
			break;
		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
			if (extent == HPAGE_PUD_SIZE) {
				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
					       old_pud, new_pud, need_rmap_locks);
				/* We ignore and continue on error? */
				continue;
			}
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {

			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, true))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
again:
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, true))
				continue;
		}
		if (pmd_none(*old_pmd))
			continue;
		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
			goto again;
	}

	mmu_notifier_invalidate_range_end(&range);

	/*
	 * Prevent negative return values when {old,new}_addr was realigned
	 * but we broke out of the above loop for the first PMD itself.
	 */
	if (len + old_addr < old_end)
		return 0;

	return len + old_addr - old_end;	/* how much done */
}
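
/*
 * Aside (not part of the kernel source): len + old_addr - old_end is the
 * number of bytes actually moved - len when the loop ran to completion,
 * less when it broke out early on an allocation failure. move_vma()
 * compares the result against old_len and rolls the whole move back if
 * it comes up short.
 */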

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	long to_account = new_len - old_len;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long account_start = 0;
	unsigned long account_end = 0;
	unsigned long hiwater_vm;
	int err = 0;
	bool need_rmap_locks;
	struct vma_iterator vmi;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (unlikely(flags & MREMAP_DONTUNMAP))
		to_account = new_len;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (vm_flags & VM_ACCOUNT) {
		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
			return -ENOMEM;
	}

	vma_start_write(vma);
	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (vm_flags & VM_ACCOUNT)
			vm_unacct_memory(to_account >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks, false);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true, false);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
	}

	if (is_vm_hugetlb_page(vma)) {
		clear_vma_resv_huge_pages(vma);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vm_flags_clear(vma, VM_ACCOUNT);
		if (vma->vm_start < old_addr)
			account_start = vma->vm_start;
		if (vma->vm_end > old_addr + old_len)
			account_end = vma->vm_end;
	}

	/*
	 * If we failed to move page tables we still do the total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that pages have moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_clear(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vm_flags_clear(vma, VM_LOCKED_MASK);

		/*
		 * anon_vma links of the old vma are no longer needed after its
		 * page tables have been moved.
		 */
		if (new_vma != vma && vma->vm_start == old_addr &&
			vma->vm_end == (old_addr + old_len))
			unlink_anon_vmas(vma);

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	vma_iter_init(&vmi, mm, old_addr);
	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(old_len >> PAGE_SHIFT);
		account_start = account_end = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (account_start) {
		vma = vma_prev(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	if (account_end) {
		vma = vma_next(&vmi);
		vm_flags_set(vma, VM_ACCOUNT);
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long pgoff;

	vma = vma_lookup(mm, addr);
	if (!vma)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if ((flags & MREMAP_DONTUNMAP) &&
			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
		return ERR_PTR(-EAGAIN);

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	return vma;
}
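
/*
 * Aside (not part of the kernel source), summarising the error codes
 * above: -EFAULT for lookup/boundary problems, -EINVAL for nonsensical
 * requests (zero-length duplication of a private mapping, DONTUNMAP on
 * VM_DONTEXPAND/VM_PFNMAP, pgoff overflow), -EAGAIN when the mlock limit
 * would be exceeded, and -ENOMEM when the address-space limit would be.
 */
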
 866
 867static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 868		unsigned long new_addr, unsigned long new_len, bool *locked,
 869		unsigned long flags, struct vm_userfaultfd_ctx *uf,
 870		struct list_head *uf_unmap_early,
 871		struct list_head *uf_unmap)
 872{
 873	struct mm_struct *mm = current->mm;
 874	struct vm_area_struct *vma;
 875	unsigned long ret = -EINVAL;
 876	unsigned long map_flags = 0;
 
 877
 878	if (offset_in_page(new_addr))
 879		goto out;
 880
 881	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
 882		goto out;
 883
 884	/* Ensure the old/new locations do not overlap */
 885	if (addr + old_len > new_addr && new_addr + new_len > addr)
 
 
 886		goto out;
 887
 888	/*
 889	 * move_vma() need us to stay 4 maps below the threshold, otherwise
 890	 * it will bail out at the very beginning.
 891	 * That is a problem if we have already unmaped the regions here
 892	 * (new_addr, and old_addr), because userspace will not know the
 893	 * state of the vma's after it gets -ENOMEM.
 894	 * So, to avoid such scenario we can pre-compute if the whole
 895	 * operation has high chances to success map-wise.
 896	 * Worst-scenario case is when both vma's (new_addr and old_addr) get
 897	 * split in 3 before unmapping it.
 898	 * That means 2 more maps (1 for each) to the ones we already hold.
 899	 * Check whether current map count plus 2 still leads us to 4 maps below
 900	 * the threshold, otherwise return -ENOMEM here to be more safe.
 901	 */
 902	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
 903		return -ENOMEM;
 904
 905	if (flags & MREMAP_FIXED) {
 906		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
 907		if (ret)
 908			goto out;
 909	}
 910
 911	if (old_len > new_len) {
 912		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
 913		if (ret)
 
 
 
 
 914			goto out;
 915		old_len = new_len;
 916	}
 917
 918	vma = vma_to_resize(addr, old_len, new_len, flags);
 919	if (IS_ERR(vma)) {
 920		ret = PTR_ERR(vma);
 921		goto out;
 922	}
 923
 924	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
 925	if (flags & MREMAP_DONTUNMAP &&
 926		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
 927		ret = -ENOMEM;
 928		goto out;
 929	}
 930
 931	if (flags & MREMAP_FIXED)
 932		map_flags |= MAP_FIXED;
 933
 934	if (vma->vm_flags & VM_MAYSHARE)
 935		map_flags |= MAP_SHARED;
 936
 937	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
 938				((addr - vma->vm_start) >> PAGE_SHIFT),
 939				map_flags);
 940	if (IS_ERR_VALUE(ret))
 941		goto out;
 942
 943	/* We got a new mapping */
 944	if (!(flags & MREMAP_FIXED))
 945		new_addr = ret;
 946
 947	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
 948		       uf_unmap);
 949
 950out:
 951	return ret;
 952}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	bool locked = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
	 * information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;
	vma = vma_lookup(mm, addr);
	if (!vma) {
		ret = -EFAULT;
		goto out;
	}

	if (is_vm_hugetlb_page(vma)) {
		struct hstate *h __maybe_unused = hstate_vma(vma);

		old_len = ALIGN(old_len, huge_page_size(h));
		new_len = ALIGN(new_len, huge_page_size(h));

		/* addrs must be huge page aligned */
		if (addr & ~huge_page_mask(h))
			goto out;
		if (new_addr & ~huge_page_mask(h))
			goto out;

		/*
		 * Don't allow remap expansion, because the underlying hugetlb
		 * reservation is not yet capable of handling split reservations.
		 */
		if (new_len > old_len)
			goto out;
	}

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_vmi_munmap does all the needed commit accounting, and
	 * unlocks the mmap_lock if so directed.
	 */
	if (old_len >= new_len) {
		VMA_ITERATOR(vmi, mm, addr + new_len);

		if (old_len == new_len) {
			ret = addr;
			goto out;
		}

		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
				    &uf_unmap, true);
		if (ret)
			goto out;

		ret = addr;
		goto out_unlocked;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		unsigned long delta = new_len - old_len;

		/* can we just expand the current mapping? */
		if (vma_expandable(vma, delta)) {
			long pages = delta >> PAGE_SHIFT;
			VMA_ITERATOR(vmi, mm, vma->vm_end);
			long charged = 0;

			if (vma->vm_flags & VM_ACCOUNT) {
				if (security_vm_enough_memory_mm(mm, pages)) {
					ret = -ENOMEM;
					goto out;
				}
				charged = pages;
			}

			/*
			 * Function vma_merge_extend() is called on the
			 * extension we are adding to the already existing vma,
			 * vma_merge_extend() will merge this extension with the
			 * already existing vma (expand operation itself) and
			 * possibly also with the next vma if it becomes
			 * adjacent to the expanded vma and otherwise
			 * compatible.
			 */
			vma = vma_merge_extend(&vmi, vma, delta);
			if (!vma) {
				vm_unacct_memory(charged);
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret))
		locked = false;
	mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
out_unlocked:
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
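
/*
 * Userspace view (illustrative sketch, not part of this file): the
 * syscall above backs the mremap(2) library wrapper, e.g.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 *
 * MREMAP_FIXED takes a fifth argument (the new address) and implies
 * MREMAP_MAYMOVE; MREMAP_DONTUNMAP (Linux 5.7+) keeps the old range
 * mapped, which the flag checks above restrict to old_len == new_len.
 */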
mm/mremap.c (v3.1)
 
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));
	if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
		return NULL;

	return pmd;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr)
{
	struct address_space *mapping = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	unsigned long old_start;

	old_start = old_addr;
	mmu_notifier_invalidate_range_start(vma->vm_mm,
					    old_start, old_end);
	if (vma->vm_file) {
		/*
		 * Subtle point from Rajesh Venkatasubramanian: before
		 * moving file-based ptes, we must lock truncate_pagecache
		 * out, since it might clean the dst vma before the src vma,
		 * and we propagate stale pages into the dst afterward.
		 */
		mapping = vma->vm_file->f_mapping;
		mutex_lock(&mapping->i_mmap_mutex);
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_clear_flush(vma, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
	mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}

#define LATENCY_LIMIT	(64 * PAGE_SIZE)

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		if (next - 1 > old_end)
			next = old_end;
		extent = next - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				new_vma, new_pmd, new_addr);
	}

	return len + old_addr - old_end;	/* how much done */
}

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	mm->total_vm += new_len >> PAGE_SHIFT;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		if (new_len > old_len)
			mlock_vma_pages_range(new_vma, new_addr + old_len,
						       new_addr + new_len);
	}

	return new_addr;
}

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory(charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

static unsigned long mremap_to(unsigned long addr,
	unsigned long old_len, unsigned long new_addr,
	unsigned long new_len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
	if (ret)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		goto out;

	if (addr & ~PAGE_MASK)
		goto out;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		goto out;

	if (flags & MREMAP_FIXED) {
		if (flags & MREMAP_MAYMOVE)
			ret = mremap_to(addr, old_len, new_addr, new_len);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			mm->total_vm += pages;
			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				mlock_vma_pages_range(vma, addr + old_len,
						   addr + new_len);
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
		if (ret)
			goto out;
		ret = move_vma(vma, addr, old_len, new_len, new_addr);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	return ret;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}