   1// SPDX-License-Identifier: GPL-2.0-or-later
   2
   3/*
   4 * VMA-specific functions.
   5 */
   6
   7#include "vma_internal.h"
   8#include "vma.h"
   9
  10struct mmap_state {
  11	struct mm_struct *mm;
  12	struct vma_iterator *vmi;
  13
  14	unsigned long addr;
  15	unsigned long end;
  16	pgoff_t pgoff;
  17	unsigned long pglen;
  18	unsigned long flags;
  19	struct file *file;
  20
  21	unsigned long charged;
  22	bool retry_merge;
  23
  24	struct vm_area_struct *prev;
  25	struct vm_area_struct *next;
  26
  27	/* Unmapping state. */
  28	struct vma_munmap_struct vms;
  29	struct ma_state mas_detach;
  30	struct maple_tree mt_detach;
  31};
  32
  33#define MMAP_STATE(name, mm_, vmi_, addr_, len_, pgoff_, flags_, file_) \
  34	struct mmap_state name = {					\
  35		.mm = mm_,						\
  36		.vmi = vmi_,						\
  37		.addr = addr_,						\
  38		.end = (addr_) + (len_),				\
  39		.pgoff = pgoff_,					\
  40		.pglen = PHYS_PFN(len_),				\
  41		.flags = flags_,					\
  42		.file = file_,						\
  43	}
  44
  45#define VMG_MMAP_STATE(name, map_, vma_)				\
  46	struct vma_merge_struct name = {				\
  47		.mm = (map_)->mm,					\
  48		.vmi = (map_)->vmi,					\
  49		.start = (map_)->addr,					\
  50		.end = (map_)->end,					\
  51		.flags = (map_)->flags,					\
  52		.pgoff = (map_)->pgoff,					\
  53		.file = (map_)->file,					\
  54		.prev = (map_)->prev,					\
  55		.vma = vma_,						\
  56		.next = (vma_) ? NULL : (map_)->next,			\
  57		.state = VMA_MERGE_START,				\
  58		.merge_flags = VMG_FLAG_DEFAULT,			\
  59	}
  60
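/*
 * Editorial note (not part of the original source): the two initializers
 * above are meant to be layered. A hypothetical mmap() path might do:
 *
 *	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, flags, file);
 *	...
 *	VMG_MMAP_STATE(vmg, &map, NULL);
 *
 * so that the merge descriptor is derived from the captured mapping state.
 */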
  61static inline bool is_mergeable_vma(struct vma_merge_struct *vmg, bool merge_next)
  62{
  63	struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;
  64
  65	if (!mpol_equal(vmg->policy, vma_policy(vma)))
  66		return false;
  67	/*
  68	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
  69	 * match the flags but dirty bit -- the caller should mark
  70	 * merged VMA as dirty. If dirty bit won't be excluded from
  71	 * comparison, we increase pressure on the memory system forcing
  72	 * the kernel to generate new VMAs when old one could be
  73	 * extended instead.
  74	 */
  75	if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY)
  76		return false;
  77	if (vma->vm_file != vmg->file)
  78		return false;
  79	if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
  80		return false;
  81	if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
  82		return false;
  83	return true;
  84}
  85
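/*
 * Editorial example (not part of the original source): the flag check above
 * masks out VM_SOFTDIRTY before comparing, so e.g.
 *
 *	(((VM_READ | VM_SOFTDIRTY) ^ VM_READ) & ~VM_SOFTDIRTY) == 0
 *
 * and two VMAs differing only in the soft-dirty bit remain mergeable.
 */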
  86static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
  87		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
  88{
  89	/*
   90	 * The list_is_singular() test is to avoid merging VMAs cloned from
   91	 * parents, which improves scalability otherwise limited by the anon_vma lock.
  92	 */
  93	if ((!anon_vma1 || !anon_vma2) && (!vma ||
  94		list_is_singular(&vma->anon_vma_chain)))
  95		return true;
  96	return anon_vma1 == anon_vma2;
  97}
  98
   99/* Are the anon_vmas belonging to each VMA compatible with one another? */
 100static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
 101					    struct vm_area_struct *vma2)
 102{
 103	return is_mergeable_anon_vma(vma1->anon_vma, vma2->anon_vma, NULL);
 104}
 105
 106/*
 107 * init_multi_vma_prep() - Initializer for struct vma_prepare
 108 * @vp: The vma_prepare struct
 109 * @vma: The vma that will be altered once locked
 110 * @next: The next vma if it is to be adjusted
 111 * @remove: The first vma to be removed
 112 * @remove2: The second vma to be removed
 113 */
 114static void init_multi_vma_prep(struct vma_prepare *vp,
 115				struct vm_area_struct *vma,
 116				struct vm_area_struct *next,
 117				struct vm_area_struct *remove,
 118				struct vm_area_struct *remove2)
 119{
 120	memset(vp, 0, sizeof(struct vma_prepare));
 121	vp->vma = vma;
 122	vp->anon_vma = vma->anon_vma;
 123	vp->remove = remove;
 124	vp->remove2 = remove2;
 125	vp->adj_next = next;
 126	if (!vp->anon_vma && next)
 127		vp->anon_vma = next->anon_vma;
 128
 129	vp->file = vma->vm_file;
 130	if (vp->file)
 131		vp->mapping = vma->vm_file->f_mapping;
 132
 133}
 134
 135/*
 136 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 137 * in front of (at a lower virtual address and file offset than) the vma.
 138 *
 139 * We cannot merge two vmas if they have differently assigned (non-NULL)
  140 * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
 141 *
 142 * We don't check here for the merged mmap wrapping around the end of pagecache
  143 * indices (16TB on ia32) because do_mmap() does not permit mmaps which
 144 * wrap, nor mmaps which cover the final page at index -1UL.
 145 *
 146 * We assume the vma may be removed as part of the merge.
 147 */
 148static bool can_vma_merge_before(struct vma_merge_struct *vmg)
 149{
 150	pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
 151
 152	if (is_mergeable_vma(vmg, /* merge_next = */ true) &&
 153	    is_mergeable_anon_vma(vmg->anon_vma, vmg->next->anon_vma, vmg->next)) {
 154		if (vmg->next->vm_pgoff == vmg->pgoff + pglen)
 155			return true;
 156	}
 157
 158	return false;
 159}
 160
 161/*
 162 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 163 * beyond (at a higher virtual address and file offset than) the vma.
 164 *
 165 * We cannot merge two vmas if they have differently assigned (non-NULL)
  166 * anon_vmas, nor if the same anon_vma is assigned but the offsets are incompatible.
 167 *
 168 * We assume that vma is not removed as part of the merge.
 169 */
 170static bool can_vma_merge_after(struct vma_merge_struct *vmg)
 171{
 172	if (is_mergeable_vma(vmg, /* merge_next = */ false) &&
 173	    is_mergeable_anon_vma(vmg->anon_vma, vmg->prev->anon_vma, vmg->prev)) {
 174		if (vmg->prev->vm_pgoff + vma_pages(vmg->prev) == vmg->pgoff)
 175			return true;
 176	}
 177	return false;
 178}
 179
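/*
 * Editorial diagram (not part of the original source), with <new> denoting
 * the proposed [vmg->start, vmg->end) range. The two helpers above verify
 * file-offset continuity; adjacency of the addresses themselves is checked
 * by can_vma_merge_left()/can_vma_merge_right() further below.
 *
 *	can_vma_merge_after():   | prev |<new>
 *	                         prev->vm_pgoff + vma_pages(prev) == vmg->pgoff
 *
 *	can_vma_merge_before():  <new>| next |
 *	                         vmg->pgoff + pglen == next->vm_pgoff
 */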
 180static void __vma_link_file(struct vm_area_struct *vma,
 181			    struct address_space *mapping)
 182{
 183	if (vma_is_shared_maywrite(vma))
 184		mapping_allow_writable(mapping);
 185
 186	flush_dcache_mmap_lock(mapping);
 187	vma_interval_tree_insert(vma, &mapping->i_mmap);
 188	flush_dcache_mmap_unlock(mapping);
 189}
 190
 191/*
 192 * Requires inode->i_mapping->i_mmap_rwsem
 193 */
 194static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 195				      struct address_space *mapping)
 196{
 197	if (vma_is_shared_maywrite(vma))
 198		mapping_unmap_writable(mapping);
 199
 200	flush_dcache_mmap_lock(mapping);
 201	vma_interval_tree_remove(vma, &mapping->i_mmap);
 202	flush_dcache_mmap_unlock(mapping);
 203}
 204
 205/*
  206 * vma_prepare() - Helper function for locking VMAs prior to altering them
 207 * @vp: The initialized vma_prepare struct
 208 */
 209static void vma_prepare(struct vma_prepare *vp)
 210{
 211	if (vp->file) {
 212		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
 213
 214		if (vp->adj_next)
 215			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
 216				      vp->adj_next->vm_end);
 217
 218		i_mmap_lock_write(vp->mapping);
 219		if (vp->insert && vp->insert->vm_file) {
 220			/*
 221			 * Put into interval tree now, so instantiated pages
 222			 * are visible to arm/parisc __flush_dcache_page
 223			 * throughout; but we cannot insert into address
 224			 * space until vma start or end is updated.
 225			 */
 226			__vma_link_file(vp->insert,
 227					vp->insert->vm_file->f_mapping);
 228		}
 229	}
 230
 231	if (vp->anon_vma) {
 232		anon_vma_lock_write(vp->anon_vma);
 233		anon_vma_interval_tree_pre_update_vma(vp->vma);
 234		if (vp->adj_next)
 235			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
 236	}
 237
 238	if (vp->file) {
 239		flush_dcache_mmap_lock(vp->mapping);
 240		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
 241		if (vp->adj_next)
 242			vma_interval_tree_remove(vp->adj_next,
 243						 &vp->mapping->i_mmap);
 244	}
 245
 246}
 247
 248/*
  249 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
 250 * or for inserting a VMA.
 251 *
 252 * @vp: The vma_prepare struct
 253 * @vmi: The vma iterator
 254 * @mm: The mm_struct
 255 */
 256static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi,
 257			 struct mm_struct *mm)
 258{
 259	if (vp->file) {
 260		if (vp->adj_next)
 261			vma_interval_tree_insert(vp->adj_next,
 262						 &vp->mapping->i_mmap);
 263		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
 264		flush_dcache_mmap_unlock(vp->mapping);
 265	}
 266
 267	if (vp->remove && vp->file) {
 268		__remove_shared_vm_struct(vp->remove, vp->mapping);
 269		if (vp->remove2)
 270			__remove_shared_vm_struct(vp->remove2, vp->mapping);
 271	} else if (vp->insert) {
 272		/*
 273		 * split_vma has split insert from vma, and needs
 274		 * us to insert it before dropping the locks
 275		 * (it may either follow vma or precede it).
 276		 */
 277		vma_iter_store(vmi, vp->insert);
 278		mm->map_count++;
 279	}
 280
 281	if (vp->anon_vma) {
 282		anon_vma_interval_tree_post_update_vma(vp->vma);
 283		if (vp->adj_next)
 284			anon_vma_interval_tree_post_update_vma(vp->adj_next);
 285		anon_vma_unlock_write(vp->anon_vma);
 286	}
 287
 288	if (vp->file) {
 289		i_mmap_unlock_write(vp->mapping);
 290		uprobe_mmap(vp->vma);
 291
 292		if (vp->adj_next)
 293			uprobe_mmap(vp->adj_next);
 294	}
 295
 296	if (vp->remove) {
 297again:
 298		vma_mark_detached(vp->remove, true);
 299		if (vp->file) {
 300			uprobe_munmap(vp->remove, vp->remove->vm_start,
 301				      vp->remove->vm_end);
 302			fput(vp->file);
 303		}
 304		if (vp->remove->anon_vma)
 305			anon_vma_merge(vp->vma, vp->remove);
 306		mm->map_count--;
 307		mpol_put(vma_policy(vp->remove));
 308		if (!vp->remove2)
 309			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
 310		vm_area_free(vp->remove);
 311
 312		/*
 313		 * In mprotect's case 6 (see comments on vma_merge),
 314		 * we are removing both mid and next vmas
 315		 */
 316		if (vp->remove2) {
 317			vp->remove = vp->remove2;
 318			vp->remove2 = NULL;
 319			goto again;
 320		}
 321	}
 322	if (vp->insert && vp->file)
 323		uprobe_mmap(vp->insert);
 324}
 325
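/*
 * Editorial sketch (not part of the original source): the canonical sequence
 * for modifying a VMA with these helpers, as used by __split_vma() and
 * commit_merge() below, is roughly:
 *
 *	init_vma_prep(&vp, vma);           (or init_multi_vma_prep())
 *	vma_iter_config(vmi, start, end);
 *	if (vma_iter_prealloc(vmi, vma))
 *		return -ENOMEM;
 *	vma_prepare(&vp);                  (take rmap/i_mmap locks, unhook trees)
 *	... adjust vm_start/vm_end/vm_pgoff and the maple tree ...
 *	vma_complete(&vp, vmi, mm);        (re-hook trees, drop locks, free removed)
 */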
 326/*
 327 * init_vma_prep() - Initializer wrapper for vma_prepare struct
 328 * @vp: The vma_prepare struct
 329 * @vma: The vma that will be altered once locked
 330 */
 331static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
 332{
 333	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
 334}
 335
 336/*
 337 * Can the proposed VMA be merged with the left (previous) VMA taking into
 338 * account the start position of the proposed range.
 339 */
 340static bool can_vma_merge_left(struct vma_merge_struct *vmg)
 341
 342{
 343	return vmg->prev && vmg->prev->vm_end == vmg->start &&
 344		can_vma_merge_after(vmg);
 345}
 346
 347/*
 348 * Can the proposed VMA be merged with the right (next) VMA taking into
 349 * account the end position of the proposed range.
 350 *
 351 * In addition, if we can merge with the left VMA, ensure that left and right
 352 * anon_vma's are also compatible.
 353 */
 354static bool can_vma_merge_right(struct vma_merge_struct *vmg,
 355				bool can_merge_left)
 356{
 357	if (!vmg->next || vmg->end != vmg->next->vm_start ||
 358	    !can_vma_merge_before(vmg))
 359		return false;
 360
 361	if (!can_merge_left)
 362		return true;
 363
 364	/*
 365	 * If we can merge with prev (left) and next (right), indicating that
 366	 * each VMA's anon_vma is compatible with the proposed anon_vma, this
 367	 * does not mean prev and next are compatible with EACH OTHER.
 368	 *
 369	 * We therefore check this in addition to mergeability to either side.
 370	 */
 371	return are_anon_vmas_compatible(vmg->prev, vmg->next);
 372}
 373
 374/*
 375 * Close a vm structure and free it.
 376 */
 377void remove_vma(struct vm_area_struct *vma, bool unreachable)
 378{
 379	might_sleep();
 380	vma_close(vma);
 381	if (vma->vm_file)
 382		fput(vma->vm_file);
 383	mpol_put(vma_policy(vma));
 384	if (unreachable)
 385		__vm_area_free(vma);
 386	else
 387		vm_area_free(vma);
 388}
 389
 390/*
 391 * Get rid of page table information in the indicated region.
 392 *
 393 * Called with the mm semaphore held.
 394 */
 395void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
 396		struct vm_area_struct *prev, struct vm_area_struct *next)
 397{
 398	struct mm_struct *mm = vma->vm_mm;
 399	struct mmu_gather tlb;
 400
 401	lru_add_drain();
 402	tlb_gather_mmu(&tlb, mm);
 403	update_hiwater_rss(mm);
 404	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
 405		   /* mm_wr_locked = */ true);
 406	mas_set(mas, vma->vm_end);
 407	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 408		      next ? next->vm_start : USER_PGTABLES_CEILING,
 409		      /* mm_wr_locked = */ true);
 410	tlb_finish_mmu(&tlb);
 411}
 412
 413/*
 414 * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
  415 * has already been checked or where it does not make sense to fail.
 416 * VMA Iterator will point to the original VMA.
 417 */
 418static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 419		       unsigned long addr, int new_below)
 420{
 421	struct vma_prepare vp;
 422	struct vm_area_struct *new;
 423	int err;
 424
 425	WARN_ON(vma->vm_start >= addr);
 426	WARN_ON(vma->vm_end <= addr);
 427
 428	if (vma->vm_ops && vma->vm_ops->may_split) {
 429		err = vma->vm_ops->may_split(vma, addr);
 430		if (err)
 431			return err;
 432	}
 433
 434	new = vm_area_dup(vma);
 435	if (!new)
 436		return -ENOMEM;
 437
 438	if (new_below) {
 439		new->vm_end = addr;
 440	} else {
 441		new->vm_start = addr;
 442		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 443	}
 444
 445	err = -ENOMEM;
 446	vma_iter_config(vmi, new->vm_start, new->vm_end);
 447	if (vma_iter_prealloc(vmi, new))
 448		goto out_free_vma;
 449
 450	err = vma_dup_policy(vma, new);
 451	if (err)
 452		goto out_free_vmi;
 453
 454	err = anon_vma_clone(new, vma);
 455	if (err)
 456		goto out_free_mpol;
 457
 458	if (new->vm_file)
 459		get_file(new->vm_file);
 460
 461	if (new->vm_ops && new->vm_ops->open)
 462		new->vm_ops->open(new);
 463
 464	vma_start_write(vma);
 465	vma_start_write(new);
 466
 467	init_vma_prep(&vp, vma);
 468	vp.insert = new;
 469	vma_prepare(&vp);
 470	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
 471
 472	if (new_below) {
 473		vma->vm_start = addr;
 474		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
 475	} else {
 476		vma->vm_end = addr;
 477	}
 478
 479	/* vma_complete stores the new vma */
 480	vma_complete(&vp, vmi, vma->vm_mm);
 481	validate_mm(vma->vm_mm);
 482
 483	/* Success. */
 484	if (new_below)
 485		vma_next(vmi);
 486	else
 487		vma_prev(vmi);
 488
 489	return 0;
 490
 491out_free_mpol:
 492	mpol_put(vma_policy(new));
 493out_free_vmi:
 494	vma_iter_free(vmi);
 495out_free_vma:
 496	vm_area_free(new);
 497	return err;
 498}
 499
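/*
 * Editorial note (not part of the original source): @new_below selects which
 * side of @addr the newly allocated VMA ends up covering:
 *
 *	new_below == 1:  [ new: old_start .. addr ) [ vma: addr .. old_end )
 *	new_below == 0:  [ vma: old_start .. addr ) [ new: addr .. old_end )
 *
 * and, as noted in the header comment above, the iterator is left pointing at
 * the original VMA.
 */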
 500/*
 501 * Split a vma into two pieces at address 'addr', a new vma is allocated
 502 * either for the first part or the tail.
 503 */
 504static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 505		     unsigned long addr, int new_below)
 506{
 507	if (vma->vm_mm->map_count >= sysctl_max_map_count)
 508		return -ENOMEM;
 509
 510	return __split_vma(vmi, vma, addr, new_below);
 511}
 512
 513/*
 514 * vma has some anon_vma assigned, and is already inserted on that
 515 * anon_vma's interval trees.
 516 *
 517 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 518 * vma must be removed from the anon_vma's interval trees using
 519 * anon_vma_interval_tree_pre_update_vma().
 520 *
 521 * After the update, the vma will be reinserted using
 522 * anon_vma_interval_tree_post_update_vma().
 523 *
 524 * The entire update must be protected by exclusive mmap_lock and by
 525 * the root anon_vma's mutex.
 526 */
 527void
 528anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
 529{
 530	struct anon_vma_chain *avc;
 531
 532	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 533		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
 534}
 535
 536void
 537anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
 538{
 539	struct anon_vma_chain *avc;
 540
 541	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 542		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
 543}
 544
 545/*
 546 * dup_anon_vma() - Helper function to duplicate anon_vma
 547 * @dst: The destination VMA
 548 * @src: The source VMA
 549 * @dup: Pointer to the destination VMA when successful.
 550 *
 551 * Returns: 0 on success.
 552 */
 553static int dup_anon_vma(struct vm_area_struct *dst,
 554			struct vm_area_struct *src, struct vm_area_struct **dup)
 555{
 556	/*
 557	 * Easily overlooked: when mprotect shifts the boundary, make sure the
 558	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
 559	 * anon pages imported.
 560	 */
 561	if (src->anon_vma && !dst->anon_vma) {
 562		int ret;
 563
 564		vma_assert_write_locked(dst);
 565		dst->anon_vma = src->anon_vma;
 566		ret = anon_vma_clone(dst, src);
 567		if (ret)
 568			return ret;
 569
 570		*dup = dst;
 571	}
 572
 573	return 0;
 574}
 575
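/*
 * Editorial sketch (not part of the original source): the usual calling
 * pattern, as in the merge paths below, is
 *
 *	struct vm_area_struct *anon_dup = NULL;
 *
 *	err = dup_anon_vma(expanding_vma, shrinking_vma, &anon_dup);
 *	if (err)
 *		goto abort;
 *	...
 *	if (the subsequent commit fails) {
 *		if (anon_dup)
 *			unlink_anon_vmas(anon_dup);
 *	}
 *
 * where expanding_vma / shrinking_vma name hypothetical locals.
 */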
 576#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
 577void validate_mm(struct mm_struct *mm)
 578{
 579	int bug = 0;
 580	int i = 0;
 581	struct vm_area_struct *vma;
 582	VMA_ITERATOR(vmi, mm, 0);
 583
 584	mt_validate(&mm->mm_mt);
 585	for_each_vma(vmi, vma) {
 586#ifdef CONFIG_DEBUG_VM_RB
 587		struct anon_vma *anon_vma = vma->anon_vma;
 588		struct anon_vma_chain *avc;
 589#endif
 590		unsigned long vmi_start, vmi_end;
 591		bool warn = 0;
 592
 593		vmi_start = vma_iter_addr(&vmi);
 594		vmi_end = vma_iter_end(&vmi);
 595		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
 596			warn = 1;
 597
 598		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
 599			warn = 1;
 600
 601		if (warn) {
 602			pr_emerg("issue in %s\n", current->comm);
 603			dump_stack();
 604			dump_vma(vma);
 605			pr_emerg("tree range: %px start %lx end %lx\n", vma,
 606				 vmi_start, vmi_end - 1);
 607			vma_iter_dump_tree(&vmi);
 608		}
 609
 610#ifdef CONFIG_DEBUG_VM_RB
 611		if (anon_vma) {
 612			anon_vma_lock_read(anon_vma);
 613			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 614				anon_vma_interval_tree_verify(avc);
 615			anon_vma_unlock_read(anon_vma);
 616		}
 617#endif
  618		/* Check for an infinite loop */
 619		if (++i > mm->map_count + 10) {
 620			i = -1;
 621			break;
 622		}
 623	}
 624	if (i != mm->map_count) {
 625		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
 626		bug = 1;
 627	}
 628	VM_BUG_ON_MM(bug, mm);
 629}
 630#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
 631
 632/* Actually perform the VMA merge operation. */
 633static int commit_merge(struct vma_merge_struct *vmg,
 634			struct vm_area_struct *adjust,
 635			struct vm_area_struct *remove,
 636			struct vm_area_struct *remove2,
 637			long adj_start,
 638			bool expanded)
 639{
 640	struct vma_prepare vp;
 641
 642	init_multi_vma_prep(&vp, vmg->vma, adjust, remove, remove2);
 643
 644	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
 645		   vp.anon_vma != adjust->anon_vma);
 646
 647	if (expanded) {
 648		/* Note: vma iterator must be pointing to 'start'. */
 649		vma_iter_config(vmg->vmi, vmg->start, vmg->end);
 650	} else {
 651		vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
 652				adjust->vm_end);
 653	}
 654
 655	if (vma_iter_prealloc(vmg->vmi, vmg->vma))
 656		return -ENOMEM;
 657
 658	vma_prepare(&vp);
 659	vma_adjust_trans_huge(vmg->vma, vmg->start, vmg->end, adj_start);
 660	vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff);
 661
 662	if (expanded)
 663		vma_iter_store(vmg->vmi, vmg->vma);
 664
 665	if (adj_start) {
 666		adjust->vm_start += adj_start;
 667		adjust->vm_pgoff += PHYS_PFN(adj_start);
 668		if (adj_start < 0) {
 669			WARN_ON(expanded);
 670			vma_iter_store(vmg->vmi, adjust);
 671		}
 672	}
 673
 674	vma_complete(&vp, vmg->vmi, vmg->vma->vm_mm);
 675
 676	return 0;
 677}
 678
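/*
 * Editorial note (not part of the original source) on @adj_start above:
 * a positive value moves @adjust->vm_start forward (the adjacent VMA is
 * trimmed from the left after prev was extended over it), while a negative
 * value moves it backwards (next grows to the left over the shrunken vma,
 * the only case where the merged result is vmg->next rather than vmg->vma).
 */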
 679/* We can only remove VMAs when merging if they do not have a close hook. */
 680static bool can_merge_remove_vma(struct vm_area_struct *vma)
 681{
 682	return !vma->vm_ops || !vma->vm_ops->close;
 683}
 684
 685/*
  686 * vma_merge_existing_range() - Attempt to merge VMAs based on a VMA having its
 687 * attributes modified.
 688 *
 689 * @vmg: Describes the modifications being made to a VMA and associated
 690 *       metadata.
 691 *
 692 * When the attributes of a range within a VMA change, then it might be possible
 693 * for immediately adjacent VMAs to be merged into that VMA due to having
 694 * identical properties.
 695 *
 696 * This function checks for the existence of any such mergeable VMAs and updates
 697 * the maple tree describing the @vmg->vma->vm_mm address space to account for
 698 * this, as well as any VMAs shrunk/expanded/deleted as a result of this merge.
 699 *
 700 * As part of this operation, if a merge occurs, the @vmg object will have its
 701 * vma, start, end, and pgoff fields modified to execute the merge. Subsequent
 702 * calls to this function should reset these fields.
 703 *
 704 * Returns: The merged VMA if merge succeeds, or NULL otherwise.
 705 *
 706 * ASSUMPTIONS:
  707 * - The caller must assign the VMA to be modified to @vmg->vma.
 708 * - The caller must have set @vmg->prev to the previous VMA, if there is one.
 709 * - The caller must not set @vmg->next, as we determine this.
 710 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 711 * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end).
 712 */
 713static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg)
 714{
 715	struct vm_area_struct *vma = vmg->vma;
 716	struct vm_area_struct *prev = vmg->prev;
 717	struct vm_area_struct *next, *res;
 718	struct vm_area_struct *anon_dup = NULL;
 719	struct vm_area_struct *adjust = NULL;
 720	unsigned long start = vmg->start;
 721	unsigned long end = vmg->end;
 722	bool left_side = vma && start == vma->vm_start;
 723	bool right_side = vma && end == vma->vm_end;
 724	int err = 0;
 725	long adj_start = 0;
 726	bool merge_will_delete_vma, merge_will_delete_next;
 727	bool merge_left, merge_right, merge_both;
 728	bool expanded;
 729
 730	mmap_assert_write_locked(vmg->mm);
 731	VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */
 732	VM_WARN_ON(vmg->next); /* We set this. */
 733	VM_WARN_ON(prev && start <= prev->vm_start);
 734	VM_WARN_ON(start >= end);
 735	/*
  736	 * If vma == prev, then we are offset into a VMA. Otherwise, we must
  737	 * span a portion of the VMA.
 738	 */
 739	VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) ||
 740			   vmg->end > vma->vm_end));
 741	/* The vmi must be positioned within vmg->vma. */
 742	VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start &&
 743			    vma_iter_addr(vmg->vmi) < vma->vm_end));
 744
 745	vmg->state = VMA_MERGE_NOMERGE;
 746
 747	/*
 748	 * If a special mapping or if the range being modified is neither at the
 749	 * furthermost left or right side of the VMA, then we have no chance of
 750	 * merging and should abort.
 751	 */
 752	if (vmg->flags & VM_SPECIAL || (!left_side && !right_side))
 753		return NULL;
 754
 755	if (left_side)
 756		merge_left = can_vma_merge_left(vmg);
 757	else
 758		merge_left = false;
 759
 760	if (right_side) {
 761		next = vmg->next = vma_iter_next_range(vmg->vmi);
 762		vma_iter_prev_range(vmg->vmi);
 763
 764		merge_right = can_vma_merge_right(vmg, merge_left);
 765	} else {
 766		merge_right = false;
 767		next = NULL;
 768	}
 769
 770	if (merge_left)		/* If merging prev, position iterator there. */
 771		vma_prev(vmg->vmi);
 772	else if (!merge_right)	/* If we have nothing to merge, abort. */
 773		return NULL;
 774
 775	merge_both = merge_left && merge_right;
 776	/* If we span the entire VMA, a merge implies it will be deleted. */
 777	merge_will_delete_vma = left_side && right_side;
 778
 779	/*
 780	 * If we need to remove vma in its entirety but are unable to do so,
 781	 * we have no sensible recourse but to abort the merge.
 782	 */
 783	if (merge_will_delete_vma && !can_merge_remove_vma(vma))
 784		return NULL;
 785
 786	/*
 787	 * If we merge both VMAs, then next is also deleted. This implies
 788	 * merge_will_delete_vma also.
 789	 */
 790	merge_will_delete_next = merge_both;
 791
 792	/*
 793	 * If we cannot delete next, then we can reduce the operation to merging
 794	 * prev and vma (thereby deleting vma).
 795	 */
 796	if (merge_will_delete_next && !can_merge_remove_vma(next)) {
 797		merge_will_delete_next = false;
 798		merge_right = false;
 799		merge_both = false;
 800	}
 801
 802	/* No matter what happens, we will be adjusting vma. */
 803	vma_start_write(vma);
 804
 805	if (merge_left)
 806		vma_start_write(prev);
 807
 808	if (merge_right)
 809		vma_start_write(next);
 810
 811	if (merge_both) {
 812		/*
 813		 *         |<----->|
 814		 * |-------*********-------|
 815		 *   prev     vma     next
 816		 *  extend   delete  delete
 817		 */
 818
 819		vmg->vma = prev;
 820		vmg->start = prev->vm_start;
 821		vmg->end = next->vm_end;
 822		vmg->pgoff = prev->vm_pgoff;
 823
 824		/*
 825		 * We already ensured anon_vma compatibility above, so now it's
 826		 * simply a case of, if prev has no anon_vma object, which of
 827		 * next or vma contains the anon_vma we must duplicate.
 828		 */
 829		err = dup_anon_vma(prev, next->anon_vma ? next : vma, &anon_dup);
 830	} else if (merge_left) {
 831		/*
 832		 *         |<----->| OR
 833		 *         |<--------->|
 834		 * |-------*************
 835		 *   prev       vma
 836		 *  extend shrink/delete
 837		 */
 838
 839		vmg->vma = prev;
 840		vmg->start = prev->vm_start;
 841		vmg->pgoff = prev->vm_pgoff;
 842
 843		if (!merge_will_delete_vma) {
 844			adjust = vma;
 845			adj_start = vmg->end - vma->vm_start;
 846		}
 847
 848		err = dup_anon_vma(prev, vma, &anon_dup);
 849	} else { /* merge_right */
 850		/*
 851		 *     |<----->| OR
 852		 * |<--------->|
 853		 * *************-------|
 854		 *      vma       next
 855		 * shrink/delete extend
 856		 */
 857
 858		pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start);
 859
 860		VM_WARN_ON(!merge_right);
 861		/* If we are offset into a VMA, then prev must be vma. */
 862		VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev);
 863
 864		if (merge_will_delete_vma) {
 865			vmg->vma = next;
 866			vmg->end = next->vm_end;
 867			vmg->pgoff = next->vm_pgoff - pglen;
 868		} else {
 869			/*
 870			 * We shrink vma and expand next.
 871			 *
 872			 * IMPORTANT: This is the ONLY case where the final
 873			 * merged VMA is NOT vmg->vma, but rather vmg->next.
 874			 */
 875
 876			vmg->start = vma->vm_start;
 877			vmg->end = start;
 878			vmg->pgoff = vma->vm_pgoff;
 879
 880			adjust = next;
 881			adj_start = -(vma->vm_end - start);
 882		}
 883
 884		err = dup_anon_vma(next, vma, &anon_dup);
 885	}
 886
 887	if (err)
 888		goto abort;
 889
 890	/*
 891	 * In nearly all cases, we expand vmg->vma. There is one exception -
 892	 * merge_right where we partially span the VMA. In this case we shrink
 893	 * the end of vmg->vma and adjust the start of vmg->next accordingly.
 894	 */
 895	expanded = !merge_right || merge_will_delete_vma;
 896
 897	if (commit_merge(vmg, adjust,
 898			 merge_will_delete_vma ? vma : NULL,
 899			 merge_will_delete_next ? next : NULL,
 900			 adj_start, expanded)) {
 901		if (anon_dup)
 902			unlink_anon_vmas(anon_dup);
 903
 904		vmg->state = VMA_MERGE_ERROR_NOMEM;
 905		return NULL;
 906	}
 907
 908	res = merge_left ? prev : next;
 909	khugepaged_enter_vma(res, vmg->flags);
 910
 911	vmg->state = VMA_MERGE_SUCCESS;
 912	return res;
 913
 914abort:
 915	vma_iter_set(vmg->vmi, start);
 916	vma_iter_load(vmg->vmi);
 917	vmg->state = VMA_MERGE_ERROR_NOMEM;
 918	return NULL;
 919}
 920
 921/*
  922 * vma_merge_new_range() - Attempt to merge a new VMA into address space
 923 *
 924 * @vmg: Describes the VMA we are adding, in the range @vmg->start to @vmg->end
 925 *       (exclusive), which we try to merge with any adjacent VMAs if possible.
 926 *
 927 * We are about to add a VMA to the address space starting at @vmg->start and
 928 * ending at @vmg->end. There are three different possible scenarios:
 929 *
 930 * 1. There is a VMA with identical properties immediately adjacent to the
 931 *    proposed new VMA [@vmg->start, @vmg->end) either before or after it -
 932 *    EXPAND that VMA:
 933 *
 934 * Proposed:       |-----|  or  |-----|
 935 * Existing:  |----|                  |----|
 936 *
 937 * 2. There are VMAs with identical properties immediately adjacent to the
 938 *    proposed new VMA [@vmg->start, @vmg->end) both before AND after it -
 939 *    EXPAND the former and REMOVE the latter:
 940 *
 941 * Proposed:       |-----|
 942 * Existing:  |----|     |----|
 943 *
 944 * 3. There are no VMAs immediately adjacent to the proposed new VMA or those
 945 *    VMAs do not have identical attributes - NO MERGE POSSIBLE.
 946 *
 947 * In instances where we can merge, this function returns the expanded VMA which
 948 * will have its range adjusted accordingly and the underlying maple tree also
 949 * adjusted.
 950 *
 951 * Returns: In instances where no merge was possible, NULL. Otherwise, a pointer
 952 *          to the VMA we expanded.
 953 *
 954 * This function adjusts @vmg to provide @vmg->next if not already specified,
 955 * and adjusts [@vmg->start, @vmg->end) to span the expanded range.
 956 *
 957 * ASSUMPTIONS:
 958 * - The caller must hold a WRITE lock on the mm_struct->mmap_lock.
 959 * - The caller must have determined that [@vmg->start, @vmg->end) is empty,
  960 *   other than VMAs that will be unmapped should the operation succeed.
 961 * - The caller must have specified the previous vma in @vmg->prev.
 962 * - The caller must have specified the next vma in @vmg->next.
 963 * - The caller must have positioned the vmi at or before the gap.
 964 */
 965struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
 966{
 967	struct vm_area_struct *prev = vmg->prev;
 968	struct vm_area_struct *next = vmg->next;
 969	unsigned long end = vmg->end;
 970	bool can_merge_left, can_merge_right;
 971	bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND;
 972
 973	mmap_assert_write_locked(vmg->mm);
 974	VM_WARN_ON(vmg->vma);
 975	/* vmi must point at or before the gap. */
 976	VM_WARN_ON(vma_iter_addr(vmg->vmi) > end);
 977
 978	vmg->state = VMA_MERGE_NOMERGE;
 979
 980	/* Special VMAs are unmergeable, also if no prev/next. */
 981	if ((vmg->flags & VM_SPECIAL) || (!prev && !next))
 982		return NULL;
 983
 984	can_merge_left = can_vma_merge_left(vmg);
 985	can_merge_right = !just_expand && can_vma_merge_right(vmg, can_merge_left);
 986
 987	/* If we can merge with the next VMA, adjust vmg accordingly. */
 988	if (can_merge_right) {
 989		vmg->end = next->vm_end;
 990		vmg->vma = next;
 991	}
 992
 993	/* If we can merge with the previous VMA, adjust vmg accordingly. */
 994	if (can_merge_left) {
 995		vmg->start = prev->vm_start;
 996		vmg->vma = prev;
 997		vmg->pgoff = prev->vm_pgoff;
 998
 999		/*
1000		 * If this merge would result in removal of the next VMA but we
1001		 * are not permitted to do so, reduce the operation to merging
1002		 * prev and vma.
1003		 */
1004		if (can_merge_right && !can_merge_remove_vma(next))
1005			vmg->end = end;
1006
1007		/* In expand-only case we are already positioned at prev. */
1008		if (!just_expand) {
1009			/* Equivalent to going to the previous range. */
1010			vma_prev(vmg->vmi);
1011		}
1012	}
1013
1014	/*
1015	 * Now try to expand adjacent VMA(s). This takes care of removing the
1016	 * following VMA if we have VMAs on both sides.
1017	 */
1018	if (vmg->vma && !vma_expand(vmg)) {
1019		khugepaged_enter_vma(vmg->vma, vmg->flags);
1020		vmg->state = VMA_MERGE_SUCCESS;
1021		return vmg->vma;
1022	}
1023
1024	return NULL;
1025}
1026
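/*
 * Editorial sketch (hypothetical caller, not part of the original source):
 * a mapping path that has found a free gap would typically try
 *
 *	struct vma_merge_struct vmg = {
 *		.mm = mm, .vmi = &vmi,
 *		.start = addr, .end = addr + len,
 *		.flags = vm_flags, .pgoff = pgoff,
 *		.state = VMA_MERGE_START,
 *		.merge_flags = VMG_FLAG_DEFAULT,
 *	};
 *
 *	vmg.prev = prev;
 *	vmg.next = vma_iter_next_rewind(&vmi, NULL);
 *	vma = vma_merge_new_range(&vmg);
 *	if (!vma)
 *		... allocate and link a fresh VMA instead ...
 *
 * See also vma_merge_extend() further below for an in-file user.
 */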
1027/*
 1028 * vma_expand() - Expand an existing VMA
1029 *
1030 * @vmg: Describes a VMA expansion operation.
1031 *
1032 * Expand @vma to vmg->start and vmg->end.  Can expand off the start and end.
1033 * Will expand over vmg->next if it's different from vmg->vma and vmg->end ==
1034 * vmg->next->vm_end.  Checking if the vmg->vma can expand and merge with
1035 * vmg->next needs to be handled by the caller.
1036 *
1037 * Returns: 0 on success.
1038 *
1039 * ASSUMPTIONS:
1040 * - The caller must hold a WRITE lock on vmg->vma->mm->mmap_lock.
1041 * - The caller must have set @vmg->vma and @vmg->next.
1042 */
1043int vma_expand(struct vma_merge_struct *vmg)
1044{
1045	struct vm_area_struct *anon_dup = NULL;
1046	bool remove_next = false;
1047	struct vm_area_struct *vma = vmg->vma;
1048	struct vm_area_struct *next = vmg->next;
1049
1050	mmap_assert_write_locked(vmg->mm);
1051
1052	vma_start_write(vma);
1053	if (next && (vma != next) && (vmg->end == next->vm_end)) {
1054		int ret;
1055
1056		remove_next = true;
1057		/* This should already have been checked by this point. */
1058		VM_WARN_ON(!can_merge_remove_vma(next));
1059		vma_start_write(next);
1060		ret = dup_anon_vma(vma, next, &anon_dup);
1061		if (ret)
1062			return ret;
1063	}
1064
1065	/* Not merging but overwriting any part of next is not handled. */
1066	VM_WARN_ON(next && !remove_next &&
1067		  next != vma && vmg->end > next->vm_start);
1068	/* Only handles expanding */
1069	VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end);
1070
1071	if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true))
1072		goto nomem;
1073
1074	return 0;
1075
1076nomem:
1077	vmg->state = VMA_MERGE_ERROR_NOMEM;
1078	if (anon_dup)
1079		unlink_anon_vmas(anon_dup);
1080	return -ENOMEM;
1081}
1082
1083/*
 1084 * vma_shrink() - Reduce an existing VMA's memory area
1085 * @vmi: The vma iterator
1086 * @vma: The VMA to modify
1087 * @start: The new start
1088 * @end: The new end
1089 *
1090 * Returns: 0 on success, -ENOMEM otherwise
1091 */
1092int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
1093	       unsigned long start, unsigned long end, pgoff_t pgoff)
1094{
1095	struct vma_prepare vp;
1096
1097	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
1098
1099	if (vma->vm_start < start)
1100		vma_iter_config(vmi, vma->vm_start, start);
1101	else
1102		vma_iter_config(vmi, end, vma->vm_end);
1103
1104	if (vma_iter_prealloc(vmi, NULL))
1105		return -ENOMEM;
1106
1107	vma_start_write(vma);
1108
1109	init_vma_prep(&vp, vma);
1110	vma_prepare(&vp);
1111	vma_adjust_trans_huge(vma, start, end, 0);
1112
1113	vma_iter_clear(vmi);
1114	vma_set_range(vma, start, end, pgoff);
1115	vma_complete(&vp, vmi, vma->vm_mm);
1116	validate_mm(vma->vm_mm);
1117	return 0;
1118}
1119
1120static inline void vms_clear_ptes(struct vma_munmap_struct *vms,
1121		    struct ma_state *mas_detach, bool mm_wr_locked)
1122{
1123	struct mmu_gather tlb;
1124
1125	if (!vms->clear_ptes) /* Nothing to do */
1126		return;
1127
1128	/*
1129	 * We can free page tables without write-locking mmap_lock because VMAs
1130	 * were isolated before we downgraded mmap_lock.
1131	 */
1132	mas_set(mas_detach, 1);
1133	lru_add_drain();
1134	tlb_gather_mmu(&tlb, vms->vma->vm_mm);
1135	update_hiwater_rss(vms->vma->vm_mm);
1136	unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end,
1137		   vms->vma_count, mm_wr_locked);
1138
1139	mas_set(mas_detach, 1);
1140	/* start and end may be different if there is no prev or next vma. */
1141	free_pgtables(&tlb, mas_detach, vms->vma, vms->unmap_start,
1142		      vms->unmap_end, mm_wr_locked);
1143	tlb_finish_mmu(&tlb);
1144	vms->clear_ptes = false;
1145}
1146
1147static void vms_clean_up_area(struct vma_munmap_struct *vms,
1148		struct ma_state *mas_detach)
1149{
1150	struct vm_area_struct *vma;
1151
1152	if (!vms->nr_pages)
1153		return;
1154
1155	vms_clear_ptes(vms, mas_detach, true);
1156	mas_set(mas_detach, 0);
1157	mas_for_each(mas_detach, vma, ULONG_MAX)
1158		vma_close(vma);
1159}
1160
1161/*
1162 * vms_complete_munmap_vmas() - Finish the munmap() operation
1163 * @vms: The vma munmap struct
1164 * @mas_detach: The maple state of the detached vmas
1165 *
1166 * This updates the mm_struct, unmaps the region, frees the resources
 1167 * used for the munmap() and may downgrade the lock, if requested. It covers
 1168 * everything that needs to be done once the vma maple tree has been updated.
1169 */
1170static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms,
1171		struct ma_state *mas_detach)
1172{
1173	struct vm_area_struct *vma;
1174	struct mm_struct *mm;
1175
1176	mm = current->mm;
1177	mm->map_count -= vms->vma_count;
1178	mm->locked_vm -= vms->locked_vm;
1179	if (vms->unlock)
1180		mmap_write_downgrade(mm);
1181
1182	if (!vms->nr_pages)
1183		return;
1184
1185	vms_clear_ptes(vms, mas_detach, !vms->unlock);
1186	/* Update high watermark before we lower total_vm */
1187	update_hiwater_vm(mm);
1188	/* Stat accounting */
1189	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm) - vms->nr_pages);
1190	/* Paranoid bookkeeping */
1191	VM_WARN_ON(vms->exec_vm > mm->exec_vm);
1192	VM_WARN_ON(vms->stack_vm > mm->stack_vm);
1193	VM_WARN_ON(vms->data_vm > mm->data_vm);
1194	mm->exec_vm -= vms->exec_vm;
1195	mm->stack_vm -= vms->stack_vm;
1196	mm->data_vm -= vms->data_vm;
1197
1198	/* Remove and clean up vmas */
1199	mas_set(mas_detach, 0);
1200	mas_for_each(mas_detach, vma, ULONG_MAX)
1201		remove_vma(vma, /* unreachable = */ false);
1202
1203	vm_unacct_memory(vms->nr_accounted);
1204	validate_mm(mm);
1205	if (vms->unlock)
1206		mmap_read_unlock(mm);
1207
1208	__mt_destroy(mas_detach->tree);
1209}
1210
1211/*
1212 * reattach_vmas() - Undo any munmap work and free resources
1213 * @mas_detach: The maple state with the detached maple tree
1214 *
1215 * Reattach any detached vmas and free up the maple tree used to track the vmas.
1216 */
1217static void reattach_vmas(struct ma_state *mas_detach)
1218{
1219	struct vm_area_struct *vma;
1220
1221	mas_set(mas_detach, 0);
1222	mas_for_each(mas_detach, vma, ULONG_MAX)
1223		vma_mark_detached(vma, false);
1224
1225	__mt_destroy(mas_detach->tree);
1226}
1227
1228/*
1229 * vms_gather_munmap_vmas() - Put all VMAs within a range into a maple tree
1230 * for removal at a later date.  Handles splitting first and last if necessary
1231 * and marking the vmas as isolated.
1232 *
1233 * @vms: The vma munmap struct
1234 * @mas_detach: The maple state tracking the detached tree
1235 *
1236 * Return: 0 on success, error otherwise
1237 */
1238static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
1239		struct ma_state *mas_detach)
1240{
1241	struct vm_area_struct *next = NULL;
1242	int error;
1243
1244	/*
1245	 * If we need to split any vma, do it now to save pain later.
1246	 * Does it split the first one?
1247	 */
1248	if (vms->start > vms->vma->vm_start) {
1249
1250		/*
1251		 * Make sure that map_count on return from munmap() will
1252		 * not exceed its limit; but let map_count go just above
1253		 * its limit temporarily, to help free resources as expected.
1254		 */
1255		if (vms->end < vms->vma->vm_end &&
1256		    vms->vma->vm_mm->map_count >= sysctl_max_map_count) {
1257			error = -ENOMEM;
1258			goto map_count_exceeded;
1259		}
1260
1261		/* Don't bother splitting the VMA if we can't unmap it anyway */
1262		if (!can_modify_vma(vms->vma)) {
1263			error = -EPERM;
1264			goto start_split_failed;
1265		}
1266
1267		error = __split_vma(vms->vmi, vms->vma, vms->start, 1);
1268		if (error)
1269			goto start_split_failed;
1270	}
1271	vms->prev = vma_prev(vms->vmi);
1272	if (vms->prev)
1273		vms->unmap_start = vms->prev->vm_end;
1274
1275	/*
1276	 * Detach a range of VMAs from the mm. Using next as a temp variable as
1277	 * it is always overwritten.
1278	 */
1279	for_each_vma_range(*(vms->vmi), next, vms->end) {
1280		long nrpages;
1281
1282		if (!can_modify_vma(next)) {
1283			error = -EPERM;
1284			goto modify_vma_failed;
1285		}
1286		/* Does it split the end? */
1287		if (next->vm_end > vms->end) {
1288			error = __split_vma(vms->vmi, next, vms->end, 0);
1289			if (error)
1290				goto end_split_failed;
1291		}
1292		vma_start_write(next);
1293		mas_set(mas_detach, vms->vma_count++);
1294		error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
1295		if (error)
1296			goto munmap_gather_failed;
1297
1298		vma_mark_detached(next, true);
1299		nrpages = vma_pages(next);
1300
1301		vms->nr_pages += nrpages;
1302		if (next->vm_flags & VM_LOCKED)
1303			vms->locked_vm += nrpages;
1304
1305		if (next->vm_flags & VM_ACCOUNT)
1306			vms->nr_accounted += nrpages;
1307
1308		if (is_exec_mapping(next->vm_flags))
1309			vms->exec_vm += nrpages;
1310		else if (is_stack_mapping(next->vm_flags))
1311			vms->stack_vm += nrpages;
1312		else if (is_data_mapping(next->vm_flags))
1313			vms->data_vm += nrpages;
1314
1315		if (vms->uf) {
1316			/*
1317			 * If userfaultfd_unmap_prep returns an error the vmas
1318			 * will remain split, but userland will get a
1319			 * highly unexpected error anyway. This is no
1320			 * different than the case where the first of the two
1321			 * __split_vma fails, but we don't undo the first
 1322			 * split, even though we could. This failure is
 1323			 * unlikely enough that it's not worth optimizing for.
1324			 */
1325			error = userfaultfd_unmap_prep(next, vms->start,
1326						       vms->end, vms->uf);
1327			if (error)
1328				goto userfaultfd_error;
1329		}
1330#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
1331		BUG_ON(next->vm_start < vms->start);
1332		BUG_ON(next->vm_start > vms->end);
1333#endif
1334	}
1335
1336	vms->next = vma_next(vms->vmi);
1337	if (vms->next)
1338		vms->unmap_end = vms->next->vm_start;
1339
1340#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
1341	/* Make sure no VMAs are about to be lost. */
1342	{
1343		MA_STATE(test, mas_detach->tree, 0, 0);
1344		struct vm_area_struct *vma_mas, *vma_test;
1345		int test_count = 0;
1346
1347		vma_iter_set(vms->vmi, vms->start);
1348		rcu_read_lock();
1349		vma_test = mas_find(&test, vms->vma_count - 1);
1350		for_each_vma_range(*(vms->vmi), vma_mas, vms->end) {
1351			BUG_ON(vma_mas != vma_test);
1352			test_count++;
1353			vma_test = mas_next(&test, vms->vma_count - 1);
1354		}
1355		rcu_read_unlock();
1356		BUG_ON(vms->vma_count != test_count);
1357	}
1358#endif
1359
1360	while (vma_iter_addr(vms->vmi) > vms->start)
1361		vma_iter_prev_range(vms->vmi);
1362
1363	vms->clear_ptes = true;
1364	return 0;
1365
1366userfaultfd_error:
1367munmap_gather_failed:
1368end_split_failed:
1369modify_vma_failed:
1370	reattach_vmas(mas_detach);
1371start_split_failed:
1372map_count_exceeded:
1373	return error;
1374}
1375
1376/*
1377 * init_vma_munmap() - Initializer wrapper for vma_munmap_struct
1378 * @vms: The vma munmap struct
1379 * @vmi: The vma iterator
1380 * @vma: The first vm_area_struct to munmap
1381 * @start: The aligned start address to munmap
1382 * @end: The aligned end address to munmap
1383 * @uf: The userfaultfd list_head
1384 * @unlock: Unlock after the operation.  Only unlocked on success
1385 */
1386static void init_vma_munmap(struct vma_munmap_struct *vms,
1387		struct vma_iterator *vmi, struct vm_area_struct *vma,
1388		unsigned long start, unsigned long end, struct list_head *uf,
1389		bool unlock)
1390{
1391	vms->vmi = vmi;
1392	vms->vma = vma;
1393	if (vma) {
1394		vms->start = start;
1395		vms->end = end;
1396	} else {
1397		vms->start = vms->end = 0;
1398	}
1399	vms->unlock = unlock;
1400	vms->uf = uf;
1401	vms->vma_count = 0;
1402	vms->nr_pages = vms->locked_vm = vms->nr_accounted = 0;
1403	vms->exec_vm = vms->stack_vm = vms->data_vm = 0;
1404	vms->unmap_start = FIRST_USER_ADDRESS;
1405	vms->unmap_end = USER_PGTABLES_CEILING;
1406	vms->clear_ptes = false;
1407}
1408
1409/*
1410 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
1411 * @vmi: The vma iterator
1412 * @vma: The starting vm_area_struct
1413 * @mm: The mm_struct
1414 * @start: The aligned start address to munmap.
1415 * @end: The aligned end address to munmap.
1416 * @uf: The userfaultfd list_head
1417 * @unlock: Set to true to drop the mmap_lock.  unlocking only happens on
1418 * success.
1419 *
1420 * Return: 0 on success and drops the lock if so directed, error and leaves the
1421 * lock held otherwise.
1422 */
1423int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
1424		struct mm_struct *mm, unsigned long start, unsigned long end,
1425		struct list_head *uf, bool unlock)
1426{
1427	struct maple_tree mt_detach;
1428	MA_STATE(mas_detach, &mt_detach, 0, 0);
1429	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1430	mt_on_stack(mt_detach);
1431	struct vma_munmap_struct vms;
1432	int error;
1433
1434	init_vma_munmap(&vms, vmi, vma, start, end, uf, unlock);
1435	error = vms_gather_munmap_vmas(&vms, &mas_detach);
1436	if (error)
1437		goto gather_failed;
1438
1439	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
1440	if (error)
1441		goto clear_tree_failed;
1442
1443	/* Point of no return */
1444	vms_complete_munmap_vmas(&vms, &mas_detach);
1445	return 0;
1446
1447clear_tree_failed:
1448	reattach_vmas(&mas_detach);
1449gather_failed:
1450	validate_mm(mm);
1451	return error;
1452}
1453
1454/*
1455 * do_vmi_munmap() - munmap a given range.
1456 * @vmi: The vma iterator
1457 * @mm: The mm_struct
1458 * @start: The start address to munmap
1459 * @len: The length of the range to munmap
1460 * @uf: The userfaultfd list_head
1461 * @unlock: set to true if the user wants to drop the mmap_lock on success
1462 *
 1463 * This function takes a @vmi that is either pointing to the previous VMA or set
1464 * to MA_START and sets it up to remove the mapping(s).  The @len will be
1465 * aligned.
1466 *
1467 * Return: 0 on success and drops the lock if so directed, error and leaves the
1468 * lock held otherwise.
1469 */
1470int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
1471		  unsigned long start, size_t len, struct list_head *uf,
1472		  bool unlock)
1473{
1474	unsigned long end;
1475	struct vm_area_struct *vma;
1476
1477	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
1478		return -EINVAL;
1479
1480	end = start + PAGE_ALIGN(len);
1481	if (end == start)
1482		return -EINVAL;
1483
1484	/* Find the first overlapping VMA */
1485	vma = vma_find(vmi, end);
1486	if (!vma) {
1487		if (unlock)
1488			mmap_write_unlock(mm);
1489		return 0;
1490	}
1491
1492	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
1493}
1494
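/*
 * Editorial example (not part of the original source): @start must already be
 * page aligned, while @len is rounded up. With 4 KiB pages,
 *
 *	do_vmi_munmap(&vmi, mm, addr, 5000, NULL, false)
 *
 * computes end = addr + PAGE_ALIGN(5000) = addr + 8192 and unmaps the two
 * pages covering [addr, addr + 8192).
 */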
1495/*
 1496 * We are about to modify one or more of a VMA's flags, policy, userfaultfd
1497 * context and anonymous VMA name within the range [start, end).
1498 *
1499 * As a result, we might be able to merge the newly modified VMA range with an
1500 * adjacent VMA with identical properties.
1501 *
1502 * If no merge is possible and the range does not span the entirety of the VMA,
1503 * we then need to split the VMA to accommodate the change.
1504 *
1505 * The function returns either the merged VMA, the original VMA if a split was
1506 * required instead, or an error if the split failed.
1507 */
1508static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg)
1509{
1510	struct vm_area_struct *vma = vmg->vma;
1511	unsigned long start = vmg->start;
1512	unsigned long end = vmg->end;
1513	struct vm_area_struct *merged;
1514
1515	/* First, try to merge. */
1516	merged = vma_merge_existing_range(vmg);
1517	if (merged)
1518		return merged;
1519	if (vmg_nomem(vmg))
1520		return ERR_PTR(-ENOMEM);
1521
1522	/* Split any preceding portion of the VMA. */
1523	if (vma->vm_start < start) {
1524		int err = split_vma(vmg->vmi, vma, start, 1);
1525
1526		if (err)
1527			return ERR_PTR(err);
1528	}
1529
1530	/* Split any trailing portion of the VMA. */
1531	if (vma->vm_end > end) {
1532		int err = split_vma(vmg->vmi, vma, end, 0);
1533
1534		if (err)
1535			return ERR_PTR(err);
1536	}
1537
1538	return vma;
1539}
1540
1541struct vm_area_struct *vma_modify_flags(
1542	struct vma_iterator *vmi, struct vm_area_struct *prev,
1543	struct vm_area_struct *vma, unsigned long start, unsigned long end,
1544	unsigned long new_flags)
1545{
1546	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1547
1548	vmg.flags = new_flags;
1549
1550	return vma_modify(&vmg);
1551}
1552
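/*
 * Editorial sketch (hypothetical mprotect()-style caller, not part of the
 * original source):
 *
 *	vma = vma_modify_flags(&vmi, prev, vma, start, end, new_flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * On success the returned VMA covers at least [start, end).
 */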
1553struct vm_area_struct
1554*vma_modify_flags_name(struct vma_iterator *vmi,
1555		       struct vm_area_struct *prev,
1556		       struct vm_area_struct *vma,
1557		       unsigned long start,
1558		       unsigned long end,
1559		       unsigned long new_flags,
1560		       struct anon_vma_name *new_name)
1561{
1562	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1563
1564	vmg.flags = new_flags;
1565	vmg.anon_name = new_name;
1566
1567	return vma_modify(&vmg);
1568}
1569
1570struct vm_area_struct
1571*vma_modify_policy(struct vma_iterator *vmi,
1572		   struct vm_area_struct *prev,
1573		   struct vm_area_struct *vma,
1574		   unsigned long start, unsigned long end,
1575		   struct mempolicy *new_pol)
1576{
1577	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1578
1579	vmg.policy = new_pol;
1580
1581	return vma_modify(&vmg);
1582}
1583
1584struct vm_area_struct
1585*vma_modify_flags_uffd(struct vma_iterator *vmi,
1586		       struct vm_area_struct *prev,
1587		       struct vm_area_struct *vma,
1588		       unsigned long start, unsigned long end,
1589		       unsigned long new_flags,
1590		       struct vm_userfaultfd_ctx new_ctx)
1591{
1592	VMG_VMA_STATE(vmg, vmi, prev, vma, start, end);
1593
1594	vmg.flags = new_flags;
1595	vmg.uffd_ctx = new_ctx;
1596
1597	return vma_modify(&vmg);
1598}
1599
1600/*
1601 * Expand vma by delta bytes, potentially merging with an immediately adjacent
1602 * VMA with identical properties.
1603 */
1604struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi,
1605					struct vm_area_struct *vma,
1606					unsigned long delta)
1607{
1608	VMG_VMA_STATE(vmg, vmi, vma, vma, vma->vm_end, vma->vm_end + delta);
1609
1610	vmg.next = vma_iter_next_rewind(vmi, NULL);
1611	vmg.vma = NULL; /* We use the VMA to populate VMG fields only. */
1612
1613	return vma_merge_new_range(&vmg);
1614}
1615
1616void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb)
1617{
1618	vb->count = 0;
1619}
1620
1621static void unlink_file_vma_batch_process(struct unlink_vma_file_batch *vb)
1622{
1623	struct address_space *mapping;
1624	int i;
1625
1626	mapping = vb->vmas[0]->vm_file->f_mapping;
1627	i_mmap_lock_write(mapping);
1628	for (i = 0; i < vb->count; i++) {
1629		VM_WARN_ON_ONCE(vb->vmas[i]->vm_file->f_mapping != mapping);
1630		__remove_shared_vm_struct(vb->vmas[i], mapping);
1631	}
1632	i_mmap_unlock_write(mapping);
1633
1634	unlink_file_vma_batch_init(vb);
1635}
1636
1637void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
1638			       struct vm_area_struct *vma)
1639{
1640	if (vma->vm_file == NULL)
1641		return;
1642
1643	if ((vb->count > 0 && vb->vmas[0]->vm_file != vma->vm_file) ||
1644	    vb->count == ARRAY_SIZE(vb->vmas))
1645		unlink_file_vma_batch_process(vb);
1646
1647	vb->vmas[vb->count] = vma;
1648	vb->count++;
1649}
1650
1651void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb)
1652{
1653	if (vb->count > 0)
1654		unlink_file_vma_batch_process(vb);
1655}
1656
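/*
 * Editorial sketch (hypothetical caller, not part of the original source):
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	for_each_vma(vmi, vma)
 *		unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 *
 * Batching amortises the i_mmap_lock_write() taken per distinct mapping in
 * unlink_file_vma_batch_process() above.
 */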
1657/*
1658 * Unlink a file-based vm structure from its interval tree, to hide
1659 * vma from rmap and vmtruncate before freeing its page tables.
1660 */
1661void unlink_file_vma(struct vm_area_struct *vma)
1662{
1663	struct file *file = vma->vm_file;
1664
1665	if (file) {
1666		struct address_space *mapping = file->f_mapping;
1667
1668		i_mmap_lock_write(mapping);
1669		__remove_shared_vm_struct(vma, mapping);
1670		i_mmap_unlock_write(mapping);
1671	}
1672}
1673
1674void vma_link_file(struct vm_area_struct *vma)
1675{
1676	struct file *file = vma->vm_file;
1677	struct address_space *mapping;
1678
1679	if (file) {
1680		mapping = file->f_mapping;
1681		i_mmap_lock_write(mapping);
1682		__vma_link_file(vma, mapping);
1683		i_mmap_unlock_write(mapping);
1684	}
1685}
1686
1687int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
1688{
1689	VMA_ITERATOR(vmi, mm, 0);
1690
1691	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1692	if (vma_iter_prealloc(&vmi, vma))
1693		return -ENOMEM;
1694
1695	vma_start_write(vma);
1696	vma_iter_store(&vmi, vma);
1697	vma_link_file(vma);
1698	mm->map_count++;
1699	validate_mm(mm);
1700	return 0;
1701}
1702
1703/*
1704 * Copy the vma structure to a new location in the same mm,
1705 * prior to moving page table entries, to effect an mremap move.
1706 */
1707struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
1708	unsigned long addr, unsigned long len, pgoff_t pgoff,
1709	bool *need_rmap_locks)
1710{
1711	struct vm_area_struct *vma = *vmap;
1712	unsigned long vma_start = vma->vm_start;
1713	struct mm_struct *mm = vma->vm_mm;
1714	struct vm_area_struct *new_vma;
1715	bool faulted_in_anon_vma = true;
1716	VMA_ITERATOR(vmi, mm, addr);
1717	VMG_VMA_STATE(vmg, &vmi, NULL, vma, addr, addr + len);
1718
1719	/*
1720	 * If anonymous vma has not yet been faulted, update new pgoff
1721	 * to match new location, to increase its chance of merging.
1722	 */
1723	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
1724		pgoff = addr >> PAGE_SHIFT;
1725		faulted_in_anon_vma = false;
1726	}
1727
1728	new_vma = find_vma_prev(mm, addr, &vmg.prev);
1729	if (new_vma && new_vma->vm_start < addr + len)
1730		return NULL;	/* should never get here */
1731
1732	vmg.vma = NULL; /* New VMA range. */
1733	vmg.pgoff = pgoff;
1734	vmg.next = vma_iter_next_rewind(&vmi, NULL);
1735	new_vma = vma_merge_new_range(&vmg);
1736
1737	if (new_vma) {
1738		/*
1739		 * Source vma may have been merged into new_vma
1740		 */
1741		if (unlikely(vma_start >= new_vma->vm_start &&
1742			     vma_start < new_vma->vm_end)) {
1743			/*
1744			 * The only way we can get a vma_merge with
1745			 * self during an mremap is if the vma hasn't
1746			 * been faulted in yet and we were allowed to
1747			 * reset the dst vma->vm_pgoff to the
1748			 * destination address of the mremap to allow
1749			 * the merge to happen. mremap must change the
1750			 * vm_pgoff linearity between src and dst vmas
1751			 * (in turn preventing a vma_merge) to be
1752			 * safe. It is only safe to keep the vm_pgoff
1753			 * linear if there are no pages mapped yet.
1754			 */
1755			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
1756			*vmap = vma = new_vma;
1757		}
1758		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
1759	} else {
1760		new_vma = vm_area_dup(vma);
1761		if (!new_vma)
1762			goto out;
1763		vma_set_range(new_vma, addr, addr + len, pgoff);
1764		if (vma_dup_policy(vma, new_vma))
1765			goto out_free_vma;
1766		if (anon_vma_clone(new_vma, vma))
1767			goto out_free_mempol;
1768		if (new_vma->vm_file)
1769			get_file(new_vma->vm_file);
1770		if (new_vma->vm_ops && new_vma->vm_ops->open)
1771			new_vma->vm_ops->open(new_vma);
1772		if (vma_link(mm, new_vma))
1773			goto out_vma_link;
1774		*need_rmap_locks = false;
1775	}
1776	return new_vma;
1777
1778out_vma_link:
1779	vma_close(new_vma);
1780
1781	if (new_vma->vm_file)
1782		fput(new_vma->vm_file);
1783
1784	unlink_anon_vmas(new_vma);
1785out_free_mempol:
1786	mpol_put(vma_policy(new_vma));
1787out_free_vma:
1788	vm_area_free(new_vma);
1789out:
1790	return NULL;
1791}
1792
1793/*
1794 * Rough compatibility check to quickly see if it's even worth looking
1795 * at sharing an anon_vma.
1796 *
1797 * They need to have the same vm_file, and the flags can only differ
1798 * in things that mprotect may change.
1799 *
1800 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1801 * we can merge the two vma's. For example, we refuse to merge a vma if
1802 * there is a vm_ops->close() function, because that indicates that the
1803 * driver is doing some kind of reference counting. But that doesn't
1804 * really matter for the anon_vma sharing case.
1805 */
1806static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1807{
1808	return a->vm_end == b->vm_start &&
1809		mpol_equal(vma_policy(a), vma_policy(b)) &&
1810		a->vm_file == b->vm_file &&
1811		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1812		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1813}
1814
1815/*
1816 * Do some basic sanity checking to see if we can re-use the anon_vma
1817 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1818 * the same as 'old', the other will be the new one that is trying
1819 * to share the anon_vma.
1820 *
1821 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1822 * the anon_vma of 'old' is concurrently in the process of being set up
1823 * by another page fault trying to merge _that_. But that's ok: if it
1824 * is being set up, that automatically means that it will be a singleton
1825 * acceptable for merging, so we can do all of this optimistically. But
1826 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1827 *
1828 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1829 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1830 * is to return an anon_vma that is "complex" due to having gone through
1831 * a fork).
1832 *
1833 * We also make sure that the two vma's are compatible (adjacent,
1834 * and with the same memory policies). That's all stable, even with just
1835 * a read lock on the mmap_lock.
1836 */
1837static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old,
1838					  struct vm_area_struct *a,
1839					  struct vm_area_struct *b)
1840{
1841	if (anon_vma_compatible(a, b)) {
1842		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1843
1844		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1845			return anon_vma;
1846	}
1847	return NULL;
1848}
1849
1850/*
1851 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1852 * neighbouring vmas for a suitable anon_vma, before it goes off
1853	 * to allocate a new anon_vma.  This matters because a repetitive
1854	 * sequence of mprotects and faults may otherwise lead to distinct
1855	 * anon_vmas being allocated, preventing VMA merging in a subsequent
1856	 * mprotect().
1857 */
1858struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1859{
1860	struct anon_vma *anon_vma = NULL;
1861	struct vm_area_struct *prev, *next;
1862	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_end);
1863
1864	/* Try next first. */
1865	next = vma_iter_load(&vmi);
1866	if (next) {
1867		anon_vma = reusable_anon_vma(next, vma, next);
1868		if (anon_vma)
1869			return anon_vma;
1870	}
1871
1872	prev = vma_prev(&vmi);
1873	VM_BUG_ON_VMA(prev != vma, vma);
1874	prev = vma_prev(&vmi);
1875	/* Now try prev. */
1876	if (prev)
1877		anon_vma = reusable_anon_vma(prev, prev, vma);
1878
1879	/*
1880	 * We might reach here with anon_vma == NULL if we can't find
1881	 * any reusable anon_vma.
1882	 * There's no absolute need to look only at touching neighbours:
1883	 * we could search further afield for "compatible" anon_vmas.
1884	 * But it would probably just be a waste of time searching,
1885	 * or lead to too many vmas hanging off the same anon_vma.
1886	 * We're trying to allow mprotect remerging later on,
1887	 * not trying to minimize memory used for anon_vmas.
1888	 */
1889	return anon_vma;
1890}
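
/*
 * Illustrative sketch of the caller side (simplified; see __anon_vma_prepare()
 * in mm/rmap.c for the real thing): on the first fault of an anonymous VMA
 * that has no anon_vma yet, the fault path roughly does
 *
 *	anon_vma = find_mergeable_anon_vma(vma);
 *	if (!anon_vma)
 *		anon_vma = anon_vma_alloc();
 *
 * so a compatible neighbour's anon_vma is reused when possible, keeping a
 * later merge of the two VMAs achievable.
 */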
1891
1892static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1893{
1894	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1895}
1896
1897static bool vma_is_shared_writable(struct vm_area_struct *vma)
1898{
1899	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1900		(VM_WRITE | VM_SHARED);
1901}
1902
1903static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1904{
1905	/* No managed pages to writeback. */
1906	if (vma->vm_flags & VM_PFNMAP)
1907		return false;
1908
1909	return vma->vm_file && vma->vm_file->f_mapping &&
1910		mapping_can_writeback(vma->vm_file->f_mapping);
1911}
1912
1913/*
1914 * Does this VMA require the underlying folios to have their dirty state
1915 * tracked?
1916 */
1917bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1918{
1919	/* Only shared, writable VMAs require dirty tracking. */
1920	if (!vma_is_shared_writable(vma))
1921		return false;
1922
1923	/* Does the filesystem need to be notified? */
1924	if (vm_ops_needs_writenotify(vma->vm_ops))
1925		return true;
1926
1927	/*
1928	 * Even if the filesystem doesn't indicate a need for writenotify, if it
1929	 * can writeback, dirty tracking is still required.
1930	 */
1931	return vma_fs_can_writeback(vma);
1932}
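
/*
 * For example (a simplified walk-through, not an exhaustive list): a
 * MAP_SHARED, PROT_WRITE mapping of a regular file on a writeback-capable
 * filesystem needs dirty tracking, either because the filesystem implements
 * page_mkwrite()/pfn_mkwrite() or simply because its pages can be written
 * back; the same file mapped MAP_PRIVATE, or mapped read-only, fails
 * vma_is_shared_writable() and needs no tracking here.
 */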
1933
1934/*
1935 * Some shared mappings will want the pages marked read-only
1936 * to track write events. If so, we'll downgrade vm_page_prot
1937 * to the private version (using protection_map[] without the
1938 * VM_SHARED bit).
1939 */
1940bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1941{
1942	/* If it was private or non-writable, the write bit is already clear */
1943	if (!vma_is_shared_writable(vma))
1944		return false;
1945
1946	/* The backer wishes to know when pages are first written to? */
1947	if (vm_ops_needs_writenotify(vma->vm_ops))
1948		return true;
1949
1950	/* The open routine did something to the protections that pgprot_modify
1951	 * won't preserve? */
1952	if (pgprot_val(vm_page_prot) !=
1953	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1954		return false;
1955
1956	/*
1957	 * Do we need to track softdirty? hugetlb does not support softdirty
1958	 * tracking yet.
1959	 */
1960	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1961		return true;
1962
1963	/* Do we need write faults for uffd-wp tracking? */
1964	if (userfaultfd_wp(vma))
1965		return true;
1966
1967	/* Can the mapping track the dirty pages? */
1968	return vma_fs_can_writeback(vma);
1969}
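
/*
 * Illustrative sketch of how this is consumed (simplified from
 * vma_set_page_prot() in mm/mmap.c): when write notification is wanted, the
 * shared bit is dropped before recomputing the page protection, so the first
 * write to each page faults and can be intercepted:
 *
 *	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 *	if (vma_wants_writenotify(vma, vm_page_prot)) {
 *		vm_flags &= ~VM_SHARED;
 *		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
 *	}
 *	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
 */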
1970
1971static DEFINE_MUTEX(mm_all_locks_mutex);
1972
1973static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
1974{
1975	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
1976		/*
1977		 * The LSB of head.next can't change from under us
1978		 * because we hold the mm_all_locks_mutex.
1979		 */
1980		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
1981		/*
1982		 * We can safely modify head.next after taking the
1983		 * anon_vma->root->rwsem. If some other vma in this mm shares
1984		 * the same anon_vma we won't take it again.
1985		 *
1986		 * No need of atomic instructions here, head.next
1987		 * can't change from under us thanks to the
1988		 * anon_vma->root->rwsem.
1989		 */
1990		if (__test_and_set_bit(0, (unsigned long *)
1991				       &anon_vma->root->rb_root.rb_root.rb_node))
1992			BUG();
1993	}
1994}
1995
1996static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
1997{
1998	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
1999		/*
2000		 * AS_MM_ALL_LOCKS can't change from under us because
2001		 * we hold the mm_all_locks_mutex.
2002		 *
2003		 * Operations on ->flags have to be atomic because
2004		 * even if AS_MM_ALL_LOCKS is stable thanks to the
2005		 * mm_all_locks_mutex, there may be other cpus
2006		 * changing other bitflags in parallel to us.
2007		 */
2008		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2009			BUG();
2010		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
2011	}
2012}
2013
2014/*
2015 * This operation locks against the VM for all pte/vma/mm related
2016 * operations that could ever happen on a certain mm. This includes
2017 * vmtruncate, try_to_unmap, and all page faults.
2018 *
2019 * The caller must take the mmap_lock in write mode before calling
2020 * mm_take_all_locks(). The caller isn't allowed to release the
2021 * mmap_lock until mm_drop_all_locks() returns.
2022 *
2023	 * mmap_lock in write mode is required in order to block all operations
2024	 * that could modify pagetables and free pages without needing to
2025	 * alter the vma layout. It's also needed in write mode to prevent new
2026	 * anon_vmas from being associated with existing vmas.
2027 *
2028 * A single task can't take more than one mm_take_all_locks() in a row
2029 * or it would deadlock.
2030 *
2031 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
2032	 * mapping->flags avoid taking the same lock twice, if more than one
2033 * vma in this mm is backed by the same anon_vma or address_space.
2034 *
2035	 * We take locks in the following order, according to the comment at the
2036	 * beginning of mm/rmap.c:
2037	 *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
2038	 *     hugetlb mappings);
2039	 *   - all vmas marked locked;
2040	 *   - all i_mmap_rwsem locks;
2041	 *   - all anon_vma->rwsem locks.
2042 *
2043	 * We can take locks within each of these types in any order, because the
2044	 * VM code doesn't nest them and we are protected from parallel
2045	 * mm_take_all_locks() calls by mm_all_locks_mutex.
2046 *
2047	 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
2048	 * that may have to take thousands of locks.
2049 *
2050 * mm_take_all_locks() can fail if it's interrupted by signals.
2051 */
2052int mm_take_all_locks(struct mm_struct *mm)
2053{
2054	struct vm_area_struct *vma;
2055	struct anon_vma_chain *avc;
2056	VMA_ITERATOR(vmi, mm, 0);
2057
2058	mmap_assert_write_locked(mm);
2059
2060	mutex_lock(&mm_all_locks_mutex);
2061
2062	/*
2063	 * vma_start_write() does not have a complement in mm_drop_all_locks()
2064	 * because vma_start_write() is always asymmetrical; it marks a VMA as
2065	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
2066	 * is reached.
2067	 */
2068	for_each_vma(vmi, vma) {
2069		if (signal_pending(current))
2070			goto out_unlock;
2071		vma_start_write(vma);
2072	}
2073
2074	vma_iter_init(&vmi, mm, 0);
2075	for_each_vma(vmi, vma) {
2076		if (signal_pending(current))
2077			goto out_unlock;
2078		if (vma->vm_file && vma->vm_file->f_mapping &&
2079				is_vm_hugetlb_page(vma))
2080			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2081	}
2082
2083	vma_iter_init(&vmi, mm, 0);
2084	for_each_vma(vmi, vma) {
2085		if (signal_pending(current))
2086			goto out_unlock;
2087		if (vma->vm_file && vma->vm_file->f_mapping &&
2088				!is_vm_hugetlb_page(vma))
2089			vm_lock_mapping(mm, vma->vm_file->f_mapping);
2090	}
2091
2092	vma_iter_init(&vmi, mm, 0);
2093	for_each_vma(vmi, vma) {
2094		if (signal_pending(current))
2095			goto out_unlock;
2096		if (vma->anon_vma)
2097			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2098				vm_lock_anon_vma(mm, avc->anon_vma);
2099	}
2100
2101	return 0;
2102
2103out_unlock:
2104	mm_drop_all_locks(mm);
2105	return -EINTR;
2106}
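
/*
 * Sketch of the expected caller pattern (simplified; mmu_notifier
 * registration is one such user):
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		... publish state that page faults and rmap must not race with ...
 *		mm_drop_all_locks(mm);
 *	}
 *	mmap_write_unlock(mm);
 *
 * A failure (-EINTR) means a pending signal interrupted the lock sweep; in
 * that case everything taken so far has already been dropped.
 */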
2107
2108static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2109{
2110	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
2111		/*
2112		 * The LSB of head.next can't change to 0 from under
2113		 * us because we hold the mm_all_locks_mutex.
2114		 *
2115		 * We must however clear the bitflag before unlocking
2116		 * the vma so the users using the anon_vma->rb_root will
2117		 * never see our bitflag.
2118		 *
2119		 * No need of atomic instructions here, head.next
2120		 * can't change from under us until we release the
2121		 * anon_vma->root->rwsem.
2122		 */
2123		if (!__test_and_clear_bit(0, (unsigned long *)
2124					  &anon_vma->root->rb_root.rb_root.rb_node))
2125			BUG();
2126		anon_vma_unlock_write(anon_vma);
2127	}
2128}
2129
2130static void vm_unlock_mapping(struct address_space *mapping)
2131{
2132	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2133		/*
2134		 * AS_MM_ALL_LOCKS can't change to 0 from under us
2135		 * because we hold the mm_all_locks_mutex.
2136		 */
2137		i_mmap_unlock_write(mapping);
2138		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2139					&mapping->flags))
2140			BUG();
2141	}
2142}
2143
2144/*
2145 * The mmap_lock cannot be released by the caller until
2146 * mm_drop_all_locks() returns.
2147 */
2148void mm_drop_all_locks(struct mm_struct *mm)
2149{
2150	struct vm_area_struct *vma;
2151	struct anon_vma_chain *avc;
2152	VMA_ITERATOR(vmi, mm, 0);
2153
2154	mmap_assert_write_locked(mm);
2155	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2156
2157	for_each_vma(vmi, vma) {
2158		if (vma->anon_vma)
2159			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2160				vm_unlock_anon_vma(avc->anon_vma);
2161		if (vma->vm_file && vma->vm_file->f_mapping)
2162			vm_unlock_mapping(vma->vm_file->f_mapping);
2163	}
2164
2165	mutex_unlock(&mm_all_locks_mutex);
2166}
2167
2168/*
2169	 * We account for memory if it's a private writable mapping that isn't
2170	 * backed by hugepages and VM_NORESERVE wasn't set.
2171 */
2172static bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
2173{
2174	/*
2175	 * hugetlb has its own accounting separate from the core VM.
2176	 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
2177	 */
2178	if (file && is_file_hugepages(file))
2179		return false;
2180
2181	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
2182}
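
/*
 * For example (illustrative combinations): an anonymous MAP_PRIVATE,
 * PROT_READ|PROT_WRITE mapping has VM_WRITE set with VM_SHARED and
 * VM_NORESERVE clear, so it is accounted; a MAP_SHARED mapping, a read-only
 * mapping, or a MAP_NORESERVE mapping is not, and hugetlbfs-backed files are
 * always skipped here because hugetlb does its own reservation accounting.
 */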
2183
2184/*
2185 * vms_abort_munmap_vmas() - Undo as much as possible from an aborted munmap()
2186 * operation.
2187 * @vms: The vma unmap structure
2188 * @mas_detach: The maple state with the detached maple tree
2189 *
2190	 * Reattach any detached vmas and free the maple tree used to track them.
2191	 * If that's not possible because the ptes have been cleared (and vm_ops->close()
2192	 * may have been called), then NULL is written over the vmas and the vmas are
2193	 * removed (the munmap() is effectively completed).
2194 */
2195static void vms_abort_munmap_vmas(struct vma_munmap_struct *vms,
2196		struct ma_state *mas_detach)
2197{
2198	struct ma_state *mas = &vms->vmi->mas;
2199
2200	if (!vms->nr_pages)
2201		return;
2202
2203	if (vms->clear_ptes)
2204		return reattach_vmas(mas_detach);
2205
2206	/*
2207	 * Aborting cannot just call the vm_ops->open() callbacks because they are often
2208	 * not symmetrical and state data has been lost.  Resort to the old
2209	 * failure method of leaving a gap where the MAP_FIXED mapping failed.
2210	 */
2211	mas_set_range(mas, vms->start, vms->end - 1);
2212	mas_store_gfp(mas, NULL, GFP_KERNEL|__GFP_NOFAIL);
2213	/* Clean up the insertion of the unfortunate gap */
2214	vms_complete_munmap_vmas(vms, mas_detach);
2215}
2216
2217/*
2218 * __mmap_prepare() - Prepare to gather any overlapping VMAs that need to be
2219 * unmapped once the map operation is completed, check limits, account mapping
2220 * and clean up any pre-existing VMAs.
2221 *
2222 * @map: Mapping state.
2223 * @uf:  Userfaultfd context list.
2224 *
2225 * Returns: 0 on success, error code otherwise.
2226 */
2227static int __mmap_prepare(struct mmap_state *map, struct list_head *uf)
2228{
2229	int error;
2230	struct vma_iterator *vmi = map->vmi;
2231	struct vma_munmap_struct *vms = &map->vms;
2232
2233	/* Find the first overlapping VMA and initialise unmap state. */
2234	vms->vma = vma_find(vmi, map->end);
2235	init_vma_munmap(vms, vmi, vms->vma, map->addr, map->end, uf,
2236			/* unlock = */ false);
2237
2238	/* OK, we have overlapping VMAs - prepare to unmap them. */
2239	if (vms->vma) {
2240		mt_init_flags(&map->mt_detach,
2241			      vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2242		mt_on_stack(map->mt_detach);
2243		mas_init(&map->mas_detach, &map->mt_detach, /* addr = */ 0);
2244		/* Prepare to unmap any existing mapping in the area */
2245		error = vms_gather_munmap_vmas(vms, &map->mas_detach);
2246		if (error) {
2247			/* On error VMAs will already have been reattached. */
2248			vms->nr_pages = 0;
2249			return error;
2250		}
2251
2252		map->next = vms->next;
2253		map->prev = vms->prev;
2254	} else {
2255		map->next = vma_iter_next_rewind(vmi, &map->prev);
2256	}
2257
2258	/* Check against address space limit. */
2259	if (!may_expand_vm(map->mm, map->flags, map->pglen - vms->nr_pages))
2260		return -ENOMEM;
2261
2262	/* Private writable mapping: check memory availability. */
2263	if (accountable_mapping(map->file, map->flags)) {
2264		map->charged = map->pglen;
2265		map->charged -= vms->nr_accounted;
2266		if (map->charged) {
2267			error = security_vm_enough_memory_mm(map->mm, map->charged);
2268			if (error)
2269				return error;
2270		}
2271
2272		vms->nr_accounted = 0;
2273		map->flags |= VM_ACCOUNT;
2274	}
2275
2276	/*
2277	 * Clear PTEs while the vma is still in the tree so that rmap
2278	 * cannot race with the freeing later in the truncate scenario.
2279	 * This is also needed for mmap_file(), which is why the vm_ops
2280	 * close() function is called.
2281	 */
2282	vms_clean_up_area(vms, &map->mas_detach);
2283
2284	return 0;
2285}
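
/*
 * Charging example (a simplified sketch): an accountable 16-page MAP_FIXED
 * mapping that replaces 10 pages which were already accounted only charges
 * the difference against the commit limit:
 *
 *	map->charged = map->pglen - vms->nr_accounted;	16 - 10 == 6 pages
 *
 * and zeroing vms->nr_accounted hands the old pages' accounting over to the
 * new mapping rather than un-accounting it at completion time.
 */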
2286
2287
2288static int __mmap_new_file_vma(struct mmap_state *map,
2289			       struct vm_area_struct *vma)
2290{
2291	struct vma_iterator *vmi = map->vmi;
2292	int error;
2293
2294	vma->vm_file = get_file(map->file);
2295	error = mmap_file(vma->vm_file, vma);
2296	if (error) {
2297		fput(vma->vm_file);
2298		vma->vm_file = NULL;
2299
2300		vma_iter_set(vmi, vma->vm_end);
2301		/* Undo any partial mapping done by a device driver. */
2302		unmap_region(&vmi->mas, vma, map->prev, map->next);
2303
2304		return error;
2305	}
2306
2307	/* Drivers cannot alter the address of the VMA. */
2308	WARN_ON_ONCE(map->addr != vma->vm_start);
2309	/*
2310	 * Drivers should not permit writability when it was previously
2311	 * disallowed.
2312	 */
2313	VM_WARN_ON_ONCE(map->flags != vma->vm_flags &&
2314			!(map->flags & VM_MAYWRITE) &&
2315			(vma->vm_flags & VM_MAYWRITE));
2316
2317	/* If the flags change (and are mergeable), let's retry later. */
2318	map->retry_merge = vma->vm_flags != map->flags && !(vma->vm_flags & VM_SPECIAL);
2319	map->flags = vma->vm_flags;
2320
2321	return 0;
2322}
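
/*
 * For instance (an illustrative case): if a driver's mmap hook adjusts
 * vma->vm_flags, e.g. clears VM_MAYWRITE for a backing object that can
 * never be written, the new flags may now match a neighbouring VMA, so
 * retry_merge asks __mmap_region() to attempt a merge again once the VMA is
 * in place; flags that became VM_SPECIAL (VM_IO, VM_PFNMAP, ...) never
 * merge and are excluded.
 */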
2323
2324/*
2325 * __mmap_new_vma() - Allocate a new VMA for the region, as merging was not
2326 * possible.
2327 *
2328 * @map:  Mapping state.
2329 * @vmap: Output pointer for the new VMA.
2330 *
2331 * Returns: Zero on success, or an error.
2332 */
2333static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
2334{
2335	struct vma_iterator *vmi = map->vmi;
2336	int error = 0;
2337	struct vm_area_struct *vma;
2338
2339	/*
2340	 * Determine the object being mapped and call the appropriate
2341	 * specific mapper. The address has already been validated, but
2342	 * not unmapped; the overlapping VMAs, however, have already been removed.
2343	 */
2344	vma = vm_area_alloc(map->mm);
2345	if (!vma)
2346		return -ENOMEM;
2347
2348	vma_iter_config(vmi, map->addr, map->end);
2349	vma_set_range(vma, map->addr, map->end, map->pgoff);
2350	vm_flags_init(vma, map->flags);
2351	vma->vm_page_prot = vm_get_page_prot(map->flags);
2352
2353	if (vma_iter_prealloc(vmi, vma)) {
2354		error = -ENOMEM;
2355		goto free_vma;
2356	}
2357
2358	if (map->file)
2359		error = __mmap_new_file_vma(map, vma);
2360	else if (map->flags & VM_SHARED)
2361		error = shmem_zero_setup(vma);
2362	else
2363		vma_set_anonymous(vma);
2364
2365	if (error)
2366		goto free_iter_vma;
2367
2368#ifdef CONFIG_SPARC64
2369	/* TODO: Fix SPARC ADI! */
2370	WARN_ON_ONCE(!arch_validate_flags(map->flags));
2371#endif
2372
2373	/* Lock the VMA since it is modified after insertion into VMA tree */
2374	vma_start_write(vma);
2375	vma_iter_store(vmi, vma);
2376	map->mm->map_count++;
2377	vma_link_file(vma);
2378
2379	/*
2380	 * vma_merge_new_range() calls khugepaged_enter_vma() too; the call
2381	 * below covers the non-merge case.
2382	 */
2383	khugepaged_enter_vma(vma, map->flags);
2384	ksm_add_vma(vma);
2385	*vmap = vma;
2386	return 0;
2387
2388free_iter_vma:
2389	vma_iter_free(vmi);
2390free_vma:
2391	vm_area_free(vma);
2392	return error;
2393}
2394
2395/*
2396 * __mmap_complete() - Unmap any VMAs we overlap, account memory mapping
2397 *                     statistics, handle locking and finalise the VMA.
2398 *
2399 * @map: Mapping state.
2400 * @vma: Merged or newly allocated VMA for the mmap()'d region.
2401 */
2402static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma)
2403{
2404	struct mm_struct *mm = map->mm;
2405	unsigned long vm_flags = vma->vm_flags;
2406
2407	perf_event_mmap(vma);
2408
2409	/* Unmap any existing mapping in the area. */
2410	vms_complete_munmap_vmas(&map->vms, &map->mas_detach);
2411
2412	vm_stat_account(mm, vma->vm_flags, map->pglen);
2413	if (vm_flags & VM_LOCKED) {
2414		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2415					is_vm_hugetlb_page(vma) ||
2416					vma == get_gate_vma(mm))
2417			vm_flags_clear(vma, VM_LOCKED_MASK);
2418		else
2419			mm->locked_vm += map->pglen;
2420	}
2421
2422	if (vma->vm_file)
2423		uprobe_mmap(vma);
2424
2425	/*
2426	 * A new (or expanded) vma always gets soft-dirty status.
2427	 * Otherwise the user-space soft-dirty page tracker won't
2428	 * be able to distinguish the case where a vma area was unmapped
2429	 * and then a new one mapped in place (which must be treated as
2430	 * a completely new data area).
2431	 */
2432	vm_flags_set(vma, VM_SOFTDIRTY);
2433
2434	vma_set_page_prot(vma);
2435}
2436
2437unsigned long __mmap_region(struct file *file, unsigned long addr,
2438		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2439		struct list_head *uf)
2440{
2441	struct mm_struct *mm = current->mm;
2442	struct vm_area_struct *vma = NULL;
2443	int error;
2444	VMA_ITERATOR(vmi, mm, addr);
2445	MMAP_STATE(map, mm, &vmi, addr, len, pgoff, vm_flags, file);
2446
2447	error = __mmap_prepare(&map, uf);
2448	if (error)
2449		goto abort_munmap;
2450
2451	/* Attempt to merge with adjacent VMAs... */
2452	if (map.prev || map.next) {
2453		VMG_MMAP_STATE(vmg, &map, /* vma = */ NULL);
2454
2455		vma = vma_merge_new_range(&vmg);
2456	}
2457
2458	/* ...but if we can't, allocate a new VMA. */
2459	if (!vma) {
2460		error = __mmap_new_vma(&map, &vma);
2461		if (error)
2462			goto unacct_error;
2463	}
2464
2465	/* If flags changed, we might be able to merge, so try again. */
2466	if (map.retry_merge) {
2467		struct vm_area_struct *merged;
2468		VMG_MMAP_STATE(vmg, &map, vma);
2469
2470		vma_iter_config(map.vmi, map.addr, map.end);
2471		merged = vma_merge_existing_range(&vmg);
2472		if (merged)
2473			vma = merged;
2474	}
2475
2476	__mmap_complete(&map, vma);
2477
2478	return addr;
2479
2480	/* Accounting was done by __mmap_prepare(). */
2481unacct_error:
2482	if (map.charged)
2483		vm_unacct_memory(map.charged);
2484abort_munmap:
2485	vms_abort_munmap_vmas(&map.vms, &map.mas_detach);
2486	return error;
2487}
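
/*
 * Rough call-flow summary for orientation (simplified; the callers live in
 * mm/mmap.c):
 *
 *	do_mmap()                   resolve prot/flags into vm_flags
 *	  mmap_region()             final checks, then:
 *	    __mmap_region()
 *	      __mmap_prepare()      gather overlapping VMAs, check limits, account
 *	      vma_merge_new_range() try to merge with prev/next
 *	      __mmap_new_vma()      otherwise allocate, map the file and link a VMA
 *	      __mmap_complete()     unmap the old range, update stats, finalise
 */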