v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	mm/mremap.c
  4 *
  5 *	(C) Copyright 1996 Linus Torvalds
  6 *
  7 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  8 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/hugetlb.h>
 13#include <linux/shm.h>
 14#include <linux/ksm.h>
 15#include <linux/mman.h>
 16#include <linux/swap.h>
 17#include <linux/capability.h>
 18#include <linux/fs.h>
 19#include <linux/swapops.h>
 20#include <linux/highmem.h>
 21#include <linux/security.h>
 22#include <linux/syscalls.h>
 23#include <linux/mmu_notifier.h>
 24#include <linux/uaccess.h>
 25#include <linux/mm-arch-hooks.h>
 26#include <linux/userfaultfd_k.h>
 27
 28#include <asm/cacheflush.h>
 29#include <asm/tlbflush.h>
 30
 31#include "internal.h"
 32
 33static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 34{
 35	pgd_t *pgd;
 36	p4d_t *p4d;
 37	pud_t *pud;
 38	pmd_t *pmd;
 39
 40	pgd = pgd_offset(mm, addr);
 41	if (pgd_none_or_clear_bad(pgd))
 42		return NULL;
 43
 44	p4d = p4d_offset(pgd, addr);
 45	if (p4d_none_or_clear_bad(p4d))
 46		return NULL;
 47
 48	pud = pud_offset(p4d, addr);
 49	if (pud_none_or_clear_bad(pud))
 50		return NULL;
 51
 52	pmd = pmd_offset(pud, addr);
 53	if (pmd_none(*pmd))
 54		return NULL;
 55
 56	return pmd;
 57}
 58
 59static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 60			    unsigned long addr)
 61{
 62	pgd_t *pgd;
 63	p4d_t *p4d;
 64	pud_t *pud;
 65	pmd_t *pmd;
 66
 67	pgd = pgd_offset(mm, addr);
 68	p4d = p4d_alloc(mm, pgd, addr);
 69	if (!p4d)
 70		return NULL;
 71	pud = pud_alloc(mm, p4d, addr);
 72	if (!pud)
 73		return NULL;
 74
 75	pmd = pmd_alloc(mm, pud, addr);
 76	if (!pmd)
 77		return NULL;
 78
 79	VM_BUG_ON(pmd_trans_huge(*pmd));
 80
 81	return pmd;
 82}
 83
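A minimal sketch, not part of mm/mremap.c, of how a caller would continue the walk from the pmd returned by get_old_pmd() down to an individual pte, using the same pte_offset_map_lock() helper that move_ptes() uses below (the helper name walk_to_pte is made up for illustration):

/* Sketch only: complete the walk pgd -> p4d -> pud -> pmd -> pte. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
			  spinlock_t **ptlp)
{
	pmd_t *pmd = get_old_pmd(mm, addr);

	if (!pmd)
		return NULL;

	/* Maps the pte page and takes its page-table spinlock. */
	return pte_offset_map_lock(mm, pmd, addr, ptlp);
}

The caller would release the mapping and lock again with pte_unmap_unlock(), as move_ptes() does.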
 84static void take_rmap_locks(struct vm_area_struct *vma)
 85{
 86	if (vma->vm_file)
 87		i_mmap_lock_write(vma->vm_file->f_mapping);
 88	if (vma->anon_vma)
 89		anon_vma_lock_write(vma->anon_vma);
 90}
 91
 92static void drop_rmap_locks(struct vm_area_struct *vma)
 93{
 94	if (vma->anon_vma)
 95		anon_vma_unlock_write(vma->anon_vma);
 96	if (vma->vm_file)
 97		i_mmap_unlock_write(vma->vm_file->f_mapping);
 98}
 99
100static pte_t move_soft_dirty_pte(pte_t pte)
101{
102	/*
103	 * Set soft dirty bit so we can notice
104	 * in userspace the ptes were moved.
105	 */
106#ifdef CONFIG_MEM_SOFT_DIRTY
107	if (pte_present(pte))
108		pte = pte_mksoft_dirty(pte);
109	else if (is_swap_pte(pte))
110		pte = pte_swp_mksoft_dirty(pte);
111#endif
112	return pte;
113}
114
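With CONFIG_MEM_SOFT_DIRTY enabled, the bit set here is what lets userspace notice that ptes were moved: it shows up as bit 55 of the corresponding entry in /proc/&lt;pid&gt;/pagemap. A minimal userspace sketch, not part of mm/mremap.c (the helper name page_soft_dirty is made up for illustration):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Returns 1 if the page containing addr is soft-dirty, 0 if not, -1 on error. */
static int page_soft_dirty(uintptr_t addr)
{
	long page_size = sysconf(_SC_PAGESIZE);
	uint64_t entry;
	int ret = -1;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  (off_t)(addr / page_size) * sizeof(entry)) == sizeof(entry))
		ret = (int)((entry >> 55) & 1);	/* bit 55: pte is soft-dirty */
	close(fd);
	return ret;
}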
115static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
116		unsigned long old_addr, unsigned long old_end,
117		struct vm_area_struct *new_vma, pmd_t *new_pmd,
118		unsigned long new_addr, bool need_rmap_locks)
119{
120	struct mm_struct *mm = vma->vm_mm;
121	pte_t *old_pte, *new_pte, pte;
122	spinlock_t *old_ptl, *new_ptl;
123	bool force_flush = false;
124	unsigned long len = old_end - old_addr;
125
126	/*
127	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
128	 * locks to ensure that rmap will always observe either the old or the
129	 * new ptes. This is the easiest way to avoid races with
130	 * truncate_pagecache(), page migration, etc...
131	 *
132	 * When need_rmap_locks is false, we use other ways to avoid
133	 * such races:
134	 *
135	 * - During exec() shift_arg_pages(), we use a specially tagged vma
136	 *   which rmap call sites look for using vma_is_temporary_stack().
137	 *
138	 * - During mremap(), new_vma is often known to be placed after vma
139	 *   in rmap traversal order. This ensures rmap will always observe
140	 *   either the old pte, or the new pte, or both (the page table locks
141	 *   serialize access to individual ptes, but only rmap traversal
142	 *   order guarantees that we won't miss both the old and new ptes).
143	 */
144	if (need_rmap_locks)
145		take_rmap_locks(vma);
146
147	/*
148	 * We don't have to worry about the ordering of src and dst
149	 * pte locks because exclusive mmap_lock prevents deadlock.
150	 */
151	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
152	new_pte = pte_offset_map(new_pmd, new_addr);
153	new_ptl = pte_lockptr(mm, new_pmd);
154	if (new_ptl != old_ptl)
155		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
156	flush_tlb_batched_pending(vma->vm_mm);
157	arch_enter_lazy_mmu_mode();
158
159	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
160				   new_pte++, new_addr += PAGE_SIZE) {
161		if (pte_none(*old_pte))
162			continue;
163
164		pte = ptep_get_and_clear(mm, old_addr, old_pte);
165		/*
166		 * If we are remapping a valid PTE, make sure
167		 * to flush TLB before we drop the PTL for the
168		 * PTE.
169		 *
170		 * NOTE! Both old and new PTL matter: the old one
171		 * for racing with page_mkclean(), the new one to
172		 * make sure the physical page stays valid until
173		 * the TLB entry for the old mapping has been
174		 * flushed.
175		 */
176		if (pte_present(pte))
177			force_flush = true;
178		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
179		pte = move_soft_dirty_pte(pte);
180		set_pte_at(mm, new_addr, new_pte, pte);
181	}
182
183	arch_leave_lazy_mmu_mode();
184	if (force_flush)
185		flush_tlb_range(vma, old_end - len, old_end);
186	if (new_ptl != old_ptl)
187		spin_unlock(new_ptl);
188	pte_unmap(new_pte - 1);
189	pte_unmap_unlock(old_pte - 1, old_ptl);
190	if (need_rmap_locks)
191		drop_rmap_locks(vma);
192}
193
194#ifdef CONFIG_HAVE_MOVE_PMD
195static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
196		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
197{
198	spinlock_t *old_ptl, *new_ptl;
199	struct mm_struct *mm = vma->vm_mm;
200	pmd_t pmd;
201
202	/*
203	 * The destination pmd shouldn't be established, free_pgtables()
204	 * should have released it.
205	 *
206	 * However, there's a case during execve() where we use mremap
207	 * to move the initial stack, and in that case the target area
208	 * may overlap the source area (always moving down).
209	 *
210	 * If everything is PMD-aligned, that works fine, as moving
211	 * each pmd down will clear the source pmd. But if we first
212	 * have a few 4kB-only pages that get moved down, and then
213	 * hit the "now the rest is PMD-aligned, let's do everything
214	 * one pmd at a time", we will still have the old (now empty
215	 * of any 4kB pages, but still there) PMD in the page table
216	 * tree.
217	 *
218	 * Warn on it once - because we really should try to figure
219	 * out how to do this better - but then say "I won't move
220	 * this pmd".
221	 *
222	 * One alternative might be to just unmap the target pmd at
223	 * this point, and verify that it really is empty. We'll see.
224	 */
225	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
226		return false;
227
228	/*
229	 * We don't have to worry about the ordering of src and dst
230	 * ptlocks because exclusive mmap_lock prevents deadlock.
231	 */
232	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
233	new_ptl = pmd_lockptr(mm, new_pmd);
234	if (new_ptl != old_ptl)
235		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
236
237	/* Clear the pmd */
238	pmd = *old_pmd;
239	pmd_clear(old_pmd);
240
241	VM_BUG_ON(!pmd_none(*new_pmd));
242
243	/* Set the new pmd */
244	set_pmd_at(mm, new_addr, new_pmd, pmd);
245	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
246	if (new_ptl != old_ptl)
247		spin_unlock(new_ptl);
248	spin_unlock(old_ptl);
249
250	return true;
251}
252#endif
253
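For a sense of what move_normal_pmd() buys (a rough example, assuming the common x86-64 configuration of 4 KiB base pages and 2 MiB PMDs): when the extent is PMD-sized and both addresses are suitably aligned, moving one pmd entry replaces 2 MiB / 4 KiB = 512 individual pte moves in move_ptes(), and the whole 2 MiB range is handled under a single pair of pmd locks with one flush_tlb_range() call.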
254unsigned long move_page_tables(struct vm_area_struct *vma,
255		unsigned long old_addr, struct vm_area_struct *new_vma,
256		unsigned long new_addr, unsigned long len,
257		bool need_rmap_locks)
258{
259	unsigned long extent, next, old_end;
260	struct mmu_notifier_range range;
261	pmd_t *old_pmd, *new_pmd;
262
263	old_end = old_addr + len;
264	flush_cache_range(vma, old_addr, old_end);
265
266	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
267				old_addr, old_end);
268	mmu_notifier_invalidate_range_start(&range);
269
270	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
271		cond_resched();
272		next = (old_addr + PMD_SIZE) & PMD_MASK;
273		/* even if next overflowed, extent below will be ok */
274		extent = next - old_addr;
275		if (extent > old_end - old_addr)
276			extent = old_end - old_addr;
277		next = (new_addr + PMD_SIZE) & PMD_MASK;
278		if (extent > next - new_addr)
279			extent = next - new_addr;
280		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
281		if (!old_pmd)
282			continue;
283		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
284		if (!new_pmd)
285			break;
286		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
287			if (extent == HPAGE_PMD_SIZE) {
288				bool moved;
289				/* See comment in move_ptes() */
290				if (need_rmap_locks)
291					take_rmap_locks(vma);
292				moved = move_huge_pmd(vma, old_addr, new_addr,
293						      old_pmd, new_pmd);
294				if (need_rmap_locks)
295					drop_rmap_locks(vma);
296				if (moved)
297					continue;
298			}
299			split_huge_pmd(vma, old_pmd, old_addr);
300			if (pmd_trans_unstable(old_pmd))
301				continue;
302		} else if (extent == PMD_SIZE) {
303#ifdef CONFIG_HAVE_MOVE_PMD
304			/*
305			 * If the extent is PMD-sized, try to speed the move by
306			 * moving at the PMD level if possible.
307			 */
308			bool moved;
309
310			if (need_rmap_locks)
311				take_rmap_locks(vma);
312			moved = move_normal_pmd(vma, old_addr, new_addr,
313						old_pmd, new_pmd);
314			if (need_rmap_locks)
315				drop_rmap_locks(vma);
316			if (moved)
317				continue;
318#endif
319		}
320
321		if (pte_alloc(new_vma->vm_mm, new_pmd))
322			break;
323		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
324			  new_pmd, new_addr, need_rmap_locks);
325	}
326
327	mmu_notifier_invalidate_range_end(&range);
328
329	return len + old_addr - old_end;	/* how much done */
330}
331
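A sketch of the per-iteration step computed at the top of the copy loop above, pulled out as a standalone helper purely for illustration (the name mremap_step is made up): the extent never crosses a PMD boundary of either the source or the destination, and never runs past old_end.

/*
 * Sketch only. Example with 4 KiB pages and 2 MiB PMDs:
 * old_addr = 0x3ff000, new_addr = 0x5ff000, old_end = 0x800000
 * gives 0x1000 on the first iteration, then 0x200000 per iteration.
 */
static unsigned long mremap_step(unsigned long old_addr, unsigned long old_end,
				 unsigned long new_addr)
{
	unsigned long extent = ((old_addr + PMD_SIZE) & PMD_MASK) - old_addr;

	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	if (extent > ((new_addr + PMD_SIZE) & PMD_MASK) - new_addr)
		extent = ((new_addr + PMD_SIZE) & PMD_MASK) - new_addr;
	return extent;
}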
332static unsigned long move_vma(struct vm_area_struct *vma,
333		unsigned long old_addr, unsigned long old_len,
334		unsigned long new_len, unsigned long new_addr,
335		bool *locked, unsigned long flags,
336		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
337{
338	struct mm_struct *mm = vma->vm_mm;
339	struct vm_area_struct *new_vma;
340	unsigned long vm_flags = vma->vm_flags;
341	unsigned long new_pgoff;
342	unsigned long moved_len;
343	unsigned long excess = 0;
344	unsigned long hiwater_vm;
345	int split = 0;
346	int err;
347	bool need_rmap_locks;
348
349	/*
350	 * We'd prefer to avoid failure later on in do_munmap:
351	 * which may split one vma into three before unmapping.
352	 */
353	if (mm->map_count >= sysctl_max_map_count - 3)
354		return -ENOMEM;
355
356	/*
357	 * Advise KSM to break any KSM pages in the area to be moved:
358	 * it would be confusing if they were to turn up at the new
359	 * location, where they happen to coincide with different KSM
360	 * pages recently unmapped.  But leave vma->vm_flags as it was,
361	 * so KSM can come around to merge on vma and new_vma afterwards.
362	 */
363	err = ksm_madvise(vma, old_addr, old_addr + old_len,
364						MADV_UNMERGEABLE, &vm_flags);
365	if (err)
366		return err;
367
368	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
369	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
370			   &need_rmap_locks);
371	if (!new_vma)
372		return -ENOMEM;
373
374	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
375				     need_rmap_locks);
376	if (moved_len < old_len) {
377		err = -ENOMEM;
378	} else if (vma->vm_ops && vma->vm_ops->mremap) {
379		err = vma->vm_ops->mremap(new_vma);
380	}
381
382	if (unlikely(err)) {
383		/*
384		 * On error, move entries back from new area to old,
385		 * which will succeed since page tables still there,
386		 * and then proceed to unmap new area instead of old.
387		 */
388		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
389				 true);
390		vma = new_vma;
391		old_len = new_len;
392		old_addr = new_addr;
393		new_addr = err;
394	} else {
395		mremap_userfaultfd_prep(new_vma, uf);
396		arch_remap(mm, old_addr, old_addr + old_len,
397			   new_addr, new_addr + new_len);
398	}
399
400	/* Conceal VM_ACCOUNT so old reservation is not undone */
401	if (vm_flags & VM_ACCOUNT) {
402		vma->vm_flags &= ~VM_ACCOUNT;
403		excess = vma->vm_end - vma->vm_start - old_len;
404		if (old_addr > vma->vm_start &&
405		    old_addr + old_len < vma->vm_end)
406			split = 1;
407	}
408
409	/*
410	 * If we failed to move page tables we still do total_vm increment
411	 * since do_munmap() will decrement it by old_len == new_len.
412	 *
413	 * Since total_vm is about to be raised artificially high for a
414	 * moment, we need to restore high watermark afterwards: if stats
415	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
416	 * If this were a serious issue, we'd add a flag to do_munmap().
417	 */
418	hiwater_vm = mm->hiwater_vm;
419	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
420
421	/* Tell pfnmap has moved from this vma */
422	if (unlikely(vma->vm_flags & VM_PFNMAP))
423		untrack_pfn_moved(vma);
424
425	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
426		if (vm_flags & VM_ACCOUNT) {
427			/* Always put back VM_ACCOUNT since we won't unmap */
428			vma->vm_flags |= VM_ACCOUNT;
429
430			vm_acct_memory(new_len >> PAGE_SHIFT);
431		}
432
433		/*
434		 * VMAs can actually be merged back together in copy_vma
435		 * calling merge_vma. This can happen with anonymous vmas
436		 * which have not yet been faulted, so if we were to consider
437		 * this VMA split we'll end up adding VM_ACCOUNT on the
438		 * next VMA, which is completely unrelated if this VMA
439		 * was re-merged.
440		 */
441		if (split && new_vma == vma)
442			split = 0;
443
444		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
445		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
446
447		/* Because we won't unmap we don't need to touch locked_vm */
448		goto out;
449	}
450
451	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
452		/* OOM: unable to split vma, just get accounts right */
453		vm_unacct_memory(excess >> PAGE_SHIFT);
454		excess = 0;
455	}
456
457	if (vm_flags & VM_LOCKED) {
458		mm->locked_vm += new_len >> PAGE_SHIFT;
459		*locked = true;
460	}
461out:
462	mm->hiwater_vm = hiwater_vm;
463
464	/* Restore VM_ACCOUNT if one or two pieces of vma left */
465	if (excess) {
466		vma->vm_flags |= VM_ACCOUNT;
467		if (split)
468			vma->vm_next->vm_flags |= VM_ACCOUNT;
469	}
470
471	return new_addr;
472}
473
474static struct vm_area_struct *vma_to_resize(unsigned long addr,
475	unsigned long old_len, unsigned long new_len, unsigned long flags,
476	unsigned long *p)
477{
478	struct mm_struct *mm = current->mm;
479	struct vm_area_struct *vma = find_vma(mm, addr);
480	unsigned long pgoff;
481
482	if (!vma || vma->vm_start > addr)
483		return ERR_PTR(-EFAULT);
484
485	/*
486	 * !old_len is a special case where an attempt is made to 'duplicate'
487	 * a mapping.  This makes no sense for private mappings as it will
488	 * instead create a fresh/new mapping unrelated to the original.  This
489	 * is contrary to the basic idea of mremap which creates new mappings
490	 * based on the original.  There are no known use cases for this
491	 * behavior.  As a result, fail such attempts.
492	 */
493	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
494		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
495		return ERR_PTR(-EINVAL);
496	}
497
498	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
499			vma->vm_flags & VM_SHARED))
500		return ERR_PTR(-EINVAL);
501
502	if (is_vm_hugetlb_page(vma))
503		return ERR_PTR(-EINVAL);
504
505	/* We can't remap across vm area boundaries */
506	if (old_len > vma->vm_end - addr)
507		return ERR_PTR(-EFAULT);
508
509	if (new_len == old_len)
510		return vma;
511
512	/* Need to be careful about a growing mapping */
513	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
514	pgoff += vma->vm_pgoff;
515	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
516		return ERR_PTR(-EINVAL);
517
518	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
519		return ERR_PTR(-EFAULT);
520
521	if (vma->vm_flags & VM_LOCKED) {
522		unsigned long locked, lock_limit;
523		locked = mm->locked_vm << PAGE_SHIFT;
524		lock_limit = rlimit(RLIMIT_MEMLOCK);
525		locked += new_len - old_len;
526		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
527			return ERR_PTR(-EAGAIN);
528	}
529
530	if (!may_expand_vm(mm, vma->vm_flags,
531				(new_len - old_len) >> PAGE_SHIFT))
532		return ERR_PTR(-ENOMEM);
533
534	if (vma->vm_flags & VM_ACCOUNT) {
535		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
536		if (security_vm_enough_memory_mm(mm, charged))
537			return ERR_PTR(-ENOMEM);
538		*p = charged;
539	}
540
541	return vma;
542}
543
544static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
545		unsigned long new_addr, unsigned long new_len, bool *locked,
546		unsigned long flags, struct vm_userfaultfd_ctx *uf,
547		struct list_head *uf_unmap_early,
548		struct list_head *uf_unmap)
549{
550	struct mm_struct *mm = current->mm;
551	struct vm_area_struct *vma;
552	unsigned long ret = -EINVAL;
553	unsigned long charged = 0;
554	unsigned long map_flags = 0;
555
556	if (offset_in_page(new_addr))
557		goto out;
558
559	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
560		goto out;
561
562	/* Ensure the old/new locations do not overlap */
563	if (addr + old_len > new_addr && new_addr + new_len > addr)
564		goto out;
565
566	/*
567	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
568	 * it will bail out at the very beginning.
569	 * That is a problem if we have already unmapped the regions here
570	 * (new_addr, and old_addr), because userspace will not know the
571	 * state of the vma's after it gets -ENOMEM.
572	 * So, to avoid such a scenario we can pre-compute whether the whole
573	 * operation has a high chance to succeed map-wise.
574	 * The worst case is when both vma's (new_addr and old_addr) get
575	 * split in 3 before unmapping them.
576	 * That means 2 more maps (1 for each) on top of the ones we already hold.
577	 * Check whether current map count plus 2 still leads us to 4 maps below
578	 * the threshold, otherwise return -ENOMEM here to be safe.
579	 */
580	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
581		return -ENOMEM;
582
583	if (flags & MREMAP_FIXED) {
584		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
585		if (ret)
586			goto out;
587	}
588
589	if (old_len >= new_len) {
590		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
591		if (ret && old_len != new_len)
592			goto out;
593		old_len = new_len;
594	}
595
596	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
597	if (IS_ERR(vma)) {
598		ret = PTR_ERR(vma);
599		goto out;
600	}
601
602	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
603	if (flags & MREMAP_DONTUNMAP &&
604		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
605		ret = -ENOMEM;
606		goto out;
607	}
608
609	if (flags & MREMAP_FIXED)
610		map_flags |= MAP_FIXED;
611
612	if (vma->vm_flags & VM_MAYSHARE)
613		map_flags |= MAP_SHARED;
614
615	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
616				((addr - vma->vm_start) >> PAGE_SHIFT),
617				map_flags);
618	if (IS_ERR_VALUE(ret))
619		goto out1;
620
621	/* We got a new mapping */
622	if (!(flags & MREMAP_FIXED))
623		new_addr = ret;
624
625	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
626		       uf_unmap);
627
628	if (!(offset_in_page(ret)))
629		goto out;
630
631out1:
632	vm_unacct_memory(charged);
633
634out:
635	return ret;
636}
637
638static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
639{
640	unsigned long end = vma->vm_end + delta;
641	if (end < vma->vm_end) /* overflow */
642		return 0;
643	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
644		return 0;
645	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
646			      0, MAP_FIXED) & ~PAGE_MASK)
647		return 0;
648	return 1;
649}
650
651/*
652 * Expand (or shrink) an existing mapping, potentially moving it at the
653 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
654 *
655 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
656 * This option implies MREMAP_MAYMOVE.
657 */
658SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
659		unsigned long, new_len, unsigned long, flags,
660		unsigned long, new_addr)
661{
662	struct mm_struct *mm = current->mm;
663	struct vm_area_struct *vma;
664	unsigned long ret = -EINVAL;
665	unsigned long charged = 0;
666	bool locked = false;
667	bool downgraded = false;
668	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
669	LIST_HEAD(uf_unmap_early);
670	LIST_HEAD(uf_unmap);
671
672	/*
673	 * There is a deliberate asymmetry here: we strip the pointer tag
674	 * from the old address but leave the new address alone. This is
675	 * for consistency with mmap(), where we prevent the creation of
676	 * aliasing mappings in userspace by leaving the tag bits of the
677	 * mapping address intact. A non-zero tag will cause the subsequent
678	 * range checks to reject the address as invalid.
679	 *
680	 * See Documentation/arm64/tagged-address-abi.rst for more information.
681	 */
682	addr = untagged_addr(addr);
683
684	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
685		return ret;
686
687	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
688		return ret;
689
690	/*
691	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
692	 * in the process.
693	 */
694	if (flags & MREMAP_DONTUNMAP &&
695			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
696		return ret;
697
698
699	if (offset_in_page(addr))
700		return ret;
701
702	old_len = PAGE_ALIGN(old_len);
703	new_len = PAGE_ALIGN(new_len);
704
705	/*
706	 * We allow a zero old-len as a special case
707	 * for DOS-emu "duplicate shm area" thing. But
708	 * a zero new-len is nonsensical.
709	 */
710	if (!new_len)
711		return ret;
712
713	if (mmap_write_lock_killable(current->mm))
714		return -EINTR;
715
716	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
717		ret = mremap_to(addr, old_len, new_addr, new_len,
718				&locked, flags, &uf, &uf_unmap_early,
719				&uf_unmap);
720		goto out;
721	}
722
723	/*
724	 * Always allow a shrinking remap: that just unmaps
725	 * the unnecessary pages..
726	 * __do_munmap does all the needed commit accounting, and
727	 * downgrades mmap_lock to read if so directed.
728	 */
729	if (old_len >= new_len) {
730		int retval;
731
732		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
733				  &uf_unmap, true);
734		if (retval < 0 && old_len != new_len) {
735			ret = retval;
736			goto out;
737		/* Returning 1 indicates mmap_lock is downgraded to read. */
738		} else if (retval == 1)
739			downgraded = true;
740		ret = addr;
741		goto out;
742	}
743
744	/*
745	 * Ok, we need to grow..
746	 */
747	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
748	if (IS_ERR(vma)) {
749		ret = PTR_ERR(vma);
750		goto out;
751	}
752
753	/* old_len exactly to the end of the area..
754	 */
755	if (old_len == vma->vm_end - addr) {
756		/* can we just expand the current mapping? */
757		if (vma_expandable(vma, new_len - old_len)) {
758			int pages = (new_len - old_len) >> PAGE_SHIFT;
759
760			if (vma_adjust(vma, vma->vm_start, addr + new_len,
761				       vma->vm_pgoff, NULL)) {
762				ret = -ENOMEM;
763				goto out;
764			}
765
766			vm_stat_account(mm, vma->vm_flags, pages);
767			if (vma->vm_flags & VM_LOCKED) {
768				mm->locked_vm += pages;
769				locked = true;
770				new_addr = addr;
771			}
772			ret = addr;
773			goto out;
774		}
775	}
776
777	/*
778	 * We weren't able to just expand or shrink the area,
779	 * we need to create a new one and move it..
780	 */
781	ret = -ENOMEM;
782	if (flags & MREMAP_MAYMOVE) {
783		unsigned long map_flags = 0;
784		if (vma->vm_flags & VM_MAYSHARE)
785			map_flags |= MAP_SHARED;
786
787		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
788					vma->vm_pgoff +
789					((addr - vma->vm_start) >> PAGE_SHIFT),
790					map_flags);
791		if (IS_ERR_VALUE(new_addr)) {
792			ret = new_addr;
793			goto out;
794		}
795
796		ret = move_vma(vma, addr, old_len, new_len, new_addr,
797			       &locked, flags, &uf, &uf_unmap);
798	}
799out:
800	if (offset_in_page(ret)) {
801		vm_unacct_memory(charged);
802		locked = false;
803	}
804	if (downgraded)
805		mmap_read_unlock(current->mm);
806	else
807		mmap_write_unlock(current->mm);
808	if (locked && new_len > old_len)
809		mm_populate(new_addr + old_len, new_len - old_len);
810	userfaultfd_unmap_complete(mm, &uf_unmap_early);
811	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
812	userfaultfd_unmap_complete(mm, &uf_unmap);
813	return ret;
814}
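For context, a minimal userspace sketch of the mremap(2) call that enters the syscall defined above (illustrative only, with error handling kept to a minimum; _GNU_SOURCE is needed for the mremap() prototype and the MREMAP_* flags):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 2 * 4096;

	/* Anonymous private mapping that we will grow. */
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");

	/* Grow the mapping; the kernel may relocate it, hence MREMAP_MAYMOVE. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	/* The contents follow the mapping to its (possibly) new address. */
	printf("%s (moved: %s)\n", q, q == p ? "no" : "yes");
	munmap(q, new_len);
	return 0;
}

If the mapping cannot be expanded in place, the kernel relocates it (move_vma() above) and returns the new address; without MREMAP_MAYMOVE such a call fails with ENOMEM instead. The older v4.10.11 version of the same file follows for comparison.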
v4.10.11
  1/*
  2 *	mm/mremap.c
  3 *
  4 *	(C) Copyright 1996 Linus Torvalds
  5 *
  6 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  7 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/hugetlb.h>
 12#include <linux/shm.h>
 13#include <linux/ksm.h>
 14#include <linux/mman.h>
 15#include <linux/swap.h>
 16#include <linux/capability.h>
 17#include <linux/fs.h>
 18#include <linux/swapops.h>
 19#include <linux/highmem.h>
 20#include <linux/security.h>
 21#include <linux/syscalls.h>
 22#include <linux/mmu_notifier.h>
 23#include <linux/uaccess.h>
 24#include <linux/mm-arch-hooks.h>
 25
 26#include <asm/cacheflush.h>
 27#include <asm/tlbflush.h>
 28
 29#include "internal.h"
 30
 31static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 32{
 33	pgd_t *pgd;
 34	pud_t *pud;
 35	pmd_t *pmd;
 36
 37	pgd = pgd_offset(mm, addr);
 38	if (pgd_none_or_clear_bad(pgd))
 39		return NULL;
 40
 41	pud = pud_offset(pgd, addr);
 42	if (pud_none_or_clear_bad(pud))
 43		return NULL;
 44
 45	pmd = pmd_offset(pud, addr);
 46	if (pmd_none(*pmd))
 47		return NULL;
 48
 49	return pmd;
 50}
 51
 52static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 53			    unsigned long addr)
 54{
 55	pgd_t *pgd;
 56	pud_t *pud;
 57	pmd_t *pmd;
 58
 59	pgd = pgd_offset(mm, addr);
 60	pud = pud_alloc(mm, pgd, addr);
 61	if (!pud)
 62		return NULL;
 63
 64	pmd = pmd_alloc(mm, pud, addr);
 65	if (!pmd)
 66		return NULL;
 67
 68	VM_BUG_ON(pmd_trans_huge(*pmd));
 69
 70	return pmd;
 71}
 72
 73static void take_rmap_locks(struct vm_area_struct *vma)
 74{
 75	if (vma->vm_file)
 76		i_mmap_lock_write(vma->vm_file->f_mapping);
 77	if (vma->anon_vma)
 78		anon_vma_lock_write(vma->anon_vma);
 79}
 80
 81static void drop_rmap_locks(struct vm_area_struct *vma)
 82{
 83	if (vma->anon_vma)
 84		anon_vma_unlock_write(vma->anon_vma);
 85	if (vma->vm_file)
 86		i_mmap_unlock_write(vma->vm_file->f_mapping);
 87}
 88
 89static pte_t move_soft_dirty_pte(pte_t pte)
 90{
 91	/*
 92	 * Set soft dirty bit so we can notice
 93	 * in userspace the ptes were moved.
 94	 */
 95#ifdef CONFIG_MEM_SOFT_DIRTY
 96	if (pte_present(pte))
 97		pte = pte_mksoft_dirty(pte);
 98	else if (is_swap_pte(pte))
 99		pte = pte_swp_mksoft_dirty(pte);
100#endif
101	return pte;
102}
103
104static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
105		unsigned long old_addr, unsigned long old_end,
106		struct vm_area_struct *new_vma, pmd_t *new_pmd,
107		unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
108{
109	struct mm_struct *mm = vma->vm_mm;
110	pte_t *old_pte, *new_pte, pte;
111	spinlock_t *old_ptl, *new_ptl;
112	bool force_flush = false;
113	unsigned long len = old_end - old_addr;
114
115	/*
116	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
117	 * locks to ensure that rmap will always observe either the old or the
118	 * new ptes. This is the easiest way to avoid races with
119	 * truncate_pagecache(), page migration, etc...
120	 *
121	 * When need_rmap_locks is false, we use other ways to avoid
122	 * such races:
123	 *
124	 * - During exec() shift_arg_pages(), we use a specially tagged vma
125	 *   which rmap call sites look for using is_vma_temporary_stack().
126	 *
127	 * - During mremap(), new_vma is often known to be placed after vma
128	 *   in rmap traversal order. This ensures rmap will always observe
129	 *   either the old pte, or the new pte, or both (the page table locks
130	 *   serialize access to individual ptes, but only rmap traversal
131	 *   order guarantees that we won't miss both the old and new ptes).
132	 */
133	if (need_rmap_locks)
134		take_rmap_locks(vma);
135
136	/*
137	 * We don't have to worry about the ordering of src and dst
138	 * pte locks because exclusive mmap_sem prevents deadlock.
139	 */
140	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
141	new_pte = pte_offset_map(new_pmd, new_addr);
142	new_ptl = pte_lockptr(mm, new_pmd);
143	if (new_ptl != old_ptl)
144		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
145	arch_enter_lazy_mmu_mode();
146
147	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
148				   new_pte++, new_addr += PAGE_SIZE) {
149		if (pte_none(*old_pte))
150			continue;
151
152		pte = ptep_get_and_clear(mm, old_addr, old_pte);
153		/*
154		 * If we are remapping a dirty PTE, make sure
155		 * to flush TLB before we drop the PTL for the
156		 * old PTE or we may race with page_mkclean().
157		 *
158		 * This check has to be done after we removed the
159		 * old PTE from page tables or another thread may
160		 * dirty it after the check and before the removal.
161		 */
162		if (pte_present(pte) && pte_dirty(pte))
163			force_flush = true;
164		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
165		pte = move_soft_dirty_pte(pte);
166		set_pte_at(mm, new_addr, new_pte, pte);
167	}
168
169	arch_leave_lazy_mmu_mode();
170	if (new_ptl != old_ptl)
171		spin_unlock(new_ptl);
172	pte_unmap(new_pte - 1);
173	if (force_flush)
174		flush_tlb_range(vma, old_end - len, old_end);
175	else
176		*need_flush = true;
177	pte_unmap_unlock(old_pte - 1, old_ptl);
178	if (need_rmap_locks)
179		drop_rmap_locks(vma);
180}
181
182#define LATENCY_LIMIT	(64 * PAGE_SIZE)
183
184unsigned long move_page_tables(struct vm_area_struct *vma,
185		unsigned long old_addr, struct vm_area_struct *new_vma,
186		unsigned long new_addr, unsigned long len,
187		bool need_rmap_locks)
188{
189	unsigned long extent, next, old_end;
190	pmd_t *old_pmd, *new_pmd;
191	bool need_flush = false;
192	unsigned long mmun_start;	/* For mmu_notifiers */
193	unsigned long mmun_end;		/* For mmu_notifiers */
194
195	old_end = old_addr + len;
196	flush_cache_range(vma, old_addr, old_end);
197
198	mmun_start = old_addr;
199	mmun_end   = old_end;
200	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
201
202	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
203		cond_resched();
204		next = (old_addr + PMD_SIZE) & PMD_MASK;
205		/* even if next overflowed, extent below will be ok */
206		extent = next - old_addr;
207		if (extent > old_end - old_addr)
208			extent = old_end - old_addr;
209		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
210		if (!old_pmd)
211			continue;
212		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
213		if (!new_pmd)
214			break;
215		if (pmd_trans_huge(*old_pmd)) {
216			if (extent == HPAGE_PMD_SIZE) {
217				bool moved;
218				/* See comment in move_ptes() */
219				if (need_rmap_locks)
220					take_rmap_locks(vma);
221				moved = move_huge_pmd(vma, old_addr, new_addr,
222						    old_end, old_pmd, new_pmd,
223						    &need_flush);
224				if (need_rmap_locks)
225					drop_rmap_locks(vma);
226				if (moved)
227					continue;
228			}
229			split_huge_pmd(vma, old_pmd, old_addr);
230			if (pmd_trans_unstable(old_pmd))
231				continue;
232		}
233		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
234			break;
235		next = (new_addr + PMD_SIZE) & PMD_MASK;
236		if (extent > next - new_addr)
237			extent = next - new_addr;
238		if (extent > LATENCY_LIMIT)
239			extent = LATENCY_LIMIT;
240		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
241			  new_pmd, new_addr, need_rmap_locks, &need_flush);
242	}
243	if (need_flush)
244		flush_tlb_range(vma, old_end-len, old_addr);
245
246	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
247
248	return len + old_addr - old_end;	/* how much done */
249}
250
251static unsigned long move_vma(struct vm_area_struct *vma,
252		unsigned long old_addr, unsigned long old_len,
253		unsigned long new_len, unsigned long new_addr, bool *locked)
254{
255	struct mm_struct *mm = vma->vm_mm;
256	struct vm_area_struct *new_vma;
257	unsigned long vm_flags = vma->vm_flags;
258	unsigned long new_pgoff;
259	unsigned long moved_len;
260	unsigned long excess = 0;
261	unsigned long hiwater_vm;
262	int split = 0;
263	int err;
264	bool need_rmap_locks;
265
266	/*
267	 * We'd prefer to avoid failure later on in do_munmap:
268	 * which may split one vma into three before unmapping.
269	 */
270	if (mm->map_count >= sysctl_max_map_count - 3)
271		return -ENOMEM;
272
273	/*
274	 * Advise KSM to break any KSM pages in the area to be moved:
275	 * it would be confusing if they were to turn up at the new
276	 * location, where they happen to coincide with different KSM
277	 * pages recently unmapped.  But leave vma->vm_flags as it was,
278	 * so KSM can come around to merge on vma and new_vma afterwards.
279	 */
280	err = ksm_madvise(vma, old_addr, old_addr + old_len,
281						MADV_UNMERGEABLE, &vm_flags);
282	if (err)
283		return err;
284
285	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
286	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
287			   &need_rmap_locks);
288	if (!new_vma)
289		return -ENOMEM;
290
291	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
292				     need_rmap_locks);
293	if (moved_len < old_len) {
294		err = -ENOMEM;
295	} else if (vma->vm_ops && vma->vm_ops->mremap) {
296		err = vma->vm_ops->mremap(new_vma);
297	}
298
299	if (unlikely(err)) {
300		/*
301		 * On error, move entries back from new area to old,
302		 * which will succeed since page tables still there,
303		 * and then proceed to unmap new area instead of old.
304		 */
305		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
306				 true);
307		vma = new_vma;
308		old_len = new_len;
309		old_addr = new_addr;
310		new_addr = err;
311	} else {
312		arch_remap(mm, old_addr, old_addr + old_len,
313			   new_addr, new_addr + new_len);
314	}
315
316	/* Conceal VM_ACCOUNT so old reservation is not undone */
317	if (vm_flags & VM_ACCOUNT) {
318		vma->vm_flags &= ~VM_ACCOUNT;
319		excess = vma->vm_end - vma->vm_start - old_len;
320		if (old_addr > vma->vm_start &&
321		    old_addr + old_len < vma->vm_end)
322			split = 1;
323	}
324
325	/*
326	 * If we failed to move page tables we still do total_vm increment
327	 * since do_munmap() will decrement it by old_len == new_len.
328	 *
329	 * Since total_vm is about to be raised artificially high for a
330	 * moment, we need to restore high watermark afterwards: if stats
331	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
332	 * If this were a serious issue, we'd add a flag to do_munmap().
333	 */
334	hiwater_vm = mm->hiwater_vm;
335	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
336
337	/* Tell pfnmap has moved from this vma */
338	if (unlikely(vma->vm_flags & VM_PFNMAP))
339		untrack_pfn_moved(vma);
340
341	if (do_munmap(mm, old_addr, old_len) < 0) {
342		/* OOM: unable to split vma, just get accounts right */
343		vm_unacct_memory(excess >> PAGE_SHIFT);
344		excess = 0;
345	}
346	mm->hiwater_vm = hiwater_vm;
347
348	/* Restore VM_ACCOUNT if one or two pieces of vma left */
349	if (excess) {
350		vma->vm_flags |= VM_ACCOUNT;
351		if (split)
352			vma->vm_next->vm_flags |= VM_ACCOUNT;
353	}
354
355	if (vm_flags & VM_LOCKED) {
356		mm->locked_vm += new_len >> PAGE_SHIFT;
357		*locked = true;
358	}
359
360	return new_addr;
361}
362
363static struct vm_area_struct *vma_to_resize(unsigned long addr,
364	unsigned long old_len, unsigned long new_len, unsigned long *p)
365{
366	struct mm_struct *mm = current->mm;
367	struct vm_area_struct *vma = find_vma(mm, addr);
368	unsigned long pgoff;
369
370	if (!vma || vma->vm_start > addr)
371		return ERR_PTR(-EFAULT);
372
373	if (is_vm_hugetlb_page(vma))
374		return ERR_PTR(-EINVAL);
375
376	/* We can't remap across vm area boundaries */
377	if (old_len > vma->vm_end - addr)
378		return ERR_PTR(-EFAULT);
379
380	if (new_len == old_len)
381		return vma;
382
383	/* Need to be careful about a growing mapping */
384	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
385	pgoff += vma->vm_pgoff;
386	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
387		return ERR_PTR(-EINVAL);
388
389	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
390		return ERR_PTR(-EFAULT);
391
392	if (vma->vm_flags & VM_LOCKED) {
393		unsigned long locked, lock_limit;
394		locked = mm->locked_vm << PAGE_SHIFT;
395		lock_limit = rlimit(RLIMIT_MEMLOCK);
396		locked += new_len - old_len;
397		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
398			return ERR_PTR(-EAGAIN);
399	}
400
401	if (!may_expand_vm(mm, vma->vm_flags,
402				(new_len - old_len) >> PAGE_SHIFT))
403		return ERR_PTR(-ENOMEM);
404
405	if (vma->vm_flags & VM_ACCOUNT) {
406		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
407		if (security_vm_enough_memory_mm(mm, charged))
408			return ERR_PTR(-ENOMEM);
409		*p = charged;
410	}
411
412	return vma;
413}
414
415static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
416		unsigned long new_addr, unsigned long new_len, bool *locked)
417{
418	struct mm_struct *mm = current->mm;
419	struct vm_area_struct *vma;
420	unsigned long ret = -EINVAL;
421	unsigned long charged = 0;
422	unsigned long map_flags;
423
424	if (offset_in_page(new_addr))
425		goto out;
426
427	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
428		goto out;
429
430	/* Ensure the old/new locations do not overlap */
431	if (addr + old_len > new_addr && new_addr + new_len > addr)
432		goto out;
433
434	ret = do_munmap(mm, new_addr, new_len);
435	if (ret)
436		goto out;
437
438	if (old_len >= new_len) {
439		ret = do_munmap(mm, addr+new_len, old_len - new_len);
440		if (ret && old_len != new_len)
441			goto out;
442		old_len = new_len;
443	}
444
445	vma = vma_to_resize(addr, old_len, new_len, &charged);
446	if (IS_ERR(vma)) {
447		ret = PTR_ERR(vma);
448		goto out;
449	}
450
451	map_flags = MAP_FIXED;
452	if (vma->vm_flags & VM_MAYSHARE)
453		map_flags |= MAP_SHARED;
454
455	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
456				((addr - vma->vm_start) >> PAGE_SHIFT),
457				map_flags);
458	if (offset_in_page(ret))
459		goto out1;
460
461	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
462	if (!(offset_in_page(ret)))
463		goto out;
464out1:
465	vm_unacct_memory(charged);
466
467out:
468	return ret;
469}
470
471static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
472{
473	unsigned long end = vma->vm_end + delta;
474	if (end < vma->vm_end) /* overflow */
475		return 0;
476	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
477		return 0;
478	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
479			      0, MAP_FIXED) & ~PAGE_MASK)
480		return 0;
481	return 1;
482}
483
484/*
485 * Expand (or shrink) an existing mapping, potentially moving it at the
486 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
487 *
488 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
489 * This option implies MREMAP_MAYMOVE.
490 */
491SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
492		unsigned long, new_len, unsigned long, flags,
493		unsigned long, new_addr)
494{
495	struct mm_struct *mm = current->mm;
496	struct vm_area_struct *vma;
497	unsigned long ret = -EINVAL;
498	unsigned long charged = 0;
499	bool locked = false;
500
501	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
502		return ret;
503
504	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
505		return ret;
506
507	if (offset_in_page(addr))
508		return ret;
509
510	old_len = PAGE_ALIGN(old_len);
511	new_len = PAGE_ALIGN(new_len);
512
513	/*
514	 * We allow a zero old-len as a special case
515	 * for DOS-emu "duplicate shm area" thing. But
516	 * a zero new-len is nonsensical.
517	 */
518	if (!new_len)
519		return ret;
520
521	if (down_write_killable(&current->mm->mmap_sem))
522		return -EINTR;
523
524	if (flags & MREMAP_FIXED) {
525		ret = mremap_to(addr, old_len, new_addr, new_len,
526				&locked);
527		goto out;
528	}
529
530	/*
531	 * Always allow a shrinking remap: that just unmaps
532	 * the unnecessary pages..
533	 * do_munmap does all the needed commit accounting
534	 */
535	if (old_len >= new_len) {
536		ret = do_munmap(mm, addr+new_len, old_len - new_len);
537		if (ret && old_len != new_len)
538			goto out;
539		ret = addr;
540		goto out;
541	}
542
543	/*
544	 * Ok, we need to grow..
545	 */
546	vma = vma_to_resize(addr, old_len, new_len, &charged);
547	if (IS_ERR(vma)) {
548		ret = PTR_ERR(vma);
549		goto out;
550	}
551
552	/* old_len exactly to the end of the area..
553	 */
554	if (old_len == vma->vm_end - addr) {
555		/* can we just expand the current mapping? */
556		if (vma_expandable(vma, new_len - old_len)) {
557			int pages = (new_len - old_len) >> PAGE_SHIFT;
558
559			if (vma_adjust(vma, vma->vm_start, addr + new_len,
560				       vma->vm_pgoff, NULL)) {
561				ret = -ENOMEM;
562				goto out;
563			}
564
565			vm_stat_account(mm, vma->vm_flags, pages);
566			if (vma->vm_flags & VM_LOCKED) {
567				mm->locked_vm += pages;
568				locked = true;
569				new_addr = addr;
570			}
571			ret = addr;
572			goto out;
573		}
574	}
575
576	/*
577	 * We weren't able to just expand or shrink the area,
578	 * we need to create a new one and move it..
579	 */
580	ret = -ENOMEM;
581	if (flags & MREMAP_MAYMOVE) {
582		unsigned long map_flags = 0;
583		if (vma->vm_flags & VM_MAYSHARE)
584			map_flags |= MAP_SHARED;
585
586		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
587					vma->vm_pgoff +
588					((addr - vma->vm_start) >> PAGE_SHIFT),
589					map_flags);
590		if (offset_in_page(new_addr)) {
591			ret = new_addr;
592			goto out;
593		}
594
595		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
596	}
597out:
598	if (offset_in_page(ret)) {
599		vm_unacct_memory(charged);
600		locked = 0;
601	}
602	up_write(&current->mm->mmap_sem);
603	if (locked && new_len > old_len)
604		mm_populate(new_addr + old_len, new_len - old_len);
605	return ret;
606}