mm/mremap.c (v3.15)
  1/*
  2 *	mm/mremap.c
  3 *
  4 *	(C) Copyright 1996 Linus Torvalds
  5 *
  6 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  7 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/hugetlb.h>
 12#include <linux/shm.h>
 13#include <linux/ksm.h>
 14#include <linux/mman.h>
 15#include <linux/swap.h>
 16#include <linux/capability.h>
 17#include <linux/fs.h>
 18#include <linux/swapops.h>
 19#include <linux/highmem.h>
 20#include <linux/security.h>
 21#include <linux/syscalls.h>
 22#include <linux/mmu_notifier.h>
 23#include <linux/sched/sysctl.h>
 24
 25#include <asm/uaccess.h>
 26#include <asm/cacheflush.h>
 27#include <asm/tlbflush.h>
 28
 29#include "internal.h"
 30
 31static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 32{
 33	pgd_t *pgd;
 34	pud_t *pud;
 35	pmd_t *pmd;
 36
 37	pgd = pgd_offset(mm, addr);
 38	if (pgd_none_or_clear_bad(pgd))
 39		return NULL;
 40
 41	pud = pud_offset(pgd, addr);
 42	if (pud_none_or_clear_bad(pud))
 43		return NULL;
 44
 45	pmd = pmd_offset(pud, addr);
 46	if (pmd_none(*pmd))
 47		return NULL;
 48
 49	return pmd;
 50}
 51
 52static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 53			    unsigned long addr)
 54{
 55	pgd_t *pgd;
 56	pud_t *pud;
 57	pmd_t *pmd;
 58
 59	pgd = pgd_offset(mm, addr);
 60	pud = pud_alloc(mm, pgd, addr);
 61	if (!pud)
 62		return NULL;
 63
 64	pmd = pmd_alloc(mm, pud, addr);
 65	if (!pmd)
 66		return NULL;
 67
 68	VM_BUG_ON(pmd_trans_huge(*pmd));
 69
 70	return pmd;
 71}
 72
 73static pte_t move_soft_dirty_pte(pte_t pte)
 74{
 75	/*
 76	 * Set soft dirty bit so we can notice
 77	 * in userspace the ptes were moved.
 78	 */
 79#ifdef CONFIG_MEM_SOFT_DIRTY
 80	if (pte_present(pte))
 81		pte = pte_mksoft_dirty(pte);
 82	else if (is_swap_pte(pte))
 83		pte = pte_swp_mksoft_dirty(pte);
 84	else if (pte_file(pte))
 85		pte = pte_file_mksoft_dirty(pte);
 86#endif
 87	return pte;
 88}
 89
 90static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 91		unsigned long old_addr, unsigned long old_end,
 92		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 93		unsigned long new_addr, bool need_rmap_locks)
 94{
 95	struct address_space *mapping = NULL;
 96	struct anon_vma *anon_vma = NULL;
 97	struct mm_struct *mm = vma->vm_mm;
 98	pte_t *old_pte, *new_pte, pte;
 99	spinlock_t *old_ptl, *new_ptl;
100
101	/*
102	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
103	 * locks to ensure that rmap will always observe either the old or the
104	 * new ptes. This is the easiest way to avoid races with
105	 * truncate_pagecache(), page migration, etc...
106	 *
107	 * When need_rmap_locks is false, we use other ways to avoid
108	 * such races:
109	 *
110	 * - During exec() shift_arg_pages(), we use a specially tagged vma
111	 *   which rmap call sites look for using is_vma_temporary_stack().
112	 *
113	 * - During mremap(), new_vma is often known to be placed after vma
114	 *   in rmap traversal order. This ensures rmap will always observe
115	 *   either the old pte, or the new pte, or both (the page table locks
116	 *   serialize access to individual ptes, but only rmap traversal
117	 *   order guarantees that we won't miss both the old and new ptes).
118	 */
119	if (need_rmap_locks) {
120		if (vma->vm_file) {
121			mapping = vma->vm_file->f_mapping;
122			mutex_lock(&mapping->i_mmap_mutex);
123		}
124		if (vma->anon_vma) {
125			anon_vma = vma->anon_vma;
126			anon_vma_lock_write(anon_vma);
127		}
128	}
129
130	/*
131	 * We don't have to worry about the ordering of src and dst
132	 * pte locks because exclusive mmap_sem prevents deadlock.
133	 */
134	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
135	new_pte = pte_offset_map(new_pmd, new_addr);
136	new_ptl = pte_lockptr(mm, new_pmd);
137	if (new_ptl != old_ptl)
138		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
139	arch_enter_lazy_mmu_mode();
140
141	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
142				   new_pte++, new_addr += PAGE_SIZE) {
143		if (pte_none(*old_pte))
144			continue;
145		pte = ptep_get_and_clear(mm, old_addr, old_pte);
146		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
147		pte = move_soft_dirty_pte(pte);
148		set_pte_at(mm, new_addr, new_pte, pte);
149	}
150
151	arch_leave_lazy_mmu_mode();
152	if (new_ptl != old_ptl)
153		spin_unlock(new_ptl);
154	pte_unmap(new_pte - 1);
155	pte_unmap_unlock(old_pte - 1, old_ptl);
156	if (anon_vma)
157		anon_vma_unlock_write(anon_vma);
158	if (mapping)
159		mutex_unlock(&mapping->i_mmap_mutex);
160}
161
162#define LATENCY_LIMIT	(64 * PAGE_SIZE)
163
164unsigned long move_page_tables(struct vm_area_struct *vma,
165		unsigned long old_addr, struct vm_area_struct *new_vma,
166		unsigned long new_addr, unsigned long len,
167		bool need_rmap_locks)
168{
169	unsigned long extent, next, old_end;
170	pmd_t *old_pmd, *new_pmd;
171	bool need_flush = false;
172	unsigned long mmun_start;	/* For mmu_notifiers */
173	unsigned long mmun_end;		/* For mmu_notifiers */
174
175	old_end = old_addr + len;
176	flush_cache_range(vma, old_addr, old_end);
177
178	mmun_start = old_addr;
179	mmun_end   = old_end;
180	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
181
182	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
183		cond_resched();
184		next = (old_addr + PMD_SIZE) & PMD_MASK;
185		/* even if next overflowed, extent below will be ok */
186		extent = next - old_addr;
187		if (extent > old_end - old_addr)
188			extent = old_end - old_addr;
189		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
190		if (!old_pmd)
191			continue;
192		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
193		if (!new_pmd)
194			break;
195		if (pmd_trans_huge(*old_pmd)) {
196			int err = 0;
197			if (extent == HPAGE_PMD_SIZE) {
198				VM_BUG_ON(vma->vm_file || !vma->anon_vma);
199				/* See comment in move_ptes() */
200				if (need_rmap_locks)
201					anon_vma_lock_write(vma->anon_vma);
202				err = move_huge_pmd(vma, new_vma, old_addr,
203						    new_addr, old_end,
204						    old_pmd, new_pmd);
205				if (need_rmap_locks)
206					anon_vma_unlock_write(vma->anon_vma);
207			}
208			if (err > 0) {
209				need_flush = true;
210				continue;
211			} else if (!err) {
212				split_huge_page_pmd(vma, old_addr, old_pmd);
213			}
214			VM_BUG_ON(pmd_trans_huge(*old_pmd));
215		}
216		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
217						      new_pmd, new_addr))
218			break;
219		next = (new_addr + PMD_SIZE) & PMD_MASK;
220		if (extent > next - new_addr)
221			extent = next - new_addr;
222		if (extent > LATENCY_LIMIT)
223			extent = LATENCY_LIMIT;
224		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
225			  new_vma, new_pmd, new_addr, need_rmap_locks);
226		need_flush = true;
227	}
228	if (likely(need_flush))
229		flush_tlb_range(vma, old_end-len, old_addr);
230
231	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
232
233	return len + old_addr - old_end;	/* how much done */
234}
235
236static unsigned long move_vma(struct vm_area_struct *vma,
237		unsigned long old_addr, unsigned long old_len,
238		unsigned long new_len, unsigned long new_addr, bool *locked)
239{
240	struct mm_struct *mm = vma->vm_mm;
241	struct vm_area_struct *new_vma;
242	unsigned long vm_flags = vma->vm_flags;
243	unsigned long new_pgoff;
244	unsigned long moved_len;
245	unsigned long excess = 0;
246	unsigned long hiwater_vm;
247	int split = 0;
248	int err;
249	bool need_rmap_locks;
250
251	/*
252	 * We'd prefer to avoid failure later on in do_munmap:
253	 * which may split one vma into three before unmapping.
254	 */
255	if (mm->map_count >= sysctl_max_map_count - 3)
256		return -ENOMEM;
257
258	/*
259	 * Advise KSM to break any KSM pages in the area to be moved:
260	 * it would be confusing if they were to turn up at the new
261	 * location, where they happen to coincide with different KSM
262	 * pages recently unmapped.  But leave vma->vm_flags as it was,
263	 * so KSM can come around to merge on vma and new_vma afterwards.
264	 */
265	err = ksm_madvise(vma, old_addr, old_addr + old_len,
266						MADV_UNMERGEABLE, &vm_flags);
267	if (err)
268		return err;
269
270	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
271	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
272			   &need_rmap_locks);
273	if (!new_vma)
274		return -ENOMEM;
275
276	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
277				     need_rmap_locks);
278	if (moved_len < old_len) {
279		/*
280		 * On error, move entries back from new area to old,
281		 * which will succeed since page tables still there,
282		 * and then proceed to unmap new area instead of old.
283		 */
284		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
285				 true);
286		vma = new_vma;
287		old_len = new_len;
288		old_addr = new_addr;
289		new_addr = -ENOMEM;
290	}
291
292	/* Conceal VM_ACCOUNT so old reservation is not undone */
293	if (vm_flags & VM_ACCOUNT) {
294		vma->vm_flags &= ~VM_ACCOUNT;
295		excess = vma->vm_end - vma->vm_start - old_len;
296		if (old_addr > vma->vm_start &&
297		    old_addr + old_len < vma->vm_end)
298			split = 1;
299	}
300
301	/*
302	 * If we failed to move page tables we still do total_vm increment
303	 * since do_munmap() will decrement it by old_len == new_len.
304	 *
305	 * Since total_vm is about to be raised artificially high for a
306	 * moment, we need to restore high watermark afterwards: if stats
307	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
308	 * If this were a serious issue, we'd add a flag to do_munmap().
309	 */
310	hiwater_vm = mm->hiwater_vm;
311	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
312
313	if (do_munmap(mm, old_addr, old_len) < 0) {
314		/* OOM: unable to split vma, just get accounts right */
315		vm_unacct_memory(excess >> PAGE_SHIFT);
316		excess = 0;
317	}
318	mm->hiwater_vm = hiwater_vm;
319
320	/* Restore VM_ACCOUNT if one or two pieces of vma left */
321	if (excess) {
322		vma->vm_flags |= VM_ACCOUNT;
323		if (split)
324			vma->vm_next->vm_flags |= VM_ACCOUNT;
325	}
326
327	if (vm_flags & VM_LOCKED) {
328		mm->locked_vm += new_len >> PAGE_SHIFT;
329		*locked = true;
330	}
331
332	return new_addr;
333}
334
335static struct vm_area_struct *vma_to_resize(unsigned long addr,
336	unsigned long old_len, unsigned long new_len, unsigned long *p)
337{
338	struct mm_struct *mm = current->mm;
339	struct vm_area_struct *vma = find_vma(mm, addr);
340
341	if (!vma || vma->vm_start > addr)
342		goto Efault;
343
344	if (is_vm_hugetlb_page(vma))
345		goto Einval;
346
347	/* We can't remap across vm area boundaries */
348	if (old_len > vma->vm_end - addr)
349		goto Efault;
350
351	/* Need to be careful about a growing mapping */
352	if (new_len > old_len) {
353		unsigned long pgoff;
354
355		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
356			goto Efault;
357		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
358		pgoff += vma->vm_pgoff;
359		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
360			goto Einval;
361	}
362
363	if (vma->vm_flags & VM_LOCKED) {
364		unsigned long locked, lock_limit;
365		locked = mm->locked_vm << PAGE_SHIFT;
366		lock_limit = rlimit(RLIMIT_MEMLOCK);
367		locked += new_len - old_len;
368		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
369			goto Eagain;
370	}
371
372	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
373		goto Enomem;
374
375	if (vma->vm_flags & VM_ACCOUNT) {
376		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
377		if (security_vm_enough_memory_mm(mm, charged))
378			goto Efault;
379		*p = charged;
380	}
381
382	return vma;
383
384Efault:	/* very odd choice for most of the cases, but... */
385	return ERR_PTR(-EFAULT);
386Einval:
387	return ERR_PTR(-EINVAL);
388Enomem:
389	return ERR_PTR(-ENOMEM);
390Eagain:
391	return ERR_PTR(-EAGAIN);
392}
393
394static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
395		unsigned long new_addr, unsigned long new_len, bool *locked)
396{
397	struct mm_struct *mm = current->mm;
398	struct vm_area_struct *vma;
399	unsigned long ret = -EINVAL;
400	unsigned long charged = 0;
401	unsigned long map_flags;
402
403	if (new_addr & ~PAGE_MASK)
404		goto out;
405
406	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
407		goto out;
408
409	/* Check if the location we're moving into overlaps the
410	 * old location at all, and fail if it does.
411	 */
412	if ((new_addr <= addr) && (new_addr+new_len) > addr)
413		goto out;
414
415	if ((addr <= new_addr) && (addr+old_len) > new_addr)
416		goto out;
417
418	ret = do_munmap(mm, new_addr, new_len);
419	if (ret)
420		goto out;
421
422	if (old_len >= new_len) {
423		ret = do_munmap(mm, addr+new_len, old_len - new_len);
424		if (ret && old_len != new_len)
425			goto out;
426		old_len = new_len;
427	}
428
429	vma = vma_to_resize(addr, old_len, new_len, &charged);
430	if (IS_ERR(vma)) {
431		ret = PTR_ERR(vma);
432		goto out;
433	}
434
435	map_flags = MAP_FIXED;
436	if (vma->vm_flags & VM_MAYSHARE)
437		map_flags |= MAP_SHARED;
438
439	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
440				((addr - vma->vm_start) >> PAGE_SHIFT),
441				map_flags);
442	if (ret & ~PAGE_MASK)
443		goto out1;
444
445	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
446	if (!(ret & ~PAGE_MASK))
447		goto out;
448out1:
449	vm_unacct_memory(charged);
450
451out:
452	return ret;
453}
454
455static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
456{
457	unsigned long end = vma->vm_end + delta;
458	if (end < vma->vm_end) /* overflow */
459		return 0;
460	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
461		return 0;
462	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
463			      0, MAP_FIXED) & ~PAGE_MASK)
464		return 0;
465	return 1;
466}
467
468/*
469 * Expand (or shrink) an existing mapping, potentially moving it at the
470 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
471 *
472 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
473 * This option implies MREMAP_MAYMOVE.
474 */
475SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
476		unsigned long, new_len, unsigned long, flags,
477		unsigned long, new_addr)
478{
479	struct mm_struct *mm = current->mm;
480	struct vm_area_struct *vma;
481	unsigned long ret = -EINVAL;
482	unsigned long charged = 0;
483	bool locked = false;
484
485	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
486		return ret;
487
488	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
489		return ret;
490
491	if (addr & ~PAGE_MASK)
492		return ret;
493
494	old_len = PAGE_ALIGN(old_len);
495	new_len = PAGE_ALIGN(new_len);
496
497	/*
498	 * We allow a zero old-len as a special case
499	 * for DOS-emu "duplicate shm area" thing. But
500	 * a zero new-len is nonsensical.
501	 */
502	if (!new_len)
503		return ret;
504
505	down_write(&current->mm->mmap_sem);
506
507	if (flags & MREMAP_FIXED) {
508		ret = mremap_to(addr, old_len, new_addr, new_len,
509				&locked);
510		goto out;
511	}
512
513	/*
514	 * Always allow a shrinking remap: that just unmaps
515	 * the unnecessary pages..
516	 * do_munmap does all the needed commit accounting
517	 */
518	if (old_len >= new_len) {
519		ret = do_munmap(mm, addr+new_len, old_len - new_len);
520		if (ret && old_len != new_len)
521			goto out;
522		ret = addr;
523		goto out;
524	}
525
526	/*
527	 * Ok, we need to grow..
528	 */
529	vma = vma_to_resize(addr, old_len, new_len, &charged);
530	if (IS_ERR(vma)) {
531		ret = PTR_ERR(vma);
532		goto out;
533	}
534
535	/* old_len exactly to the end of the area..
536	 */
537	if (old_len == vma->vm_end - addr) {
538		/* can we just expand the current mapping? */
539		if (vma_expandable(vma, new_len - old_len)) {
540			int pages = (new_len - old_len) >> PAGE_SHIFT;
541
542			if (vma_adjust(vma, vma->vm_start, addr + new_len,
543				       vma->vm_pgoff, NULL)) {
544				ret = -ENOMEM;
545				goto out;
546			}
547
548			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
549			if (vma->vm_flags & VM_LOCKED) {
550				mm->locked_vm += pages;
551				locked = true;
552				new_addr = addr;
553			}
554			ret = addr;
555			goto out;
556		}
557	}
558
559	/*
560	 * We weren't able to just expand or shrink the area,
561	 * we need to create a new one and move it..
562	 */
563	ret = -ENOMEM;
564	if (flags & MREMAP_MAYMOVE) {
565		unsigned long map_flags = 0;
566		if (vma->vm_flags & VM_MAYSHARE)
567			map_flags |= MAP_SHARED;
568
569		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
570					vma->vm_pgoff +
571					((addr - vma->vm_start) >> PAGE_SHIFT),
572					map_flags);
573		if (new_addr & ~PAGE_MASK) {
574			ret = new_addr;
575			goto out;
576		}
577
578		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
579	}
580out:
581	if (ret & ~PAGE_MASK)
582		vm_unacct_memory(charged);
583	up_write(&current->mm->mmap_sem);
584	if (locked && new_len > old_len)
585		mm_populate(new_addr + old_len, new_len - old_len);
586	return ret;
587}
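
The syscall defined above is exercised from userspace through mremap(2). Below is a minimal, illustrative userspace program (not part of mremap.c; the buffer sizes and error handling are arbitrary choices for this sketch) that grows an anonymous mapping with MREMAP_MAYMOVE, which drives the move_vma()/move_page_tables() path above when the mapping cannot be expanded in place:

/* Illustrative userspace sketch, not kernel code: grow an anonymous
 * mapping with mremap(MREMAP_MAYMOVE). Sizes are example values only. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8 * 4096;

	/* Map one page of anonymous memory and fill it. */
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 'x', old_len);

	/* Ask the kernel to grow the mapping; it may move it, in which
	 * case the contents are preserved by move_page_tables(). */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("old=%p new=%p first byte=%c\n", (void *)p, (void *)q, q[0]);

	munmap(q, new_len);
	return 0;
}

If MREMAP_FIXED were passed as well (together with the fifth new_address argument), the request would instead be handled by mremap_to() above, since MREMAP_FIXED implies MREMAP_MAYMOVE.
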
mm/mremap.c (v4.6)
  1/*
  2 *	mm/mremap.c
  3 *
  4 *	(C) Copyright 1996 Linus Torvalds
  5 *
  6 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  7 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
  8 */
  9
 10#include <linux/mm.h>
 11#include <linux/hugetlb.h>
 12#include <linux/shm.h>
 13#include <linux/ksm.h>
 14#include <linux/mman.h>
 15#include <linux/swap.h>
 16#include <linux/capability.h>
 17#include <linux/fs.h>
 18#include <linux/swapops.h>
 19#include <linux/highmem.h>
 20#include <linux/security.h>
 21#include <linux/syscalls.h>
 22#include <linux/mmu_notifier.h>
 23#include <linux/uaccess.h>
 24#include <linux/mm-arch-hooks.h>
 25
 26#include <asm/cacheflush.h>
 27#include <asm/tlbflush.h>
 28
 29#include "internal.h"
 30
 31static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 32{
 33	pgd_t *pgd;
 34	pud_t *pud;
 35	pmd_t *pmd;
 36
 37	pgd = pgd_offset(mm, addr);
 38	if (pgd_none_or_clear_bad(pgd))
 39		return NULL;
 40
 41	pud = pud_offset(pgd, addr);
 42	if (pud_none_or_clear_bad(pud))
 43		return NULL;
 44
 45	pmd = pmd_offset(pud, addr);
 46	if (pmd_none(*pmd))
 47		return NULL;
 48
 49	return pmd;
 50}
 51
 52static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 53			    unsigned long addr)
 54{
 55	pgd_t *pgd;
 56	pud_t *pud;
 57	pmd_t *pmd;
 58
 59	pgd = pgd_offset(mm, addr);
 60	pud = pud_alloc(mm, pgd, addr);
 61	if (!pud)
 62		return NULL;
 63
 64	pmd = pmd_alloc(mm, pud, addr);
 65	if (!pmd)
 66		return NULL;
 67
 68	VM_BUG_ON(pmd_trans_huge(*pmd));
 69
 70	return pmd;
 71}
 72
 73static pte_t move_soft_dirty_pte(pte_t pte)
 74{
 75	/*
 76	 * Set soft dirty bit so we can notice
 77	 * in userspace the ptes were moved.
 78	 */
 79#ifdef CONFIG_MEM_SOFT_DIRTY
 80	if (pte_present(pte))
 81		pte = pte_mksoft_dirty(pte);
 82	else if (is_swap_pte(pte))
 83		pte = pte_swp_mksoft_dirty(pte);
 84#endif
 85	return pte;
 86}
 87
 88static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 89		unsigned long old_addr, unsigned long old_end,
 90		struct vm_area_struct *new_vma, pmd_t *new_pmd,
 91		unsigned long new_addr, bool need_rmap_locks)
 92{
 93	struct address_space *mapping = NULL;
 94	struct anon_vma *anon_vma = NULL;
 95	struct mm_struct *mm = vma->vm_mm;
 96	pte_t *old_pte, *new_pte, pte;
 97	spinlock_t *old_ptl, *new_ptl;
 98
 99	/*
100	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
101	 * locks to ensure that rmap will always observe either the old or the
102	 * new ptes. This is the easiest way to avoid races with
103	 * truncate_pagecache(), page migration, etc...
104	 *
105	 * When need_rmap_locks is false, we use other ways to avoid
106	 * such races:
107	 *
108	 * - During exec() shift_arg_pages(), we use a specially tagged vma
109	 *   which rmap call sites look for using is_vma_temporary_stack().
110	 *
111	 * - During mremap(), new_vma is often known to be placed after vma
112	 *   in rmap traversal order. This ensures rmap will always observe
113	 *   either the old pte, or the new pte, or both (the page table locks
114	 *   serialize access to individual ptes, but only rmap traversal
115	 *   order guarantees that we won't miss both the old and new ptes).
116	 */
117	if (need_rmap_locks) {
118		if (vma->vm_file) {
119			mapping = vma->vm_file->f_mapping;
120			i_mmap_lock_write(mapping);
121		}
122		if (vma->anon_vma) {
123			anon_vma = vma->anon_vma;
124			anon_vma_lock_write(anon_vma);
125		}
126	}
127
128	/*
129	 * We don't have to worry about the ordering of src and dst
130	 * pte locks because exclusive mmap_sem prevents deadlock.
131	 */
132	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
133	new_pte = pte_offset_map(new_pmd, new_addr);
134	new_ptl = pte_lockptr(mm, new_pmd);
135	if (new_ptl != old_ptl)
136		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
137	arch_enter_lazy_mmu_mode();
138
139	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
140				   new_pte++, new_addr += PAGE_SIZE) {
141		if (pte_none(*old_pte))
142			continue;
143		pte = ptep_get_and_clear(mm, old_addr, old_pte);
144		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
145		pte = move_soft_dirty_pte(pte);
146		set_pte_at(mm, new_addr, new_pte, pte);
147	}
148
149	arch_leave_lazy_mmu_mode();
150	if (new_ptl != old_ptl)
151		spin_unlock(new_ptl);
152	pte_unmap(new_pte - 1);
153	pte_unmap_unlock(old_pte - 1, old_ptl);
154	if (anon_vma)
155		anon_vma_unlock_write(anon_vma);
156	if (mapping)
157		i_mmap_unlock_write(mapping);
158}
159
160#define LATENCY_LIMIT	(64 * PAGE_SIZE)
161
162unsigned long move_page_tables(struct vm_area_struct *vma,
163		unsigned long old_addr, struct vm_area_struct *new_vma,
164		unsigned long new_addr, unsigned long len,
165		bool need_rmap_locks)
166{
167	unsigned long extent, next, old_end;
168	pmd_t *old_pmd, *new_pmd;
169	bool need_flush = false;
170	unsigned long mmun_start;	/* For mmu_notifiers */
171	unsigned long mmun_end;		/* For mmu_notifiers */
172
173	old_end = old_addr + len;
174	flush_cache_range(vma, old_addr, old_end);
175
176	mmun_start = old_addr;
177	mmun_end   = old_end;
178	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
179
180	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
181		cond_resched();
182		next = (old_addr + PMD_SIZE) & PMD_MASK;
183		/* even if next overflowed, extent below will be ok */
184		extent = next - old_addr;
185		if (extent > old_end - old_addr)
186			extent = old_end - old_addr;
187		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
188		if (!old_pmd)
189			continue;
190		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
191		if (!new_pmd)
192			break;
193		if (pmd_trans_huge(*old_pmd)) {
194			if (extent == HPAGE_PMD_SIZE) {
195				bool moved;
196				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
197					      vma);
198				/* See comment in move_ptes() */
199				if (need_rmap_locks)
200					anon_vma_lock_write(vma->anon_vma);
201				moved = move_huge_pmd(vma, new_vma, old_addr,
202						    new_addr, old_end,
203						    old_pmd, new_pmd);
204				if (need_rmap_locks)
205					anon_vma_unlock_write(vma->anon_vma);
206				if (moved) {
207					need_flush = true;
208					continue;
209				}
210			}
211			split_huge_pmd(vma, old_pmd, old_addr);
212			if (pmd_none(*old_pmd))
213				continue;
214			VM_BUG_ON(pmd_trans_huge(*old_pmd));
215		}
216		if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
217			break;
218		next = (new_addr + PMD_SIZE) & PMD_MASK;
219		if (extent > next - new_addr)
220			extent = next - new_addr;
221		if (extent > LATENCY_LIMIT)
222			extent = LATENCY_LIMIT;
223		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
224			  new_vma, new_pmd, new_addr, need_rmap_locks);
225		need_flush = true;
226	}
227	if (likely(need_flush))
228		flush_tlb_range(vma, old_end-len, old_addr);
229
230	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
231
232	return len + old_addr - old_end;	/* how much done */
233}
234
235static unsigned long move_vma(struct vm_area_struct *vma,
236		unsigned long old_addr, unsigned long old_len,
237		unsigned long new_len, unsigned long new_addr, bool *locked)
238{
239	struct mm_struct *mm = vma->vm_mm;
240	struct vm_area_struct *new_vma;
241	unsigned long vm_flags = vma->vm_flags;
242	unsigned long new_pgoff;
243	unsigned long moved_len;
244	unsigned long excess = 0;
245	unsigned long hiwater_vm;
246	int split = 0;
247	int err;
248	bool need_rmap_locks;
249
250	/*
251	 * We'd prefer to avoid failure later on in do_munmap:
252	 * which may split one vma into three before unmapping.
253	 */
254	if (mm->map_count >= sysctl_max_map_count - 3)
255		return -ENOMEM;
256
257	/*
258	 * Advise KSM to break any KSM pages in the area to be moved:
259	 * it would be confusing if they were to turn up at the new
260	 * location, where they happen to coincide with different KSM
261	 * pages recently unmapped.  But leave vma->vm_flags as it was,
262	 * so KSM can come around to merge on vma and new_vma afterwards.
263	 */
264	err = ksm_madvise(vma, old_addr, old_addr + old_len,
265						MADV_UNMERGEABLE, &vm_flags);
266	if (err)
267		return err;
268
269	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
270	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
271			   &need_rmap_locks);
272	if (!new_vma)
273		return -ENOMEM;
274
275	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
276				     need_rmap_locks);
277	if (moved_len < old_len) {
278		err = -ENOMEM;
279	} else if (vma->vm_ops && vma->vm_ops->mremap) {
280		err = vma->vm_ops->mremap(new_vma);
281	}
282
283	if (unlikely(err)) {
284		/*
285		 * On error, move entries back from new area to old,
286		 * which will succeed since page tables still there,
287		 * and then proceed to unmap new area instead of old.
288		 */
289		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
290				 true);
291		vma = new_vma;
292		old_len = new_len;
293		old_addr = new_addr;
294		new_addr = err;
295	} else {
296		arch_remap(mm, old_addr, old_addr + old_len,
297			   new_addr, new_addr + new_len);
298	}
299
300	/* Conceal VM_ACCOUNT so old reservation is not undone */
301	if (vm_flags & VM_ACCOUNT) {
302		vma->vm_flags &= ~VM_ACCOUNT;
303		excess = vma->vm_end - vma->vm_start - old_len;
304		if (old_addr > vma->vm_start &&
305		    old_addr + old_len < vma->vm_end)
306			split = 1;
307	}
308
309	/*
310	 * If we failed to move page tables we still do total_vm increment
311	 * since do_munmap() will decrement it by old_len == new_len.
312	 *
313	 * Since total_vm is about to be raised artificially high for a
314	 * moment, we need to restore high watermark afterwards: if stats
315	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
316	 * If this were a serious issue, we'd add a flag to do_munmap().
317	 */
318	hiwater_vm = mm->hiwater_vm;
319	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
320
321	/* Tell pfnmap has moved from this vma */
322	if (unlikely(vma->vm_flags & VM_PFNMAP))
323		untrack_pfn_moved(vma);
324
325	if (do_munmap(mm, old_addr, old_len) < 0) {
326		/* OOM: unable to split vma, just get accounts right */
327		vm_unacct_memory(excess >> PAGE_SHIFT);
328		excess = 0;
329	}
330	mm->hiwater_vm = hiwater_vm;
331
332	/* Restore VM_ACCOUNT if one or two pieces of vma left */
333	if (excess) {
334		vma->vm_flags |= VM_ACCOUNT;
335		if (split)
336			vma->vm_next->vm_flags |= VM_ACCOUNT;
337	}
338
339	if (vm_flags & VM_LOCKED) {
340		mm->locked_vm += new_len >> PAGE_SHIFT;
341		*locked = true;
342	}
343
344	return new_addr;
345}
346
347static struct vm_area_struct *vma_to_resize(unsigned long addr,
348	unsigned long old_len, unsigned long new_len, unsigned long *p)
349{
350	struct mm_struct *mm = current->mm;
351	struct vm_area_struct *vma = find_vma(mm, addr);
352	unsigned long pgoff;
353
354	if (!vma || vma->vm_start > addr)
355		return ERR_PTR(-EFAULT);
356
357	if (is_vm_hugetlb_page(vma))
358		return ERR_PTR(-EINVAL);
359
360	/* We can't remap across vm area boundaries */
361	if (old_len > vma->vm_end - addr)
362		return ERR_PTR(-EFAULT);
363
364	if (new_len == old_len)
365		return vma;
366
367	/* Need to be careful about a growing mapping */
368	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
369	pgoff += vma->vm_pgoff;
370	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
371		return ERR_PTR(-EINVAL);
372
373	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
374		return ERR_PTR(-EFAULT);
375
376	if (vma->vm_flags & VM_LOCKED) {
377		unsigned long locked, lock_limit;
378		locked = mm->locked_vm << PAGE_SHIFT;
379		lock_limit = rlimit(RLIMIT_MEMLOCK);
380		locked += new_len - old_len;
381		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
382			return ERR_PTR(-EAGAIN);
383	}
384
385	if (!may_expand_vm(mm, vma->vm_flags,
386				(new_len - old_len) >> PAGE_SHIFT))
387		return ERR_PTR(-ENOMEM);
388
389	if (vma->vm_flags & VM_ACCOUNT) {
390		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
391		if (security_vm_enough_memory_mm(mm, charged))
392			return ERR_PTR(-ENOMEM);
393		*p = charged;
394	}
395
396	return vma;
397}
398
399static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
400		unsigned long new_addr, unsigned long new_len, bool *locked)
401{
402	struct mm_struct *mm = current->mm;
403	struct vm_area_struct *vma;
404	unsigned long ret = -EINVAL;
405	unsigned long charged = 0;
406	unsigned long map_flags;
407
408	if (offset_in_page(new_addr))
409		goto out;
410
411	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
412		goto out;
413
414	/* Ensure the old/new locations do not overlap */
415	if (addr + old_len > new_addr && new_addr + new_len > addr)
416		goto out;
417
418	ret = do_munmap(mm, new_addr, new_len);
419	if (ret)
420		goto out;
421
422	if (old_len >= new_len) {
423		ret = do_munmap(mm, addr+new_len, old_len - new_len);
424		if (ret && old_len != new_len)
425			goto out;
426		old_len = new_len;
427	}
428
429	vma = vma_to_resize(addr, old_len, new_len, &charged);
430	if (IS_ERR(vma)) {
431		ret = PTR_ERR(vma);
432		goto out;
433	}
434
435	map_flags = MAP_FIXED;
436	if (vma->vm_flags & VM_MAYSHARE)
437		map_flags |= MAP_SHARED;
438
439	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
440				((addr - vma->vm_start) >> PAGE_SHIFT),
441				map_flags);
442	if (offset_in_page(ret))
443		goto out1;
444
445	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
446	if (!(offset_in_page(ret)))
447		goto out;
448out1:
449	vm_unacct_memory(charged);
450
451out:
452	return ret;
453}
454
455static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
456{
457	unsigned long end = vma->vm_end + delta;
458	if (end < vma->vm_end) /* overflow */
459		return 0;
460	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
461		return 0;
462	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
463			      0, MAP_FIXED) & ~PAGE_MASK)
464		return 0;
465	return 1;
466}
467
468/*
469 * Expand (or shrink) an existing mapping, potentially moving it at the
470 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
471 *
472 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
473 * This option implies MREMAP_MAYMOVE.
474 */
475SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
476		unsigned long, new_len, unsigned long, flags,
477		unsigned long, new_addr)
478{
479	struct mm_struct *mm = current->mm;
480	struct vm_area_struct *vma;
481	unsigned long ret = -EINVAL;
482	unsigned long charged = 0;
483	bool locked = false;
484
485	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
486		return ret;
487
488	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
489		return ret;
490
491	if (offset_in_page(addr))
492		return ret;
493
494	old_len = PAGE_ALIGN(old_len);
495	new_len = PAGE_ALIGN(new_len);
496
497	/*
498	 * We allow a zero old-len as a special case
499	 * for DOS-emu "duplicate shm area" thing. But
500	 * a zero new-len is nonsensical.
501	 */
502	if (!new_len)
503		return ret;
504
505	down_write(&current->mm->mmap_sem);
506
507	if (flags & MREMAP_FIXED) {
508		ret = mremap_to(addr, old_len, new_addr, new_len,
509				&locked);
510		goto out;
511	}
512
513	/*
514	 * Always allow a shrinking remap: that just unmaps
515	 * the unnecessary pages..
516	 * do_munmap does all the needed commit accounting
517	 */
518	if (old_len >= new_len) {
519		ret = do_munmap(mm, addr+new_len, old_len - new_len);
520		if (ret && old_len != new_len)
521			goto out;
522		ret = addr;
523		goto out;
524	}
525
526	/*
527	 * Ok, we need to grow..
528	 */
529	vma = vma_to_resize(addr, old_len, new_len, &charged);
530	if (IS_ERR(vma)) {
531		ret = PTR_ERR(vma);
532		goto out;
533	}
534
535	/* old_len exactly to the end of the area..
536	 */
537	if (old_len == vma->vm_end - addr) {
538		/* can we just expand the current mapping? */
539		if (vma_expandable(vma, new_len - old_len)) {
540			int pages = (new_len - old_len) >> PAGE_SHIFT;
541
542			if (vma_adjust(vma, vma->vm_start, addr + new_len,
543				       vma->vm_pgoff, NULL)) {
544				ret = -ENOMEM;
545				goto out;
546			}
547
548			vm_stat_account(mm, vma->vm_flags, pages);
549			if (vma->vm_flags & VM_LOCKED) {
550				mm->locked_vm += pages;
551				locked = true;
552				new_addr = addr;
553			}
554			ret = addr;
555			goto out;
556		}
557	}
558
559	/*
560	 * We weren't able to just expand or shrink the area,
561	 * we need to create a new one and move it..
562	 */
563	ret = -ENOMEM;
564	if (flags & MREMAP_MAYMOVE) {
565		unsigned long map_flags = 0;
566		if (vma->vm_flags & VM_MAYSHARE)
567			map_flags |= MAP_SHARED;
568
569		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
570					vma->vm_pgoff +
571					((addr - vma->vm_start) >> PAGE_SHIFT),
572					map_flags);
573		if (offset_in_page(new_addr)) {
574			ret = new_addr;
575			goto out;
576		}
577
578		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
579	}
580out:
581	if (offset_in_page(ret)) {
582		vm_unacct_memory(charged);
583		locked = 0;
584	}
585	up_write(&current->mm->mmap_sem);
586	if (locked && new_len > old_len)
587		mm_populate(new_addr + old_len, new_len - old_len);
588	return ret;
589}
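
For reference, the single overlap test in this version's mremap_to() ("addr + old_len > new_addr && new_addr + new_len > addr") replaces the two separate checks used in v3.15. A standalone sketch of that interval-intersection predicate follows; the helper name ranges_overlap() is a hypothetical label used here for illustration only:

/* Standalone sketch of the overlap test in mremap_to() above: two
 * half-open ranges [addr, addr+old_len) and [new_addr, new_addr+new_len)
 * intersect iff each one starts before the other ends. */
#include <assert.h>
#include <stdbool.h>

static bool ranges_overlap(unsigned long addr, unsigned long old_len,
			   unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	/* Disjoint ranges: no overlap, so MREMAP_FIXED may proceed. */
	assert(!ranges_overlap(0x1000, 0x2000, 0x8000, 0x2000));
	/* New range starts inside the old one: overlap, mremap_to() bails out. */
	assert(ranges_overlap(0x1000, 0x2000, 0x2000, 0x2000));
	return 0;
}
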