v4.6
 
   1/*
   2 *  linux/mm/nommu.c
   3 *
   4 *  Replacement code for mm functions to support CPUs that don't
   5 *  have any form of memory management unit (thus no virtual memory).
   6 *
   7 *  See Documentation/nommu-mmap.txt
   8 *
   9 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  10 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  11 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  12 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  13 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  14 */
  15
  16#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17
  18#include <linux/export.h>
  19#include <linux/mm.h>
  20#include <linux/vmacache.h>
  21#include <linux/mman.h>
  22#include <linux/swap.h>
  23#include <linux/file.h>
  24#include <linux/highmem.h>
  25#include <linux/pagemap.h>
  26#include <linux/slab.h>
  27#include <linux/vmalloc.h>
  28#include <linux/blkdev.h>
  29#include <linux/backing-dev.h>
  30#include <linux/compiler.h>
  31#include <linux/mount.h>
  32#include <linux/personality.h>
  33#include <linux/security.h>
  34#include <linux/syscalls.h>
  35#include <linux/audit.h>
  36#include <linux/printk.h>
  37
   38#include <asm/uaccess.h>
  39#include <asm/tlb.h>
  40#include <asm/tlbflush.h>
  41#include <asm/mmu_context.h>
  42#include "internal.h"
  43
  44void *high_memory;
  45EXPORT_SYMBOL(high_memory);
  46struct page *mem_map;
  47unsigned long max_mapnr;
  48EXPORT_SYMBOL(max_mapnr);
  49unsigned long highest_memmap_pfn;
  50int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  51int heap_stack_gap = 0;
  52
  53atomic_long_t mmap_pages_allocated;
  54
  55EXPORT_SYMBOL(mem_map);
  56
  57/* list of mapped, potentially shareable regions */
  58static struct kmem_cache *vm_region_jar;
  59struct rb_root nommu_region_tree = RB_ROOT;
  60DECLARE_RWSEM(nommu_region_sem);
  61
  62const struct vm_operations_struct generic_file_vm_ops = {
  63};
  64
  65/*
  66 * Return the total memory allocated for this pointer, not
  67 * just what the caller asked for.
  68 *
  69 * Doesn't have to be accurate, i.e. may have races.
  70 */
  71unsigned int kobjsize(const void *objp)
  72{
  73	struct page *page;
  74
  75	/*
  76	 * If the object we have should not have ksize performed on it,
  77	 * return size of 0
  78	 */
  79	if (!objp || !virt_addr_valid(objp))
  80		return 0;
  81
  82	page = virt_to_head_page(objp);
  83
  84	/*
  85	 * If the allocator sets PageSlab, we know the pointer came from
  86	 * kmalloc().
  87	 */
  88	if (PageSlab(page))
  89		return ksize(objp);
  90
  91	/*
  92	 * If it's not a compound page, see if we have a matching VMA
  93	 * region. This test is intentionally done in reverse order,
  94	 * so if there's no VMA, we still fall through and hand back
  95	 * PAGE_SIZE for 0-order pages.
  96	 */
  97	if (!PageCompound(page)) {
  98		struct vm_area_struct *vma;
  99
 100		vma = find_vma(current->mm, (unsigned long)objp);
 101		if (vma)
 102			return vma->vm_end - vma->vm_start;
 103	}
 104
 105	/*
 106	 * The ksize() function is only guaranteed to work for pointers
 107	 * returned by kmalloc(). So handle arbitrary pointers here.
 108	 */
 109	return PAGE_SIZE << compound_order(page);
 110}
 111
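/*
 * With no MMU, "pinning" user pages is trivial: user addresses are ordinary
 * kernel addresses, so we only check the covering VMA's permissions and hand
 * back the struct page obtained via virt_to_page().
 */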
 112long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 113		      unsigned long start, unsigned long nr_pages,
 114		      unsigned int foll_flags, struct page **pages,
 115		      struct vm_area_struct **vmas, int *nonblocking)
 116{
 117	struct vm_area_struct *vma;
 118	unsigned long vm_flags;
 119	int i;
 120
 121	/* calculate required read or write permissions.
 122	 * If FOLL_FORCE is set, we only require the "MAY" flags.
 123	 */
 124	vm_flags  = (foll_flags & FOLL_WRITE) ?
 125			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 126	vm_flags &= (foll_flags & FOLL_FORCE) ?
 127			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 128
 129	for (i = 0; i < nr_pages; i++) {
 130		vma = find_vma(mm, start);
 131		if (!vma)
 132			goto finish_or_fault;
 133
 134		/* protect what we can, including chardevs */
 135		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 136		    !(vm_flags & vma->vm_flags))
 137			goto finish_or_fault;
 138
 139		if (pages) {
 140			pages[i] = virt_to_page(start);
 141			if (pages[i])
 142				get_page(pages[i]);
 143		}
 144		if (vmas)
 145			vmas[i] = vma;
 146		start = (start + PAGE_SIZE) & PAGE_MASK;
 147	}
 148
 149	return i;
 150
 151finish_or_fault:
 152	return i ? : -EFAULT;
 153}
 154
 155/*
 156 * get a list of pages in an address range belonging to the specified process
 157 * and indicate the VMA that covers each page
  158 * - this is potentially dodgy as we may end up incrementing the page count of a
 159 *   slab page or a secondary page from a compound page
 160 * - don't permit access to VMAs that don't support it, such as I/O mappings
 161 */
 162long get_user_pages(unsigned long start, unsigned long nr_pages,
 163		    int write, int force, struct page **pages,
 164		    struct vm_area_struct **vmas)
 165{
 166	int flags = 0;
 167
 168	if (write)
 169		flags |= FOLL_WRITE;
 170	if (force)
 171		flags |= FOLL_FORCE;
 172
 173	return __get_user_pages(current, current->mm, start, nr_pages, flags,
 174				pages, vmas, NULL);
 175}
 176EXPORT_SYMBOL(get_user_pages);
 177
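/*
 * The "locked" and "unlocked" variants exist for API compatibility with the
 * MMU version; with no page faults to retry, they all reduce to the same VMA
 * lookup performed by __get_user_pages() above.
 */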
 178long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 179			    int write, int force, struct page **pages,
 180			    int *locked)
 181{
 182	return get_user_pages(start, nr_pages, write, force, pages, NULL);
 183}
 184EXPORT_SYMBOL(get_user_pages_locked);
 185
 186long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 187			       unsigned long start, unsigned long nr_pages,
 188			       int write, int force, struct page **pages,
 189			       unsigned int gup_flags)
 190{
 191	long ret;
 192	down_read(&mm->mmap_sem);
 193	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
 194				NULL, NULL);
 195	up_read(&mm->mmap_sem);
 196	return ret;
 197}
 198EXPORT_SYMBOL(__get_user_pages_unlocked);
 199
 200long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 201			     int write, int force, struct page **pages)
 202{
 203	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
  204					 write, force, pages, 0);
 205}
 206EXPORT_SYMBOL(get_user_pages_unlocked);
 207
 208/**
 209 * follow_pfn - look up PFN at a user virtual address
 210 * @vma: memory mapping
 211 * @address: user virtual address
 212 * @pfn: location to store found PFN
 213 *
 214 * Only IO mappings and raw PFN mappings are allowed.
 215 *
 216 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 217 */
 218int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 219	unsigned long *pfn)
 220{
 221	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 222		return -EINVAL;
 223
 224	*pfn = address >> PAGE_SHIFT;
 225	return 0;
 226}
 227EXPORT_SYMBOL(follow_pfn);
 228
 229LIST_HEAD(vmap_area_list);
 230
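/*
 * On !MMU, vmalloc memory is obtained from kmalloc() (see __vmalloc() below),
 * so freeing it is simply kfree().
 */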
  231void vfree(const void *addr)
 232{
 233	kfree(addr);
 234}
 235EXPORT_SYMBOL(vfree);
 236
  237void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 238{
 239	/*
 240	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 241	 * returns only a logical address.
 242	 */
 243	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 244}
 245EXPORT_SYMBOL(__vmalloc);
 246
 247void *vmalloc_user(unsigned long size)
 248{
 249	void *ret;
 250
 251	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 252			PAGE_KERNEL);
 253	if (ret) {
 254		struct vm_area_struct *vma;
 255
 256		down_write(&current->mm->mmap_sem);
 257		vma = find_vma(current->mm, (unsigned long)ret);
 258		if (vma)
 259			vma->vm_flags |= VM_USERMAP;
 260		up_write(&current->mm->mmap_sem);
 261	}
 262
 263	return ret;
 264}
  265EXPORT_SYMBOL(vmalloc_user);
 266
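/*
 * Since "vmalloc" memory here is physically contiguous kmalloc memory, the
 * backing page and PFN can be derived directly from the virtual address.
 */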
 267struct page *vmalloc_to_page(const void *addr)
 268{
 269	return virt_to_page(addr);
 270}
 271EXPORT_SYMBOL(vmalloc_to_page);
 272
 273unsigned long vmalloc_to_pfn(const void *addr)
 274{
 275	return page_to_pfn(virt_to_page(addr));
 276}
 277EXPORT_SYMBOL(vmalloc_to_pfn);
 278
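/*
 * vread()/vwrite() on !MMU are plain memcpy()s: every kernel virtual address
 * is directly dereferenceable, so only range overflow needs to be guarded
 * against.
 */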
 279long vread(char *buf, char *addr, unsigned long count)
 280{
 281	/* Don't allow overflow */
 282	if ((unsigned long) buf + count < count)
 283		count = -(unsigned long) buf;
 284
 285	memcpy(buf, addr, count);
 286	return count;
 287}
 288
 289long vwrite(char *buf, char *addr, unsigned long count)
 290{
 291	/* Don't allow overflow */
 292	if ((unsigned long) addr + count < count)
 293		count = -(unsigned long) addr;
 294
 295	memcpy(addr, buf, count);
 296	return count;
 297}
 298
 299/*
 300 *	vmalloc  -  allocate virtually contiguous memory
 301 *
 302 *	@size:		allocation size
 303 *
 304 *	Allocate enough pages to cover @size from the page level
 305 *	allocator and map them into contiguous kernel virtual space.
 306 *
 307 *	For tight control over page level allocator and protection flags
 308 *	use __vmalloc() instead.
 309 */
 310void *vmalloc(unsigned long size)
 311{
 312       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 313}
  314EXPORT_SYMBOL(vmalloc);
 315
 316/*
 317 *	vzalloc - allocate virtually contiguous memory with zero fill
 318 *
 319 *	@size:		allocation size
 320 *
 321 *	Allocate enough pages to cover @size from the page level
 322 *	allocator and map them into contiguous kernel virtual space.
 323 *	The memory allocated is set to zero.
 324 *
 325 *	For tight control over page level allocator and protection flags
 326 *	use __vmalloc() instead.
 327 */
 328void *vzalloc(unsigned long size)
 329{
 330	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 331			PAGE_KERNEL);
 332}
 333EXPORT_SYMBOL(vzalloc);
 334
 335/**
 336 * vmalloc_node - allocate memory on a specific node
 337 * @size:	allocation size
 338 * @node:	numa node
 339 *
 340 * Allocate enough pages to cover @size from the page level
 341 * allocator and map them into contiguous kernel virtual space.
 342 *
 343 * For tight control over page level allocator and protection flags
 344 * use __vmalloc() instead.
 345 */
 346void *vmalloc_node(unsigned long size, int node)
 347{
 348	return vmalloc(size);
 349}
 350EXPORT_SYMBOL(vmalloc_node);
 351
 352/**
 353 * vzalloc_node - allocate memory on a specific node with zero fill
 354 * @size:	allocation size
 355 * @node:	numa node
 356 *
 357 * Allocate enough pages to cover @size from the page level
 358 * allocator and map them into contiguous kernel virtual space.
 359 * The memory allocated is set to zero.
 360 *
 361 * For tight control over page level allocator and protection flags
 362 * use __vmalloc() instead.
 363 */
 364void *vzalloc_node(unsigned long size, int node)
 365{
 366	return vzalloc(size);
 367}
 368EXPORT_SYMBOL(vzalloc_node);
 369
 370#ifndef PAGE_KERNEL_EXEC
 371# define PAGE_KERNEL_EXEC PAGE_KERNEL
 372#endif
 373
 374/**
 375 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 376 *	@size:		allocation size
 377 *
  378 *	Kernel-internal function to allocate enough pages to cover @size from
 379 *	the page level allocator and map them into contiguous and
 380 *	executable kernel virtual space.
 381 *
 382 *	For tight control over page level allocator and protection flags
 383 *	use __vmalloc() instead.
 384 */
 385
 386void *vmalloc_exec(unsigned long size)
 387{
 388	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
  389}
 390
 391/**
 392 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 393 *	@size:		allocation size
 394 *
 395 *	Allocate enough 32bit PA addressable pages to cover @size from the
 396 *	page level allocator and map them into contiguous kernel virtual space.
 397 */
 398void *vmalloc_32(unsigned long size)
 399{
 400	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 401}
 402EXPORT_SYMBOL(vmalloc_32);
 403
 404/**
 405 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 406 *	@size:		allocation size
 407 *
 408 * The resulting memory area is 32bit addressable and zeroed so it can be
 409 * mapped to userspace without leaking data.
 410 *
 411 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 412 * remap_vmalloc_range() are permissible.
 413 */
 414void *vmalloc_32_user(unsigned long size)
 415{
 416	/*
 417	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 418	 * but for now this can simply use vmalloc_user() directly.
 419	 */
 420	return vmalloc_user(size);
 421}
 422EXPORT_SYMBOL(vmalloc_32_user);
 423
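/*
 * Mapping an arbitrary array of pages into a contiguous virtual range is
 * impossible without an MMU, so these interfaces are stubbed out with BUG().
 */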
 424void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 425{
 426	BUG();
 427	return NULL;
 428}
 429EXPORT_SYMBOL(vmap);
 430
 431void vunmap(const void *addr)
 432{
 433	BUG();
 434}
 435EXPORT_SYMBOL(vunmap);
 436
 437void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 438{
 439	BUG();
 440	return NULL;
 441}
 442EXPORT_SYMBOL(vm_map_ram);
 443
 444void vm_unmap_ram(const void *mem, unsigned int count)
 445{
 446	BUG();
 447}
 448EXPORT_SYMBOL(vm_unmap_ram);
 449
 450void vm_unmap_aliases(void)
 451{
 452}
 453EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 454
 455/*
 456 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 457 * have one.
 458 */
 459void __weak vmalloc_sync_all(void)
 460{
 461}
 462
 463/**
 464 *	alloc_vm_area - allocate a range of kernel address space
 465 *	@size:		size of the area
 466 *
 467 *	Returns:	NULL on failure, vm_struct on success
 468 *
 469 *	This function reserves a range of kernel address space, and
 470 *	allocates pagetables to map that range.  No actual mappings
 471 *	are created.  If the kernel address space is not shared
 472 *	between processes, it syncs the pagetable across all
 473 *	processes.
 474 */
 475struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 476{
 477	BUG();
 478	return NULL;
 479}
 480EXPORT_SYMBOL_GPL(alloc_vm_area);
 481
 482void free_vm_area(struct vm_struct *area)
 483{
 484	BUG();
 485}
 486EXPORT_SYMBOL_GPL(free_vm_area);
 487
 488int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 489		   struct page *page)
 490{
 491	return -EINVAL;
 492}
 493EXPORT_SYMBOL(vm_insert_page);
  494
 495/*
 496 *  sys_brk() for the most part doesn't need the global kernel
 497 *  lock, except when an application is doing something nasty
 498 *  like trying to un-brk an area that has already been mapped
 499 *  to a regular file.  in this case, the unmapping will need
 500 *  to invoke file system routines that need the global lock.
 501 */
 502SYSCALL_DEFINE1(brk, unsigned long, brk)
 503{
 504	struct mm_struct *mm = current->mm;
 505
 506	if (brk < mm->start_brk || brk > mm->context.end_brk)
 507		return mm->brk;
 508
 509	if (mm->brk == brk)
 510		return mm->brk;
 511
 512	/*
 513	 * Always allow shrinking brk
 514	 */
 515	if (brk <= mm->brk) {
 516		mm->brk = brk;
 517		return brk;
 518	}
 519
 520	/*
 521	 * Ok, looks good - let it rip.
 522	 */
 523	flush_icache_range(mm->brk, brk);
 524	return mm->brk = brk;
 525}
 526
 527/*
 528 * initialise the VMA and region record slabs
 529 */
 530void __init mmap_init(void)
 531{
 532	int ret;
 533
 534	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 535	VM_BUG_ON(ret);
 536	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 537}
 538
 539/*
 540 * validate the region tree
 541 * - the caller must hold the region lock
 542 */
 543#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 544static noinline void validate_nommu_regions(void)
 545{
 546	struct vm_region *region, *last;
 547	struct rb_node *p, *lastp;
 548
 549	lastp = rb_first(&nommu_region_tree);
 550	if (!lastp)
 551		return;
 552
 553	last = rb_entry(lastp, struct vm_region, vm_rb);
 554	BUG_ON(last->vm_end <= last->vm_start);
 555	BUG_ON(last->vm_top < last->vm_end);
 556
 557	while ((p = rb_next(lastp))) {
 558		region = rb_entry(p, struct vm_region, vm_rb);
 559		last = rb_entry(lastp, struct vm_region, vm_rb);
 560
 561		BUG_ON(region->vm_end <= region->vm_start);
 562		BUG_ON(region->vm_top < region->vm_end);
 563		BUG_ON(region->vm_start < last->vm_top);
 564
 565		lastp = p;
 566	}
 567}
 568#else
 569static void validate_nommu_regions(void)
 570{
 571}
 572#endif
 573
 574/*
 575 * add a region into the global tree
 576 */
 577static void add_nommu_region(struct vm_region *region)
 578{
 579	struct vm_region *pregion;
 580	struct rb_node **p, *parent;
 581
 582	validate_nommu_regions();
 583
 584	parent = NULL;
 585	p = &nommu_region_tree.rb_node;
 586	while (*p) {
 587		parent = *p;
 588		pregion = rb_entry(parent, struct vm_region, vm_rb);
 589		if (region->vm_start < pregion->vm_start)
 590			p = &(*p)->rb_left;
 591		else if (region->vm_start > pregion->vm_start)
 592			p = &(*p)->rb_right;
 593		else if (pregion == region)
 594			return;
 595		else
 596			BUG();
 597	}
 598
 599	rb_link_node(&region->vm_rb, parent, p);
 600	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 601
 602	validate_nommu_regions();
 603}
 604
 605/*
 606 * delete a region from the global tree
 607 */
 608static void delete_nommu_region(struct vm_region *region)
 609{
 610	BUG_ON(!nommu_region_tree.rb_node);
 611
 612	validate_nommu_regions();
 613	rb_erase(&region->vm_rb, &nommu_region_tree);
 614	validate_nommu_regions();
 615}
 616
 617/*
 618 * free a contiguous series of pages
 619 */
 620static void free_page_series(unsigned long from, unsigned long to)
 621{
 622	for (; from < to; from += PAGE_SIZE) {
 623		struct page *page = virt_to_page(from);
 624
 625		atomic_long_dec(&mmap_pages_allocated);
 626		put_page(page);
 627	}
 628}
 629
 630/*
 631 * release a reference to a region
 632 * - the caller must hold the region semaphore for writing, which this releases
 633 * - the region may not have been added to the tree yet, in which case vm_top
 634 *   will equal vm_start
 635 */
 636static void __put_nommu_region(struct vm_region *region)
 637	__releases(nommu_region_sem)
 638{
 639	BUG_ON(!nommu_region_tree.rb_node);
 640
 641	if (--region->vm_usage == 0) {
 642		if (region->vm_top > region->vm_start)
 643			delete_nommu_region(region);
 644		up_write(&nommu_region_sem);
 645
 646		if (region->vm_file)
 647			fput(region->vm_file);
 648
 649		/* IO memory and memory shared directly out of the pagecache
 650		 * from ramfs/tmpfs mustn't be released here */
 651		if (region->vm_flags & VM_MAPPED_COPY)
 652			free_page_series(region->vm_start, region->vm_top);
 653		kmem_cache_free(vm_region_jar, region);
 654	} else {
 655		up_write(&nommu_region_sem);
 656	}
 657}
 658
 659/*
 660 * release a reference to a region
 661 */
 662static void put_nommu_region(struct vm_region *region)
 663{
 664	down_write(&nommu_region_sem);
 665	__put_nommu_region(region);
 666}
 667
 668/*
 669 * update protection on a vma
 670 */
 671static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
 672{
 673#ifdef CONFIG_MPU
 674	struct mm_struct *mm = vma->vm_mm;
 675	long start = vma->vm_start & PAGE_MASK;
 676	while (start < vma->vm_end) {
 677		protect_page(mm, start, flags);
 678		start += PAGE_SIZE;
 679	}
 680	update_protections(mm);
 681#endif
 682}
 683
 684/*
 685 * add a VMA into a process's mm_struct in the appropriate place in the list
 686 * and tree and add to the address space's page tree also if not an anonymous
 687 * page
 688 * - should be called with mm->mmap_sem held writelocked
 689 */
 690static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 691{
 692	struct vm_area_struct *pvma, *prev;
 693	struct address_space *mapping;
 694	struct rb_node **p, *parent, *rb_prev;
 695
 696	BUG_ON(!vma->vm_region);
 697
 698	mm->map_count++;
 699	vma->vm_mm = mm;
 700
 701	protect_vma(vma, vma->vm_flags);
 702
 703	/* add the VMA to the mapping */
 704	if (vma->vm_file) {
 705		mapping = vma->vm_file->f_mapping;
 706
 707		i_mmap_lock_write(mapping);
 708		flush_dcache_mmap_lock(mapping);
 709		vma_interval_tree_insert(vma, &mapping->i_mmap);
 710		flush_dcache_mmap_unlock(mapping);
 711		i_mmap_unlock_write(mapping);
 712	}
 713
 714	/* add the VMA to the tree */
 715	parent = rb_prev = NULL;
 716	p = &mm->mm_rb.rb_node;
 717	while (*p) {
 718		parent = *p;
 719		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 720
 721		/* sort by: start addr, end addr, VMA struct addr in that order
 722		 * (the latter is necessary as we may get identical VMAs) */
 723		if (vma->vm_start < pvma->vm_start)
 724			p = &(*p)->rb_left;
 725		else if (vma->vm_start > pvma->vm_start) {
 726			rb_prev = parent;
 727			p = &(*p)->rb_right;
 728		} else if (vma->vm_end < pvma->vm_end)
 729			p = &(*p)->rb_left;
 730		else if (vma->vm_end > pvma->vm_end) {
 731			rb_prev = parent;
 732			p = &(*p)->rb_right;
 733		} else if (vma < pvma)
 734			p = &(*p)->rb_left;
 735		else if (vma > pvma) {
 736			rb_prev = parent;
 737			p = &(*p)->rb_right;
 738		} else
 739			BUG();
 740	}
 741
 742	rb_link_node(&vma->vm_rb, parent, p);
 743	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 744
 745	/* add VMA to the VMA list also */
 746	prev = NULL;
 747	if (rb_prev)
 748		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 749
 750	__vma_link_list(mm, vma, prev, parent);
 751}
 752
 753/*
 754 * delete a VMA from its owning mm_struct and address space
 755 */
 756static void delete_vma_from_mm(struct vm_area_struct *vma)
 757{
 758	int i;
 759	struct address_space *mapping;
 760	struct mm_struct *mm = vma->vm_mm;
 761	struct task_struct *curr = current;
 762
 763	protect_vma(vma, 0);
 764
 765	mm->map_count--;
 766	for (i = 0; i < VMACACHE_SIZE; i++) {
 767		/* if the vma is cached, invalidate the entire cache */
 768		if (curr->vmacache[i] == vma) {
 769			vmacache_invalidate(mm);
 770			break;
 771		}
 772	}
 773
 774	/* remove the VMA from the mapping */
  775	if (vma->vm_file) {
 776		mapping = vma->vm_file->f_mapping;
 777
 778		i_mmap_lock_write(mapping);
 779		flush_dcache_mmap_lock(mapping);
 780		vma_interval_tree_remove(vma, &mapping->i_mmap);
 781		flush_dcache_mmap_unlock(mapping);
 782		i_mmap_unlock_write(mapping);
  783	}
 784
 785	/* remove from the MM's tree and list */
  786	rb_erase(&vma->vm_rb, &mm->mm_rb);
 787
 788	if (vma->vm_prev)
 789		vma->vm_prev->vm_next = vma->vm_next;
 790	else
  791		mm->mmap = vma->vm_next;
 792
 793	if (vma->vm_next)
  794		vma->vm_next->vm_prev = vma->vm_prev;
 795}
 796
 797/*
 798 * destroy a VMA record
 799 */
 800static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 801{
 802	if (vma->vm_ops && vma->vm_ops->close)
 803		vma->vm_ops->close(vma);
 804	if (vma->vm_file)
 805		fput(vma->vm_file);
 806	put_nommu_region(vma->vm_region);
 807	kmem_cache_free(vm_area_cachep, vma);
 808}
  809
 810/*
 811 * look up the first VMA in which addr resides, NULL if none
 812 * - should be called with mm->mmap_sem at least held readlocked
 813 */
 814struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 815{
 816	struct vm_area_struct *vma;
 817
 818	/* check the cache first */
 819	vma = vmacache_find(mm, addr);
 820	if (likely(vma))
 821		return vma;
 822
 823	/* trawl the list (there may be multiple mappings in which addr
 824	 * resides) */
 825	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 826		if (vma->vm_start > addr)
 827			return NULL;
 828		if (vma->vm_end > addr) {
 829			vmacache_update(addr, vma);
 830			return vma;
 831		}
 832	}
 833
 834	return NULL;
 835}
 836EXPORT_SYMBOL(find_vma);
 837
 838/*
 839 * find a VMA
 840 * - we don't extend stack VMAs under NOMMU conditions
 841 */
  842struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 843{
  844	return find_vma(mm, addr);
 845}
 846
 847/*
 848 * expand a stack to a given address
 849 * - not supported under NOMMU conditions
 850 */
 851int expand_stack(struct vm_area_struct *vma, unsigned long address)
 852{
 853	return -ENOMEM;
 854}
  855
 856/*
  857 * look up the first VMA that exactly matches addr
 858 * - should be called with mm->mmap_sem at least held readlocked
 859 */
 860static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 861					     unsigned long addr,
 862					     unsigned long len)
 863{
 864	struct vm_area_struct *vma;
  865	unsigned long end = addr + len;
 866
 867	/* check the cache first */
 868	vma = vmacache_find_exact(mm, addr, end);
 869	if (vma)
 870		return vma;
 871
 872	/* trawl the list (there may be multiple mappings in which addr
 873	 * resides) */
 874	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 875		if (vma->vm_start < addr)
 876			continue;
 877		if (vma->vm_start > addr)
 878			return NULL;
 879		if (vma->vm_end == end) {
 880			vmacache_update(addr, vma);
 881			return vma;
 882		}
 883	}
 884
 885	return NULL;
 886}
 887
 888/*
 889 * determine whether a mapping should be permitted and, if so, what sort of
 890 * mapping we're capable of supporting
 891 */
 892static int validate_mmap_request(struct file *file,
 893				 unsigned long addr,
 894				 unsigned long len,
 895				 unsigned long prot,
 896				 unsigned long flags,
 897				 unsigned long pgoff,
 898				 unsigned long *_capabilities)
 899{
 900	unsigned long capabilities, rlen;
 901	int ret;
 902
 903	/* do the simple checks first */
 904	if (flags & MAP_FIXED)
 905		return -EINVAL;
 906
 907	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 908	    (flags & MAP_TYPE) != MAP_SHARED)
 909		return -EINVAL;
 910
 911	if (!len)
 912		return -EINVAL;
 913
 914	/* Careful about overflows.. */
 915	rlen = PAGE_ALIGN(len);
 916	if (!rlen || rlen > TASK_SIZE)
 917		return -ENOMEM;
 918
 919	/* offset overflow? */
 920	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 921		return -EOVERFLOW;
 922
 923	if (file) {
 924		/* files must support mmap */
 925		if (!file->f_op->mmap)
 926			return -ENODEV;
 927
 928		/* work out if what we've got could possibly be shared
 929		 * - we support chardevs that provide their own "memory"
 930		 * - we support files/blockdevs that are memory backed
 931		 */
 932		if (file->f_op->mmap_capabilities) {
 933			capabilities = file->f_op->mmap_capabilities(file);
 934		} else {
 935			/* no explicit capabilities set, so assume some
 936			 * defaults */
 937			switch (file_inode(file)->i_mode & S_IFMT) {
 938			case S_IFREG:
 939			case S_IFBLK:
 940				capabilities = NOMMU_MAP_COPY;
 941				break;
 942
 943			case S_IFCHR:
 944				capabilities =
 945					NOMMU_MAP_DIRECT |
 946					NOMMU_MAP_READ |
 947					NOMMU_MAP_WRITE;
 948				break;
 949
 950			default:
 951				return -EINVAL;
 952			}
 953		}
 954
 955		/* eliminate any capabilities that we can't support on this
 956		 * device */
 957		if (!file->f_op->get_unmapped_area)
 958			capabilities &= ~NOMMU_MAP_DIRECT;
 959		if (!(file->f_mode & FMODE_CAN_READ))
 960			capabilities &= ~NOMMU_MAP_COPY;
 961
 962		/* The file shall have been opened with read permission. */
 963		if (!(file->f_mode & FMODE_READ))
 964			return -EACCES;
 965
 966		if (flags & MAP_SHARED) {
 967			/* do checks for writing, appending and locking */
 968			if ((prot & PROT_WRITE) &&
 969			    !(file->f_mode & FMODE_WRITE))
 970				return -EACCES;
 971
 972			if (IS_APPEND(file_inode(file)) &&
 973			    (file->f_mode & FMODE_WRITE))
 974				return -EACCES;
 975
 976			if (locks_verify_locked(file))
 977				return -EAGAIN;
 978
 979			if (!(capabilities & NOMMU_MAP_DIRECT))
 980				return -ENODEV;
 981
 982			/* we mustn't privatise shared mappings */
 983			capabilities &= ~NOMMU_MAP_COPY;
 984		} else {
 985			/* we're going to read the file into private memory we
 986			 * allocate */
 987			if (!(capabilities & NOMMU_MAP_COPY))
 988				return -ENODEV;
 989
 990			/* we don't permit a private writable mapping to be
 991			 * shared with the backing device */
 992			if (prot & PROT_WRITE)
 993				capabilities &= ~NOMMU_MAP_DIRECT;
 994		}
 995
 996		if (capabilities & NOMMU_MAP_DIRECT) {
 997			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 998			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 999			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
1000			    ) {
1001				capabilities &= ~NOMMU_MAP_DIRECT;
1002				if (flags & MAP_SHARED) {
1003					pr_warn("MAP_SHARED not completely supported on !MMU\n");
1004					return -EINVAL;
1005				}
1006			}
1007		}
1008
1009		/* handle executable mappings and implied executable
1010		 * mappings */
1011		if (path_noexec(&file->f_path)) {
1012			if (prot & PROT_EXEC)
1013				return -EPERM;
1014		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1015			/* handle implication of PROT_EXEC by PROT_READ */
1016			if (current->personality & READ_IMPLIES_EXEC) {
1017				if (capabilities & NOMMU_MAP_EXEC)
1018					prot |= PROT_EXEC;
1019			}
1020		} else if ((prot & PROT_READ) &&
1021			 (prot & PROT_EXEC) &&
1022			 !(capabilities & NOMMU_MAP_EXEC)
1023			 ) {
1024			/* backing file is not executable, try to copy */
1025			capabilities &= ~NOMMU_MAP_DIRECT;
1026		}
1027	} else {
1028		/* anonymous mappings are always memory backed and can be
1029		 * privately mapped
1030		 */
1031		capabilities = NOMMU_MAP_COPY;
1032
1033		/* handle PROT_EXEC implication by PROT_READ */
1034		if ((prot & PROT_READ) &&
1035		    (current->personality & READ_IMPLIES_EXEC))
1036			prot |= PROT_EXEC;
1037	}
1038
1039	/* allow the security API to have its say */
1040	ret = security_mmap_addr(addr);
1041	if (ret < 0)
1042		return ret;
1043
1044	/* looks okay */
1045	*_capabilities = capabilities;
1046	return 0;
1047}
1048
1049/*
1050 * we've determined that we can make the mapping, now translate what we
1051 * now know into VMA flags
1052 */
1053static unsigned long determine_vm_flags(struct file *file,
1054					unsigned long prot,
1055					unsigned long flags,
1056					unsigned long capabilities)
1057{
1058	unsigned long vm_flags;
1059
1060	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
1061	/* vm_flags |= mm->def_flags; */
1062
1063	if (!(capabilities & NOMMU_MAP_DIRECT)) {
 1064		/* attempt to share read-only copies of mapped file chunks */
1065		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1066		if (file && !(prot & PROT_WRITE))
 1067			vm_flags |= VM_MAYSHARE;
1068	} else {
1069		/* overlay a shareable mapping on the backing device or inode
1070		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1071		 * romfs/cramfs */
1072		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
1073		if (flags & MAP_SHARED)
1074			vm_flags |= VM_SHARED;
1075	}
1076
1077	/* refuse to let anyone share private mappings with this process if
1078	 * it's being traced - otherwise breakpoints set in it may interfere
1079	 * with another untraced process
1080	 */
1081	if ((flags & MAP_PRIVATE) && current->ptrace)
1082		vm_flags &= ~VM_MAYSHARE;
1083
1084	return vm_flags;
1085}
1086
1087/*
1088 * set up a shared mapping on a file (the driver or filesystem provides and
1089 * pins the storage)
1090 */
1091static int do_mmap_shared_file(struct vm_area_struct *vma)
1092{
1093	int ret;
1094
1095	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1096	if (ret == 0) {
1097		vma->vm_region->vm_top = vma->vm_region->vm_end;
1098		return 0;
1099	}
1100	if (ret != -ENOSYS)
1101		return ret;
1102
1103	/* getting -ENOSYS indicates that direct mmap isn't possible (as
1104	 * opposed to tried but failed) so we can only give a suitable error as
1105	 * it's not possible to make a private copy if MAP_SHARED was given */
1106	return -ENODEV;
1107}
1108
1109/*
1110 * set up a private mapping or an anonymous shared mapping
1111 */
1112static int do_mmap_private(struct vm_area_struct *vma,
1113			   struct vm_region *region,
1114			   unsigned long len,
1115			   unsigned long capabilities)
1116{
1117	unsigned long total, point;
1118	void *base;
1119	int ret, order;
1120
1121	/* invoke the file's mapping function so that it can keep track of
1122	 * shared mappings on devices or memory
 1123	 * - VM_MAYSHARE will be set if it may attempt to share
1124	 */
1125	if (capabilities & NOMMU_MAP_DIRECT) {
 1126		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1127		if (ret == 0) {
1128			/* shouldn't return success if we're not sharing */
1129			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1130			vma->vm_region->vm_top = vma->vm_region->vm_end;
1131			return 0;
1132		}
1133		if (ret != -ENOSYS)
1134			return ret;
1135
1136		/* getting an ENOSYS error indicates that direct mmap isn't
1137		 * possible (as opposed to tried but failed) so we'll try to
1138		 * make a private copy of the data and map that instead */
1139	}
1140
1141
1142	/* allocate some memory to hold the mapping
1143	 * - note that this may not return a page-aligned address if the object
1144	 *   we're allocating is smaller than a page
1145	 */
1146	order = get_order(len);
1147	total = 1 << order;
1148	point = len >> PAGE_SHIFT;
1149
1150	/* we don't want to allocate a power-of-2 sized page set */
1151	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
1152		total = point;
1153
1154	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1155	if (!base)
1156		goto enomem;
1157
1158	atomic_long_add(total, &mmap_pages_allocated);
1159
 1160	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1161	region->vm_start = (unsigned long) base;
1162	region->vm_end   = region->vm_start + len;
1163	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1164
1165	vma->vm_start = region->vm_start;
1166	vma->vm_end   = region->vm_start + len;
1167
1168	if (vma->vm_file) {
1169		/* read the contents of a file into the copy */
1170		mm_segment_t old_fs;
1171		loff_t fpos;
1172
1173		fpos = vma->vm_pgoff;
1174		fpos <<= PAGE_SHIFT;
1175
1176		old_fs = get_fs();
1177		set_fs(KERNEL_DS);
1178		ret = __vfs_read(vma->vm_file, base, len, &fpos);
1179		set_fs(old_fs);
1180
1181		if (ret < 0)
1182			goto error_free;
1183
1184		/* clear the last little bit */
1185		if (ret < len)
1186			memset(base + ret, 0, len - ret);
 1187
1188	}
1189
1190	return 0;
1191
1192error_free:
1193	free_page_series(region->vm_start, region->vm_top);
1194	region->vm_start = vma->vm_start = 0;
1195	region->vm_end   = vma->vm_end = 0;
1196	region->vm_top   = 0;
1197	return ret;
1198
1199enomem:
1200	pr_err("Allocation of length %lu from process %d (%s) failed\n",
1201	       len, current->pid, current->comm);
1202	show_free_areas(0);
1203	return -ENOMEM;
1204}
1205
1206/*
1207 * handle mapping creation for uClinux
1208 */
1209unsigned long do_mmap(struct file *file,
1210			unsigned long addr,
1211			unsigned long len,
1212			unsigned long prot,
1213			unsigned long flags,
1214			vm_flags_t vm_flags,
1215			unsigned long pgoff,
 1216			unsigned long *populate)
1217{
1218	struct vm_area_struct *vma;
1219	struct vm_region *region;
1220	struct rb_node *rb;
1221	unsigned long capabilities, result;
 1222	int ret;
1223
1224	*populate = 0;
1225
1226	/* decide whether we should attempt the mapping, and if so what sort of
1227	 * mapping */
1228	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1229				    &capabilities);
1230	if (ret < 0)
1231		return ret;
1232
1233	/* we ignore the address hint */
1234	addr = 0;
1235	len = PAGE_ALIGN(len);
1236
1237	/* we've determined that we can make the mapping, now translate what we
1238	 * now know into VMA flags */
1239	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
 1240
1241	/* we're going to need to record the mapping */
1242	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1243	if (!region)
1244		goto error_getting_region;
1245
1246	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1247	if (!vma)
1248		goto error_getting_vma;
1249
1250	region->vm_usage = 1;
1251	region->vm_flags = vm_flags;
1252	region->vm_pgoff = pgoff;
1253
1254	INIT_LIST_HEAD(&vma->anon_vma_chain);
1255	vma->vm_flags = vm_flags;
1256	vma->vm_pgoff = pgoff;
1257
1258	if (file) {
1259		region->vm_file = get_file(file);
1260		vma->vm_file = get_file(file);
1261	}
1262
1263	down_write(&nommu_region_sem);
1264
1265	/* if we want to share, we need to check for regions created by other
1266	 * mmap() calls that overlap with our proposed mapping
1267	 * - we can only share with a superset match on most regular files
1268	 * - shared mappings on character devices and memory backed files are
1269	 *   permitted to overlap inexactly as far as we are concerned for in
1270	 *   these cases, sharing is handled in the driver or filesystem rather
1271	 *   than here
1272	 */
1273	if (vm_flags & VM_MAYSHARE) {
1274		struct vm_region *pregion;
1275		unsigned long pglen, rpglen, pgend, rpgend, start;
1276
1277		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1278		pgend = pgoff + pglen;
1279
1280		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1281			pregion = rb_entry(rb, struct vm_region, vm_rb);
1282
1283			if (!(pregion->vm_flags & VM_MAYSHARE))
1284				continue;
1285
1286			/* search for overlapping mappings on the same file */
1287			if (file_inode(pregion->vm_file) !=
1288			    file_inode(file))
1289				continue;
1290
1291			if (pregion->vm_pgoff >= pgend)
1292				continue;
1293
1294			rpglen = pregion->vm_end - pregion->vm_start;
1295			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1296			rpgend = pregion->vm_pgoff + rpglen;
1297			if (pgoff >= rpgend)
1298				continue;
1299
1300			/* handle inexactly overlapping matches between
1301			 * mappings */
1302			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1303			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1304				/* new mapping is not a subset of the region */
1305				if (!(capabilities & NOMMU_MAP_DIRECT))
1306					goto sharing_violation;
1307				continue;
1308			}
1309
1310			/* we've found a region we can share */
1311			pregion->vm_usage++;
1312			vma->vm_region = pregion;
1313			start = pregion->vm_start;
1314			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1315			vma->vm_start = start;
1316			vma->vm_end = start + len;
1317
1318			if (pregion->vm_flags & VM_MAPPED_COPY)
1319				vma->vm_flags |= VM_MAPPED_COPY;
1320			else {
1321				ret = do_mmap_shared_file(vma);
1322				if (ret < 0) {
1323					vma->vm_region = NULL;
1324					vma->vm_start = 0;
1325					vma->vm_end = 0;
1326					pregion->vm_usage--;
1327					pregion = NULL;
1328					goto error_just_free;
1329				}
1330			}
1331			fput(region->vm_file);
1332			kmem_cache_free(vm_region_jar, region);
1333			region = pregion;
1334			result = start;
1335			goto share;
1336		}
1337
1338		/* obtain the address at which to make a shared mapping
1339		 * - this is the hook for quasi-memory character devices to
1340		 *   tell us the location of a shared mapping
1341		 */
1342		if (capabilities & NOMMU_MAP_DIRECT) {
1343			addr = file->f_op->get_unmapped_area(file, addr, len,
1344							     pgoff, flags);
1345			if (IS_ERR_VALUE(addr)) {
1346				ret = addr;
1347				if (ret != -ENOSYS)
1348					goto error_just_free;
1349
1350				/* the driver refused to tell us where to site
1351				 * the mapping so we'll have to attempt to copy
1352				 * it */
1353				ret = -ENODEV;
1354				if (!(capabilities & NOMMU_MAP_COPY))
1355					goto error_just_free;
1356
1357				capabilities &= ~NOMMU_MAP_DIRECT;
1358			} else {
1359				vma->vm_start = region->vm_start = addr;
1360				vma->vm_end = region->vm_end = addr + len;
1361			}
1362		}
1363	}
1364
1365	vma->vm_region = region;
1366
1367	/* set up the mapping
1368	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1369	 */
1370	if (file && vma->vm_flags & VM_SHARED)
1371		ret = do_mmap_shared_file(vma);
1372	else
1373		ret = do_mmap_private(vma, region, len, capabilities);
1374	if (ret < 0)
1375		goto error_just_free;
1376	add_nommu_region(region);
1377
1378	/* clear anonymous mappings that don't ask for uninitialized data */
 1379	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1380		memset((void *)region->vm_start, 0,
1381		       region->vm_end - region->vm_start);
1382
1383	/* okay... we have a mapping; now we have to register it */
1384	result = vma->vm_start;
1385
1386	current->mm->total_vm += len >> PAGE_SHIFT;
1387
1388share:
 1389	add_vma_to_mm(current->mm, vma);
1390
1391	/* we flush the region from the icache only when the first executable
1392	 * mapping of it is made  */
1393	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1394		flush_icache_range(region->vm_start, region->vm_end);
1395		region->vm_icache_flushed = true;
1396	}
1397
1398	up_write(&nommu_region_sem);
1399
1400	return result;
1401
1402error_just_free:
1403	up_write(&nommu_region_sem);
 1404error:
1405	if (region->vm_file)
1406		fput(region->vm_file);
1407	kmem_cache_free(vm_region_jar, region);
1408	if (vma->vm_file)
1409		fput(vma->vm_file);
1410	kmem_cache_free(vm_area_cachep, vma);
1411	return ret;
1412
1413sharing_violation:
1414	up_write(&nommu_region_sem);
1415	pr_warn("Attempt to share mismatched mappings\n");
1416	ret = -EINVAL;
1417	goto error;
1418
1419error_getting_vma:
1420	kmem_cache_free(vm_region_jar, region);
1421	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1422			len, current->pid);
1423	show_free_areas(0);
1424	return -ENOMEM;
1425
1426error_getting_region:
1427	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1428			len, current->pid);
1429	show_free_areas(0);
1430	return -ENOMEM;
1431}
1432
1433SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1434		unsigned long, prot, unsigned long, flags,
1435		unsigned long, fd, unsigned long, pgoff)
1436{
1437	struct file *file = NULL;
1438	unsigned long retval = -EBADF;
1439
1440	audit_mmap_fd(fd, flags);
1441	if (!(flags & MAP_ANONYMOUS)) {
1442		file = fget(fd);
1443		if (!file)
1444			goto out;
1445	}
1446
1447	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1448
1449	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1450
1451	if (file)
1452		fput(file);
1453out:
1454	return retval;
1455}
 1456
1457#ifdef __ARCH_WANT_SYS_OLD_MMAP
1458struct mmap_arg_struct {
1459	unsigned long addr;
1460	unsigned long len;
1461	unsigned long prot;
1462	unsigned long flags;
1463	unsigned long fd;
1464	unsigned long offset;
1465};
1466
1467SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1468{
1469	struct mmap_arg_struct a;
1470
1471	if (copy_from_user(&a, arg, sizeof(a)))
1472		return -EFAULT;
1473	if (offset_in_page(a.offset))
1474		return -EINVAL;
1475
1476	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1477			      a.offset >> PAGE_SHIFT);
1478}
1479#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1480
1481/*
1482 * split a vma into two pieces at address 'addr', a new vma is allocated either
1483 * for the first part or the tail.
1484 */
1485int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1486	      unsigned long addr, int new_below)
1487{
1488	struct vm_area_struct *new;
1489	struct vm_region *region;
 1490	unsigned long npages;
1491
1492	/* we're only permitted to split anonymous regions (these should have
1493	 * only a single usage on the region) */
1494	if (vma->vm_file)
1495		return -ENOMEM;
 1496
1497	if (mm->map_count >= sysctl_max_map_count)
1498		return -ENOMEM;
1499
1500	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1501	if (!region)
1502		return -ENOMEM;
1503
1504	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1505	if (!new) {
1506		kmem_cache_free(vm_region_jar, region);
1507		return -ENOMEM;
1508	}
1509
1510	/* most fields are the same, copy all, and then fixup */
1511	*new = *vma;
1512	*region = *vma->vm_region;
1513	new->vm_region = region;
1514
1515	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1516
1517	if (new_below) {
1518		region->vm_top = region->vm_end = new->vm_end = addr;
1519	} else {
1520		region->vm_start = new->vm_start = addr;
1521		region->vm_pgoff = new->vm_pgoff += npages;
1522	}
 1523
1524	if (new->vm_ops && new->vm_ops->open)
1525		new->vm_ops->open(new);
1526
1527	delete_vma_from_mm(vma);
1528	down_write(&nommu_region_sem);
1529	delete_nommu_region(vma->vm_region);
1530	if (new_below) {
1531		vma->vm_region->vm_start = vma->vm_start = addr;
1532		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1533	} else {
1534		vma->vm_region->vm_end = vma->vm_end = addr;
1535		vma->vm_region->vm_top = addr;
1536	}
1537	add_nommu_region(vma->vm_region);
1538	add_nommu_region(new->vm_region);
1539	up_write(&nommu_region_sem);
1540	add_vma_to_mm(mm, vma);
 1541	add_vma_to_mm(mm, new);
 1542	return 0;
1543}
1544
1545/*
1546 * shrink a VMA by removing the specified chunk from either the beginning or
1547 * the end
1548 */
1549static int shrink_vma(struct mm_struct *mm,
1550		      struct vm_area_struct *vma,
1551		      unsigned long from, unsigned long to)
1552{
1553	struct vm_region *region;
1554
1555	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1556	 * and list */
1557	delete_vma_from_mm(vma);
 1558	if (from > vma->vm_start)
1559		vma->vm_end = from;
 1560	else
1561		vma->vm_start = to;
1562	add_vma_to_mm(mm, vma);
1563
1564	/* cut the backing region down to size */
1565	region = vma->vm_region;
1566	BUG_ON(region->vm_usage != 1);
1567
1568	down_write(&nommu_region_sem);
1569	delete_nommu_region(region);
1570	if (from > region->vm_start) {
1571		to = region->vm_top;
1572		region->vm_top = region->vm_end = from;
1573	} else {
1574		region->vm_start = to;
1575	}
1576	add_nommu_region(region);
1577	up_write(&nommu_region_sem);
1578
1579	free_page_series(from, to);
1580	return 0;
1581}
1582
1583/*
1584 * release a mapping
1585 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1586 *   VMA, though it need not cover the whole VMA
1587 */
1588int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 1589{
1590	struct vm_area_struct *vma;
1591	unsigned long end;
1592	int ret;
1593
1594	len = PAGE_ALIGN(len);
1595	if (len == 0)
1596		return -EINVAL;
1597
1598	end = start + len;
1599
1600	/* find the first potentially overlapping VMA */
1601	vma = find_vma(mm, start);
1602	if (!vma) {
1603		static int limit;
1604		if (limit < 5) {
1605			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1606					current->pid, current->comm,
1607					start, start + len - 1);
1608			limit++;
1609		}
1610		return -EINVAL;
1611	}
1612
1613	/* we're allowed to split an anonymous VMA but not a file-backed one */
1614	if (vma->vm_file) {
1615		do {
1616			if (start > vma->vm_start)
1617				return -EINVAL;
1618			if (end == vma->vm_end)
1619				goto erase_whole_vma;
1620			vma = vma->vm_next;
1621		} while (vma);
1622		return -EINVAL;
1623	} else {
1624		/* the chunk must be a subset of the VMA found */
1625		if (start == vma->vm_start && end == vma->vm_end)
1626			goto erase_whole_vma;
1627		if (start < vma->vm_start || end > vma->vm_end)
1628			return -EINVAL;
1629		if (offset_in_page(start))
1630			return -EINVAL;
1631		if (end != vma->vm_end && offset_in_page(end))
1632			return -EINVAL;
1633		if (start != vma->vm_start && end != vma->vm_end) {
1634			ret = split_vma(mm, vma, start, 1);
1635			if (ret < 0)
1636				return ret;
1637		}
1638		return shrink_vma(mm, vma, start, end);
1639	}
1640
1641erase_whole_vma:
1642	delete_vma_from_mm(vma);
1643	delete_vma(mm, vma);
 1644	return 0;
1645}
1646EXPORT_SYMBOL(do_munmap);
1647
1648int vm_munmap(unsigned long addr, size_t len)
1649{
1650	struct mm_struct *mm = current->mm;
1651	int ret;
1652
1653	down_write(&mm->mmap_sem);
1654	ret = do_munmap(mm, addr, len);
1655	up_write(&mm->mmap_sem);
1656	return ret;
1657}
1658EXPORT_SYMBOL(vm_munmap);
1659
1660SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1661{
1662	return vm_munmap(addr, len);
1663}
1664
1665/*
1666 * release all the mappings made in a process's VM space
1667 */
1668void exit_mmap(struct mm_struct *mm)
 1669{
1670	struct vm_area_struct *vma;
1671
1672	if (!mm)
1673		return;
1674
1675	mm->total_vm = 0;
1676
1677	while ((vma = mm->mmap)) {
1678		mm->mmap = vma->vm_next;
 1679		delete_vma_from_mm(vma);
1680		delete_vma(mm, vma);
1681		cond_resched();
1682	}
1683}
1684
1685unsigned long vm_brk(unsigned long addr, unsigned long len)
1686{
1687	return -ENOMEM;
1688}
1689
1690/*
1691 * expand (or shrink) an existing mapping, potentially moving it at the same
1692 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1693 *
1694 * under NOMMU conditions, we only permit changing a mapping's size, and only
1695 * as long as it stays within the region allocated by do_mmap_private() and the
1696 * block is not shareable
1697 *
1698 * MREMAP_FIXED is not supported under NOMMU conditions
1699 */
1700static unsigned long do_mremap(unsigned long addr,
1701			unsigned long old_len, unsigned long new_len,
1702			unsigned long flags, unsigned long new_addr)
1703{
1704	struct vm_area_struct *vma;
1705
1706	/* insanity checks first */
1707	old_len = PAGE_ALIGN(old_len);
1708	new_len = PAGE_ALIGN(new_len);
1709	if (old_len == 0 || new_len == 0)
1710		return (unsigned long) -EINVAL;
1711
1712	if (offset_in_page(addr))
1713		return -EINVAL;
1714
1715	if (flags & MREMAP_FIXED && new_addr != addr)
1716		return (unsigned long) -EINVAL;
1717
1718	vma = find_vma_exact(current->mm, addr, old_len);
1719	if (!vma)
1720		return (unsigned long) -EINVAL;
1721
1722	if (vma->vm_end != vma->vm_start + old_len)
1723		return (unsigned long) -EFAULT;
1724
1725	if (vma->vm_flags & VM_MAYSHARE)
1726		return (unsigned long) -EPERM;
1727
1728	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1729		return (unsigned long) -ENOMEM;
1730
1731	/* all checks complete - do it */
1732	vma->vm_end = vma->vm_start + new_len;
1733	return vma->vm_start;
1734}
1735
1736SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1737		unsigned long, new_len, unsigned long, flags,
1738		unsigned long, new_addr)
1739{
1740	unsigned long ret;
1741
1742	down_write(&current->mm->mmap_sem);
1743	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1744	up_write(&current->mm->mmap_sem);
1745	return ret;
1746}
1747
1748struct page *follow_page_mask(struct vm_area_struct *vma,
1749			      unsigned long address, unsigned int flags,
1750			      unsigned int *page_mask)
1751{
1752	*page_mask = 0;
1753	return NULL;
1754}
1755
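/*
 * With a 1:1 physical/virtual correspondence there is nothing to remap: the
 * request is only accepted if the target address already equals
 * pfn << PAGE_SHIFT, and we just record the I/O mapping flags on the VMA.
 */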
1756int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1757		unsigned long pfn, unsigned long size, pgprot_t prot)
1758{
1759	if (addr != (pfn << PAGE_SHIFT))
1760		return -EINVAL;
1761
1762	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1763	return 0;
1764}
1765EXPORT_SYMBOL(remap_pfn_range);
1766
1767int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1768{
1769	unsigned long pfn = start >> PAGE_SHIFT;
1770	unsigned long vm_len = vma->vm_end - vma->vm_start;
1771
1772	pfn += vma->vm_pgoff;
1773	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1774}
1775EXPORT_SYMBOL(vm_iomap_memory);
1776
1777int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1778			unsigned long pgoff)
1779{
1780	unsigned int size = vma->vm_end - vma->vm_start;
1781
1782	if (!(vma->vm_flags & VM_USERMAP))
1783		return -EINVAL;
1784
1785	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1786	vma->vm_end = vma->vm_start + size;
1787
1788	return 0;
1789}
1790EXPORT_SYMBOL(remap_vmalloc_range);
1791
1792unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1793	unsigned long len, unsigned long pgoff, unsigned long flags)
1794{
1795	return -ENOMEM;
1796}
1797
1798void unmap_mapping_range(struct address_space *mapping,
1799			 loff_t const holebegin, loff_t const holelen,
1800			 int even_cows)
1801{
1802}
1803EXPORT_SYMBOL(unmap_mapping_range);
1804
1805int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1806{
1807	BUG();
1808	return 0;
1809}
1810EXPORT_SYMBOL(filemap_fault);
1811
 1812void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
1813{
 1814	BUG();
1815}
1816EXPORT_SYMBOL(filemap_map_pages);
1817
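/*
 * Accessing another process's address space needs no page-table walk here:
 * the target mapping is found with find_vma() and the data is copied directly
 * via copy_to_user_page()/copy_from_user_page(), subject to the VMA's
 * VM_MAYWRITE/VM_MAYREAD permissions.
 */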
1818static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1819		unsigned long addr, void *buf, int len, int write)
1820{
 1821	struct vm_area_struct *vma;
1822
 1823	down_read(&mm->mmap_sem);
1824
1825	/* the access must start within one of the target process's mappings */
1826	vma = find_vma(mm, addr);
1827	if (vma) {
1828		/* don't overrun this mapping */
1829		if (addr + len >= vma->vm_end)
1830			len = vma->vm_end - addr;
1831
1832		/* only read or write mappings where it is permitted */
1833		if (write && vma->vm_flags & VM_MAYWRITE)
1834			copy_to_user_page(vma, NULL, addr,
1835					 (void *) addr, buf, len);
1836		else if (!write && vma->vm_flags & VM_MAYREAD)
1837			copy_from_user_page(vma, NULL, addr,
1838					    buf, (void *) addr, len);
1839		else
1840			len = 0;
1841	} else {
1842		len = 0;
1843	}
1844
1845	up_read(&mm->mmap_sem);
1846
1847	return len;
1848}
1849
1850/**
 1851 * access_remote_vm - access another process' address space
1852 * @mm:		the mm_struct of the target address space
1853 * @addr:	start address to access
1854 * @buf:	source or destination buffer
1855 * @len:	number of bytes to transfer
1856 * @write:	whether the access is a write
1857 *
1858 * The caller must hold a reference on @mm.
1859 */
1860int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1861		void *buf, int len, int write)
1862{
1863	return __access_remote_vm(NULL, mm, addr, buf, len, write);
1864}
1865
1866/*
1867 * Access another process' address space.
1868 * - source/target buffer must be kernel space
1869 */
 1870int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1871{
1872	struct mm_struct *mm;
1873
1874	if (addr + len < addr)
1875		return 0;
1876
1877	mm = get_task_mm(tsk);
1878	if (!mm)
1879		return 0;
1880
1881	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
1882
1883	mmput(mm);
1884	return len;
 1885}
1886
1887/**
1888 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1889 * @inode: The inode to check
1890 * @size: The current filesize of the inode
1891 * @newsize: The proposed filesize of the inode
1892 *
1893 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 1894 * make sure that any outstanding VMAs aren't broken and then shrink the
 1895 * vm_regions that extend beyond so that do_mmap_pgoff() doesn't
1896 * automatically grant mappings that are too large.
1897 */
1898int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1899				size_t newsize)
1900{
1901	struct vm_area_struct *vma;
1902	struct vm_region *region;
1903	pgoff_t low, high;
1904	size_t r_size, r_top;
1905
1906	low = newsize >> PAGE_SHIFT;
1907	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1908
1909	down_write(&nommu_region_sem);
1910	i_mmap_lock_read(inode->i_mapping);
1911
1912	/* search for VMAs that fall within the dead zone */
1913	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1914		/* found one - only interested if it's shared out of the page
1915		 * cache */
1916		if (vma->vm_flags & VM_SHARED) {
1917			i_mmap_unlock_read(inode->i_mapping);
1918			up_write(&nommu_region_sem);
1919			return -ETXTBSY; /* not quite true, but near enough */
1920		}
1921	}
1922
1923	/* reduce any regions that overlap the dead zone - if in existence,
1924	 * these will be pointed to by VMAs that don't overlap the dead zone
1925	 *
1926	 * we don't check for any regions that start beyond the EOF as there
1927	 * shouldn't be any
1928	 */
1929	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1930		if (!(vma->vm_flags & VM_SHARED))
1931			continue;
1932
1933		region = vma->vm_region;
1934		r_size = region->vm_top - region->vm_start;
1935		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1936
1937		if (r_top > newsize) {
1938			region->vm_top -= r_top - newsize;
1939			if (region->vm_end > region->vm_top)
1940				region->vm_end = region->vm_top;
1941		}
1942	}
1943
1944	i_mmap_unlock_read(inode->i_mapping);
1945	up_write(&nommu_region_sem);
1946	return 0;
1947}
1948
1949/*
1950 * Initialise sysctl_user_reserve_kbytes.
1951 *
1952 * This is intended to prevent a user from starting a single memory hogging
1953 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1954 * mode.
1955 *
1956 * The default value is min(3% of free memory, 128MB)
1957 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1958 */
1959static int __meminit init_user_reserve(void)
1960{
1961	unsigned long free_kbytes;
1962
1963	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1964
1965	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1966	return 0;
1967}
1968subsys_initcall(init_user_reserve);
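/*
 * Illustrative worked example (editorial note, not part of the original
 * source): free_kbytes / 32 is roughly 3% of free memory, and 1UL << 17
 * kbytes is 128MB.  So on a board with 256MB free (262144kB) the reserve
 * becomes min(8192kB, 131072kB) = 8MB, while on a machine with 8GB free
 * it is capped at the 128MB ceiling.
 */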
1969
1970/*
1971 * Initialise sysctl_admin_reserve_kbytes.
1972 *
1973 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1974 * to log in and kill a memory hogging process.
1975 *
1976 * Systems with more than 256MB will reserve 8MB, enough to recover
1977 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1978 * only reserve 3% of free pages by default.
1979 */
1980static int __meminit init_admin_reserve(void)
1981{
1982	unsigned long free_kbytes;
1983
1984	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1985
1986	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1987	return 0;
1988}
1989subsys_initcall(init_admin_reserve);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPU's that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/mman.h>
  23#include <linux/swap.h>
  24#include <linux/file.h>
  25#include <linux/highmem.h>
  26#include <linux/pagemap.h>
  27#include <linux/slab.h>
  28#include <linux/vmalloc.h>
 
  29#include <linux/backing-dev.h>
  30#include <linux/compiler.h>
  31#include <linux/mount.h>
  32#include <linux/personality.h>
  33#include <linux/security.h>
  34#include <linux/syscalls.h>
  35#include <linux/audit.h>
  36#include <linux/printk.h>
  37
  38#include <linux/uaccess.h>
  39#include <linux/uio.h>
  40#include <asm/tlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/mmu_context.h>
  43#include "internal.h"
  44
  45void *high_memory;
  46EXPORT_SYMBOL(high_memory);
  47struct page *mem_map;
  48unsigned long max_mapnr;
  49EXPORT_SYMBOL(max_mapnr);
  50unsigned long highest_memmap_pfn;
  51int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  52int heap_stack_gap = 0;
  53
  54atomic_long_t mmap_pages_allocated;
  55
  56EXPORT_SYMBOL(mem_map);
  57
  58/* list of mapped, potentially shareable regions */
  59static struct kmem_cache *vm_region_jar;
  60struct rb_root nommu_region_tree = RB_ROOT;
  61DECLARE_RWSEM(nommu_region_sem);
  62
  63const struct vm_operations_struct generic_file_vm_ops = {
  64};
  65
  66/*
  67 * Return the total memory allocated for this pointer, not
  68 * just what the caller asked for.
  69 *
  70 * Doesn't have to be accurate, i.e. may have races.
  71 */
  72unsigned int kobjsize(const void *objp)
  73{
  74	struct page *page;
  75
  76	/*
  77	 * If the object we have should not have ksize performed on it,
  78	 * return size of 0
  79	 */
  80	if (!objp || !virt_addr_valid(objp))
  81		return 0;
  82
  83	page = virt_to_head_page(objp);
  84
  85	/*
  86	 * If the allocator sets PageSlab, we know the pointer came from
  87	 * kmalloc().
  88	 */
  89	if (PageSlab(page))
  90		return ksize(objp);
  91
  92	/*
  93	 * If it's not a compound page, see if we have a matching VMA
  94	 * region. This test is intentionally done in reverse order,
  95	 * so if there's no VMA, we still fall through and hand back
  96	 * PAGE_SIZE for 0-order pages.
  97	 */
  98	if (!PageCompound(page)) {
  99		struct vm_area_struct *vma;
 100
 101		vma = find_vma(current->mm, (unsigned long)objp);
 102		if (vma)
 103			return vma->vm_end - vma->vm_start;
 104	}
 105
 106	/*
 107	 * The ksize() function is only guaranteed to work for pointers
 108	 * returned by kmalloc(). So handle arbitrary pointers here.
 109	 */
 110	return page_size(page);
 111}
 112
 113void vfree(const void *addr)
 114{
 115	kfree(addr);
 116}
 117EXPORT_SYMBOL(vfree);
 118
 119void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 120{
 121	/*
 122	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 123	 * returns only a logical address.
 124	 */
 125	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 126}
 127EXPORT_SYMBOL(__vmalloc_noprof);
 128
 129void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 130{
 131	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 132}
 133
 134void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 135		unsigned long start, unsigned long end, gfp_t gfp_mask,
 136		pgprot_t prot, unsigned long vm_flags, int node,
 137		const void *caller)
 138{
 139	return __vmalloc_noprof(size, gfp_mask);
 140}
 
 141
 142void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 143		int node, const void *caller)
 144{
 145	return __vmalloc_noprof(size, gfp_mask);
 146}
 147
 148static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 149{
 150	void *ret;
 151
 152	ret = __vmalloc(size, flags);
 153	if (ret) {
 154		struct vm_area_struct *vma;
 155
 156		mmap_write_lock(current->mm);
 157		vma = find_vma(current->mm, (unsigned long)ret);
 158		if (vma)
 159			vm_flags_set(vma, VM_USERMAP);
 160		mmap_write_unlock(current->mm);
 161	}
 162
 163	return ret;
 164}
 165
 166void *vmalloc_user_noprof(unsigned long size)
 167{
 168	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 169}
 170EXPORT_SYMBOL(vmalloc_user_noprof);
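/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a typical consumer is a driver that allocates a buffer with vmalloc_user()
 * and later hands it to userspace from its ->mmap() handler via
 * remap_vmalloc_range(), which relies on the VM_USERMAP flag set above.
 * The names "foo_buf" and "foo_mmap" below are hypothetical.
 *
 *	static void *foo_buf;
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		if (!foo_buf)
 *			foo_buf = vmalloc_user(vma->vm_end - vma->vm_start);
 *		if (!foo_buf)
 *			return -ENOMEM;
 *		return remap_vmalloc_range(vma, foo_buf, 0);
 *	}
 */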
 171
 172struct page *vmalloc_to_page(const void *addr)
 173{
 174	return virt_to_page(addr);
 175}
 176EXPORT_SYMBOL(vmalloc_to_page);
 177
 178unsigned long vmalloc_to_pfn(const void *addr)
 179{
 180	return page_to_pfn(virt_to_page(addr));
 181}
 182EXPORT_SYMBOL(vmalloc_to_pfn);
 183
 184long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 185{
 186	/* Don't allow overflow */
 187	if ((unsigned long) addr + count < count)
 188		count = -(unsigned long) addr;
 189
 190	return copy_to_iter(addr, count, iter);
 191}
 192
 193/*
 194 *	vmalloc  -  allocate virtually contiguous memory
 195 *
 196 *	@size:		allocation size
 197 *
 198 *	Allocate enough pages to cover @size from the page level
 199 *	allocator and map them into contiguous kernel virtual space.
 200 *
 201 *	For tight control over page level allocator and protection flags
 202 *	use __vmalloc() instead.
 203 */
 204void *vmalloc_noprof(unsigned long size)
 205{
 206	return __vmalloc_noprof(size, GFP_KERNEL);
 207}
 208EXPORT_SYMBOL(vmalloc_noprof);
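/*
 * Editorial note with an illustrative sketch (not part of the original
 * file): because the !MMU vmalloc() above is just kmalloc(), the memory
 * must be physically contiguous, so large allocations can fail here where
 * they would succeed on an MMU kernel.  Callers check the result as usual:
 *
 *	buf = vmalloc(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */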
 209
 210void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 211
 212/*
 213 *	vzalloc - allocate virtually contiguous memory with zero fill
 214 *
 215 *	@size:		allocation size
 216 *
 217 *	Allocate enough pages to cover @size from the page level
 218 *	allocator and map them into contiguous kernel virtual space.
 219 *	The memory allocated is set to zero.
 220 *
 221 *	For tight control over page level allocator and protection flags
 222 *	use __vmalloc() instead.
 223 */
 224void *vzalloc_noprof(unsigned long size)
 225{
 226	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 227}
 228EXPORT_SYMBOL(vzalloc_noprof);
 229
 230/**
 231 * vmalloc_node - allocate memory on a specific node
 232 * @size:	allocation size
 233 * @node:	numa node
 234 *
 235 * Allocate enough pages to cover @size from the page level
 236 * allocator and map them into contiguous kernel virtual space.
 237 *
 238 * For tight control over page level allocator and protection flags
 239 * use __vmalloc() instead.
 240 */
 241void *vmalloc_node_noprof(unsigned long size, int node)
 242{
 243	return vmalloc_noprof(size);
 244}
 245EXPORT_SYMBOL(vmalloc_node_noprof);
 246
 247/**
 248 * vzalloc_node - allocate memory on a specific node with zero fill
 249 * @size:	allocation size
 250 * @node:	numa node
 251 *
 252 * Allocate enough pages to cover @size from the page level
 253 * allocator and map them into contiguous kernel virtual space.
 254 * The memory allocated is set to zero.
 255 *
 256 * For tight control over page level allocator and protection flags
 257 * use __vmalloc() instead.
 258 */
 259void *vzalloc_node_noprof(unsigned long size, int node)
 260{
 261	return vzalloc_noprof(size);
 262}
 263EXPORT_SYMBOL(vzalloc_node_noprof);
 264
 265/**
 266 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 267 *	@size:		allocation size
 268 *
 269 *	Allocate enough 32bit PA addressable pages to cover @size from the
 270 *	page level allocator and map them into contiguous kernel virtual space.
 271 */
 272void *vmalloc_32_noprof(unsigned long size)
 273{
 274	return __vmalloc_noprof(size, GFP_KERNEL);
 275}
 276EXPORT_SYMBOL(vmalloc_32_noprof);
 277
 278/**
 279 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 280 *	@size:		allocation size
 281 *
 282 * The resulting memory area is 32bit addressable and zeroed so it can be
 283 * mapped to userspace without leaking data.
 284 *
 285 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 286 * remap_vmalloc_range() are permissible.
 287 */
 288void *vmalloc_32_user_noprof(unsigned long size)
 289{
 290	/*
 291	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 292	 * but for now this can simply use vmalloc_user() directly.
 293	 */
 294	return vmalloc_user_noprof(size);
 295}
 296EXPORT_SYMBOL(vmalloc_32_user_noprof);
 297
 298void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 299{
 300	BUG();
 301	return NULL;
 302}
 303EXPORT_SYMBOL(vmap);
 304
 305void vunmap(const void *addr)
 306{
 307	BUG();
 308}
 309EXPORT_SYMBOL(vunmap);
 310
 311void *vm_map_ram(struct page **pages, unsigned int count, int node)
 312{
 313	BUG();
 314	return NULL;
 315}
 316EXPORT_SYMBOL(vm_map_ram);
 317
 318void vm_unmap_ram(const void *mem, unsigned int count)
 319{
 320	BUG();
 321}
 322EXPORT_SYMBOL(vm_unmap_ram);
 323
 324void vm_unmap_aliases(void)
 325{
 326}
 327EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 328
 329void free_vm_area(struct vm_struct *area)
 330{
 331	BUG();
 332}
 333EXPORT_SYMBOL_GPL(free_vm_area);
 334
 335int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 336		   struct page *page)
 337{
 338	return -EINVAL;
 339}
 340EXPORT_SYMBOL(vm_insert_page);
 341
 342int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 343			struct page **pages, unsigned long *num)
 344{
 345	return -EINVAL;
 346}
 347EXPORT_SYMBOL(vm_insert_pages);
 348
 349int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 350			unsigned long num)
 351{
 352	return -EINVAL;
 353}
 354EXPORT_SYMBOL(vm_map_pages);
 355
 356int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 357				unsigned long num)
 358{
 359	return -EINVAL;
 360}
 361EXPORT_SYMBOL(vm_map_pages_zero);
 362
 363/*
 364 *  sys_brk() for the most part doesn't need the global kernel
 365 *  lock, except when an application is doing something nasty
 366 *  like trying to un-brk an area that has already been mapped
 367 *  to a regular file.  In this case, the unmapping will need
 368 *  to invoke file system routines that need the global lock.
 369 */
 370SYSCALL_DEFINE1(brk, unsigned long, brk)
 371{
 372	struct mm_struct *mm = current->mm;
 373
 374	if (brk < mm->start_brk || brk > mm->context.end_brk)
 375		return mm->brk;
 376
 377	if (mm->brk == brk)
 378		return mm->brk;
 379
 380	/*
 381	 * Always allow shrinking brk
 382	 */
 383	if (brk <= mm->brk) {
 384		mm->brk = brk;
 385		return brk;
 386	}
 387
 388	/*
 389	 * Ok, looks good - let it rip.
 390	 */
 391	flush_icache_user_range(mm->brk, brk);
 392	return mm->brk = brk;
 393}
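/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * under !MMU the brk area cannot be grown beyond the region reserved at
 * execve() time (mm->context.end_brk), so a userspace sbrk()-style
 * allocator can only expect requests within that reservation to succeed;
 * anything larger is refused and the old break is returned, per the checks
 * above.  For example, assuming a hypothetical 32kB reservation, two
 * sbrk(16384) calls succeed while a third returns failure.
 */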
 394
 395/*
 396 * initialise the percpu counter for VM and region record slabs
 397 */
 398void __init mmap_init(void)
 399{
 400	int ret;
 401
 402	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 403	VM_BUG_ON(ret);
 404	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 405}
 406
 407/*
 408 * validate the region tree
 409 * - the caller must hold the region lock
 410 */
 411#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 412static noinline void validate_nommu_regions(void)
 413{
 414	struct vm_region *region, *last;
 415	struct rb_node *p, *lastp;
 416
 417	lastp = rb_first(&nommu_region_tree);
 418	if (!lastp)
 419		return;
 420
 421	last = rb_entry(lastp, struct vm_region, vm_rb);
 422	BUG_ON(last->vm_end <= last->vm_start);
 423	BUG_ON(last->vm_top < last->vm_end);
 424
 425	while ((p = rb_next(lastp))) {
 426		region = rb_entry(p, struct vm_region, vm_rb);
 427		last = rb_entry(lastp, struct vm_region, vm_rb);
 428
 429		BUG_ON(region->vm_end <= region->vm_start);
 430		BUG_ON(region->vm_top < region->vm_end);
 431		BUG_ON(region->vm_start < last->vm_top);
 432
 433		lastp = p;
 434	}
 435}
 436#else
 437static void validate_nommu_regions(void)
 438{
 439}
 440#endif
 441
 442/*
 443 * add a region into the global tree
 444 */
 445static void add_nommu_region(struct vm_region *region)
 446{
 447	struct vm_region *pregion;
 448	struct rb_node **p, *parent;
 449
 450	validate_nommu_regions();
 451
 452	parent = NULL;
 453	p = &nommu_region_tree.rb_node;
 454	while (*p) {
 455		parent = *p;
 456		pregion = rb_entry(parent, struct vm_region, vm_rb);
 457		if (region->vm_start < pregion->vm_start)
 458			p = &(*p)->rb_left;
 459		else if (region->vm_start > pregion->vm_start)
 460			p = &(*p)->rb_right;
 461		else if (pregion == region)
 462			return;
 463		else
 464			BUG();
 465	}
 466
 467	rb_link_node(&region->vm_rb, parent, p);
 468	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 469
 470	validate_nommu_regions();
 471}
 472
 473/*
 474 * delete a region from the global tree
 475 */
 476static void delete_nommu_region(struct vm_region *region)
 477{
 478	BUG_ON(!nommu_region_tree.rb_node);
 479
 480	validate_nommu_regions();
 481	rb_erase(&region->vm_rb, &nommu_region_tree);
 482	validate_nommu_regions();
 483}
 484
 485/*
 486 * free a contiguous series of pages
 487 */
 488static void free_page_series(unsigned long from, unsigned long to)
 489{
 490	for (; from < to; from += PAGE_SIZE) {
 491		struct page *page = virt_to_page((void *)from);
 492
 493		atomic_long_dec(&mmap_pages_allocated);
 494		put_page(page);
 495	}
 496}
 497
 498/*
 499 * release a reference to a region
 500 * - the caller must hold the region semaphore for writing, which this releases
 501 * - the region may not have been added to the tree yet, in which case vm_top
 502 *   will equal vm_start
 503 */
 504static void __put_nommu_region(struct vm_region *region)
 505	__releases(nommu_region_sem)
 506{
 507	BUG_ON(!nommu_region_tree.rb_node);
 508
 509	if (--region->vm_usage == 0) {
 510		if (region->vm_top > region->vm_start)
 511			delete_nommu_region(region);
 512		up_write(&nommu_region_sem);
 513
 514		if (region->vm_file)
 515			fput(region->vm_file);
 516
 517		/* IO memory and memory shared directly out of the pagecache
 518		 * from ramfs/tmpfs mustn't be released here */
 519		if (region->vm_flags & VM_MAPPED_COPY)
 520			free_page_series(region->vm_start, region->vm_top);
 521		kmem_cache_free(vm_region_jar, region);
 522	} else {
 523		up_write(&nommu_region_sem);
 524	}
 525}
 526
 527/*
 528 * release a reference to a region
 529 */
 530static void put_nommu_region(struct vm_region *region)
 531{
 532	down_write(&nommu_region_sem);
 533	__put_nommu_region(region);
 534}
 535
 536static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 537{
 538	vma->vm_mm = mm;
 539
 540	/* add the VMA to the mapping */
 541	if (vma->vm_file) {
 542		struct address_space *mapping = vma->vm_file->f_mapping;
 543
 544		i_mmap_lock_write(mapping);
 545		flush_dcache_mmap_lock(mapping);
 546		vma_interval_tree_insert(vma, &mapping->i_mmap);
 547		flush_dcache_mmap_unlock(mapping);
 548		i_mmap_unlock_write(mapping);
 549	}
 550}
 551
 552static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 553{
 554	vma->vm_mm->map_count--;
 555	/* remove the VMA from the mapping */
 556	if (vma->vm_file) {
 557		struct address_space *mapping;
 558		mapping = vma->vm_file->f_mapping;
 559
 560		i_mmap_lock_write(mapping);
 561		flush_dcache_mmap_lock(mapping);
 562		vma_interval_tree_remove(vma, &mapping->i_mmap);
 563		flush_dcache_mmap_unlock(mapping);
 564		i_mmap_unlock_write(mapping);
 565	}
 566}
 567
 568/*
 569 * delete a VMA from its owning mm_struct and address space
 570 */
 571static int delete_vma_from_mm(struct vm_area_struct *vma)
 572{
 573	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 574
 575	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 576	if (vma_iter_prealloc(&vmi, NULL)) {
 577		pr_warn("Allocation of vma tree for process %d failed\n",
 578		       current->pid);
 579		return -ENOMEM;
 580	}
 581	cleanup_vma_from_mm(vma);
 582
 583	/* remove from the MM's tree and list */
 584	vma_iter_clear(&vmi);
 585	return 0;
 586}
 
 587/*
 588 * destroy a VMA record
 589 */
 590static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 591{
 592	vma_close(vma);
 593	if (vma->vm_file)
 594		fput(vma->vm_file);
 595	put_nommu_region(vma->vm_region);
 596	vm_area_free(vma);
 597}
 598
 599struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 600					     unsigned long start_addr,
 601					     unsigned long end_addr)
 602{
 603	unsigned long index = start_addr;
 604
 605	mmap_assert_locked(mm);
 606	return mt_find(&mm->mm_mt, &index, end_addr - 1);
 607}
 608EXPORT_SYMBOL(find_vma_intersection);
 609
 610/*
 611 * look up the first VMA in which addr resides, NULL if none
 612 * - should be called with mm->mmap_lock at least held readlocked
 613 */
 614struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 615{
 616	VMA_ITERATOR(vmi, mm, addr);
 617
 618	return vma_iter_load(&vmi);
 619}
 620EXPORT_SYMBOL(find_vma);
 621
 622/*
 623 * At least xtensa ends up having protection faults even with no
 624 * MMU. No stack expansion, at least.
 625 */
 626struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
 627			unsigned long addr, struct pt_regs *regs)
 628{
 629	struct vm_area_struct *vma;
 630
 631	mmap_read_lock(mm);
 632	vma = vma_lookup(mm, addr);
 633	if (!vma)
 634		mmap_read_unlock(mm);
 635	return vma;
 636}
 637
 638/*
 639 * expand a stack to a given address
 640 * - not supported under NOMMU conditions
 641 */
 642int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
 643{
 644	return -ENOMEM;
 645}
 646
 647struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 648{
 649	mmap_read_unlock(mm);
 650	return NULL;
 651}
 652
 653/*
 654 * look up the first VMA that exactly matches addr
 655 * - should be called with mm->mmap_lock at least held readlocked
 656 */
 657static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 658					     unsigned long addr,
 659					     unsigned long len)
 660{
 661	struct vm_area_struct *vma;
 662	unsigned long end = addr + len;
 663	VMA_ITERATOR(vmi, mm, addr);
 664
 665	vma = vma_iter_load(&vmi);
 666	if (!vma)
 667		return NULL;
 668	if (vma->vm_start != addr)
 669		return NULL;
 670	if (vma->vm_end != end)
 671		return NULL;
 672
 673	return vma;
 674}
 675
 676/*
 677 * determine whether a mapping should be permitted and, if so, what sort of
 678 * mapping we're capable of supporting
 679 */
 680static int validate_mmap_request(struct file *file,
 681				 unsigned long addr,
 682				 unsigned long len,
 683				 unsigned long prot,
 684				 unsigned long flags,
 685				 unsigned long pgoff,
 686				 unsigned long *_capabilities)
 687{
 688	unsigned long capabilities, rlen;
 689	int ret;
 690
 691	/* do the simple checks first */
 692	if (flags & MAP_FIXED)
 693		return -EINVAL;
 694
 695	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 696	    (flags & MAP_TYPE) != MAP_SHARED)
 697		return -EINVAL;
 698
 699	if (!len)
 700		return -EINVAL;
 701
 702	/* Careful about overflows.. */
 703	rlen = PAGE_ALIGN(len);
 704	if (!rlen || rlen > TASK_SIZE)
 705		return -ENOMEM;
 706
 707	/* offset overflow? */
 708	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 709		return -EOVERFLOW;
 710
 711	if (file) {
 712		/* files must support mmap */
 713		if (!file->f_op->mmap)
 714			return -ENODEV;
 715
 716		/* work out if what we've got could possibly be shared
 717		 * - we support chardevs that provide their own "memory"
 718		 * - we support files/blockdevs that are memory backed
 719		 */
 720		if (file->f_op->mmap_capabilities) {
 721			capabilities = file->f_op->mmap_capabilities(file);
 722		} else {
 723			/* no explicit capabilities set, so assume some
 724			 * defaults */
 725			switch (file_inode(file)->i_mode & S_IFMT) {
 726			case S_IFREG:
 727			case S_IFBLK:
 728				capabilities = NOMMU_MAP_COPY;
 729				break;
 730
 731			case S_IFCHR:
 732				capabilities =
 733					NOMMU_MAP_DIRECT |
 734					NOMMU_MAP_READ |
 735					NOMMU_MAP_WRITE;
 736				break;
 737
 738			default:
 739				return -EINVAL;
 740			}
 741		}
 742
 743		/* eliminate any capabilities that we can't support on this
 744		 * device */
 745		if (!file->f_op->get_unmapped_area)
 746			capabilities &= ~NOMMU_MAP_DIRECT;
 747		if (!(file->f_mode & FMODE_CAN_READ))
 748			capabilities &= ~NOMMU_MAP_COPY;
 749
 750		/* The file shall have been opened with read permission. */
 751		if (!(file->f_mode & FMODE_READ))
 752			return -EACCES;
 753
 754		if (flags & MAP_SHARED) {
 755			/* do checks for writing, appending and locking */
 756			if ((prot & PROT_WRITE) &&
 757			    !(file->f_mode & FMODE_WRITE))
 758				return -EACCES;
 759
 760			if (IS_APPEND(file_inode(file)) &&
 761			    (file->f_mode & FMODE_WRITE))
 762				return -EACCES;
 763
 764			if (!(capabilities & NOMMU_MAP_DIRECT))
 765				return -ENODEV;
 766
 767			/* we mustn't privatise shared mappings */
 768			capabilities &= ~NOMMU_MAP_COPY;
 769		} else {
 770			/* we're going to read the file into private memory we
 771			 * allocate */
 772			if (!(capabilities & NOMMU_MAP_COPY))
 773				return -ENODEV;
 774
 775			/* we don't permit a private writable mapping to be
 776			 * shared with the backing device */
 777			if (prot & PROT_WRITE)
 778				capabilities &= ~NOMMU_MAP_DIRECT;
 779		}
 780
 781		if (capabilities & NOMMU_MAP_DIRECT) {
 782			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 783			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 784			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 785			    ) {
 786				capabilities &= ~NOMMU_MAP_DIRECT;
 787				if (flags & MAP_SHARED) {
 788					pr_warn("MAP_SHARED not completely supported on !MMU\n");
 789					return -EINVAL;
 790				}
 791			}
 792		}
 793
 794		/* handle executable mappings and implied executable
 795		 * mappings */
 796		if (path_noexec(&file->f_path)) {
 797			if (prot & PROT_EXEC)
 798				return -EPERM;
 799		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 800			/* handle implication of PROT_EXEC by PROT_READ */
 801			if (current->personality & READ_IMPLIES_EXEC) {
 802				if (capabilities & NOMMU_MAP_EXEC)
 803					prot |= PROT_EXEC;
 804			}
 805		} else if ((prot & PROT_READ) &&
 806			 (prot & PROT_EXEC) &&
 807			 !(capabilities & NOMMU_MAP_EXEC)
 808			 ) {
 809			/* backing file is not executable, try to copy */
 810			capabilities &= ~NOMMU_MAP_DIRECT;
 811		}
 812	} else {
 813		/* anonymous mappings are always memory backed and can be
 814		 * privately mapped
 815		 */
 816		capabilities = NOMMU_MAP_COPY;
 817
 818		/* handle PROT_EXEC implication by PROT_READ */
 819		if ((prot & PROT_READ) &&
 820		    (current->personality & READ_IMPLIES_EXEC))
 821			prot |= PROT_EXEC;
 822	}
 823
 824	/* allow the security API to have its say */
 825	ret = security_mmap_addr(addr);
 826	if (ret < 0)
 827		return ret;
 828
 829	/* looks okay */
 830	*_capabilities = capabilities;
 831	return 0;
 832}
 833
 834/*
 835 * we've determined that we can make the mapping, now translate what we
 836 * now know into VMA flags
 837 */
 838static unsigned long determine_vm_flags(struct file *file,
 839					unsigned long prot,
 840					unsigned long flags,
 841					unsigned long capabilities)
 842{
 843	unsigned long vm_flags;
 844
 845	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
 
 846
 847	if (!file) {
 848		/*
 849		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
 850		 * there is no fork().
 851		 */
 852		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 853	} else if (flags & MAP_PRIVATE) {
 854		/* MAP_PRIVATE file mapping */
 855		if (capabilities & NOMMU_MAP_DIRECT)
 856			vm_flags |= (capabilities & NOMMU_VMFLAGS);
 857		else
 858			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 859
 860		if (!(prot & PROT_WRITE) && !current->ptrace)
 861			/*
 862			 * R/O private file mapping which cannot be used to
 863			 * modify memory, especially also not via active ptrace
 864			 * (e.g., set breakpoints) or later by upgrading
 865			 * permissions (no mprotect()). We can try overlaying
 866			 * the file mapping, which will work e.g., on chardevs,
 867			 * ramfs/tmpfs/shmfs and romfs/cramfs.
 868			 */
 869			vm_flags |= VM_MAYOVERLAY;
 870	} else {
 871		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
 872		vm_flags |= VM_SHARED | VM_MAYSHARE |
 873			    (capabilities & NOMMU_VMFLAGS);
 874	}
 875
 876	return vm_flags;
 877}
 878
 879/*
 880 * set up a shared mapping on a file (the driver or filesystem provides and
 881 * pins the storage)
 882 */
 883static int do_mmap_shared_file(struct vm_area_struct *vma)
 884{
 885	int ret;
 886
 887	ret = mmap_file(vma->vm_file, vma);
 888	if (ret == 0) {
 889		vma->vm_region->vm_top = vma->vm_region->vm_end;
 890		return 0;
 891	}
 892	if (ret != -ENOSYS)
 893		return ret;
 894
 895	/* getting -ENOSYS indicates that direct mmap isn't possible (as
 896	 * opposed to tried but failed) so we can only give a suitable error as
 897	 * it's not possible to make a private copy if MAP_SHARED was given */
 898	return -ENODEV;
 899}
 900
 901/*
 902 * set up a private mapping or an anonymous shared mapping
 903 */
 904static int do_mmap_private(struct vm_area_struct *vma,
 905			   struct vm_region *region,
 906			   unsigned long len,
 907			   unsigned long capabilities)
 908{
 909	unsigned long total, point;
 910	void *base;
 911	int ret, order;
 912
 913	/*
 914	 * Invoke the file's mapping function so that it can keep track of
 915	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
 916	 * it may attempt to share, which will make is_nommu_shared_mapping()
 917	 * happy.
 918	 */
 919	if (capabilities & NOMMU_MAP_DIRECT) {
 920		ret = mmap_file(vma->vm_file, vma);
 921		/* shouldn't return success if we're not sharing */
 922		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
 923			ret = -ENOSYS;
 924		if (ret == 0) {
 925			vma->vm_region->vm_top = vma->vm_region->vm_end;
 926			return 0;
 927		}
 928		if (ret != -ENOSYS)
 929			return ret;
 930
 931		/* getting an ENOSYS error indicates that direct mmap isn't
 932		 * possible (as opposed to tried but failed) so we'll try to
 933		 * make a private copy of the data and map that instead */
 934	}
 935
 936
 937	/* allocate some memory to hold the mapping
 938	 * - note that this may not return a page-aligned address if the object
 939	 *   we're allocating is smaller than a page
 940	 */
 941	order = get_order(len);
 942	total = 1 << order;
 943	point = len >> PAGE_SHIFT;
 944
 945	/* we don't want to allocate a power-of-2 sized page set */
 946	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
 947		total = point;
 948
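	/*
	 * Illustrative worked example (editorial note): for len = 5 pages,
	 * get_order() gives order 3, so total = 8 pages and point = 5.
	 * With the default sysctl_nr_trim_pages of 1, total - point = 3 >= 1,
	 * so the request is trimmed back to exactly 5 pages rather than
	 * rounding up to a power-of-2 sized set.
	 */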
 949	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 950	if (!base)
 951		goto enomem;
 952
 953	atomic_long_add(total, &mmap_pages_allocated);
 954
 955	vm_flags_set(vma, VM_MAPPED_COPY);
 956	region->vm_flags = vma->vm_flags;
 957	region->vm_start = (unsigned long) base;
 958	region->vm_end   = region->vm_start + len;
 959	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 960
 961	vma->vm_start = region->vm_start;
 962	vma->vm_end   = region->vm_start + len;
 963
 964	if (vma->vm_file) {
 965		/* read the contents of a file into the copy */
 966		loff_t fpos;
 967
 968		fpos = vma->vm_pgoff;
 969		fpos <<= PAGE_SHIFT;
 970
 971		ret = kernel_read(vma->vm_file, base, len, &fpos);
 972		if (ret < 0)
 973			goto error_free;
 974
 975		/* clear the last little bit */
 976		if (ret < len)
 977			memset(base + ret, 0, len - ret);
 978
 979	} else {
 980		vma_set_anonymous(vma);
 981	}
 982
 983	return 0;
 984
 985error_free:
 986	free_page_series(region->vm_start, region->vm_top);
 987	region->vm_start = vma->vm_start = 0;
 988	region->vm_end   = vma->vm_end = 0;
 989	region->vm_top   = 0;
 990	return ret;
 991
 992enomem:
 993	pr_err("Allocation of length %lu from process %d (%s) failed\n",
 994	       len, current->pid, current->comm);
 995	show_mem();
 996	return -ENOMEM;
 997}
 998
 999/*
1000 * handle mapping creation for uClinux
1001 */
1002unsigned long do_mmap(struct file *file,
1003			unsigned long addr,
1004			unsigned long len,
1005			unsigned long prot,
1006			unsigned long flags,
1007			vm_flags_t vm_flags,
1008			unsigned long pgoff,
1009			unsigned long *populate,
1010			struct list_head *uf)
1011{
1012	struct vm_area_struct *vma;
1013	struct vm_region *region;
1014	struct rb_node *rb;
1015	unsigned long capabilities, result;
1016	int ret;
1017	VMA_ITERATOR(vmi, current->mm, 0);
1018
1019	*populate = 0;
1020
1021	/* decide whether we should attempt the mapping, and if so what sort of
1022	 * mapping */
1023	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1024				    &capabilities);
1025	if (ret < 0)
1026		return ret;
1027
1028	/* we ignore the address hint */
1029	addr = 0;
1030	len = PAGE_ALIGN(len);
1031
1032	/* we've determined that we can make the mapping, now translate what we
1033	 * now know into VMA flags */
1034	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1035
1036
1037	/* we're going to need to record the mapping */
1038	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1039	if (!region)
1040		goto error_getting_region;
1041
1042	vma = vm_area_alloc(current->mm);
1043	if (!vma)
1044		goto error_getting_vma;
1045
1046	region->vm_usage = 1;
1047	region->vm_flags = vm_flags;
1048	region->vm_pgoff = pgoff;
1049
1050	vm_flags_init(vma, vm_flags);
1051	vma->vm_pgoff = pgoff;
1052
1053	if (file) {
1054		region->vm_file = get_file(file);
1055		vma->vm_file = get_file(file);
1056	}
1057
1058	down_write(&nommu_region_sem);
1059
1060	/* if we want to share, we need to check for regions created by other
1061	 * mmap() calls that overlap with our proposed mapping
1062	 * - we can only share with a superset match on most regular files
1063	 * - shared mappings on character devices and memory backed files are
1064 *   permitted to overlap inexactly as far as we are concerned, for in
1065 *   these cases sharing is handled in the driver or filesystem rather
1066	 *   than here
1067	 */
1068	if (is_nommu_shared_mapping(vm_flags)) {
1069		struct vm_region *pregion;
1070		unsigned long pglen, rpglen, pgend, rpgend, start;
1071
1072		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1073		pgend = pgoff + pglen;
1074
1075		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1076			pregion = rb_entry(rb, struct vm_region, vm_rb);
1077
1078			if (!is_nommu_shared_mapping(pregion->vm_flags))
1079				continue;
1080
1081			/* search for overlapping mappings on the same file */
1082			if (file_inode(pregion->vm_file) !=
1083			    file_inode(file))
1084				continue;
1085
1086			if (pregion->vm_pgoff >= pgend)
1087				continue;
1088
1089			rpglen = pregion->vm_end - pregion->vm_start;
1090			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1091			rpgend = pregion->vm_pgoff + rpglen;
1092			if (pgoff >= rpgend)
1093				continue;
1094
1095			/* handle inexactly overlapping matches between
1096			 * mappings */
1097			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1098			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1099				/* new mapping is not a subset of the region */
1100				if (!(capabilities & NOMMU_MAP_DIRECT))
1101					goto sharing_violation;
1102				continue;
1103			}
1104
1105			/* we've found a region we can share */
1106			pregion->vm_usage++;
1107			vma->vm_region = pregion;
1108			start = pregion->vm_start;
1109			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1110			vma->vm_start = start;
1111			vma->vm_end = start + len;
1112
1113			if (pregion->vm_flags & VM_MAPPED_COPY)
1114				vm_flags_set(vma, VM_MAPPED_COPY);
1115			else {
1116				ret = do_mmap_shared_file(vma);
1117				if (ret < 0) {
1118					vma->vm_region = NULL;
1119					vma->vm_start = 0;
1120					vma->vm_end = 0;
1121					pregion->vm_usage--;
1122					pregion = NULL;
1123					goto error_just_free;
1124				}
1125			}
1126			fput(region->vm_file);
1127			kmem_cache_free(vm_region_jar, region);
1128			region = pregion;
1129			result = start;
1130			goto share;
1131		}
1132
1133		/* obtain the address at which to make a shared mapping
1134		 * - this is the hook for quasi-memory character devices to
1135		 *   tell us the location of a shared mapping
1136		 */
1137		if (capabilities & NOMMU_MAP_DIRECT) {
1138			addr = file->f_op->get_unmapped_area(file, addr, len,
1139							     pgoff, flags);
1140			if (IS_ERR_VALUE(addr)) {
1141				ret = addr;
1142				if (ret != -ENOSYS)
1143					goto error_just_free;
1144
1145				/* the driver refused to tell us where to site
1146				 * the mapping so we'll have to attempt to copy
1147				 * it */
1148				ret = -ENODEV;
1149				if (!(capabilities & NOMMU_MAP_COPY))
1150					goto error_just_free;
1151
1152				capabilities &= ~NOMMU_MAP_DIRECT;
1153			} else {
1154				vma->vm_start = region->vm_start = addr;
1155				vma->vm_end = region->vm_end = addr + len;
1156			}
1157		}
1158	}
1159
1160	vma->vm_region = region;
1161
1162	/* set up the mapping
1163	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1164	 */
1165	if (file && vma->vm_flags & VM_SHARED)
1166		ret = do_mmap_shared_file(vma);
1167	else
1168		ret = do_mmap_private(vma, region, len, capabilities);
1169	if (ret < 0)
1170		goto error_just_free;
1171	add_nommu_region(region);
1172
1173	/* clear anonymous mappings that don't ask for uninitialized data */
1174	if (!vma->vm_file &&
1175	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1176	     !(flags & MAP_UNINITIALIZED)))
1177		memset((void *)region->vm_start, 0,
1178		       region->vm_end - region->vm_start);
1179
1180	/* okay... we have a mapping; now we have to register it */
1181	result = vma->vm_start;
1182
1183	current->mm->total_vm += len >> PAGE_SHIFT;
1184
1185share:
1186	BUG_ON(!vma->vm_region);
1187	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1188	if (vma_iter_prealloc(&vmi, vma))
1189		goto error_just_free;
1190
1191	setup_vma_to_mm(vma, current->mm);
1192	current->mm->map_count++;
1193	/* add the VMA to the tree */
1194	vma_iter_store(&vmi, vma);
1195
1196	/* we flush the region from the icache only when the first executable
1197	 * mapping of it is made  */
1198	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1199		flush_icache_user_range(region->vm_start, region->vm_end);
1200		region->vm_icache_flushed = true;
1201	}
1202
1203	up_write(&nommu_region_sem);
1204
1205	return result;
1206
1207error_just_free:
1208	up_write(&nommu_region_sem);
1209error:
1210	vma_iter_free(&vmi);
1211	if (region->vm_file)
1212		fput(region->vm_file);
1213	kmem_cache_free(vm_region_jar, region);
1214	if (vma->vm_file)
1215		fput(vma->vm_file);
1216	vm_area_free(vma);
1217	return ret;
1218
1219sharing_violation:
1220	up_write(&nommu_region_sem);
1221	pr_warn("Attempt to share mismatched mappings\n");
1222	ret = -EINVAL;
1223	goto error;
1224
1225error_getting_vma:
1226	kmem_cache_free(vm_region_jar, region);
1227	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1228			len, current->pid);
1229	show_mem();
1230	return -ENOMEM;
1231
1232error_getting_region:
1233	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1234			len, current->pid);
1235	show_mem();
1236	return -ENOMEM;
1237}
1238
1239unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1240			      unsigned long prot, unsigned long flags,
1241			      unsigned long fd, unsigned long pgoff)
1242{
1243	struct file *file = NULL;
1244	unsigned long retval = -EBADF;
1245
1246	audit_mmap_fd(fd, flags);
1247	if (!(flags & MAP_ANONYMOUS)) {
1248		file = fget(fd);
1249		if (!file)
1250			goto out;
1251	}
1252
1253	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1254
1255	if (file)
1256		fput(file);
1257out:
1258	return retval;
1259}
1260
1261SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1262		unsigned long, prot, unsigned long, flags,
1263		unsigned long, fd, unsigned long, pgoff)
1264{
1265	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1266}
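/*
 * Editorial sketch of the userspace view (not part of the original file):
 * on !MMU kernels the address hint passed to mmap() is ignored and
 * MAP_FIXED is rejected outright, so portable callers pass NULL and use
 * whatever address the kernel returns, e.g.:
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (p == MAP_FAILED)
 *		return -1;
 */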
1267
1268#ifdef __ARCH_WANT_SYS_OLD_MMAP
1269struct mmap_arg_struct {
1270	unsigned long addr;
1271	unsigned long len;
1272	unsigned long prot;
1273	unsigned long flags;
1274	unsigned long fd;
1275	unsigned long offset;
1276};
1277
1278SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1279{
1280	struct mmap_arg_struct a;
1281
1282	if (copy_from_user(&a, arg, sizeof(a)))
1283		return -EFAULT;
1284	if (offset_in_page(a.offset))
1285		return -EINVAL;
1286
1287	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1288			       a.offset >> PAGE_SHIFT);
1289}
1290#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1291
1292/*
1293 * split a vma into two pieces at address 'addr'; a new vma is allocated either
1294 * for the first part or the tail.
1295 */
1296static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1297		     unsigned long addr, int new_below)
1298{
1299	struct vm_area_struct *new;
1300	struct vm_region *region;
1301	unsigned long npages;
1302	struct mm_struct *mm;
1303
1304	/* we're only permitted to split anonymous regions (these should have
1305	 * only a single usage on the region) */
1306	if (vma->vm_file)
1307		return -ENOMEM;
1308
1309	mm = vma->vm_mm;
1310	if (mm->map_count >= sysctl_max_map_count)
1311		return -ENOMEM;
1312
1313	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1314	if (!region)
1315		return -ENOMEM;
1316
1317	new = vm_area_dup(vma);
1318	if (!new)
1319		goto err_vma_dup;
1320
1321	/* most fields are the same, copy all, and then fixup */
1322	*region = *vma->vm_region;
1323	new->vm_region = region;
1324
1325	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1326
1327	if (new_below) {
1328		region->vm_top = region->vm_end = new->vm_end = addr;
1329	} else {
1330		region->vm_start = new->vm_start = addr;
1331		region->vm_pgoff = new->vm_pgoff += npages;
1332	}
1333
1334	vma_iter_config(vmi, new->vm_start, new->vm_end);
1335	if (vma_iter_prealloc(vmi, vma)) {
1336		pr_warn("Allocation of vma tree for process %d failed\n",
1337			current->pid);
1338		goto err_vmi_preallocate;
1339	}
1340
1341	if (new->vm_ops && new->vm_ops->open)
1342		new->vm_ops->open(new);
1343
1344	down_write(&nommu_region_sem);
1345	delete_nommu_region(vma->vm_region);
1346	if (new_below) {
1347		vma->vm_region->vm_start = vma->vm_start = addr;
1348		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1349	} else {
1350		vma->vm_region->vm_end = vma->vm_end = addr;
1351		vma->vm_region->vm_top = addr;
1352	}
1353	add_nommu_region(vma->vm_region);
1354	add_nommu_region(new->vm_region);
1355	up_write(&nommu_region_sem);
1356
1357	setup_vma_to_mm(vma, mm);
1358	setup_vma_to_mm(new, mm);
1359	vma_iter_store(vmi, new);
1360	mm->map_count++;
1361	return 0;
1362
1363err_vmi_preallocate:
1364	vm_area_free(new);
1365err_vma_dup:
1366	kmem_cache_free(vm_region_jar, region);
1367	return -ENOMEM;
1368}
1369
1370/*
1371 * shrink a VMA by removing the specified chunk from either the beginning or
1372 * the end
1373 */
1374static int vmi_shrink_vma(struct vma_iterator *vmi,
1375		      struct vm_area_struct *vma,
1376		      unsigned long from, unsigned long to)
1377{
1378	struct vm_region *region;
1379
1380	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1381	 * and list */
1382	if (from > vma->vm_start) {
1383		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1384			return -ENOMEM;
1385		vma->vm_end = from;
1386	} else {
1387		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1388			return -ENOMEM;
1389		vma->vm_start = to;
1390	}
1391
1392	/* cut the backing region down to size */
1393	region = vma->vm_region;
1394	BUG_ON(region->vm_usage != 1);
1395
1396	down_write(&nommu_region_sem);
1397	delete_nommu_region(region);
1398	if (from > region->vm_start) {
1399		to = region->vm_top;
1400		region->vm_top = region->vm_end = from;
1401	} else {
1402		region->vm_start = to;
1403	}
1404	add_nommu_region(region);
1405	up_write(&nommu_region_sem);
1406
1407	free_page_series(from, to);
1408	return 0;
1409}
1410
1411/*
1412 * release a mapping
1413 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1414 *   VMA, though it need not cover the whole VMA
1415 */
1416int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1417{
1418	VMA_ITERATOR(vmi, mm, start);
1419	struct vm_area_struct *vma;
1420	unsigned long end;
1421	int ret = 0;
1422
1423	len = PAGE_ALIGN(len);
1424	if (len == 0)
1425		return -EINVAL;
1426
1427	end = start + len;
1428
1429	/* find the first potentially overlapping VMA */
1430	vma = vma_find(&vmi, end);
1431	if (!vma) {
1432		static int limit;
1433		if (limit < 5) {
1434			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1435					current->pid, current->comm,
1436					start, start + len - 1);
1437			limit++;
1438		}
1439		return -EINVAL;
1440	}
1441
1442	/* we're allowed to split an anonymous VMA but not a file-backed one */
1443	if (vma->vm_file) {
1444		do {
1445			if (start > vma->vm_start)
1446				return -EINVAL;
1447			if (end == vma->vm_end)
1448				goto erase_whole_vma;
1449			vma = vma_find(&vmi, end);
1450		} while (vma);
1451		return -EINVAL;
1452	} else {
1453		/* the chunk must be a subset of the VMA found */
1454		if (start == vma->vm_start && end == vma->vm_end)
1455			goto erase_whole_vma;
1456		if (start < vma->vm_start || end > vma->vm_end)
1457			return -EINVAL;
1458		if (offset_in_page(start))
1459			return -EINVAL;
1460		if (end != vma->vm_end && offset_in_page(end))
1461			return -EINVAL;
1462		if (start != vma->vm_start && end != vma->vm_end) {
1463			ret = split_vma(&vmi, vma, start, 1);
1464			if (ret < 0)
1465				return ret;
1466		}
1467		return vmi_shrink_vma(&vmi, vma, start, end);
1468	}
1469
1470erase_whole_vma:
1471	if (delete_vma_from_mm(vma))
1472		ret = -ENOMEM;
1473	else
1474		delete_vma(mm, vma);
1475	return ret;
1476}
 
1477
1478int vm_munmap(unsigned long addr, size_t len)
1479{
1480	struct mm_struct *mm = current->mm;
1481	int ret;
1482
1483	mmap_write_lock(mm);
1484	ret = do_munmap(mm, addr, len, NULL);
1485	mmap_write_unlock(mm);
1486	return ret;
1487}
1488EXPORT_SYMBOL(vm_munmap);
1489
1490SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1491{
1492	return vm_munmap(addr, len);
1493}
1494
1495/*
1496 * release all the mappings made in a process's VM space
1497 */
1498void exit_mmap(struct mm_struct *mm)
1499{
1500	VMA_ITERATOR(vmi, mm, 0);
1501	struct vm_area_struct *vma;
1502
1503	if (!mm)
1504		return;
1505
1506	mm->total_vm = 0;
1507
1508	/*
1509	 * Lock the mm to avoid assert complaining even though this is the only
1510	 * user of the mm
1511	 */
1512	mmap_write_lock(mm);
1513	for_each_vma(vmi, vma) {
1514		cleanup_vma_from_mm(vma);
1515		delete_vma(mm, vma);
1516		cond_resched();
1517	}
1518	__mt_destroy(&mm->mm_mt);
1519	mmap_write_unlock(mm);
1520}
1521
1522/*
1523 * expand (or shrink) an existing mapping, potentially moving it at the same
1524 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1525 *
1526 * under NOMMU conditions, we only permit changing a mapping's size, and only
1527 * as long as it stays within the region allocated by do_mmap_private() and the
1528 * block is not shareable
1529 *
1530 * MREMAP_FIXED is not supported under NOMMU conditions
1531 */
1532static unsigned long do_mremap(unsigned long addr,
1533			unsigned long old_len, unsigned long new_len,
1534			unsigned long flags, unsigned long new_addr)
1535{
1536	struct vm_area_struct *vma;
1537
1538	/* insanity checks first */
1539	old_len = PAGE_ALIGN(old_len);
1540	new_len = PAGE_ALIGN(new_len);
1541	if (old_len == 0 || new_len == 0)
1542		return (unsigned long) -EINVAL;
1543
1544	if (offset_in_page(addr))
1545		return -EINVAL;
1546
1547	if (flags & MREMAP_FIXED && new_addr != addr)
1548		return (unsigned long) -EINVAL;
1549
1550	vma = find_vma_exact(current->mm, addr, old_len);
1551	if (!vma)
1552		return (unsigned long) -EINVAL;
1553
1554	if (vma->vm_end != vma->vm_start + old_len)
1555		return (unsigned long) -EFAULT;
1556
1557	if (is_nommu_shared_mapping(vma->vm_flags))
1558		return (unsigned long) -EPERM;
1559
1560	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1561		return (unsigned long) -ENOMEM;
1562
1563	/* all checks complete - do it */
1564	vma->vm_end = vma->vm_start + new_len;
1565	return vma->vm_start;
1566}
1567
1568SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1569		unsigned long, new_len, unsigned long, flags,
1570		unsigned long, new_addr)
1571{
1572	unsigned long ret;
1573
1574	mmap_write_lock(current->mm);
1575	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1576	mmap_write_unlock(current->mm);
1577	return ret;
1578}
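/*
 * Editorial sketch (not part of the original file): the checks in
 * do_mremap() mean that on !MMU a mapping can only be resized in place,
 * never moved: MREMAP_FIXED to a different address and any resize of a
 * shared (is_nommu_shared_mapping()) mapping are refused, and the new
 * length must still fit inside the vm_region backing the VMA.  So a call
 * like
 *
 *	p2 = mremap(p, old_len, new_len, 0);
 *
 * either returns p at its original position or fails; it never relocates
 * the data.
 */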
1579
1580int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1581		unsigned long pfn, unsigned long size, pgprot_t prot)
1582{
1583	if (addr != (pfn << PAGE_SHIFT))
1584		return -EINVAL;
1585
1586	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1587	return 0;
1588}
1589EXPORT_SYMBOL(remap_pfn_range);
1590
1591int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1592{
1593	unsigned long pfn = start >> PAGE_SHIFT;
1594	unsigned long vm_len = vma->vm_end - vma->vm_start;
1595
1596	pfn += vma->vm_pgoff;
1597	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1598}
1599EXPORT_SYMBOL(vm_iomap_memory);
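/*
 * Editorial sketch (not part of the original file): with no MMU there are
 * no page tables to rewrite, so remap_pfn_range() only succeeds when the
 * requested mapping is already the identity one (addr == pfn << PAGE_SHIFT),
 * i.e. the VMA has been sited directly on the device memory.  A hypothetical
 * framebuffer driver's ->mmap() handler could therefore still share code
 * with the MMU case along the lines of:
 *
 *	return vm_iomap_memory(vma, info->fix.smem_start, info->fix.smem_len);
 */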
1600
1601int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1602			unsigned long pgoff)
1603{
1604	unsigned int size = vma->vm_end - vma->vm_start;
1605
1606	if (!(vma->vm_flags & VM_USERMAP))
1607		return -EINVAL;
1608
1609	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1610	vma->vm_end = vma->vm_start + size;
1611
1612	return 0;
1613}
1614EXPORT_SYMBOL(remap_vmalloc_range);
1615
1616vm_fault_t filemap_fault(struct vm_fault *vmf)
1617{
1618	BUG();
1619	return 0;
1620}
1621EXPORT_SYMBOL(filemap_fault);
1622
1623vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1624		pgoff_t start_pgoff, pgoff_t end_pgoff)
1625{
1626	BUG();
1627	return 0;
1628}
1629EXPORT_SYMBOL(filemap_map_pages);
1630
1631static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1632			      void *buf, int len, unsigned int gup_flags)
1633{
1634	struct vm_area_struct *vma;
1635	int write = gup_flags & FOLL_WRITE;
1636
1637	if (mmap_read_lock_killable(mm))
1638		return 0;
1639
1640	/* the access must start within one of the target process's mappings */
1641	vma = find_vma(mm, addr);
1642	if (vma) {
1643		/* don't overrun this mapping */
1644		if (addr + len >= vma->vm_end)
1645			len = vma->vm_end - addr;
1646
1647		/* only read or write mappings where it is permitted */
1648		if (write && vma->vm_flags & VM_MAYWRITE)
1649			copy_to_user_page(vma, NULL, addr,
1650					 (void *) addr, buf, len);
1651		else if (!write && vma->vm_flags & VM_MAYREAD)
1652			copy_from_user_page(vma, NULL, addr,
1653					    buf, (void *) addr, len);
1654		else
1655			len = 0;
1656	} else {
1657		len = 0;
1658	}
1659
1660	mmap_read_unlock(mm);
1661
1662	return len;
1663}
1664
1665/**
1666 * access_remote_vm - access another process' address space
1667 * @mm:		the mm_struct of the target address space
1668 * @addr:	start address to access
1669 * @buf:	source or destination buffer
1670 * @len:	number of bytes to transfer
1671 * @gup_flags:	flags modifying lookup behaviour
1672 *
1673 * The caller must hold a reference on @mm.
1674 */
1675int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1676		void *buf, int len, unsigned int gup_flags)
1677{
1678	return __access_remote_vm(mm, addr, buf, len, gup_flags);
1679}
1680
1681/*
1682 * Access another process' address space.
1683 * - source/target buffer must be kernel space
1684 */
1685int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1686		unsigned int gup_flags)
1687{
1688	struct mm_struct *mm;
1689
1690	if (addr + len < addr)
1691		return 0;
1692
1693	mm = get_task_mm(tsk);
1694	if (!mm)
1695		return 0;
1696
1697	len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1698
1699	mmput(mm);
1700	return len;
1701}
1702EXPORT_SYMBOL_GPL(access_process_vm);
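/*
 * Editorial sketch (not part of the original file): this is the helper that
 * ptrace() and /proc/<pid>/mem style accesses funnel through.  A kernel-side
 * caller wanting to peek at another task's memory would do something like
 *
 *	unsigned char byte;
 *	if (access_process_vm(child, addr, &byte, 1, FOLL_FORCE) != 1)
 *		return -EIO;
 *
 * where "child" is a task_struct the caller already holds a reference to.
 */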
1703
1704/**
1705 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1706 * @inode: The inode to check
1707 * @size: The current filesize of the inode
1708 * @newsize: The proposed filesize of the inode
1709 *
1710 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1711 * make sure that any outstanding VMAs aren't broken and then shrink the
1712 * vm_regions that extend beyond it so that do_mmap() doesn't
1713 * automatically grant mappings that are too large.
1714 */
1715int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1716				size_t newsize)
1717{
1718	struct vm_area_struct *vma;
1719	struct vm_region *region;
1720	pgoff_t low, high;
1721	size_t r_size, r_top;
1722
1723	low = newsize >> PAGE_SHIFT;
1724	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1725
1726	down_write(&nommu_region_sem);
1727	i_mmap_lock_read(inode->i_mapping);
1728
1729	/* search for VMAs that fall within the dead zone */
1730	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1731		/* found one - only interested if it's shared out of the page
1732		 * cache */
1733		if (vma->vm_flags & VM_SHARED) {
1734			i_mmap_unlock_read(inode->i_mapping);
1735			up_write(&nommu_region_sem);
1736			return -ETXTBSY; /* not quite true, but near enough */
1737		}
1738	}
1739
1740	/* reduce any regions that overlap the dead zone - if in existence,
1741	 * these will be pointed to by VMAs that don't overlap the dead zone
1742	 *
1743	 * we don't check for any regions that start beyond the EOF as there
1744	 * shouldn't be any
1745	 */
1746	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1747		if (!(vma->vm_flags & VM_SHARED))
1748			continue;
1749
1750		region = vma->vm_region;
1751		r_size = region->vm_top - region->vm_start;
1752		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1753
1754		if (r_top > newsize) {
1755			region->vm_top -= r_top - newsize;
1756			if (region->vm_end > region->vm_top)
1757				region->vm_end = region->vm_top;
1758		}
1759	}
1760
1761	i_mmap_unlock_read(inode->i_mapping);
1762	up_write(&nommu_region_sem);
1763	return 0;
1764}
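/*
 * Illustrative worked example (editorial note): truncating a 40kB file down
 * to 16kB with 4kB pages gives low = 16384 >> 12 = 4 and high = 10, so any
 * shared VMA whose pages fall in offsets 4..10 makes the truncate fail with
 * -ETXTBSY, while a region mapping the whole file has its vm_top pulled back
 * so that it no longer extends past the new 16kB end of file.
 */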
1765
1766/*
1767 * Initialise sysctl_user_reserve_kbytes.
1768 *
1769 * This is intended to prevent a user from starting a single memory hogging
1770 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1771 * mode.
1772 *
1773 * The default value is min(3% of free memory, 128MB)
1774 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1775 */
1776static int __meminit init_user_reserve(void)
1777{
1778	unsigned long free_kbytes;
1779
1780	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1781
1782	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1783	return 0;
1784}
1785subsys_initcall(init_user_reserve);
1786
1787/*
1788 * Initialise sysctl_admin_reserve_kbytes.
1789 *
1790 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1791 * to log in and kill a memory hogging process.
1792 *
1793 * Systems with more than 256MB will reserve 8MB, enough to recover
1794 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1795 * only reserve 3% of free pages by default.
1796 */
1797static int __meminit init_admin_reserve(void)
1798{
1799	unsigned long free_kbytes;
1800
1801	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1802
1803	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1804	return 0;
1805}
1806subsys_initcall(init_admin_reserve);
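/*
 * Illustrative worked example (editorial note): 1UL << 13 kbytes is 8MB, so
 * with 1GB free (1048576kB) the reserve is min(32768kB, 8192kB) = 8MB, while
 * a small system with 64MB free (65536kB) gets min(2048kB, 8192kB) = 2MB,
 * i.e. roughly 3% of free memory as the comment above describes.
 */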