/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

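/*
 * Illustrative sketch (not part of the original source): because NOMMU
 * kmalloc() rounds requests up to a slab size, kobjsize() reports the size
 * of the whole underlying allocation rather than the size asked for:
 *
 *	char *p = kmalloc(100, GFP_KERNEL);
 *	if (p)
 *		printk(KERN_DEBUG "asked 100, usable %u\n", kobjsize(p));
 *	kfree(p);
 */
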
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);

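/*
 * Illustrative sketch (not part of the original source): pinning one page
 * of a user buffer with the helper above; under NOMMU this only looks up
 * the covering VMA and takes an extra page reference:
 *
 *	struct page *page;
 *	long n;
 *
 *	down_read(&current->mm->mmap_sem);
 *	n = get_user_pages(current, current->mm, addr, 1, 1, 0, &page, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (n == 1)
 *		page_cache_release(page);
 *
 * where page_cache_release() drops the reference __get_user_pages() took.
 */
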
/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

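/*
 * Illustrative sketch (not part of the original source): a driver's
 * ->mmap() handler (mydev_mmap and mydev_buf are hypothetical names) can
 * hand a vmalloc_user() buffer to userspace precisely because VM_USERMAP
 * was set above, which remap_vmalloc_range() insists on:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, mydev_buf, 0);
 *	}
 */
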
struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

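/*
 * Illustrative sketch (not part of the original source): since the NOMMU
 * vmalloc() above is kmalloc()-backed, the result is physically contiguous
 * and bounded by the maximum kmalloc() allocation size, unlike the MMU
 * version:
 *
 *	void *p = vmalloc(16 * PAGE_SIZE);
 *	...
 *	vfree(p);
 *
 * where vfree() simply kfree()s the block, as defined earlier in this file.
 */
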
/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

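/*
 * Illustrative sketch (not part of the original source): a C library can
 * build sbrk() on this syscall; an out-of-window argument such as 0 just
 * reads back the current break:
 *
 *	old = brk(0);			returns the current break
 *	new = brk(old + 4096);		grows, up to context.end_brk
 *
 * A request past end_brk is not reported as an error here: the old break
 * is returned and the caller must compare the two values.
 */
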
/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree, and add it to the address space's page tree as well if it is not
 * an anonymous page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(curr->mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

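/*
 * Illustrative sketch (not part of the original source): lookups must be
 * covered by mmap_sem, and unlike the MMU find_vma() this one only returns
 * a VMA that actually contains addr:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		kdebug("%lx is in %lx-%lx", addr, vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */
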
/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}
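	/*
	 * Worked example (illustrative, not part of the original source):
	 * for a len of three pages get_order() gives 2, so total = 4 and
	 * point = 3; the loop above shaves the one excess page in a single
	 * order-0 __free_pages() call, assuming sysctl_nr_trim_pages is 1
	 * (its usual default).
	 */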

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

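/*
 * Illustrative sketch (not part of the original source): for a 4-page
 * anonymous mapping starting at addr, the rules above give:
 *
 *	munmap(addr, 4 * PAGE_SIZE)		frees the whole VMA
 *	munmap(addr + PAGE_SIZE, PAGE_SIZE)	splits, then shrinks
 *	munmap(addr, 5 * PAGE_SIZE)		-EINVAL (not a subset)
 *
 * For a file-backed VMA only an exact whole-VMA unmap succeeds.
 */
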
int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

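/*
 * Illustrative sketch (not part of the original source): given p mapped as
 * a private 4-page region, the checks above permit resizing in place but
 * never growth beyond the backing region:
 *
 *	mremap(p, 4 * PAGE_SIZE, 2 * PAGE_SIZE, 0)	ok, shrinks
 *	mremap(p, 2 * PAGE_SIZE, 4 * PAGE_SIZE, 0)	ok, back within region
 *	mremap(p, 4 * PAGE_SIZE, 8 * PAGE_SIZE, 0)	-ENOMEM
 */
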
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

1887/*
1888 * Check that a process has enough memory to allocate a new virtual
1889 * mapping. 0 means there is enough memory for the allocation to
1890 * succeed and -ENOMEM implies there is not.
1891 *
1892 * We currently support three overcommit policies, which are set via the
1893 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1894 *
1895 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1896 * Additional code 2002 Jul 20 by Robert Love.
1897 *
1898 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1899 *
1900 * Note this is a helper function intended to be used by LSMs which
1901 * wish to use this logic.
1902 */
1903int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1904{
1905	unsigned long free, allowed, reserve;
1906
1907	vm_acct_memory(pages);
1908
1909	/*
1910	 * Sometimes we want to use more memory than we have
1911	 */
1912	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1913		return 0;
1914
1915	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1916		free = global_page_state(NR_FREE_PAGES);
1917		free += global_page_state(NR_FILE_PAGES);
1918
1919		/*
1920		 * shmem pages shouldn't be counted as free in this
1921		 * case, they can't be purged, only swapped out, and
1922		 * that won't affect the overall amount of available
1923		 * memory in the system.
1924		 */
1925		free -= global_page_state(NR_SHMEM);
1926
1927		free += get_nr_swap_pages();
1928
1929		/*
1930		 * Any slabs which are created with the
1931		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1932		 * which are reclaimable, under pressure.  The dentry
1933		 * cache and most inode caches should fall into this
1934		 */
1935		free += global_page_state(NR_SLAB_RECLAIMABLE);
1936
1937		/*
1938		 * Leave reserved pages. The pages are not for anonymous pages.
1939		 */
1940		if (free <= totalreserve_pages)
1941			goto error;
1942		else
1943			free -= totalreserve_pages;
1944
1945		/*
1946		 * Reserve some for root
1947		 */
1948		if (!cap_sys_admin)
1949			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1950
1951		if (free > pages)
1952			return 0;
1953
1954		goto error;
1955	}
1956
1957	allowed = vm_commit_limit();
1958	/*
1959	 * Reserve some for root
1960	 */
1961	if (!cap_sys_admin)
1962		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
1963
1964	/*
1965	 * Don't let a single process grow so big a user can't recover
1966	 */
1967	if (mm) {
1968		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
1969		allowed -= min(mm->total_vm / 32, reserve);
1970	}
1971
1972	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1973		return 0;
1974
1975error:
1976	vm_unacct_memory(pages);
1977
1978	return -ENOMEM;
1979}
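/*
 * Worked example of the reserve arithmetic above (assuming 4KB pages,
 * i.e. PAGE_SHIFT == 12): the sysctls are in kilobytes, so shifting by
 * (PAGE_SHIFT - 10) == 2 converts KB to pages.  The default
 * sysctl_admin_reserve_kbytes of 8192 thus becomes 8192 >> 2 == 2048
 * pages (8MB), and the default sysctl_user_reserve_kbytes of 131072
 * becomes 32768 pages (128MB); the per-mm term actually subtracted is
 * min(mm->total_vm / 32, reserve).
 */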
1980
1981int in_gate_area_no_mm(unsigned long addr)
1982{
1983	return 0;
1984}
1985
1986int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1987{
1988	BUG();
1989	return 0;
1990}
1991EXPORT_SYMBOL(filemap_fault);
1992
1993void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
1994{
1995	BUG();
1996}
1997EXPORT_SYMBOL(filemap_map_pages);
1998
1999int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
2000			     unsigned long size, pgoff_t pgoff)
2001{
2002	BUG();
2003	return 0;
2004}
2005EXPORT_SYMBOL(generic_file_remap_pages);
2006
2007static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
2008		unsigned long addr, void *buf, int len, int write)
2009{
2010	struct vm_area_struct *vma;
2011
2012	down_read(&mm->mmap_sem);
2013
2014	/* the access must start within one of the target process's mappings */
2015	vma = find_vma(mm, addr);
2016	if (vma) {
2017		/* don't overrun this mapping */
2018		if (addr + len >= vma->vm_end)
2019			len = vma->vm_end - addr;
2020
2021		/* only read or write mappings where it is permitted */
2022		if (write && vma->vm_flags & VM_MAYWRITE)
2023			copy_to_user_page(vma, NULL, addr,
2024					 (void *) addr, buf, len);
2025		else if (!write && vma->vm_flags & VM_MAYREAD)
2026			copy_from_user_page(vma, NULL, addr,
2027					    buf, (void *) addr, len);
2028		else
2029			len = 0;
2030	} else {
2031		len = 0;
2032	}
2033
2034	up_read(&mm->mmap_sem);
2035
2036	return len;
2037}
2038
2039/**
2040 * access_remote_vm - access another process' address space
2041 * @mm:		the mm_struct of the target address space
2042 * @addr:	start address to access
2043 * @buf:	source or destination buffer
2044 * @len:	number of bytes to transfer
2045 * @write:	whether the access is a write
2046 *
2047 * The caller must hold a reference on @mm.
2048 */
2049int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2050		void *buf, int len, int write)
2051{
2052	return __access_remote_vm(NULL, mm, addr, buf, len, write);
2053}
2054
2055/*
2056 * Access another process' address space.
2057 * - source/target buffer must be kernel space
2058 */
2059int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2060{
2061	struct mm_struct *mm;
2062
2063	if (addr + len < addr)
2064		return 0;
2065
2066	mm = get_task_mm(tsk);
2067	if (!mm)
2068		return 0;
2069
2070	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
2071
2072	mmput(mm);
2073	return len;
2074}
2075
2076/**
2077 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
2078 * @inode: The inode to check
2079 * @size: The current filesize of the inode
2080 * @newsize: The proposed filesize of the inode
2081 *
2082 * Check the shared mappings on an inode on behalf of a shrinking truncate to
2083 * make sure that any outstanding VMAs aren't broken and then shrink the
2084 * vm_regions that extend beyond it so that do_mmap_pgoff() doesn't
2085 * automatically grant mappings that are too large.
2086 */
2087int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2088				size_t newsize)
2089{
2090	struct vm_area_struct *vma;
2091	struct vm_region *region;
2092	pgoff_t low, high;
2093	size_t r_size, r_top;
2094
2095	low = newsize >> PAGE_SHIFT;
2096	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2097
2098	down_write(&nommu_region_sem);
2099	mutex_lock(&inode->i_mapping->i_mmap_mutex);
2100
2101	/* search for VMAs that fall within the dead zone */
2102	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
2103		/* found one - only interested if it's shared out of the page
2104		 * cache */
2105		if (vma->vm_flags & VM_SHARED) {
2106			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2107			up_write(&nommu_region_sem);
2108			return -ETXTBSY; /* not quite true, but near enough */
2109		}
2110	}
2111
2112	/* reduce any regions that overlap the dead zone - if in existence,
2113	 * these will be pointed to by VMAs that don't overlap the dead zone
2114	 *
2115	 * we don't check for any regions that start beyond the EOF as there
2116	 * shouldn't be any
2117	 */
2118	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
2119				  0, ULONG_MAX) {
2120		if (!(vma->vm_flags & VM_SHARED))
2121			continue;
2122
2123		region = vma->vm_region;
2124		r_size = region->vm_top - region->vm_start;
2125		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2126
2127		if (r_top > newsize) {
2128			region->vm_top -= r_top - newsize;
2129			if (region->vm_end > region->vm_top)
2130				region->vm_end = region->vm_top;
2131		}
2132	}
2133
2134	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2135	up_write(&nommu_region_sem);
2136	return 0;
2137}
2138
2139/*
2140 * Initialise sysctl_user_reserve_kbytes.
2141 *
2142 * This is intended to prevent a user from starting a single memory hogging
2143 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
2144 * mode.
2145 *
2146 * The default value is min(3% of free memory, 128MB).
2147 * 128MB is enough to recover with sshd/login, bash, and top/kill.
2148 */
2149static int __meminit init_user_reserve(void)
2150{
2151	unsigned long free_kbytes;
2152
2153	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2154
2155	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
2156	return 0;
2157}
2158module_init(init_user_reserve)
2159
2160/*
2161 * Initialise sysctl_admin_reserve_kbytes.
2162 *
2163 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
2164 * to log in and kill a memory hogging process.
2165 *
2166 * Systems with more than 256MB will reserve 8MB, enough to recover
2167 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
2168 * only reserve 3% of free pages by default.
2169 */
2170static int __meminit init_admin_reserve(void)
2171{
2172	unsigned long free_kbytes;
2173
2174	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
2175
2176	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
2177	return 0;
2178}
2179module_init(init_admin_reserve)
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPUs that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/mman.h>
  23#include <linux/swap.h>
  24#include <linux/file.h>
  25#include <linux/highmem.h>
  26#include <linux/pagemap.h>
  27#include <linux/slab.h>
  28#include <linux/vmalloc.h>
  29#include <linux/backing-dev.h>
  30#include <linux/compiler.h>
  31#include <linux/mount.h>
  32#include <linux/personality.h>
  33#include <linux/security.h>
  34#include <linux/syscalls.h>
  35#include <linux/audit.h>
  36#include <linux/printk.h>
  37
  38#include <linux/uaccess.h>
  39#include <linux/uio.h>
  40#include <asm/tlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/mmu_context.h>
  43#include "internal.h"
  44
  45void *high_memory;
  46EXPORT_SYMBOL(high_memory);
  47struct page *mem_map;
  48unsigned long max_mapnr;
  49EXPORT_SYMBOL(max_mapnr);
  50unsigned long highest_memmap_pfn;
  51int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  52int heap_stack_gap = 0;
  53
  54atomic_long_t mmap_pages_allocated;
  55
  56EXPORT_SYMBOL(mem_map);
  57
  58/* list of mapped, potentially shareable regions */
  59static struct kmem_cache *vm_region_jar;
  60struct rb_root nommu_region_tree = RB_ROOT;
  61DECLARE_RWSEM(nommu_region_sem);
  62
  63const struct vm_operations_struct generic_file_vm_ops = {
  64};
  65
  66/*
  67 * Return the total memory allocated for this pointer, not
  68 * just what the caller asked for.
  69 *
  70 * Doesn't have to be accurate, i.e. may have races.
  71 */
  72unsigned int kobjsize(const void *objp)
  73{
  74	struct page *page;
  75
  76	/*
  77	 * If the object we have should not have ksize performed on it,
  78	 * return size of 0
  79	 */
  80	if (!objp || !virt_addr_valid(objp))
  81		return 0;
  82
  83	page = virt_to_head_page(objp);
  84
  85	/*
  86	 * If the allocator sets PageSlab, we know the pointer came from
  87	 * kmalloc().
  88	 */
  89	if (PageSlab(page))
  90		return ksize(objp);
  91
  92	/*
  93	 * If it's not a compound page, see if we have a matching VMA
  94	 * region. This test is intentionally done in reverse order,
  95	 * so if there's no VMA, we still fall through and hand back
  96	 * PAGE_SIZE for 0-order pages.
  97	 */
  98	if (!PageCompound(page)) {
  99		struct vm_area_struct *vma;
 100
 101		vma = find_vma(current->mm, (unsigned long)objp);
 102		if (vma)
 103			return vma->vm_end - vma->vm_start;
 104	}
 105
 106	/*
 107	 * The ksize() function is only guaranteed to work for pointers
 108	 * returned by kmalloc(). So handle arbitrary pointers here.
 109	 */
 110	return page_size(page);
 111}
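/*
 * Illustrative sketch (not part of this file): kobjsize() reports the
 * rounded-up footprint of an allocation rather than the requested
 * length, so for a hypothetical caller:
 *
 *	void *p = kmalloc(13, GFP_KERNEL);
 *	unsigned int sz = kobjsize(p);	// e.g. 16, from the slab size class
 *
 * The exact value depends on the slab allocator's size classes, and for
 * non-slab pointers it falls back to VMA or page sizes as above.
 */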
 112
 113void vfree(const void *addr)
 114{
 115	kfree(addr);
 116}
 117EXPORT_SYMBOL(vfree);
 118
 119void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 120{
 121	/*
 122	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 123	 * returns only a logical address.
 124	 */
 125	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 126}
 127EXPORT_SYMBOL(__vmalloc_noprof);
 128
 129void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 130{
 131	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 132}
 133
 134void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 135		unsigned long start, unsigned long end, gfp_t gfp_mask,
 136		pgprot_t prot, unsigned long vm_flags, int node,
 137		const void *caller)
 138{
 139	return __vmalloc_noprof(size, gfp_mask);
 140}
 141
 142void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 143		int node, const void *caller)
 144{
 145	return __vmalloc_noprof(size, gfp_mask);
 146}
 147
 148static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 149{
 150	void *ret;
 151
 152	ret = __vmalloc(size, flags);
 153	if (ret) {
 154		struct vm_area_struct *vma;
 155
 156		mmap_write_lock(current->mm);
 157		vma = find_vma(current->mm, (unsigned long)ret);
 158		if (vma)
 159			vm_flags_set(vma, VM_USERMAP);
 160		mmap_write_unlock(current->mm);
 161	}
 162
 163	return ret;
 164}
 165
 166void *vmalloc_user_noprof(unsigned long size)
 167{
 168	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 169}
 170EXPORT_SYMBOL(vmalloc_user_noprof);
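/*
 * Illustrative sketch (not part of this file): vmalloc_user() is the
 * allocation side of the remap_vmalloc_range() pairing.  The zeroing
 * avoids leaking kernel data to userspace, and the VM_USERMAP flag set
 * above is what later authorises the remap.  Hypothetical driver code:
 *
 *	mydev->ring = vmalloc_user(PAGE_ALIGN(ring_bytes));
 *	if (!mydev->ring)
 *		return -ENOMEM;
 *	// later, in ->mmap(): remap_vmalloc_range(vma, mydev->ring, 0);
 */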
 171
 172struct page *vmalloc_to_page(const void *addr)
 173{
 174	return virt_to_page(addr);
 175}
 176EXPORT_SYMBOL(vmalloc_to_page);
 177
 178unsigned long vmalloc_to_pfn(const void *addr)
 179{
 180	return page_to_pfn(virt_to_page(addr));
 181}
 182EXPORT_SYMBOL(vmalloc_to_pfn);
 183
 184long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 185{
 186	/* Don't allow overflow */
 187	if ((unsigned long) addr + count < count)
 188		count = -(unsigned long) addr;
 189
 190	return copy_to_iter(addr, count, iter);
 191}
 192
 193/*
 194 *	vmalloc  -  allocate virtually contiguous memory
 195 *
 196 *	@size:		allocation size
 197 *
 198 *	Allocate enough pages to cover @size from the page level
 199 *	allocator and map them into contiguous kernel virtual space.
 200 *
 201 *	For tight control over page level allocator and protection flags
 202 *	use __vmalloc() instead.
 203 */
 204void *vmalloc_noprof(unsigned long size)
 205{
 206	return __vmalloc_noprof(size, GFP_KERNEL);
 207}
 208EXPORT_SYMBOL(vmalloc_noprof);
 209
 210void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 211
 212/*
 213 *	vzalloc - allocate virtually contiguous memory with zero fill
 214 *
 215 *	@size:		allocation size
 216 *
 217 *	Allocate enough pages to cover @size from the page level
 218 *	allocator and map them into contiguous kernel virtual space.
 219 *	The memory allocated is set to zero.
 220 *
 221 *	For tight control over page level allocator and protection flags
 222 *	use __vmalloc() instead.
 223 */
 224void *vzalloc_noprof(unsigned long size)
 225{
 226	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 227}
 228EXPORT_SYMBOL(vzalloc_noprof);
 229
 230/**
 231 * vmalloc_node - allocate memory on a specific node
 232 * @size:	allocation size
 233 * @node:	numa node
 234 *
 235 * Allocate enough pages to cover @size from the page level
 236 * allocator and map them into contiguous kernel virtual space.
 237 *
 238 * For tight control over page level allocator and protection flags
 239 * use __vmalloc() instead.
 240 */
 241void *vmalloc_node_noprof(unsigned long size, int node)
 242{
 243	return vmalloc_noprof(size);
 244}
 245EXPORT_SYMBOL(vmalloc_node_noprof);
 246
 247/**
 248 * vzalloc_node - allocate memory on a specific node with zero fill
 249 * @size:	allocation size
 250 * @node:	numa node
 251 *
 252 * Allocate enough pages to cover @size from the page level
 253 * allocator and map them into contiguous kernel virtual space.
 254 * The memory allocated is set to zero.
 255 *
 256 * For tight control over page level allocator and protection flags
 257 * use __vmalloc() instead.
 258 */
 259void *vzalloc_node_noprof(unsigned long size, int node)
 260{
 261	return vzalloc_noprof(size);
 262}
 263EXPORT_SYMBOL(vzalloc_node_noprof);
 264
 265/**
 266 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 267 *	@size:		allocation size
 268 *
 269 *	Allocate enough 32bit PA addressable pages to cover @size from the
 270 *	page level allocator and map them into contiguous kernel virtual space.
 271 */
 272void *vmalloc_32_noprof(unsigned long size)
 273{
 274	return __vmalloc_noprof(size, GFP_KERNEL);
 275}
 276EXPORT_SYMBOL(vmalloc_32_noprof);
 277
 278/**
 279 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 280 *	@size:		allocation size
 281 *
 282 * The resulting memory area is 32bit addressable and zeroed so it can be
 283 * mapped to userspace without leaking data.
 284 *
 285 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 286 * remap_vmalloc_range() are permissible.
 287 */
 288void *vmalloc_32_user_noprof(unsigned long size)
 289{
 290	/*
 291	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 292	 * but for now this can simply use vmalloc_user() directly.
 293	 */
 294	return vmalloc_user_noprof(size);
 295}
 296EXPORT_SYMBOL(vmalloc_32_user_noprof);
 297
 298void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 299{
 300	BUG();
 301	return NULL;
 302}
 303EXPORT_SYMBOL(vmap);
 304
 305void vunmap(const void *addr)
 306{
 307	BUG();
 308}
 309EXPORT_SYMBOL(vunmap);
 310
 311void *vm_map_ram(struct page **pages, unsigned int count, int node)
 312{
 313	BUG();
 314	return NULL;
 315}
 316EXPORT_SYMBOL(vm_map_ram);
 317
 318void vm_unmap_ram(const void *mem, unsigned int count)
 319{
 320	BUG();
 321}
 322EXPORT_SYMBOL(vm_unmap_ram);
 323
 324void vm_unmap_aliases(void)
 325{
 326}
 327EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 328
 329void free_vm_area(struct vm_struct *area)
 330{
 331	BUG();
 332}
 333EXPORT_SYMBOL_GPL(free_vm_area);
 334
 335int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 336		   struct page *page)
 337{
 338	return -EINVAL;
 339}
 340EXPORT_SYMBOL(vm_insert_page);
 341
 342int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 343			struct page **pages, unsigned long *num)
 344{
 345	return -EINVAL;
 346}
 347EXPORT_SYMBOL(vm_insert_pages);
 348
 349int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 350			unsigned long num)
 351{
 352	return -EINVAL;
 353}
 354EXPORT_SYMBOL(vm_map_pages);
 355
 356int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 357				unsigned long num)
 358{
 359	return -EINVAL;
 360}
 361EXPORT_SYMBOL(vm_map_pages_zero);
 362
 363/*
 364 *  sys_brk() for the most part doesn't need the global kernel
 365 *  lock, except when an application is doing something nasty
 366 *  like trying to un-brk an area that has already been mapped
 367 *  to a regular file.  In this case, the unmapping will need
 368 *  to invoke file system routines that need the global lock.
 369 */
 370SYSCALL_DEFINE1(brk, unsigned long, brk)
 371{
 372	struct mm_struct *mm = current->mm;
 373
 374	if (brk < mm->start_brk || brk > mm->context.end_brk)
 375		return mm->brk;
 376
 377	if (mm->brk == brk)
 378		return mm->brk;
 379
 380	/*
 381	 * Always allow shrinking brk
 382	 */
 383	if (brk <= mm->brk) {
 384		mm->brk = brk;
 385		return brk;
 386	}
 387
 388	/*
 389	 * Ok, looks good - let it rip.
 390	 */
 391	flush_icache_user_range(mm->brk, brk);
 392	return mm->brk = brk;
 393}
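/*
 * Illustrative note (not part of this file): with no MMU to fault in
 * new pages, the heap a process may brk() into is fixed at exec time as
 * the window [mm->start_brk, mm->context.end_brk].  A userspace
 * sbrk(4096) therefore succeeds only while the new break stays inside
 * that window; a request past end_brk returns the old break unchanged,
 * which C libraries report as ENOMEM.
 */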
 394
 395/*
 396 * initialise the percpu counter for VM and region record slabs
 397 */
 398void __init mmap_init(void)
 399{
 400	int ret;
 401
 402	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 403	VM_BUG_ON(ret);
 404	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 405}
 406
 407/*
 408 * validate the region tree
 409 * - the caller must hold the region lock
 410 */
 411#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 412static noinline void validate_nommu_regions(void)
 413{
 414	struct vm_region *region, *last;
 415	struct rb_node *p, *lastp;
 416
 417	lastp = rb_first(&nommu_region_tree);
 418	if (!lastp)
 419		return;
 420
 421	last = rb_entry(lastp, struct vm_region, vm_rb);
 422	BUG_ON(last->vm_end <= last->vm_start);
 423	BUG_ON(last->vm_top < last->vm_end);
 424
 425	while ((p = rb_next(lastp))) {
 426		region = rb_entry(p, struct vm_region, vm_rb);
 427		last = rb_entry(lastp, struct vm_region, vm_rb);
 428
 429		BUG_ON(region->vm_end <= region->vm_start);
 430		BUG_ON(region->vm_top < region->vm_end);
 431		BUG_ON(region->vm_start < last->vm_top);
 432
 433		lastp = p;
 434	}
 435}
 436#else
 437static void validate_nommu_regions(void)
 438{
 439}
 440#endif
 441
 442/*
 443 * add a region into the global tree
 444 */
 445static void add_nommu_region(struct vm_region *region)
 446{
 447	struct vm_region *pregion;
 448	struct rb_node **p, *parent;
 449
 450	validate_nommu_regions();
 451
 452	parent = NULL;
 453	p = &nommu_region_tree.rb_node;
 454	while (*p) {
 455		parent = *p;
 456		pregion = rb_entry(parent, struct vm_region, vm_rb);
 457		if (region->vm_start < pregion->vm_start)
 458			p = &(*p)->rb_left;
 459		else if (region->vm_start > pregion->vm_start)
 460			p = &(*p)->rb_right;
 461		else if (pregion == region)
 462			return;
 463		else
 464			BUG();
 465	}
 466
 467	rb_link_node(&region->vm_rb, parent, p);
 468	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 469
 470	validate_nommu_regions();
 471}
 472
 473/*
 474 * delete a region from the global tree
 475 */
 476static void delete_nommu_region(struct vm_region *region)
 477{
 478	BUG_ON(!nommu_region_tree.rb_node);
 479
 480	validate_nommu_regions();
 481	rb_erase(&region->vm_rb, &nommu_region_tree);
 482	validate_nommu_regions();
 483}
 484
 485/*
 486 * free a contiguous series of pages
 487 */
 488static void free_page_series(unsigned long from, unsigned long to)
 489{
 490	for (; from < to; from += PAGE_SIZE) {
 491		struct page *page = virt_to_page((void *)from);
 492
 493	atomic_long_dec(&mmap_pages_allocated);
 494		put_page(page);
 495	}
 496}
 497
 498/*
 499 * release a reference to a region
 500 * - the caller must hold the region semaphore for writing, which this releases
 501 * - the region may not have been added to the tree yet, in which case vm_top
 502 *   will equal vm_start
 503 */
 504static void __put_nommu_region(struct vm_region *region)
 505	__releases(nommu_region_sem)
 506{
 507	BUG_ON(!nommu_region_tree.rb_node);
 508
 509	if (--region->vm_usage == 0) {
 510		if (region->vm_top > region->vm_start)
 511			delete_nommu_region(region);
 512		up_write(&nommu_region_sem);
 513
 514		if (region->vm_file)
 515			fput(region->vm_file);
 516
 517		/* IO memory and memory shared directly out of the pagecache
 518		 * from ramfs/tmpfs mustn't be released here */
 519		if (region->vm_flags & VM_MAPPED_COPY)
 520			free_page_series(region->vm_start, region->vm_top);
 521		kmem_cache_free(vm_region_jar, region);
 522	} else {
 523		up_write(&nommu_region_sem);
 524	}
 525}
 526
 527/*
 528 * release a reference to a region
 529 */
 530static void put_nommu_region(struct vm_region *region)
 531{
 532	down_write(&nommu_region_sem);
 533	__put_nommu_region(region);
 534}
 535
 536static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 537{
 538	vma->vm_mm = mm;
 539
 540	/* add the VMA to the mapping */
 541	if (vma->vm_file) {
 542		struct address_space *mapping = vma->vm_file->f_mapping;
 543
 544		i_mmap_lock_write(mapping);
 545		flush_dcache_mmap_lock(mapping);
 546		vma_interval_tree_insert(vma, &mapping->i_mmap);
 547		flush_dcache_mmap_unlock(mapping);
 548		i_mmap_unlock_write(mapping);
 549	}
 550}
 551
 552static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 553{
 554	vma->vm_mm->map_count--;
 555	/* remove the VMA from the mapping */
 556	if (vma->vm_file) {
 557		struct address_space *mapping;
 558		mapping = vma->vm_file->f_mapping;
 559
 560		i_mmap_lock_write(mapping);
 561		flush_dcache_mmap_lock(mapping);
 562		vma_interval_tree_remove(vma, &mapping->i_mmap);
 563		flush_dcache_mmap_unlock(mapping);
 564		i_mmap_unlock_write(mapping);
 565	}
 566}
 567
 568/*
 569 * delete a VMA from its owning mm_struct and address space
 570 */
 571static int delete_vma_from_mm(struct vm_area_struct *vma)
 572{
 573	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 574
 575	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 576	if (vma_iter_prealloc(&vmi, NULL)) {
 577		pr_warn("Allocation of vma tree for process %d failed\n",
 578		       current->pid);
 579		return -ENOMEM;
 580	}
 581	cleanup_vma_from_mm(vma);
 582
 583	/* remove from the MM's tree and list */
 584	vma_iter_clear(&vmi);
 585	return 0;
 586}
 587/*
 588 * destroy a VMA record
 589 */
 590static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 591{
 592	vma_close(vma);
 593	if (vma->vm_file)
 594		fput(vma->vm_file);
 595	put_nommu_region(vma->vm_region);
 596	vm_area_free(vma);
 597}
 598
 599struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 600					     unsigned long start_addr,
 601					     unsigned long end_addr)
 602{
 603	unsigned long index = start_addr;
 604
 605	mmap_assert_locked(mm);
 606	return mt_find(&mm->mm_mt, &index, end_addr - 1);
 607}
 608EXPORT_SYMBOL(find_vma_intersection);
 609
 610/*
 611 * look up the first VMA in which addr resides, NULL if none
 612 * - should be called with mm->mmap_lock at least held readlocked
 613 */
 614struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 615{
 616	VMA_ITERATOR(vmi, mm, addr);
 617
 618	return vma_iter_load(&vmi);
 619}
 620EXPORT_SYMBOL(find_vma);
 621
 622/*
 623 * At least xtensa ends up having protection faults even with no
 624 * MMU. No stack expansion, at least.
 625 */
 626struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
 627			unsigned long addr, struct pt_regs *regs)
 628{
 629	struct vm_area_struct *vma;
 630
 631	mmap_read_lock(mm);
 632	vma = vma_lookup(mm, addr);
 633	if (!vma)
 634		mmap_read_unlock(mm);
 635	return vma;
 636}
 637
 638/*
 639 * expand a stack to a given address
 640 * - not supported under NOMMU conditions
 641 */
 642int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
 643{
 644	return -ENOMEM;
 645}
 646
 647struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 648{
 649	mmap_read_unlock(mm);
 650	return NULL;
 651}
 652
 653/*
 654 * look up the first VMA that exactly matches addr
 655 * - should be called with mm->mmap_lock at least held readlocked
 656 */
 657static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 658					     unsigned long addr,
 659					     unsigned long len)
 660{
 661	struct vm_area_struct *vma;
 662	unsigned long end = addr + len;
 663	VMA_ITERATOR(vmi, mm, addr);
 664
 665	vma = vma_iter_load(&vmi);
 666	if (!vma)
 667		return NULL;
 668	if (vma->vm_start != addr)
 669		return NULL;
 670	if (vma->vm_end != end)
 671		return NULL;
 672
 673	return vma;
 674}
 675
 676/*
 677 * determine whether a mapping should be permitted and, if so, what sort of
 678 * mapping we're capable of supporting
 679 */
 680static int validate_mmap_request(struct file *file,
 681				 unsigned long addr,
 682				 unsigned long len,
 683				 unsigned long prot,
 684				 unsigned long flags,
 685				 unsigned long pgoff,
 686				 unsigned long *_capabilities)
 687{
 688	unsigned long capabilities, rlen;
 689	int ret;
 690
 691	/* do the simple checks first */
 692	if (flags & MAP_FIXED)
 693		return -EINVAL;
 694
 695	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 696	    (flags & MAP_TYPE) != MAP_SHARED)
 697		return -EINVAL;
 698
 699	if (!len)
 700		return -EINVAL;
 701
 702	/* Careful about overflows.. */
 703	rlen = PAGE_ALIGN(len);
 704	if (!rlen || rlen > TASK_SIZE)
 705		return -ENOMEM;
 706
 707	/* offset overflow? */
 708	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 709		return -EOVERFLOW;
 710
 711	if (file) {
 712		/* files must support mmap */
 713		if (!file->f_op->mmap)
 714			return -ENODEV;
 715
 716		/* work out if what we've got could possibly be shared
 717		 * - we support chardevs that provide their own "memory"
 718		 * - we support files/blockdevs that are memory backed
 719		 */
 720		if (file->f_op->mmap_capabilities) {
 721			capabilities = file->f_op->mmap_capabilities(file);
 722		} else {
 723			/* no explicit capabilities set, so assume some
 724			 * defaults */
 725			switch (file_inode(file)->i_mode & S_IFMT) {
 726			case S_IFREG:
 727			case S_IFBLK:
 728				capabilities = NOMMU_MAP_COPY;
 729				break;
 730
 731			case S_IFCHR:
 732				capabilities =
 733					NOMMU_MAP_DIRECT |
 734					NOMMU_MAP_READ |
 735					NOMMU_MAP_WRITE;
 736				break;
 737
 738			default:
 739				return -EINVAL;
 740			}
 741		}
 742
 743		/* eliminate any capabilities that we can't support on this
 744		 * device */
 745		if (!file->f_op->get_unmapped_area)
 746			capabilities &= ~NOMMU_MAP_DIRECT;
 747		if (!(file->f_mode & FMODE_CAN_READ))
 748			capabilities &= ~NOMMU_MAP_COPY;
 749
 750		/* The file shall have been opened with read permission. */
 751		if (!(file->f_mode & FMODE_READ))
 752			return -EACCES;
 753
 754		if (flags & MAP_SHARED) {
 755			/* do checks for writing, appending and locking */
 756			if ((prot & PROT_WRITE) &&
 757			    !(file->f_mode & FMODE_WRITE))
 758				return -EACCES;
 759
 760			if (IS_APPEND(file_inode(file)) &&
 761			    (file->f_mode & FMODE_WRITE))
 762				return -EACCES;
 763
 764			if (!(capabilities & NOMMU_MAP_DIRECT))
 765				return -ENODEV;
 766
 767			/* we mustn't privatise shared mappings */
 768			capabilities &= ~NOMMU_MAP_COPY;
 769		} else {
 770			/* we're going to read the file into private memory we
 771			 * allocate */
 772			if (!(capabilities & NOMMU_MAP_COPY))
 773				return -ENODEV;
 774
 775			/* we don't permit a private writable mapping to be
 776			 * shared with the backing device */
 777			if (prot & PROT_WRITE)
 778				capabilities &= ~NOMMU_MAP_DIRECT;
 779		}
 780
 781		if (capabilities & NOMMU_MAP_DIRECT) {
 782			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 783			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 784			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 785			    ) {
 786				capabilities &= ~NOMMU_MAP_DIRECT;
 787				if (flags & MAP_SHARED) {
 788					pr_warn("MAP_SHARED not completely supported on !MMU\n");
 789					return -EINVAL;
 790				}
 791			}
 792		}
 793
 794		/* handle executable mappings and implied executable
 795		 * mappings */
 796		if (path_noexec(&file->f_path)) {
 797			if (prot & PROT_EXEC)
 798				return -EPERM;
 799		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 800			/* handle implication of PROT_EXEC by PROT_READ */
 801			if (current->personality & READ_IMPLIES_EXEC) {
 802				if (capabilities & NOMMU_MAP_EXEC)
 803					prot |= PROT_EXEC;
 804			}
 805		} else if ((prot & PROT_READ) &&
 806			 (prot & PROT_EXEC) &&
 807			 !(capabilities & NOMMU_MAP_EXEC)
 808			 ) {
 809			/* backing file is not executable, try to copy */
 810			capabilities &= ~NOMMU_MAP_DIRECT;
 811		}
 812	} else {
 813		/* anonymous mappings are always memory backed and can be
 814		 * privately mapped
 815		 */
 816		capabilities = NOMMU_MAP_COPY;
 817
 818		/* handle PROT_EXEC implication by PROT_READ */
 819		if ((prot & PROT_READ) &&
 820		    (current->personality & READ_IMPLIES_EXEC))
 821			prot |= PROT_EXEC;
 822	}
 823
 824	/* allow the security API to have its say */
 825	ret = security_mmap_addr(addr);
 826	if (ret < 0)
 827		return ret;
 828
 829	/* looks okay */
 830	*_capabilities = capabilities;
 831	return 0;
 832}
 833
 834/*
 835 * we've determined that we can make the mapping, now translate what we
 836 * now know into VMA flags
 837 */
 838static unsigned long determine_vm_flags(struct file *file,
 839					unsigned long prot,
 840					unsigned long flags,
 841					unsigned long capabilities)
 842{
 843	unsigned long vm_flags;
 844
 845	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
 846
 847	if (!file) {
 848		/*
 849		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
 850		 * there is no fork().
 851		 */
 852		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 853	} else if (flags & MAP_PRIVATE) {
 854		/* MAP_PRIVATE file mapping */
 855		if (capabilities & NOMMU_MAP_DIRECT)
 856			vm_flags |= (capabilities & NOMMU_VMFLAGS);
 857		else
 858			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 859
 860		if (!(prot & PROT_WRITE) && !current->ptrace)
 861			/*
 862			 * R/O private file mapping which cannot be used to
 863			 * modify memory, especially also not via active ptrace
 864			 * (e.g., set breakpoints) or later by upgrading
 865			 * permissions (no mprotect()). We can try overlaying
 866			 * the file mapping, which will work e.g., on chardevs,
 867			 * ramfs/tmpfs/shmfs and romfs/cramfs.
 868			 */
 869			vm_flags |= VM_MAYOVERLAY;
 870	} else {
 871		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
 872		vm_flags |= VM_SHARED | VM_MAYSHARE |
 873			    (capabilities & NOMMU_VMFLAGS);
 874	}
 875
 876	return vm_flags;
 877}
 878
 879/*
 880 * set up a shared mapping on a file (the driver or filesystem provides and
 881 * pins the storage)
 882 */
 883static int do_mmap_shared_file(struct vm_area_struct *vma)
 884{
 885	int ret;
 886
 887	ret = mmap_file(vma->vm_file, vma);
 888	if (ret == 0) {
 889		vma->vm_region->vm_top = vma->vm_region->vm_end;
 890		return 0;
 891	}
 892	if (ret != -ENOSYS)
 893		return ret;
 894
 895	/* getting -ENOSYS indicates that direct mmap isn't possible (as
 896	 * opposed to tried but failed) so we can only give a suitable error as
 897	 * it's not possible to make a private copy if MAP_SHARED was given */
 898	return -ENODEV;
 899}
 900
 901/*
 902 * set up a private mapping or an anonymous shared mapping
 903 */
 904static int do_mmap_private(struct vm_area_struct *vma,
 905			   struct vm_region *region,
 906			   unsigned long len,
 907			   unsigned long capabilities)
 908{
 909	unsigned long total, point;
 910	void *base;
 911	int ret, order;
 912
 913	/*
 914	 * Invoke the file's mapping function so that it can keep track of
 915	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
 916	 * it may attempt to share, which will make is_nommu_shared_mapping()
 917	 * happy.
 918	 */
 919	if (capabilities & NOMMU_MAP_DIRECT) {
 920		ret = mmap_file(vma->vm_file, vma);
 921		/* shouldn't return success if we're not sharing */
 922		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
 923			ret = -ENOSYS;
 924		if (ret == 0) {
 925			vma->vm_region->vm_top = vma->vm_region->vm_end;
 926			return 0;
 927		}
 928		if (ret != -ENOSYS)
 929			return ret;
 930
 931		/* getting an ENOSYS error indicates that direct mmap isn't
 932		 * possible (as opposed to tried but failed) so we'll try to
 933		 * make a private copy of the data and map that instead */
 934	}
 935
 936
 937	/* allocate some memory to hold the mapping
 938	 * - note that this may not return a page-aligned address if the object
 939	 *   we're allocating is smaller than a page
 940	 */
 941	order = get_order(len);
 942	total = 1 << order;
 943	point = len >> PAGE_SHIFT;
 944
 945	/* we don't want to allocate a power-of-2 sized page set */
 946	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
 947		total = point;
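	/*
	 * Worked example (illustrative): for a len of three pages,
	 * get_order() returns 2, so total starts at 4 pages while point
	 * is 3; with the default sysctl_nr_trim_pages of 1 the one-page
	 * excess qualifies and the request is trimmed to exactly 3 pages.
	 */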
 948
 949	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 950	if (!base)
 951		goto enomem;
 952
 953	atomic_long_add(total, &mmap_pages_allocated);
 954
 955	vm_flags_set(vma, VM_MAPPED_COPY);
 956	region->vm_flags = vma->vm_flags;
 957	region->vm_start = (unsigned long) base;
 958	region->vm_end   = region->vm_start + len;
 959	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 960
 961	vma->vm_start = region->vm_start;
 962	vma->vm_end   = region->vm_start + len;
 963
 964	if (vma->vm_file) {
 965		/* read the contents of a file into the copy */
 966		loff_t fpos;
 967
 968		fpos = vma->vm_pgoff;
 969		fpos <<= PAGE_SHIFT;
 970
 971		ret = kernel_read(vma->vm_file, base, len, &fpos);
 972		if (ret < 0)
 973			goto error_free;
 974
 975		/* clear the last little bit */
 976		if (ret < len)
 977			memset(base + ret, 0, len - ret);
 978
 979	} else {
 980		vma_set_anonymous(vma);
 981	}
 982
 983	return 0;
 984
 985error_free:
 986	free_page_series(region->vm_start, region->vm_top);
 987	region->vm_start = vma->vm_start = 0;
 988	region->vm_end   = vma->vm_end = 0;
 989	region->vm_top   = 0;
 990	return ret;
 991
 992enomem:
 993	pr_err("Allocation of length %lu from process %d (%s) failed\n",
 994	       len, current->pid, current->comm);
 995	show_mem();
 996	return -ENOMEM;
 997}
 998
 999/*
1000 * handle mapping creation for uClinux
1001 */
1002unsigned long do_mmap(struct file *file,
1003			unsigned long addr,
1004			unsigned long len,
1005			unsigned long prot,
1006			unsigned long flags,
1007			vm_flags_t vm_flags,
1008			unsigned long pgoff,
1009			unsigned long *populate,
1010			struct list_head *uf)
1011{
1012	struct vm_area_struct *vma;
1013	struct vm_region *region;
1014	struct rb_node *rb;
1015	unsigned long capabilities, result;
1016	int ret;
1017	VMA_ITERATOR(vmi, current->mm, 0);
1018
1019	*populate = 0;
1020
1021	/* decide whether we should attempt the mapping, and if so what sort of
1022	 * mapping */
1023	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1024				    &capabilities);
1025	if (ret < 0)
1026		return ret;
1027
1028	/* we ignore the address hint */
1029	addr = 0;
1030	len = PAGE_ALIGN(len);
1031
1032	/* we've determined that we can make the mapping, now translate what we
1033	 * now know into VMA flags */
1034	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1035
1036
1037	/* we're going to need to record the mapping */
1038	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1039	if (!region)
1040		goto error_getting_region;
1041
1042	vma = vm_area_alloc(current->mm);
1043	if (!vma)
1044		goto error_getting_vma;
1045
1046	region->vm_usage = 1;
1047	region->vm_flags = vm_flags;
1048	region->vm_pgoff = pgoff;
1049
1050	vm_flags_init(vma, vm_flags);
1051	vma->vm_pgoff = pgoff;
1052
1053	if (file) {
1054		region->vm_file = get_file(file);
1055		vma->vm_file = get_file(file);
1056	}
1057
1058	down_write(&nommu_region_sem);
1059
1060	/* if we want to share, we need to check for regions created by other
1061	 * mmap() calls that overlap with our proposed mapping
1062	 * - we can only share with a superset match on most regular files
1063	 * - shared mappings on character devices and memory backed files are
1064	 *   permitted to overlap inexactly as far as we are concerned, for in
1065	 *   these cases sharing is handled in the driver or filesystem rather
1066	 *   than here
1067	 */
1068	if (is_nommu_shared_mapping(vm_flags)) {
1069		struct vm_region *pregion;
1070		unsigned long pglen, rpglen, pgend, rpgend, start;
1071
1072		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1073		pgend = pgoff + pglen;
1074
1075		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1076			pregion = rb_entry(rb, struct vm_region, vm_rb);
1077
1078			if (!is_nommu_shared_mapping(pregion->vm_flags))
1079				continue;
1080
1081			/* search for overlapping mappings on the same file */
1082			if (file_inode(pregion->vm_file) !=
1083			    file_inode(file))
1084				continue;
1085
1086			if (pregion->vm_pgoff >= pgend)
1087				continue;
1088
1089			rpglen = pregion->vm_end - pregion->vm_start;
1090			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1091			rpgend = pregion->vm_pgoff + rpglen;
1092			if (pgoff >= rpgend)
1093				continue;
1094
1095			/* handle inexactly overlapping matches between
1096			 * mappings */
1097			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1098			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1099				/* new mapping is not a subset of the region */
1100				if (!(capabilities & NOMMU_MAP_DIRECT))
1101					goto sharing_violation;
1102				continue;
1103			}
1104
1105			/* we've found a region we can share */
1106			pregion->vm_usage++;
1107			vma->vm_region = pregion;
1108			start = pregion->vm_start;
1109			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1110			vma->vm_start = start;
1111			vma->vm_end = start + len;
1112
1113			if (pregion->vm_flags & VM_MAPPED_COPY)
1114				vm_flags_set(vma, VM_MAPPED_COPY);
1115			else {
1116				ret = do_mmap_shared_file(vma);
1117				if (ret < 0) {
1118					vma->vm_region = NULL;
1119					vma->vm_start = 0;
1120					vma->vm_end = 0;
1121					pregion->vm_usage--;
1122					pregion = NULL;
1123					goto error_just_free;
1124				}
1125			}
1126			fput(region->vm_file);
1127			kmem_cache_free(vm_region_jar, region);
1128			region = pregion;
1129			result = start;
1130			goto share;
1131		}
1132
1133		/* obtain the address at which to make a shared mapping
1134		 * - this is the hook for quasi-memory character devices to
1135		 *   tell us the location of a shared mapping
1136		 */
1137		if (capabilities & NOMMU_MAP_DIRECT) {
1138			addr = file->f_op->get_unmapped_area(file, addr, len,
1139							     pgoff, flags);
1140			if (IS_ERR_VALUE(addr)) {
1141				ret = addr;
1142				if (ret != -ENOSYS)
1143					goto error_just_free;
1144
1145				/* the driver refused to tell us where to site
1146				 * the mapping so we'll have to attempt to copy
1147				 * it */
1148				ret = -ENODEV;
1149				if (!(capabilities & NOMMU_MAP_COPY))
1150					goto error_just_free;
1151
1152				capabilities &= ~NOMMU_MAP_DIRECT;
1153			} else {
1154				vma->vm_start = region->vm_start = addr;
1155				vma->vm_end = region->vm_end = addr + len;
1156			}
1157		}
1158	}
1159
1160	vma->vm_region = region;
1161
1162	/* set up the mapping
1163	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1164	 */
1165	if (file && vma->vm_flags & VM_SHARED)
1166		ret = do_mmap_shared_file(vma);
1167	else
1168		ret = do_mmap_private(vma, region, len, capabilities);
1169	if (ret < 0)
1170		goto error_just_free;
1171	add_nommu_region(region);
1172
1173	/* clear anonymous mappings that don't ask for uninitialized data */
1174	if (!vma->vm_file &&
1175	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1176	     !(flags & MAP_UNINITIALIZED)))
1177		memset((void *)region->vm_start, 0,
1178		       region->vm_end - region->vm_start);
1179
1180	/* okay... we have a mapping; now we have to register it */
1181	result = vma->vm_start;
1182
1183	current->mm->total_vm += len >> PAGE_SHIFT;
1184
1185share:
1186	BUG_ON(!vma->vm_region);
1187	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1188	if (vma_iter_prealloc(&vmi, vma))
1189		goto error_just_free;
1190
1191	setup_vma_to_mm(vma, current->mm);
1192	current->mm->map_count++;
1193	/* add the VMA to the tree */
1194	vma_iter_store(&vmi, vma);
1195
1196	/* we flush the region from the icache only when the first executable
1197	 * mapping of it is made  */
1198	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1199		flush_icache_user_range(region->vm_start, region->vm_end);
1200		region->vm_icache_flushed = true;
1201	}
1202
1203	up_write(&nommu_region_sem);
1204
1205	return result;
1206
1207error_just_free:
1208	up_write(&nommu_region_sem);
1209error:
1210	vma_iter_free(&vmi);
1211	if (region->vm_file)
1212		fput(region->vm_file);
1213	kmem_cache_free(vm_region_jar, region);
1214	if (vma->vm_file)
1215		fput(vma->vm_file);
1216	vm_area_free(vma);
1217	return ret;
1218
1219sharing_violation:
1220	up_write(&nommu_region_sem);
1221	pr_warn("Attempt to share mismatched mappings\n");
1222	ret = -EINVAL;
1223	goto error;
1224
1225error_getting_vma:
1226	kmem_cache_free(vm_region_jar, region);
1227	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1228			len, current->pid);
1229	show_mem();
1230	return -ENOMEM;
1231
1232error_getting_region:
1233	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1234			len, current->pid);
1235	show_mem();
1236	return -ENOMEM;
1237}
1238
1239unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1240			      unsigned long prot, unsigned long flags,
1241			      unsigned long fd, unsigned long pgoff)
1242{
1243	struct file *file = NULL;
1244	unsigned long retval = -EBADF;
1245
1246	audit_mmap_fd(fd, flags);
1247	if (!(flags & MAP_ANONYMOUS)) {
1248		file = fget(fd);
1249		if (!file)
1250			goto out;
1251	}
1252
1253	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1254
1255	if (file)
1256		fput(file);
1257out:
1258	return retval;
1259}
1260
1261SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1262		unsigned long, prot, unsigned long, flags,
1263		unsigned long, fd, unsigned long, pgoff)
1264{
1265	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1266}
1267
1268#ifdef __ARCH_WANT_SYS_OLD_MMAP
1269struct mmap_arg_struct {
1270	unsigned long addr;
1271	unsigned long len;
1272	unsigned long prot;
1273	unsigned long flags;
1274	unsigned long fd;
1275	unsigned long offset;
1276};
1277
1278SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1279{
1280	struct mmap_arg_struct a;
1281
1282	if (copy_from_user(&a, arg, sizeof(a)))
1283		return -EFAULT;
1284	if (offset_in_page(a.offset))
1285		return -EINVAL;
1286
1287	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1288			       a.offset >> PAGE_SHIFT);
1289}
1290#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1291
1292/*
1293 * split a vma into two pieces at address 'addr'; a new vma is allocated either
1294 * for the first part or the tail.
1295 */
1296static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1297		     unsigned long addr, int new_below)
1298{
1299	struct vm_area_struct *new;
1300	struct vm_region *region;
1301	unsigned long npages;
1302	struct mm_struct *mm;
1303
1304	/* we're only permitted to split anonymous regions (these should have
1305	 * only a single usage on the region) */
1306	if (vma->vm_file)
1307		return -ENOMEM;
1308
1309	mm = vma->vm_mm;
1310	if (mm->map_count >= sysctl_max_map_count)
1311		return -ENOMEM;
1312
1313	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1314	if (!region)
1315		return -ENOMEM;
1316
1317	new = vm_area_dup(vma);
1318	if (!new)
1319		goto err_vma_dup;
1320
1321	/* most fields are the same, copy all, and then fixup */
1322	*region = *vma->vm_region;
1323	new->vm_region = region;
1324
1325	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1326
1327	if (new_below) {
1328		region->vm_top = region->vm_end = new->vm_end = addr;
1329	} else {
1330		region->vm_start = new->vm_start = addr;
1331		region->vm_pgoff = new->vm_pgoff += npages;
1332	}
1333
1334	vma_iter_config(vmi, new->vm_start, new->vm_end);
1335	if (vma_iter_prealloc(vmi, vma)) {
1336		pr_warn("Allocation of vma tree for process %d failed\n",
1337			current->pid);
1338		goto err_vmi_preallocate;
1339	}
1340
1341	if (new->vm_ops && new->vm_ops->open)
1342		new->vm_ops->open(new);
1343
1344	down_write(&nommu_region_sem);
1345	delete_nommu_region(vma->vm_region);
1346	if (new_below) {
1347		vma->vm_region->vm_start = vma->vm_start = addr;
1348		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1349	} else {
1350		vma->vm_region->vm_end = vma->vm_end = addr;
1351		vma->vm_region->vm_top = addr;
1352	}
1353	add_nommu_region(vma->vm_region);
1354	add_nommu_region(new->vm_region);
1355	up_write(&nommu_region_sem);
1356
1357	setup_vma_to_mm(vma, mm);
1358	setup_vma_to_mm(new, mm);
1359	vma_iter_store(vmi, new);
1360	mm->map_count++;
1361	return 0;
1362
1363err_vmi_preallocate:
1364	vm_area_free(new);
1365err_vma_dup:
1366	kmem_cache_free(vm_region_jar, region);
1367	return -ENOMEM;
1368}
1369
1370/*
1371 * shrink a VMA by removing the specified chunk from either the beginning or
1372 * the end
1373 */
1374static int vmi_shrink_vma(struct vma_iterator *vmi,
1375		      struct vm_area_struct *vma,
1376		      unsigned long from, unsigned long to)
1377{
1378	struct vm_region *region;
1379
1380	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1381	 * and list */
1382	if (from > vma->vm_start) {
1383		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1384			return -ENOMEM;
1385		vma->vm_end = from;
1386	} else {
1387		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1388			return -ENOMEM;
1389		vma->vm_start = to;
1390	}
1391
1392	/* cut the backing region down to size */
1393	region = vma->vm_region;
1394	BUG_ON(region->vm_usage != 1);
1395
1396	down_write(&nommu_region_sem);
1397	delete_nommu_region(region);
1398	if (from > region->vm_start) {
1399		to = region->vm_top;
1400		region->vm_top = region->vm_end = from;
1401	} else {
1402		region->vm_start = to;
1403	}
1404	add_nommu_region(region);
1405	up_write(&nommu_region_sem);
1406
1407	free_page_series(from, to);
1408	return 0;
1409}
1410
1411/*
1412 * release a mapping
1413 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1414 *   VMA, though it need not cover the whole VMA
1415 */
1416int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1417{
1418	VMA_ITERATOR(vmi, mm, start);
1419	struct vm_area_struct *vma;
1420	unsigned long end;
1421	int ret = 0;
1422
1423	len = PAGE_ALIGN(len);
1424	if (len == 0)
1425		return -EINVAL;
1426
1427	end = start + len;
1428
1429	/* find the first potentially overlapping VMA */
1430	vma = vma_find(&vmi, end);
1431	if (!vma) {
1432		static int limit;
1433		if (limit < 5) {
1434			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1435					current->pid, current->comm,
1436					start, start + len - 1);
1437			limit++;
1438		}
1439		return -EINVAL;
1440	}
1441
1442	/* we're allowed to split an anonymous VMA but not a file-backed one */
1443	if (vma->vm_file) {
1444		do {
1445			if (start > vma->vm_start)
1446				return -EINVAL;
1447			if (end == vma->vm_end)
1448				goto erase_whole_vma;
1449			vma = vma_find(&vmi, end);
1450		} while (vma);
1451		return -EINVAL;
1452	} else {
1453		/* the chunk must be a subset of the VMA found */
1454		if (start == vma->vm_start && end == vma->vm_end)
1455			goto erase_whole_vma;
1456		if (start < vma->vm_start || end > vma->vm_end)
1457			return -EINVAL;
1458		if (offset_in_page(start))
1459			return -EINVAL;
1460		if (end != vma->vm_end && offset_in_page(end))
1461			return -EINVAL;
1462		if (start != vma->vm_start && end != vma->vm_end) {
1463			ret = split_vma(&vmi, vma, start, 1);
1464			if (ret < 0)
1465				return ret;
1466		}
1467		return vmi_shrink_vma(&vmi, vma, start, end);
1468	}
1469
1470erase_whole_vma:
1471	if (delete_vma_from_mm(vma))
1472		ret = -ENOMEM;
1473	else
1474		delete_vma(mm, vma);
1475	return ret;
1476}
1477
1478int vm_munmap(unsigned long addr, size_t len)
1479{
1480	struct mm_struct *mm = current->mm;
1481	int ret;
1482
1483	mmap_write_lock(mm);
1484	ret = do_munmap(mm, addr, len, NULL);
1485	mmap_write_unlock(mm);
1486	return ret;
1487}
1488EXPORT_SYMBOL(vm_munmap);
1489
1490SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1491{
1492	return vm_munmap(addr, len);
1493}
1494
1495/*
1496 * release all the mappings made in a process's VM space
1497 */
1498void exit_mmap(struct mm_struct *mm)
1499{
1500	VMA_ITERATOR(vmi, mm, 0);
1501	struct vm_area_struct *vma;
1502
1503	if (!mm)
1504		return;
1505
1506	mm->total_vm = 0;
1507
1508	/*
1509	 * Lock the mm to avoid assert complaining even though this is the only
1510	 * user of the mm
1511	 */
1512	mmap_write_lock(mm);
1513	for_each_vma(vmi, vma) {
1514		cleanup_vma_from_mm(vma);
1515		delete_vma(mm, vma);
1516		cond_resched();
1517	}
1518	__mt_destroy(&mm->mm_mt);
1519	mmap_write_unlock(mm);
1520}
1521
1522/*
1523 * expand (or shrink) an existing mapping, potentially moving it at the same
1524 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1525 *
1526 * under NOMMU conditions, we only permit changing a mapping's size, and only
1527 * as long as it stays within the region allocated by do_mmap_private() and the
1528 * block is not shareable
1529 *
1530 * MREMAP_FIXED is not supported under NOMMU conditions
1531 */
1532static unsigned long do_mremap(unsigned long addr,
1533			unsigned long old_len, unsigned long new_len,
1534			unsigned long flags, unsigned long new_addr)
1535{
1536	struct vm_area_struct *vma;
1537
1538	/* insanity checks first */
1539	old_len = PAGE_ALIGN(old_len);
1540	new_len = PAGE_ALIGN(new_len);
1541	if (old_len == 0 || new_len == 0)
1542		return (unsigned long) -EINVAL;
1543
1544	if (offset_in_page(addr))
1545		return -EINVAL;
1546
1547	if (flags & MREMAP_FIXED && new_addr != addr)
1548		return (unsigned long) -EINVAL;
1549
1550	vma = find_vma_exact(current->mm, addr, old_len);
1551	if (!vma)
1552		return (unsigned long) -EINVAL;
1553
1554	if (vma->vm_end != vma->vm_start + old_len)
1555		return (unsigned long) -EFAULT;
1556
1557	if (is_nommu_shared_mapping(vma->vm_flags))
1558		return (unsigned long) -EPERM;
1559
1560	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1561		return (unsigned long) -ENOMEM;
1562
1563	/* all checks complete - do it */
1564	vma->vm_end = vma->vm_start + new_len;
1565	return vma->vm_start;
1566}
1567
1568SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1569		unsigned long, new_len, unsigned long, flags,
1570		unsigned long, new_addr)
1571{
1572	unsigned long ret;
1573
1574	mmap_write_lock(current->mm);
1575	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1576	mmap_write_unlock(current->mm);
1577	return ret;
1578}
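/*
 * Illustrative sketch (not part of this file): under NOMMU, mremap()
 * can only resize a mapping in place, and growth is bounded by the
 * backing region that do_mmap_private() originally allocated (so it is
 * only possible where trimming left slack, e.g. with
 * sysctl_nr_trim_pages set to 0).  Hypothetical userspace:
 *
 *	p = mremap(p, 4 * page_size, 2 * page_size, 0);	 // shrink: OK
 *	p = mremap(p, 2 * page_size, 64 * page_size, 0); // grow: likely ENOMEM
 *
 * MREMAP_FIXED to a different address is rejected outright.
 */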
1579
1580int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1581		unsigned long pfn, unsigned long size, pgprot_t prot)
1582{
1583	if (addr != (pfn << PAGE_SHIFT))
1584		return -EINVAL;
1585
1586	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1587	return 0;
1588}
1589EXPORT_SYMBOL(remap_pfn_range);
1590
1591int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1592{
1593	unsigned long pfn = start >> PAGE_SHIFT;
1594	unsigned long vm_len = vma->vm_end - vma->vm_start;
1595
1596	pfn += vma->vm_pgoff;
1597	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1598}
1599EXPORT_SYMBOL(vm_iomap_memory);
1600
1601int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1602			unsigned long pgoff)
1603{
1604	unsigned int size = vma->vm_end - vma->vm_start;
1605
1606	if (!(vma->vm_flags & VM_USERMAP))
1607		return -EINVAL;
1608
1609	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1610	vma->vm_end = vma->vm_start + size;
1611
1612	return 0;
1613}
1614EXPORT_SYMBOL(remap_vmalloc_range);
1615
1616vm_fault_t filemap_fault(struct vm_fault *vmf)
1617{
1618	BUG();
1619	return 0;
1620}
1621EXPORT_SYMBOL(filemap_fault);
1622
1623vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1624		pgoff_t start_pgoff, pgoff_t end_pgoff)
1625{
1626	BUG();
1627	return 0;
1628}
1629EXPORT_SYMBOL(filemap_map_pages);
1630
1631static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1632			      void *buf, int len, unsigned int gup_flags)
1633{
1634	struct vm_area_struct *vma;
1635	int write = gup_flags & FOLL_WRITE;
1636
1637	if (mmap_read_lock_killable(mm))
1638		return 0;
1639
1640	/* the access must start within one of the target process's mappings */
1641	vma = find_vma(mm, addr);
1642	if (vma) {
1643		/* don't overrun this mapping */
1644		if (addr + len >= vma->vm_end)
1645			len = vma->vm_end - addr;
1646
1647		/* only read or write mappings where it is permitted */
1648		if (write && vma->vm_flags & VM_MAYWRITE)
1649			copy_to_user_page(vma, NULL, addr,
1650					 (void *) addr, buf, len);
1651		else if (!write && vma->vm_flags & VM_MAYREAD)
1652			copy_from_user_page(vma, NULL, addr,
1653					    buf, (void *) addr, len);
1654		else
1655			len = 0;
1656	} else {
1657		len = 0;
1658	}
1659
1660	mmap_read_unlock(mm);
1661
1662	return len;
1663}
1664
1665/**
1666 * access_remote_vm - access another process' address space
1667 * @mm:		the mm_struct of the target address space
1668 * @addr:	start address to access
1669 * @buf:	source or destination buffer
1670 * @len:	number of bytes to transfer
1671 * @gup_flags:	flags modifying lookup behaviour
1672 *
1673 * The caller must hold a reference on @mm.
1674 */
1675int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1676		void *buf, int len, unsigned int gup_flags)
1677{
1678	return __access_remote_vm(mm, addr, buf, len, gup_flags);
1679}
1680
1681/*
1682 * Access another process' address space.
1683 * - source/target buffer must be kernel space
1684 */
1685int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1686		unsigned int gup_flags)
1687{
1688	struct mm_struct *mm;
1689
1690	if (addr + len < addr)
1691		return 0;
1692
1693	mm = get_task_mm(tsk);
1694	if (!mm)
1695		return 0;
1696
1697	len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1698
1699	mmput(mm);
1700	return len;
1701}
1702EXPORT_SYMBOL_GPL(access_process_vm);
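/*
 * Illustrative sketch (not part of this file): access_process_vm() is
 * how ptrace-style debuggers peek and poke a tracee on NOMMU.  A
 * hypothetical in-kernel caller reading one word from another task:
 *
 *	unsigned long word;
 *	int copied = access_process_vm(child, addr, &word, sizeof(word), 0);
 *
 *	if (copied != sizeof(word))
 *		return -EIO;	// addr wasn't inside any of the tracee's mappings
 *
 * Passing FOLL_WRITE in gup_flags selects the write direction instead.
 */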
1703
1704/**
1705 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1706 * @inode: The inode to check
1707 * @size: The current filesize of the inode
1708 * @newsize: The proposed filesize of the inode
1709 *
1710 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1711 * make sure that any outstanding VMAs aren't broken and then shrink the
1712 * vm_regions that extend beyond it so that do_mmap() doesn't
1713 * automatically grant mappings that are too large.
1714 */
1715int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1716				size_t newsize)
1717{
1718	struct vm_area_struct *vma;
1719	struct vm_region *region;
1720	pgoff_t low, high;
1721	size_t r_size, r_top;
1722
1723	low = newsize >> PAGE_SHIFT;
1724	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1725
1726	down_write(&nommu_region_sem);
1727	i_mmap_lock_read(inode->i_mapping);
1728
1729	/* search for VMAs that fall within the dead zone */
1730	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1731		/* found one - only interested if it's shared out of the page
1732		 * cache */
1733		if (vma->vm_flags & VM_SHARED) {
1734			i_mmap_unlock_read(inode->i_mapping);
1735			up_write(&nommu_region_sem);
1736			return -ETXTBSY; /* not quite true, but near enough */
1737		}
1738	}
1739
1740	/* reduce any regions that overlap the dead zone - if in existence,
1741	 * these will be pointed to by VMAs that don't overlap the dead zone
1742	 *
1743	 * we don't check for any regions that start beyond the EOF as there
1744	 * shouldn't be any
1745	 */
1746	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1747		if (!(vma->vm_flags & VM_SHARED))
1748			continue;
1749
1750		region = vma->vm_region;
1751		r_size = region->vm_top - region->vm_start;
1752		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1753
1754		if (r_top > newsize) {
1755			region->vm_top -= r_top - newsize;
1756			if (region->vm_end > region->vm_top)
1757				region->vm_end = region->vm_top;
1758		}
1759	}
1760
1761	i_mmap_unlock_read(inode->i_mapping);
1762	up_write(&nommu_region_sem);
1763	return 0;
1764}
1765
1766/*
1767 * Initialise sysctl_user_reserve_kbytes.
1768 *
1769 * This is intended to prevent a user from starting a single memory hogging
1770 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1771 * mode.
1772 *
1773 * The default value is min(3% of free memory, 128MB).
1774 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1775 */
1776static int __meminit init_user_reserve(void)
1777{
1778	unsigned long free_kbytes;
1779
1780	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1781
1782	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1783	return 0;
1784}
1785subsys_initcall(init_user_reserve);
1786
1787/*
1788 * Initialise sysctl_admin_reserve_kbytes.
1789 *
1790 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1791 * to log in and kill a memory hogging process.
1792 *
1793 * Systems with more than 256MB will reserve 8MB, enough to recover
1794 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1795 * only reserve 3% of free pages by default.
1796 */
1797static int __meminit init_admin_reserve(void)
1798{
1799	unsigned long free_kbytes;
1800
1801	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1802
1803	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1804	return 0;
1805}
1806subsys_initcall(init_admin_reserve);