v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPUs that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/vmacache.h>
  23#include <linux/mman.h>
  24#include <linux/swap.h>
  25#include <linux/file.h>
  26#include <linux/highmem.h>
  27#include <linux/pagemap.h>
  28#include <linux/slab.h>
  29#include <linux/vmalloc.h>
  30#include <linux/blkdev.h>
  31#include <linux/backing-dev.h>
  32#include <linux/compiler.h>
  33#include <linux/mount.h>
  34#include <linux/personality.h>
  35#include <linux/security.h>
  36#include <linux/syscalls.h>
  37#include <linux/audit.h>
  38#include <linux/printk.h>
  39
  40#include <linux/uaccess.h>
  41#include <asm/tlb.h>
  42#include <asm/tlbflush.h>
  43#include <asm/mmu_context.h>
  44#include "internal.h"
  45
  46void *high_memory;
  47EXPORT_SYMBOL(high_memory);
  48struct page *mem_map;
  49unsigned long max_mapnr;
  50EXPORT_SYMBOL(max_mapnr);
  51unsigned long highest_memmap_pfn;
  52int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  53int heap_stack_gap = 0;
  54
  55atomic_long_t mmap_pages_allocated;
  56
  57EXPORT_SYMBOL(mem_map);
  58
  59/* list of mapped, potentially shareable regions */
  60static struct kmem_cache *vm_region_jar;
  61struct rb_root nommu_region_tree = RB_ROOT;
  62DECLARE_RWSEM(nommu_region_sem);
  63
  64const struct vm_operations_struct generic_file_vm_ops = {
  65};
  66
  67/*
  68 * Return the total memory allocated for this pointer, not
  69 * just what the caller asked for.
  70 *
  71 * Doesn't have to be accurate, i.e. may have races.
  72 */
  73unsigned int kobjsize(const void *objp)
  74{
  75	struct page *page;
  76
  77	/*
  78	 * If the object we have should not have ksize performed on it,
  79	 * return size of 0
  80	 */
  81	if (!objp || !virt_addr_valid(objp))
  82		return 0;
  83
  84	page = virt_to_head_page(objp);
  85
  86	/*
  87	 * If the allocator sets PageSlab, we know the pointer came from
  88	 * kmalloc().
  89	 */
  90	if (PageSlab(page))
  91		return ksize(objp);
  92
  93	/*
  94	 * If it's not a compound page, see if we have a matching VMA
  95	 * region. This test is intentionally done in reverse order,
  96	 * so if there's no VMA, we still fall through and hand back
  97	 * PAGE_SIZE for 0-order pages.
  98	 */
  99	if (!PageCompound(page)) {
 100		struct vm_area_struct *vma;
 101
 102		vma = find_vma(current->mm, (unsigned long)objp);
 103		if (vma)
 104			return vma->vm_end - vma->vm_start;
 105	}
 106
 107	/*
 108	 * The ksize() function is only guaranteed to work for pointers
 109	 * returned by kmalloc(). So handle arbitrary pointers here.
 110	 */
 111	return page_size(page);
 112}
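
/*
 * Illustrative sketch (not part of nommu.c): how a !MMU-aware driver might
 * use kobjsize() to learn how much memory really backs an allocation. The
 * helper below is hypothetical; kobjsize() is declared in <linux/mm.h>.
 */
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/mm.h>

static int example_report_backing_size(void)
{
	void *buf = kmalloc(3000, GFP_KERNEL);	/* rounded up by the allocator */

	if (!buf)
		return -ENOMEM;

	/* reports the full backing size (e.g. 4096 here), not the 3000 asked for */
	pr_info("kobjsize(buf) = %u\n", kobjsize(buf));

	kfree(buf);
	return 0;
}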
 113
 114/**
 115 * follow_pfn - look up PFN at a user virtual address
 116 * @vma: memory mapping
 117 * @address: user virtual address
 118 * @pfn: location to store found PFN
 119 *
 120 * Only IO mappings and raw PFN mappings are allowed.
 121 *
 122 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 123 */
 124int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 125	unsigned long *pfn)
 126{
 127	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 128		return -EINVAL;
 129
 130	*pfn = address >> PAGE_SHIFT;
 131	return 0;
 132}
 133EXPORT_SYMBOL(follow_pfn);
 134
 135LIST_HEAD(vmap_area_list);
 136
 137void vfree(const void *addr)
 138{
 139	kfree(addr);
 140}
 141EXPORT_SYMBOL(vfree);
 142
 143void *__vmalloc(unsigned long size, gfp_t gfp_mask)
 144{
 145	/*
 146	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 147	 * returns only a logical address.
 148	 */
 149	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 150}
 151EXPORT_SYMBOL(__vmalloc);
 152
 153void *__vmalloc_node_range(unsigned long size, unsigned long align,
 154		unsigned long start, unsigned long end, gfp_t gfp_mask,
 155		pgprot_t prot, unsigned long vm_flags, int node,
 156		const void *caller)
 157{
 158	return __vmalloc(size, gfp_mask);
 159}
 160
 161void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 162		int node, const void *caller)
 163{
 164	return __vmalloc(size, gfp_mask);
 165}
 166
 167static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 168{
 169	void *ret;
 170
 171	ret = __vmalloc(size, flags);
 172	if (ret) {
 173		struct vm_area_struct *vma;
 174
 175		mmap_write_lock(current->mm);
 176		vma = find_vma(current->mm, (unsigned long)ret);
 177		if (vma)
 178			vma->vm_flags |= VM_USERMAP;
 179		mmap_write_unlock(current->mm);
 180	}
 181
 182	return ret;
 183}
 184
 185void *vmalloc_user(unsigned long size)
 186{
 187	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 188}
 189EXPORT_SYMBOL(vmalloc_user);
 190
 191struct page *vmalloc_to_page(const void *addr)
 192{
 193	return virt_to_page(addr);
 194}
 195EXPORT_SYMBOL(vmalloc_to_page);
 196
 197unsigned long vmalloc_to_pfn(const void *addr)
 198{
 199	return page_to_pfn(virt_to_page(addr));
 200}
 201EXPORT_SYMBOL(vmalloc_to_pfn);
 202
 203long vread(char *buf, char *addr, unsigned long count)
 204{
 205	/* Don't allow overflow */
 206	if ((unsigned long) buf + count < count)
 207		count = -(unsigned long) buf;
 208
 209	memcpy(buf, addr, count);
 210	return count;
 211}
 212
 213long vwrite(char *buf, char *addr, unsigned long count)
 214{
 215	/* Don't allow overflow */
 216	if ((unsigned long) addr + count < count)
 217		count = -(unsigned long) addr;
 218
 219	memcpy(addr, buf, count);
 220	return count;
 221}
 222
 223/*
 224 *	vmalloc  -  allocate virtually contiguous memory
 225 *
 226 *	@size:		allocation size
 227 *
 228 *	Allocate enough pages to cover @size from the page level
 229 *	allocator and map them into contiguous kernel virtual space.
 230 *
 231 *	For tight control over page level allocator and protection flags
 232 *	use __vmalloc() instead.
 233 */
 234void *vmalloc(unsigned long size)
 235{
 236       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
 237}
 238EXPORT_SYMBOL(vmalloc);
 239
 240/*
 241 *	vzalloc - allocate virtually contiguous memory with zero fill
 242 *
 243 *	@size:		allocation size
 244 *
 245 *	Allocate enough pages to cover @size from the page level
 246 *	allocator and map them into contiguous kernel virtual space.
 247 *	The memory allocated is set to zero.
 248 *
 249 *	For tight control over page level allocator and protection flags
 250 *	use __vmalloc() instead.
 251 */
 252void *vzalloc(unsigned long size)
 253{
 254	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 255}
 256EXPORT_SYMBOL(vzalloc);
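
/*
 * Illustrative sketch (not part of nommu.c): as the implementations above
 * show, vmalloc()/vzalloc() are backed by kmalloc() on !MMU kernels, so the
 * memory is physically contiguous and large requests can fail where an MMU
 * kernel would still succeed. The helper below is hypothetical.
 */
#include <linux/types.h>
#include <linux/vmalloc.h>

static void *example_alloc_table(size_t entries)
{
	void *tbl = vzalloc(entries * sizeof(u64));

	if (!tbl)
		return NULL;	/* no contiguous run of pages was available */

	return tbl;		/* release with vfree(), which is kfree() here */
}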
 257
 258/**
 259 * vmalloc_node - allocate memory on a specific node
 260 * @size:	allocation size
 261 * @node:	numa node
 262 *
 263 * Allocate enough pages to cover @size from the page level
 264 * allocator and map them into contiguous kernel virtual space.
 265 *
 266 * For tight control over page level allocator and protection flags
 267 * use __vmalloc() instead.
 268 */
 269void *vmalloc_node(unsigned long size, int node)
 270{
 271	return vmalloc(size);
 272}
 273EXPORT_SYMBOL(vmalloc_node);
 274
 275/**
 276 * vzalloc_node - allocate memory on a specific node with zero fill
 277 * @size:	allocation size
 278 * @node:	numa node
 279 *
 280 * Allocate enough pages to cover @size from the page level
 281 * allocator and map them into contiguous kernel virtual space.
 282 * The memory allocated is set to zero.
 283 *
 284 * For tight control over page level allocator and protection flags
 285 * use __vmalloc() instead.
 286 */
 287void *vzalloc_node(unsigned long size, int node)
 288{
 289	return vzalloc(size);
 290}
 291EXPORT_SYMBOL(vzalloc_node);
 292
 293/**
 294 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 295 *	@size:		allocation size
 296 *
 297 *	Allocate enough 32bit PA addressable pages to cover @size from the
 298 *	page level allocator and map them into contiguous kernel virtual space.
 299 */
 300void *vmalloc_32(unsigned long size)
 301{
 302	return __vmalloc(size, GFP_KERNEL);
 303}
 304EXPORT_SYMBOL(vmalloc_32);
 305
 306/**
 307 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 308 *	@size:		allocation size
 309 *
 310 * The resulting memory area is 32bit addressable and zeroed so it can be
 311 * mapped to userspace without leaking data.
 312 *
 313 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 314 * remap_vmalloc_range() are permissible.
 315 */
 316void *vmalloc_32_user(unsigned long size)
 317{
 318	/*
 319	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 320	 * but for now this can simply use vmalloc_user() directly.
 321	 */
 322	return vmalloc_user(size);
 323}
 324EXPORT_SYMBOL(vmalloc_32_user);
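
/*
 * Illustrative sketch (not part of nommu.c): a hypothetical driver that
 * exports a vmalloc_user() buffer to userspace from its ->mmap() handler.
 * This relies on VM_USERMAP having been set above, which is what makes the
 * later remap_vmalloc_range() call permissible.
 */
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mm.h>

static void *example_buf;	/* assume this was set up with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}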
 325
 326void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 327{
 328	BUG();
 329	return NULL;
 330}
 331EXPORT_SYMBOL(vmap);
 332
 333void vunmap(const void *addr)
 334{
 335	BUG();
 336}
 337EXPORT_SYMBOL(vunmap);
 338
 339void *vm_map_ram(struct page **pages, unsigned int count, int node)
 340{
 341	BUG();
 342	return NULL;
 343}
 344EXPORT_SYMBOL(vm_map_ram);
 345
 346void vm_unmap_ram(const void *mem, unsigned int count)
 347{
 348	BUG();
 349}
 350EXPORT_SYMBOL(vm_unmap_ram);
 351
 352void vm_unmap_aliases(void)
 353{
 354}
 355EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 356
 357struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 358{
 359	BUG();
 360	return NULL;
 361}
 362EXPORT_SYMBOL_GPL(alloc_vm_area);
 363
 364void free_vm_area(struct vm_struct *area)
 365{
 366	BUG();
 367}
 368EXPORT_SYMBOL_GPL(free_vm_area);
 369
 370int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 371		   struct page *page)
 372{
 373	return -EINVAL;
 374}
 375EXPORT_SYMBOL(vm_insert_page);
 376
 377int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 378			unsigned long num)
 379{
 380	return -EINVAL;
 381}
 382EXPORT_SYMBOL(vm_map_pages);
 383
 384int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 385				unsigned long num)
 386{
 387	return -EINVAL;
 388}
 389EXPORT_SYMBOL(vm_map_pages_zero);
 390
 391/*
 392 *  sys_brk() for the most part doesn't need the global kernel
 393 *  lock, except when an application is doing something nasty
 394 *  like trying to un-brk an area that has already been mapped
 395 *  to a regular file.  in this case, the unmapping will need
 396 *  to invoke file system routines that need the global lock.
 397 */
 398SYSCALL_DEFINE1(brk, unsigned long, brk)
 399{
 400	struct mm_struct *mm = current->mm;
 401
 402	if (brk < mm->start_brk || brk > mm->context.end_brk)
 403		return mm->brk;
 404
 405	if (mm->brk == brk)
 406		return mm->brk;
 407
 408	/*
 409	 * Always allow shrinking brk
 410	 */
 411	if (brk <= mm->brk) {
 412		mm->brk = brk;
 413		return brk;
 414	}
 415
 416	/*
 417	 * Ok, looks good - let it rip.
 418	 */
 419	flush_icache_user_range(mm->brk, brk);
 420	return mm->brk = brk;
 421}
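
/*
 * Illustrative sketch (not part of nommu.c): what the brk() rules above mean
 * for a userspace program on a !MMU target. The break may shrink freely but
 * may only grow while it stays below mm->context.end_brk, the region set
 * aside at execve() time; beyond that sbrk() fails and C libraries typically
 * fall back to mmap() for heap memory. Hypothetical userspace example.
 */
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	void *cur = sbrk(0);

	printf("current break: %p\n", cur);

	/* may fail with ENOMEM once the reserved brk area is exhausted */
	if (sbrk(4096) == (void *)-1)
		perror("sbrk");

	return 0;
}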
 422
 423/*
 424 * initialise the percpu counter for VM and region record slabs
 425 */
 426void __init mmap_init(void)
 427{
 428	int ret;
 429
 430	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 431	VM_BUG_ON(ret);
 432	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 433}
 434
 435/*
 436 * validate the region tree
 437 * - the caller must hold the region lock
 438 */
 439#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 440static noinline void validate_nommu_regions(void)
 441{
 442	struct vm_region *region, *last;
 443	struct rb_node *p, *lastp;
 444
 445	lastp = rb_first(&nommu_region_tree);
 446	if (!lastp)
 447		return;
 448
 449	last = rb_entry(lastp, struct vm_region, vm_rb);
 450	BUG_ON(last->vm_end <= last->vm_start);
 451	BUG_ON(last->vm_top < last->vm_end);
 452
 453	while ((p = rb_next(lastp))) {
 454		region = rb_entry(p, struct vm_region, vm_rb);
 455		last = rb_entry(lastp, struct vm_region, vm_rb);
 456
 457		BUG_ON(region->vm_end <= region->vm_start);
 458		BUG_ON(region->vm_top < region->vm_end);
 459		BUG_ON(region->vm_start < last->vm_top);
 460
 461		lastp = p;
 462	}
 463}
 464#else
 465static void validate_nommu_regions(void)
 466{
 467}
 468#endif
 469
 470/*
 471 * add a region into the global tree
 472 */
 473static void add_nommu_region(struct vm_region *region)
 474{
 475	struct vm_region *pregion;
 476	struct rb_node **p, *parent;
 477
 478	validate_nommu_regions();
 479
 480	parent = NULL;
 481	p = &nommu_region_tree.rb_node;
 482	while (*p) {
 483		parent = *p;
 484		pregion = rb_entry(parent, struct vm_region, vm_rb);
 485		if (region->vm_start < pregion->vm_start)
 486			p = &(*p)->rb_left;
 487		else if (region->vm_start > pregion->vm_start)
 488			p = &(*p)->rb_right;
 489		else if (pregion == region)
 490			return;
 491		else
 492			BUG();
 493	}
 494
 495	rb_link_node(&region->vm_rb, parent, p);
 496	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 497
 498	validate_nommu_regions();
 499}
 500
 501/*
 502 * delete a region from the global tree
 503 */
 504static void delete_nommu_region(struct vm_region *region)
 505{
 506	BUG_ON(!nommu_region_tree.rb_node);
 507
 508	validate_nommu_regions();
 509	rb_erase(&region->vm_rb, &nommu_region_tree);
 510	validate_nommu_regions();
 511}
 512
 513/*
 514 * free a contiguous series of pages
 515 */
 516static void free_page_series(unsigned long from, unsigned long to)
 517{
 518	for (; from < to; from += PAGE_SIZE) {
 519		struct page *page = virt_to_page(from);
 520
 521		atomic_long_dec(&mmap_pages_allocated);
 522		put_page(page);
 523	}
 524}
 525
 526/*
 527 * release a reference to a region
 528 * - the caller must hold the region semaphore for writing, which this releases
 529 * - the region may not have been added to the tree yet, in which case vm_top
 530 *   will equal vm_start
 531 */
 532static void __put_nommu_region(struct vm_region *region)
 533	__releases(nommu_region_sem)
 534{
 535	BUG_ON(!nommu_region_tree.rb_node);
 536
 537	if (--region->vm_usage == 0) {
 538		if (region->vm_top > region->vm_start)
 539			delete_nommu_region(region);
 540		up_write(&nommu_region_sem);
 541
 542		if (region->vm_file)
 543			fput(region->vm_file);
 544
 545		/* IO memory and memory shared directly out of the pagecache
 546		 * from ramfs/tmpfs mustn't be released here */
 547		if (region->vm_flags & VM_MAPPED_COPY)
 548			free_page_series(region->vm_start, region->vm_top);
 549		kmem_cache_free(vm_region_jar, region);
 550	} else {
 551		up_write(&nommu_region_sem);
 552	}
 553}
 554
 555/*
 556 * release a reference to a region
 557 */
 558static void put_nommu_region(struct vm_region *region)
 559{
 560	down_write(&nommu_region_sem);
 561	__put_nommu_region(region);
 562}
 563
 564/*
 565 * add a VMA into a process's mm_struct in the appropriate place in the list
 566 * and tree and add to the address space's page tree also if not an anonymous
 567 * page
 568 * - should be called with mm->mmap_lock held writelocked
 569 */
 570static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 571{
 572	struct vm_area_struct *pvma, *prev;
 573	struct address_space *mapping;
 574	struct rb_node **p, *parent, *rb_prev;
 575
 576	BUG_ON(!vma->vm_region);
 577
 578	mm->map_count++;
 579	vma->vm_mm = mm;
 580
 581	/* add the VMA to the mapping */
 582	if (vma->vm_file) {
 583		mapping = vma->vm_file->f_mapping;
 584
 585		i_mmap_lock_write(mapping);
 586		flush_dcache_mmap_lock(mapping);
 587		vma_interval_tree_insert(vma, &mapping->i_mmap);
 588		flush_dcache_mmap_unlock(mapping);
 589		i_mmap_unlock_write(mapping);
 590	}
 591
 592	/* add the VMA to the tree */
 593	parent = rb_prev = NULL;
 594	p = &mm->mm_rb.rb_node;
 595	while (*p) {
 596		parent = *p;
 597		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 598
 599		/* sort by: start addr, end addr, VMA struct addr in that order
 600		 * (the latter is necessary as we may get identical VMAs) */
 601		if (vma->vm_start < pvma->vm_start)
 602			p = &(*p)->rb_left;
 603		else if (vma->vm_start > pvma->vm_start) {
 604			rb_prev = parent;
 605			p = &(*p)->rb_right;
 606		} else if (vma->vm_end < pvma->vm_end)
 607			p = &(*p)->rb_left;
 608		else if (vma->vm_end > pvma->vm_end) {
 609			rb_prev = parent;
 610			p = &(*p)->rb_right;
 611		} else if (vma < pvma)
 612			p = &(*p)->rb_left;
 613		else if (vma > pvma) {
 614			rb_prev = parent;
 615			p = &(*p)->rb_right;
 616		} else
 617			BUG();
 618	}
 619
 620	rb_link_node(&vma->vm_rb, parent, p);
 621	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 622
 623	/* add VMA to the VMA list also */
 624	prev = NULL;
 625	if (rb_prev)
 626		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 627
 628	__vma_link_list(mm, vma, prev);
 629}
 630
 631/*
 632 * delete a VMA from its owning mm_struct and address space
 633 */
 634static void delete_vma_from_mm(struct vm_area_struct *vma)
 635{
 636	int i;
 637	struct address_space *mapping;
 638	struct mm_struct *mm = vma->vm_mm;
 639	struct task_struct *curr = current;
 640
 641	mm->map_count--;
 642	for (i = 0; i < VMACACHE_SIZE; i++) {
 643		/* if the vma is cached, invalidate the entire cache */
 644		if (curr->vmacache.vmas[i] == vma) {
 645			vmacache_invalidate(mm);
 646			break;
 647		}
 648	}
 649
 650	/* remove the VMA from the mapping */
 651	if (vma->vm_file) {
 652		mapping = vma->vm_file->f_mapping;
 653
 654		i_mmap_lock_write(mapping);
 655		flush_dcache_mmap_lock(mapping);
 656		vma_interval_tree_remove(vma, &mapping->i_mmap);
 657		flush_dcache_mmap_unlock(mapping);
 658		i_mmap_unlock_write(mapping);
 659	}
 660
 661	/* remove from the MM's tree and list */
 662	rb_erase(&vma->vm_rb, &mm->mm_rb);
 663
 664	__vma_unlink_list(mm, vma);
 665}
 666
 667/*
 668 * destroy a VMA record
 669 */
 670static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 671{
 672	if (vma->vm_ops && vma->vm_ops->close)
 673		vma->vm_ops->close(vma);
 674	if (vma->vm_file)
 675		fput(vma->vm_file);
 676	put_nommu_region(vma->vm_region);
 677	vm_area_free(vma);
 678}
 679
 680/*
 681 * look up the first VMA in which addr resides, NULL if none
 682 * - should be called with mm->mmap_lock at least held readlocked
 683 */
 684struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 685{
 686	struct vm_area_struct *vma;
 687
 688	/* check the cache first */
 689	vma = vmacache_find(mm, addr);
 690	if (likely(vma))
 691		return vma;
 692
 693	/* trawl the list (there may be multiple mappings in which addr
 694	 * resides) */
 695	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 696		if (vma->vm_start > addr)
 697			return NULL;
 698		if (vma->vm_end > addr) {
 699			vmacache_update(addr, vma);
 700			return vma;
 701		}
 702	}
 703
 704	return NULL;
 705}
 706EXPORT_SYMBOL(find_vma);
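
/*
 * Illustrative sketch (not part of nommu.c): typical find_vma() usage. As
 * the comment above requires, callers take mmap_lock at least for reading.
 * The helper is hypothetical; note that the !MMU find_vma() only returns a
 * VMA that actually contains the address.
 */
#include <linux/mm.h>

static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	bool mapped;

	mmap_read_lock(mm);
	mapped = find_vma(mm, addr) != NULL;
	mmap_read_unlock(mm);

	return mapped;
}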
 707
 708/*
 709 * find a VMA
 710 * - we don't extend stack VMAs under NOMMU conditions
 711 */
 712struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 713{
 714	return find_vma(mm, addr);
 715}
 716
 717/*
 718 * expand a stack to a given address
 719 * - not supported under NOMMU conditions
 720 */
 721int expand_stack(struct vm_area_struct *vma, unsigned long address)
 722{
 723	return -ENOMEM;
 724}
 725
 726/*
  727 * look up the first VMA that exactly matches addr
 728 * - should be called with mm->mmap_lock at least held readlocked
 729 */
 730static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 731					     unsigned long addr,
 732					     unsigned long len)
 733{
 734	struct vm_area_struct *vma;
 735	unsigned long end = addr + len;
 736
 737	/* check the cache first */
 738	vma = vmacache_find_exact(mm, addr, end);
 739	if (vma)
 740		return vma;
 741
 742	/* trawl the list (there may be multiple mappings in which addr
 743	 * resides) */
 744	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 745		if (vma->vm_start < addr)
 746			continue;
 747		if (vma->vm_start > addr)
 748			return NULL;
 749		if (vma->vm_end == end) {
 750			vmacache_update(addr, vma);
 751			return vma;
 752		}
 753	}
 754
 755	return NULL;
 756}
 757
 758/*
 759 * determine whether a mapping should be permitted and, if so, what sort of
 760 * mapping we're capable of supporting
 761 */
 762static int validate_mmap_request(struct file *file,
 763				 unsigned long addr,
 764				 unsigned long len,
 765				 unsigned long prot,
 766				 unsigned long flags,
 767				 unsigned long pgoff,
 768				 unsigned long *_capabilities)
 769{
 770	unsigned long capabilities, rlen;
 771	int ret;
 772
 773	/* do the simple checks first */
 774	if (flags & MAP_FIXED)
 775		return -EINVAL;
 776
 777	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 778	    (flags & MAP_TYPE) != MAP_SHARED)
 779		return -EINVAL;
 780
 781	if (!len)
 782		return -EINVAL;
 783
 784	/* Careful about overflows.. */
 785	rlen = PAGE_ALIGN(len);
 786	if (!rlen || rlen > TASK_SIZE)
 787		return -ENOMEM;
 788
 789	/* offset overflow? */
 790	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 791		return -EOVERFLOW;
 792
 793	if (file) {
 794		/* files must support mmap */
 795		if (!file->f_op->mmap)
 796			return -ENODEV;
 797
 798		/* work out if what we've got could possibly be shared
 799		 * - we support chardevs that provide their own "memory"
 800		 * - we support files/blockdevs that are memory backed
 801		 */
 802		if (file->f_op->mmap_capabilities) {
 803			capabilities = file->f_op->mmap_capabilities(file);
 804		} else {
 805			/* no explicit capabilities set, so assume some
 806			 * defaults */
 807			switch (file_inode(file)->i_mode & S_IFMT) {
 808			case S_IFREG:
 809			case S_IFBLK:
 810				capabilities = NOMMU_MAP_COPY;
 811				break;
 812
 813			case S_IFCHR:
 814				capabilities =
 815					NOMMU_MAP_DIRECT |
 816					NOMMU_MAP_READ |
 817					NOMMU_MAP_WRITE;
 818				break;
 819
 820			default:
 821				return -EINVAL;
 822			}
 823		}
 824
 825		/* eliminate any capabilities that we can't support on this
 826		 * device */
 827		if (!file->f_op->get_unmapped_area)
 828			capabilities &= ~NOMMU_MAP_DIRECT;
 829		if (!(file->f_mode & FMODE_CAN_READ))
 830			capabilities &= ~NOMMU_MAP_COPY;
 831
 832		/* The file shall have been opened with read permission. */
 833		if (!(file->f_mode & FMODE_READ))
 834			return -EACCES;
 835
 836		if (flags & MAP_SHARED) {
 837			/* do checks for writing, appending and locking */
 838			if ((prot & PROT_WRITE) &&
 839			    !(file->f_mode & FMODE_WRITE))
 840				return -EACCES;
 841
 842			if (IS_APPEND(file_inode(file)) &&
 843			    (file->f_mode & FMODE_WRITE))
 844				return -EACCES;
 845
 846			if (locks_verify_locked(file))
 847				return -EAGAIN;
 848
 849			if (!(capabilities & NOMMU_MAP_DIRECT))
 850				return -ENODEV;
 851
 852			/* we mustn't privatise shared mappings */
 853			capabilities &= ~NOMMU_MAP_COPY;
 854		} else {
 855			/* we're going to read the file into private memory we
 856			 * allocate */
 857			if (!(capabilities & NOMMU_MAP_COPY))
 858				return -ENODEV;
 859
 860			/* we don't permit a private writable mapping to be
 861			 * shared with the backing device */
 862			if (prot & PROT_WRITE)
 863				capabilities &= ~NOMMU_MAP_DIRECT;
 864		}
 865
 866		if (capabilities & NOMMU_MAP_DIRECT) {
 867			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 868			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 869			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 870			    ) {
 871				capabilities &= ~NOMMU_MAP_DIRECT;
 872				if (flags & MAP_SHARED) {
 873					pr_warn("MAP_SHARED not completely supported on !MMU\n");
 874					return -EINVAL;
 875				}
 876			}
 877		}
 878
 879		/* handle executable mappings and implied executable
 880		 * mappings */
 881		if (path_noexec(&file->f_path)) {
 882			if (prot & PROT_EXEC)
 883				return -EPERM;
 884		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 885			/* handle implication of PROT_EXEC by PROT_READ */
 886			if (current->personality & READ_IMPLIES_EXEC) {
 887				if (capabilities & NOMMU_MAP_EXEC)
 888					prot |= PROT_EXEC;
 889			}
 890		} else if ((prot & PROT_READ) &&
 891			 (prot & PROT_EXEC) &&
 892			 !(capabilities & NOMMU_MAP_EXEC)
 893			 ) {
 894			/* backing file is not executable, try to copy */
 895			capabilities &= ~NOMMU_MAP_DIRECT;
 896		}
 897	} else {
 898		/* anonymous mappings are always memory backed and can be
 899		 * privately mapped
 900		 */
 901		capabilities = NOMMU_MAP_COPY;
 902
 903		/* handle PROT_EXEC implication by PROT_READ */
 904		if ((prot & PROT_READ) &&
 905		    (current->personality & READ_IMPLIES_EXEC))
 906			prot |= PROT_EXEC;
 907	}
 908
 909	/* allow the security API to have its say */
 910	ret = security_mmap_addr(addr);
 911	if (ret < 0)
 912		return ret;
 913
 914	/* looks okay */
 915	*_capabilities = capabilities;
 916	return 0;
 917}
 918
 919/*
 920 * we've determined that we can make the mapping, now translate what we
 921 * now know into VMA flags
 922 */
 923static unsigned long determine_vm_flags(struct file *file,
 924					unsigned long prot,
 925					unsigned long flags,
 926					unsigned long capabilities)
 927{
 928	unsigned long vm_flags;
 929
 930	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
 931	/* vm_flags |= mm->def_flags; */
 932
 933	if (!(capabilities & NOMMU_MAP_DIRECT)) {
 934		/* attempt to share read-only copies of mapped file chunks */
 935		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 936		if (file && !(prot & PROT_WRITE))
 937			vm_flags |= VM_MAYSHARE;
 938	} else {
 939		/* overlay a shareable mapping on the backing device or inode
 940		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
 941		 * romfs/cramfs */
 942		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
 943		if (flags & MAP_SHARED)
 944			vm_flags |= VM_SHARED;
 945	}
 946
 947	/* refuse to let anyone share private mappings with this process if
 948	 * it's being traced - otherwise breakpoints set in it may interfere
 949	 * with another untraced process
 950	 */
 951	if ((flags & MAP_PRIVATE) && current->ptrace)
 952		vm_flags &= ~VM_MAYSHARE;
 953
 954	return vm_flags;
 955}
 956
 957/*
 958 * set up a shared mapping on a file (the driver or filesystem provides and
 959 * pins the storage)
 960 */
 961static int do_mmap_shared_file(struct vm_area_struct *vma)
 962{
 963	int ret;
 964
 965	ret = call_mmap(vma->vm_file, vma);
 966	if (ret == 0) {
 967		vma->vm_region->vm_top = vma->vm_region->vm_end;
 968		return 0;
 969	}
 970	if (ret != -ENOSYS)
 971		return ret;
 972
 973	/* getting -ENOSYS indicates that direct mmap isn't possible (as
 974	 * opposed to tried but failed) so we can only give a suitable error as
 975	 * it's not possible to make a private copy if MAP_SHARED was given */
 976	return -ENODEV;
 977}
 978
 979/*
 980 * set up a private mapping or an anonymous shared mapping
 981 */
 982static int do_mmap_private(struct vm_area_struct *vma,
 983			   struct vm_region *region,
 984			   unsigned long len,
 985			   unsigned long capabilities)
 986{
 987	unsigned long total, point;
 988	void *base;
 989	int ret, order;
 990
 991	/* invoke the file's mapping function so that it can keep track of
 992	 * shared mappings on devices or memory
 993	 * - VM_MAYSHARE will be set if it may attempt to share
 994	 */
 995	if (capabilities & NOMMU_MAP_DIRECT) {
 996		ret = call_mmap(vma->vm_file, vma);
 997		if (ret == 0) {
 998			/* shouldn't return success if we're not sharing */
 999			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1000			vma->vm_region->vm_top = vma->vm_region->vm_end;
1001			return 0;
1002		}
1003		if (ret != -ENOSYS)
1004			return ret;
1005
1006		/* getting an ENOSYS error indicates that direct mmap isn't
1007		 * possible (as opposed to tried but failed) so we'll try to
1008		 * make a private copy of the data and map that instead */
1009	}
1010
1011
1012	/* allocate some memory to hold the mapping
1013	 * - note that this may not return a page-aligned address if the object
1014	 *   we're allocating is smaller than a page
1015	 */
1016	order = get_order(len);
1017	total = 1 << order;
1018	point = len >> PAGE_SHIFT;
1019
1020	/* we don't want to allocate a power-of-2 sized page set */
1021	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
1022		total = point;
1023
1024	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
1025	if (!base)
1026		goto enomem;
1027
1028	atomic_long_add(total, &mmap_pages_allocated);
1029
1030	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1031	region->vm_start = (unsigned long) base;
1032	region->vm_end   = region->vm_start + len;
1033	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1034
1035	vma->vm_start = region->vm_start;
1036	vma->vm_end   = region->vm_start + len;
1037
1038	if (vma->vm_file) {
1039		/* read the contents of a file into the copy */
1040		loff_t fpos;
1041
1042		fpos = vma->vm_pgoff;
1043		fpos <<= PAGE_SHIFT;
1044
1045		ret = kernel_read(vma->vm_file, base, len, &fpos);
1046		if (ret < 0)
1047			goto error_free;
1048
1049		/* clear the last little bit */
1050		if (ret < len)
1051			memset(base + ret, 0, len - ret);
1052
1053	} else {
1054		vma_set_anonymous(vma);
1055	}
1056
1057	return 0;
1058
1059error_free:
1060	free_page_series(region->vm_start, region->vm_top);
1061	region->vm_start = vma->vm_start = 0;
1062	region->vm_end   = vma->vm_end = 0;
1063	region->vm_top   = 0;
1064	return ret;
1065
1066enomem:
1067	pr_err("Allocation of length %lu from process %d (%s) failed\n",
1068	       len, current->pid, current->comm);
1069	show_free_areas(0, NULL);
1070	return -ENOMEM;
1071}
1072
1073/*
1074 * handle mapping creation for uClinux
1075 */
1076unsigned long do_mmap(struct file *file,
1077			unsigned long addr,
1078			unsigned long len,
1079			unsigned long prot,
1080			unsigned long flags,
1081			unsigned long pgoff,
1082			unsigned long *populate,
1083			struct list_head *uf)
1084{
1085	struct vm_area_struct *vma;
1086	struct vm_region *region;
1087	struct rb_node *rb;
1088	vm_flags_t vm_flags;
1089	unsigned long capabilities, result;
1090	int ret;
1091
1092	*populate = 0;
1093
1094	/* decide whether we should attempt the mapping, and if so what sort of
1095	 * mapping */
1096	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1097				    &capabilities);
1098	if (ret < 0)
1099		return ret;
1100
1101	/* we ignore the address hint */
1102	addr = 0;
1103	len = PAGE_ALIGN(len);
1104
1105	/* we've determined that we can make the mapping, now translate what we
1106	 * now know into VMA flags */
1107	vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1108
1109	/* we're going to need to record the mapping */
1110	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1111	if (!region)
1112		goto error_getting_region;
1113
1114	vma = vm_area_alloc(current->mm);
1115	if (!vma)
1116		goto error_getting_vma;
1117
1118	region->vm_usage = 1;
1119	region->vm_flags = vm_flags;
1120	region->vm_pgoff = pgoff;
1121
1122	vma->vm_flags = vm_flags;
1123	vma->vm_pgoff = pgoff;
1124
1125	if (file) {
1126		region->vm_file = get_file(file);
1127		vma->vm_file = get_file(file);
1128	}
1129
1130	down_write(&nommu_region_sem);
1131
1132	/* if we want to share, we need to check for regions created by other
1133	 * mmap() calls that overlap with our proposed mapping
1134	 * - we can only share with a superset match on most regular files
1135	 * - shared mappings on character devices and memory backed files are
1136	 *   permitted to overlap inexactly as far as we are concerned for in
1137	 *   these cases, sharing is handled in the driver or filesystem rather
1138	 *   than here
1139	 */
1140	if (vm_flags & VM_MAYSHARE) {
1141		struct vm_region *pregion;
1142		unsigned long pglen, rpglen, pgend, rpgend, start;
1143
1144		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1145		pgend = pgoff + pglen;
1146
1147		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1148			pregion = rb_entry(rb, struct vm_region, vm_rb);
1149
1150			if (!(pregion->vm_flags & VM_MAYSHARE))
1151				continue;
1152
1153			/* search for overlapping mappings on the same file */
1154			if (file_inode(pregion->vm_file) !=
1155			    file_inode(file))
1156				continue;
1157
1158			if (pregion->vm_pgoff >= pgend)
1159				continue;
1160
1161			rpglen = pregion->vm_end - pregion->vm_start;
1162			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1163			rpgend = pregion->vm_pgoff + rpglen;
1164			if (pgoff >= rpgend)
1165				continue;
1166
1167			/* handle inexactly overlapping matches between
1168			 * mappings */
1169			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1170			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1171				/* new mapping is not a subset of the region */
1172				if (!(capabilities & NOMMU_MAP_DIRECT))
1173					goto sharing_violation;
1174				continue;
1175			}
1176
1177			/* we've found a region we can share */
1178			pregion->vm_usage++;
1179			vma->vm_region = pregion;
1180			start = pregion->vm_start;
1181			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1182			vma->vm_start = start;
1183			vma->vm_end = start + len;
1184
1185			if (pregion->vm_flags & VM_MAPPED_COPY)
1186				vma->vm_flags |= VM_MAPPED_COPY;
1187			else {
1188				ret = do_mmap_shared_file(vma);
1189				if (ret < 0) {
1190					vma->vm_region = NULL;
1191					vma->vm_start = 0;
1192					vma->vm_end = 0;
1193					pregion->vm_usage--;
1194					pregion = NULL;
1195					goto error_just_free;
1196				}
1197			}
1198			fput(region->vm_file);
1199			kmem_cache_free(vm_region_jar, region);
1200			region = pregion;
1201			result = start;
1202			goto share;
1203		}
1204
1205		/* obtain the address at which to make a shared mapping
1206		 * - this is the hook for quasi-memory character devices to
1207		 *   tell us the location of a shared mapping
1208		 */
1209		if (capabilities & NOMMU_MAP_DIRECT) {
1210			addr = file->f_op->get_unmapped_area(file, addr, len,
1211							     pgoff, flags);
1212			if (IS_ERR_VALUE(addr)) {
1213				ret = addr;
1214				if (ret != -ENOSYS)
1215					goto error_just_free;
1216
1217				/* the driver refused to tell us where to site
1218				 * the mapping so we'll have to attempt to copy
1219				 * it */
1220				ret = -ENODEV;
1221				if (!(capabilities & NOMMU_MAP_COPY))
1222					goto error_just_free;
1223
1224				capabilities &= ~NOMMU_MAP_DIRECT;
1225			} else {
1226				vma->vm_start = region->vm_start = addr;
1227				vma->vm_end = region->vm_end = addr + len;
1228			}
1229		}
1230	}
1231
1232	vma->vm_region = region;
1233
1234	/* set up the mapping
1235	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1236	 */
1237	if (file && vma->vm_flags & VM_SHARED)
1238		ret = do_mmap_shared_file(vma);
1239	else
1240		ret = do_mmap_private(vma, region, len, capabilities);
1241	if (ret < 0)
1242		goto error_just_free;
1243	add_nommu_region(region);
1244
1245	/* clear anonymous mappings that don't ask for uninitialized data */
1246	if (!vma->vm_file &&
1247	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1248	     !(flags & MAP_UNINITIALIZED)))
1249		memset((void *)region->vm_start, 0,
1250		       region->vm_end - region->vm_start);
1251
1252	/* okay... we have a mapping; now we have to register it */
1253	result = vma->vm_start;
1254
1255	current->mm->total_vm += len >> PAGE_SHIFT;
1256
1257share:
1258	add_vma_to_mm(current->mm, vma);
1259
1260	/* we flush the region from the icache only when the first executable
1261	 * mapping of it is made  */
1262	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1263		flush_icache_user_range(region->vm_start, region->vm_end);
1264		region->vm_icache_flushed = true;
1265	}
1266
1267	up_write(&nommu_region_sem);
1268
1269	return result;
1270
1271error_just_free:
1272	up_write(&nommu_region_sem);
1273error:
1274	if (region->vm_file)
1275		fput(region->vm_file);
1276	kmem_cache_free(vm_region_jar, region);
1277	if (vma->vm_file)
1278		fput(vma->vm_file);
1279	vm_area_free(vma);
1280	return ret;
1281
1282sharing_violation:
1283	up_write(&nommu_region_sem);
1284	pr_warn("Attempt to share mismatched mappings\n");
1285	ret = -EINVAL;
1286	goto error;
1287
1288error_getting_vma:
1289	kmem_cache_free(vm_region_jar, region);
1290	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1291			len, current->pid);
1292	show_free_areas(0, NULL);
1293	return -ENOMEM;
1294
1295error_getting_region:
1296	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1297			len, current->pid);
1298	show_free_areas(0, NULL);
1299	return -ENOMEM;
1300}
1301
1302unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1303			      unsigned long prot, unsigned long flags,
1304			      unsigned long fd, unsigned long pgoff)
1305{
1306	struct file *file = NULL;
1307	unsigned long retval = -EBADF;
1308
1309	audit_mmap_fd(fd, flags);
1310	if (!(flags & MAP_ANONYMOUS)) {
1311		file = fget(fd);
1312		if (!file)
1313			goto out;
1314	}
1315
1316	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1317
1318	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1319
1320	if (file)
1321		fput(file);
1322out:
1323	return retval;
1324}
1325
1326SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1327		unsigned long, prot, unsigned long, flags,
1328		unsigned long, fd, unsigned long, pgoff)
1329{
1330	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1331}
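
/*
 * Illustrative sketch (not part of nommu.c): userspace consequences of the
 * checks in validate_mmap_request() and do_mmap() above. The address hint is
 * ignored, MAP_FIXED is always rejected with EINVAL, and private file
 * mappings are satisfied by copying the file contents into memory.
 * Hypothetical userspace example.
 */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* the hint is ignored; the kernel chooses the address */
	void *p = mmap((void *)0x100000, 8192, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		perror("mmap");
	else
		printf("mapped at %p (hint was not honoured)\n", p);

	/* MAP_FIXED always fails under !MMU */
	if (mmap((void *)0x100000, 8192, PROT_READ,
		 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
		perror("mmap MAP_FIXED");

	return 0;
}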
1332
1333#ifdef __ARCH_WANT_SYS_OLD_MMAP
1334struct mmap_arg_struct {
1335	unsigned long addr;
1336	unsigned long len;
1337	unsigned long prot;
1338	unsigned long flags;
1339	unsigned long fd;
1340	unsigned long offset;
1341};
1342
1343SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1344{
1345	struct mmap_arg_struct a;
1346
1347	if (copy_from_user(&a, arg, sizeof(a)))
1348		return -EFAULT;
1349	if (offset_in_page(a.offset))
1350		return -EINVAL;
1351
1352	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1353			       a.offset >> PAGE_SHIFT);
1354}
1355#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1356
1357/*
1358 * split a vma into two pieces at address 'addr', a new vma is allocated either
1359 * for the first part or the tail.
1360 */
1361int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1362	      unsigned long addr, int new_below)
1363{
1364	struct vm_area_struct *new;
1365	struct vm_region *region;
1366	unsigned long npages;
1367
1368	/* we're only permitted to split anonymous regions (these should have
1369	 * only a single usage on the region) */
1370	if (vma->vm_file)
1371		return -ENOMEM;
1372
1373	if (mm->map_count >= sysctl_max_map_count)
1374		return -ENOMEM;
1375
1376	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1377	if (!region)
1378		return -ENOMEM;
1379
1380	new = vm_area_dup(vma);
1381	if (!new) {
1382		kmem_cache_free(vm_region_jar, region);
1383		return -ENOMEM;
1384	}
1385
1386	/* most fields are the same, copy all, and then fixup */
1387	*region = *vma->vm_region;
1388	new->vm_region = region;
1389
1390	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1391
1392	if (new_below) {
1393		region->vm_top = region->vm_end = new->vm_end = addr;
1394	} else {
1395		region->vm_start = new->vm_start = addr;
1396		region->vm_pgoff = new->vm_pgoff += npages;
1397	}
1398
1399	if (new->vm_ops && new->vm_ops->open)
1400		new->vm_ops->open(new);
1401
1402	delete_vma_from_mm(vma);
1403	down_write(&nommu_region_sem);
1404	delete_nommu_region(vma->vm_region);
1405	if (new_below) {
1406		vma->vm_region->vm_start = vma->vm_start = addr;
1407		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1408	} else {
1409		vma->vm_region->vm_end = vma->vm_end = addr;
1410		vma->vm_region->vm_top = addr;
1411	}
1412	add_nommu_region(vma->vm_region);
1413	add_nommu_region(new->vm_region);
1414	up_write(&nommu_region_sem);
1415	add_vma_to_mm(mm, vma);
1416	add_vma_to_mm(mm, new);
1417	return 0;
1418}
1419
1420/*
1421 * shrink a VMA by removing the specified chunk from either the beginning or
1422 * the end
1423 */
1424static int shrink_vma(struct mm_struct *mm,
1425		      struct vm_area_struct *vma,
1426		      unsigned long from, unsigned long to)
1427{
1428	struct vm_region *region;
1429
1430	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1431	 * and list */
1432	delete_vma_from_mm(vma);
1433	if (from > vma->vm_start)
1434		vma->vm_end = from;
1435	else
1436		vma->vm_start = to;
1437	add_vma_to_mm(mm, vma);
1438
1439	/* cut the backing region down to size */
1440	region = vma->vm_region;
1441	BUG_ON(region->vm_usage != 1);
1442
1443	down_write(&nommu_region_sem);
1444	delete_nommu_region(region);
1445	if (from > region->vm_start) {
1446		to = region->vm_top;
1447		region->vm_top = region->vm_end = from;
1448	} else {
1449		region->vm_start = to;
1450	}
1451	add_nommu_region(region);
1452	up_write(&nommu_region_sem);
1453
1454	free_page_series(from, to);
1455	return 0;
1456}
1457
1458/*
1459 * release a mapping
1460 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1461 *   VMA, though it need not cover the whole VMA
1462 */
1463int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1464{
1465	struct vm_area_struct *vma;
1466	unsigned long end;
1467	int ret;
1468
1469	len = PAGE_ALIGN(len);
1470	if (len == 0)
1471		return -EINVAL;
1472
1473	end = start + len;
1474
1475	/* find the first potentially overlapping VMA */
1476	vma = find_vma(mm, start);
1477	if (!vma) {
1478		static int limit;
1479		if (limit < 5) {
1480			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1481					current->pid, current->comm,
1482					start, start + len - 1);
1483			limit++;
1484		}
1485		return -EINVAL;
1486	}
1487
1488	/* we're allowed to split an anonymous VMA but not a file-backed one */
1489	if (vma->vm_file) {
1490		do {
1491			if (start > vma->vm_start)
1492				return -EINVAL;
1493			if (end == vma->vm_end)
1494				goto erase_whole_vma;
1495			vma = vma->vm_next;
1496		} while (vma);
1497		return -EINVAL;
1498	} else {
1499		/* the chunk must be a subset of the VMA found */
1500		if (start == vma->vm_start && end == vma->vm_end)
1501			goto erase_whole_vma;
1502		if (start < vma->vm_start || end > vma->vm_end)
1503			return -EINVAL;
1504		if (offset_in_page(start))
1505			return -EINVAL;
1506		if (end != vma->vm_end && offset_in_page(end))
1507			return -EINVAL;
1508		if (start != vma->vm_start && end != vma->vm_end) {
1509			ret = split_vma(mm, vma, start, 1);
1510			if (ret < 0)
1511				return ret;
1512		}
1513		return shrink_vma(mm, vma, start, end);
1514	}
1515
1516erase_whole_vma:
1517	delete_vma_from_mm(vma);
1518	delete_vma(mm, vma);
1519	return 0;
1520}
1521EXPORT_SYMBOL(do_munmap);
1522
1523int vm_munmap(unsigned long addr, size_t len)
1524{
1525	struct mm_struct *mm = current->mm;
1526	int ret;
1527
1528	mmap_write_lock(mm);
1529	ret = do_munmap(mm, addr, len, NULL);
1530	mmap_write_unlock(mm);
1531	return ret;
1532}
1533EXPORT_SYMBOL(vm_munmap);
1534
1535SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1536{
1537	return vm_munmap(addr, len);
1538}
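
/*
 * Illustrative sketch (not part of nommu.c): the do_munmap() rules above as
 * seen from userspace. An anonymous mapping may be trimmed or released
 * whole, but a request that is not contained in a single VMA is rejected
 * outright rather than partially applied. Hypothetical userspace example.
 */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	char *p = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* trimming the tail of an anonymous mapping is allowed */
	if (munmap(p + 8192, 8192) == -1)
		perror("trim");

	/* a range that extends past the VMA is refused with EINVAL */
	if (munmap(p, 32768) == -1)
		perror("oversized munmap");

	return 0;
}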
1539
1540/*
1541 * release all the mappings made in a process's VM space
1542 */
1543void exit_mmap(struct mm_struct *mm)
1544{
1545	struct vm_area_struct *vma;
1546
1547	if (!mm)
1548		return;
1549
1550	mm->total_vm = 0;
1551
1552	while ((vma = mm->mmap)) {
1553		mm->mmap = vma->vm_next;
1554		delete_vma_from_mm(vma);
1555		delete_vma(mm, vma);
1556		cond_resched();
1557	}
1558}
1559
1560int vm_brk(unsigned long addr, unsigned long len)
1561{
1562	return -ENOMEM;
1563}
1564
1565/*
1566 * expand (or shrink) an existing mapping, potentially moving it at the same
1567 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1568 *
1569 * under NOMMU conditions, we only permit changing a mapping's size, and only
1570 * as long as it stays within the region allocated by do_mmap_private() and the
1571 * block is not shareable
1572 *
1573 * MREMAP_FIXED is not supported under NOMMU conditions
1574 */
1575static unsigned long do_mremap(unsigned long addr,
1576			unsigned long old_len, unsigned long new_len,
1577			unsigned long flags, unsigned long new_addr)
1578{
1579	struct vm_area_struct *vma;
1580
1581	/* insanity checks first */
1582	old_len = PAGE_ALIGN(old_len);
1583	new_len = PAGE_ALIGN(new_len);
1584	if (old_len == 0 || new_len == 0)
1585		return (unsigned long) -EINVAL;
1586
1587	if (offset_in_page(addr))
1588		return -EINVAL;
1589
1590	if (flags & MREMAP_FIXED && new_addr != addr)
1591		return (unsigned long) -EINVAL;
1592
1593	vma = find_vma_exact(current->mm, addr, old_len);
1594	if (!vma)
1595		return (unsigned long) -EINVAL;
1596
1597	if (vma->vm_end != vma->vm_start + old_len)
1598		return (unsigned long) -EFAULT;
1599
1600	if (vma->vm_flags & VM_MAYSHARE)
1601		return (unsigned long) -EPERM;
1602
1603	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1604		return (unsigned long) -ENOMEM;
1605
1606	/* all checks complete - do it */
1607	vma->vm_end = vma->vm_start + new_len;
1608	return vma->vm_start;
1609}
1610
1611SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1612		unsigned long, new_len, unsigned long, flags,
1613		unsigned long, new_addr)
1614{
1615	unsigned long ret;
1616
1617	mmap_write_lock(current->mm);
1618	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1619	mmap_write_unlock(current->mm);
1620	return ret;
1621}
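
/*
 * Illustrative sketch (not part of nommu.c): the practical effect of
 * do_mremap() above. A mapping can only be resized in place, and only while
 * it stays within the backing region that do_mmap_private() allocated;
 * mappings never move, so MREMAP_MAYMOVE has no effect. Hypothetical
 * userspace example.
 */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	char *p = mmap(NULL, 16384, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* shrinking in place is fine */
	if (mremap(p, 16384, 8192, 0) == MAP_FAILED)
		perror("shrink");

	/* growing past the backing region fails with ENOMEM, MAYMOVE or not */
	if (mremap(p, 8192, 1 << 20, MREMAP_MAYMOVE) == MAP_FAILED)
		perror("grow");

	return 0;
}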
1622
1623struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1624			 unsigned int foll_flags)
1625{
1626	return NULL;
1627}
1628
1629int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1630		unsigned long pfn, unsigned long size, pgprot_t prot)
1631{
1632	if (addr != (pfn << PAGE_SHIFT))
1633		return -EINVAL;
1634
1635	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1636	return 0;
1637}
1638EXPORT_SYMBOL(remap_pfn_range);
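
/*
 * Illustrative sketch (not part of nommu.c): a hypothetical driver ->mmap()
 * handler built on remap_pfn_range(). Note the !MMU constraint enforced
 * above: the mapping must be an identity mapping, i.e. vma->vm_start must
 * already equal pfn << PAGE_SHIFT (which is how the !MMU core sets up direct
 * mappings), otherwise -EINVAL is returned.
 */
#include <linux/mm.h>
#include <linux/fs.h>

static int example_io_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;	/* physical page to expose */

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}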
1639
1640int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1641{
1642	unsigned long pfn = start >> PAGE_SHIFT;
1643	unsigned long vm_len = vma->vm_end - vma->vm_start;
1644
1645	pfn += vma->vm_pgoff;
1646	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1647}
1648EXPORT_SYMBOL(vm_iomap_memory);
1649
1650int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1651			unsigned long pgoff)
1652{
1653	unsigned int size = vma->vm_end - vma->vm_start;
1654
1655	if (!(vma->vm_flags & VM_USERMAP))
1656		return -EINVAL;
1657
1658	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1659	vma->vm_end = vma->vm_start + size;
1660
1661	return 0;
1662}
1663EXPORT_SYMBOL(remap_vmalloc_range);
1664
1665unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1666	unsigned long len, unsigned long pgoff, unsigned long flags)
1667{
1668	return -ENOMEM;
1669}
1670
1671vm_fault_t filemap_fault(struct vm_fault *vmf)
1672{
1673	BUG();
1674	return 0;
1675}
1676EXPORT_SYMBOL(filemap_fault);
1677
1678void filemap_map_pages(struct vm_fault *vmf,
1679		pgoff_t start_pgoff, pgoff_t end_pgoff)
1680{
1681	BUG();
1682}
1683EXPORT_SYMBOL(filemap_map_pages);
1684
1685int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1686		unsigned long addr, void *buf, int len, unsigned int gup_flags)
1687{
1688	struct vm_area_struct *vma;
1689	int write = gup_flags & FOLL_WRITE;
1690
1691	if (mmap_read_lock_killable(mm))
1692		return 0;
1693
1694	/* the access must start within one of the target process's mappings */
1695	vma = find_vma(mm, addr);
1696	if (vma) {
1697		/* don't overrun this mapping */
1698		if (addr + len >= vma->vm_end)
1699			len = vma->vm_end - addr;
1700
1701		/* only read or write mappings where it is permitted */
1702		if (write && vma->vm_flags & VM_MAYWRITE)
1703			copy_to_user_page(vma, NULL, addr,
1704					 (void *) addr, buf, len);
1705		else if (!write && vma->vm_flags & VM_MAYREAD)
1706			copy_from_user_page(vma, NULL, addr,
1707					    buf, (void *) addr, len);
1708		else
1709			len = 0;
1710	} else {
1711		len = 0;
1712	}
1713
1714	mmap_read_unlock(mm);
1715
1716	return len;
1717}
1718
1719/**
1720 * access_remote_vm - access another process' address space
1721 * @mm:		the mm_struct of the target address space
1722 * @addr:	start address to access
1723 * @buf:	source or destination buffer
1724 * @len:	number of bytes to transfer
1725 * @gup_flags:	flags modifying lookup behaviour
1726 *
1727 * The caller must hold a reference on @mm.
1728 */
1729int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1730		void *buf, int len, unsigned int gup_flags)
1731{
1732	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
1733}
1734
1735/*
1736 * Access another process' address space.
1737 * - source/target buffer must be kernel space
1738 */
1739int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1740		unsigned int gup_flags)
1741{
1742	struct mm_struct *mm;
1743
1744	if (addr + len < addr)
1745		return 0;
1746
1747	mm = get_task_mm(tsk);
1748	if (!mm)
1749		return 0;
1750
1751	len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
1752
1753	mmput(mm);
1754	return len;
1755}
1756EXPORT_SYMBOL_GPL(access_process_vm);
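
/*
 * Illustrative sketch (not part of nommu.c): access_process_vm() is the
 * primitive behind ptrace() peek/poke requests. A hypothetical in-kernel
 * caller reading another task's memory could look like this.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int example_peek(struct task_struct *tsk, unsigned long addr,
			void *out, int len)
{
	/* returns the number of bytes actually copied, 0 on failure */
	return access_process_vm(tsk, addr, out, len, 0);
}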
1757
1758/**
1759 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1760 * @inode: The inode to check
1761 * @size: The current filesize of the inode
1762 * @newsize: The proposed filesize of the inode
1763 *
1764 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1765 * make sure that any outstanding VMAs aren't broken and then shrink the
 1766 * vm_regions that extend beyond the new size so that do_mmap() doesn't
1767 * automatically grant mappings that are too large.
1768 */
1769int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1770				size_t newsize)
1771{
1772	struct vm_area_struct *vma;
1773	struct vm_region *region;
1774	pgoff_t low, high;
1775	size_t r_size, r_top;
1776
1777	low = newsize >> PAGE_SHIFT;
1778	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1779
1780	down_write(&nommu_region_sem);
1781	i_mmap_lock_read(inode->i_mapping);
1782
1783	/* search for VMAs that fall within the dead zone */
1784	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1785		/* found one - only interested if it's shared out of the page
1786		 * cache */
1787		if (vma->vm_flags & VM_SHARED) {
1788			i_mmap_unlock_read(inode->i_mapping);
1789			up_write(&nommu_region_sem);
1790			return -ETXTBSY; /* not quite true, but near enough */
1791		}
1792	}
1793
1794	/* reduce any regions that overlap the dead zone - if in existence,
1795	 * these will be pointed to by VMAs that don't overlap the dead zone
1796	 *
1797	 * we don't check for any regions that start beyond the EOF as there
1798	 * shouldn't be any
1799	 */
1800	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1801		if (!(vma->vm_flags & VM_SHARED))
1802			continue;
1803
1804		region = vma->vm_region;
1805		r_size = region->vm_top - region->vm_start;
1806		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1807
1808		if (r_top > newsize) {
1809			region->vm_top -= r_top - newsize;
1810			if (region->vm_end > region->vm_top)
1811				region->vm_end = region->vm_top;
1812		}
1813	}
1814
1815	i_mmap_unlock_read(inode->i_mapping);
1816	up_write(&nommu_region_sem);
1817	return 0;
1818}
1819
1820/*
1821 * Initialise sysctl_user_reserve_kbytes.
1822 *
1823 * This is intended to prevent a user from starting a single memory hogging
1824 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1825 * mode.
1826 *
1827 * The default value is min(3% of free memory, 128MB)
1828 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1829 */
1830static int __meminit init_user_reserve(void)
1831{
1832	unsigned long free_kbytes;
1833
1834	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1835
1836	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1837	return 0;
1838}
1839subsys_initcall(init_user_reserve);
1840
1841/*
1842 * Initialise sysctl_admin_reserve_kbytes.
1843 *
1844 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1845 * to log in and kill a memory hogging process.
1846 *
1847 * Systems with more than 256MB will reserve 8MB, enough to recover
1848 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1849 * only reserve 3% of free pages by default.
1850 */
1851static int __meminit init_admin_reserve(void)
1852{
1853	unsigned long free_kbytes;
1854
1855	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1856
1857	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1858	return 0;
1859}
1860subsys_initcall(init_admin_reserve);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/nommu.c
   4 *
   5 *  Replacement code for mm functions to support CPUs that don't
   6 *  have any form of memory management unit (thus no virtual memory).
   7 *
   8 *  See Documentation/admin-guide/mm/nommu-mmap.rst
   9 *
  10 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  11 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  12 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  13 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  14 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  15 */
  16
  17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  18
  19#include <linux/export.h>
  20#include <linux/mm.h>
  21#include <linux/sched/mm.h>
  22#include <linux/mman.h>
  23#include <linux/swap.h>
  24#include <linux/file.h>
  25#include <linux/highmem.h>
  26#include <linux/pagemap.h>
  27#include <linux/slab.h>
  28#include <linux/vmalloc.h>
  29#include <linux/backing-dev.h>
  30#include <linux/compiler.h>
  31#include <linux/mount.h>
  32#include <linux/personality.h>
  33#include <linux/security.h>
  34#include <linux/syscalls.h>
  35#include <linux/audit.h>
  36#include <linux/printk.h>
  37
  38#include <linux/uaccess.h>
  39#include <linux/uio.h>
  40#include <asm/tlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/mmu_context.h>
  43#include "internal.h"
  44
  45void *high_memory;
  46EXPORT_SYMBOL(high_memory);
  47struct page *mem_map;
  48unsigned long max_mapnr;
  49EXPORT_SYMBOL(max_mapnr);
  50unsigned long highest_memmap_pfn;
  51int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  52int heap_stack_gap = 0;
  53
  54atomic_long_t mmap_pages_allocated;
  55
  56EXPORT_SYMBOL(mem_map);
  57
  58/* list of mapped, potentially shareable regions */
  59static struct kmem_cache *vm_region_jar;
  60struct rb_root nommu_region_tree = RB_ROOT;
  61DECLARE_RWSEM(nommu_region_sem);
  62
  63const struct vm_operations_struct generic_file_vm_ops = {
  64};
  65
  66/*
  67 * Return the total memory allocated for this pointer, not
  68 * just what the caller asked for.
  69 *
  70 * Doesn't have to be accurate, i.e. may have races.
  71 */
  72unsigned int kobjsize(const void *objp)
  73{
  74	struct page *page;
  75
  76	/*
  77	 * If the object we have should not have ksize performed on it,
  78	 * return size of 0
  79	 */
  80	if (!objp || !virt_addr_valid(objp))
  81		return 0;
  82
  83	page = virt_to_head_page(objp);
  84
  85	/*
  86	 * If the allocator sets PageSlab, we know the pointer came from
  87	 * kmalloc().
  88	 */
  89	if (PageSlab(page))
  90		return ksize(objp);
  91
  92	/*
  93	 * If it's not a compound page, see if we have a matching VMA
  94	 * region. This test is intentionally done in reverse order,
  95	 * so if there's no VMA, we still fall through and hand back
  96	 * PAGE_SIZE for 0-order pages.
  97	 */
  98	if (!PageCompound(page)) {
  99		struct vm_area_struct *vma;
 100
 101		vma = find_vma(current->mm, (unsigned long)objp);
 102		if (vma)
 103			return vma->vm_end - vma->vm_start;
 104	}
 105
 106	/*
 107	 * The ksize() function is only guaranteed to work for pointers
 108	 * returned by kmalloc(). So handle arbitrary pointers here.
 109	 */
 110	return page_size(page);
 111}
 112
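A minimal sketch (not part of nommu.c; the helper name is assumed) of what the fall-through above means in practice: for a kmalloc() pointer the slab's rounded-up size is reported rather than the size that was requested.

static void kobjsize_example(void)	/* hedged example, not part of this file */
{
	void *p = kmalloc(100, GFP_KERNEL);	/* the slab rounds this up, e.g. to 128 */

	if (p) {
		pr_info("asked for 100 bytes, kobjsize() reports %u\n",
			kobjsize(p));
		kfree(p);
	}
}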
 113void vfree(const void *addr)
 114{
 115	kfree(addr);
 116}
 117EXPORT_SYMBOL(vfree);
 118
 119void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask)
 120{
 121	/*
 122	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 123	 * returns only a logical address.
 124	 */
 125	return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 126}
 127EXPORT_SYMBOL(__vmalloc_noprof);
 128
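Because vmalloc() collapses onto kmalloc() here, a hedged consequence worth spelling out (the helper name below is hypothetical): on !MMU targets the memory is physically contiguous, so large requests can fail where a real vmalloc() would have succeeded.

static void *alloc_capture_buffer(size_t len)	/* hypothetical helper */
{
	void *buf = vmalloc(len);	/* backed by kmalloc() on !MMU */

	if (!buf)
		pr_warn("no contiguous memory for %zu bytes\n", len);
	return buf;
}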
 129void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 130{
 131	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 132}
 133
 134void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
 135		unsigned long start, unsigned long end, gfp_t gfp_mask,
 136		pgprot_t prot, unsigned long vm_flags, int node,
 137		const void *caller)
 138{
 139	return __vmalloc_noprof(size, gfp_mask);
 140}
 141
 142void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 143		int node, const void *caller)
 144{
 145	return __vmalloc_noprof(size, gfp_mask);
 146}
 147
 148static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
 149{
 150	void *ret;
 151
 152	ret = __vmalloc(size, flags);
 153	if (ret) {
 154		struct vm_area_struct *vma;
 155
 156		mmap_write_lock(current->mm);
 157		vma = find_vma(current->mm, (unsigned long)ret);
 158		if (vma)
 159			vm_flags_set(vma, VM_USERMAP);
 160		mmap_write_unlock(current->mm);
 161	}
 162
 163	return ret;
 164}
 165
 166void *vmalloc_user_noprof(unsigned long size)
 167{
 168	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
 169}
 170EXPORT_SYMBOL(vmalloc_user_noprof);
 171
 172struct page *vmalloc_to_page(const void *addr)
 173{
 174	return virt_to_page(addr);
 175}
 176EXPORT_SYMBOL(vmalloc_to_page);
 177
 178unsigned long vmalloc_to_pfn(const void *addr)
 179{
 180	return page_to_pfn(virt_to_page(addr));
 181}
 182EXPORT_SYMBOL(vmalloc_to_pfn);
 183
 184long vread_iter(struct iov_iter *iter, const char *addr, size_t count)
 185{
 186	/* Don't allow overflow */
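	/* -(unsigned long) addr == ULONG_MAX - addr + 1, i.e. the number of
	 * bytes left before the address space wraps, so the copy is clamped
	 * at the very top of memory rather than overflowing. */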
 187	if ((unsigned long) addr + count < count)
 188		count = -(unsigned long) addr;
 189
 190	return copy_to_iter(addr, count, iter);
 191}
 192
 193/*
 194 *	vmalloc  -  allocate virtually contiguous memory
 195 *
 196 *	@size:		allocation size
 197 *
 198 *	Allocate enough pages to cover @size from the page level
 199 *	allocator and map them into contiguous kernel virtual space.
 200 *
 201 *	For tight control over page level allocator and protection flags
 202 *	use __vmalloc() instead.
 203 */
 204void *vmalloc_noprof(unsigned long size)
 205{
 206	return __vmalloc_noprof(size, GFP_KERNEL);
 207}
 208EXPORT_SYMBOL(vmalloc_noprof);
 209
 210void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof);
 211
 212/*
 213 *	vzalloc - allocate virtually contiguous memory with zero fill
 214 *
 215 *	@size:		allocation size
 216 *
 217 *	Allocate enough pages to cover @size from the page level
 218 *	allocator and map them into contiguous kernel virtual space.
 219 *	The memory allocated is set to zero.
 220 *
 221 *	For tight control over page level allocator and protection flags
 222 *	use __vmalloc() instead.
 223 */
 224void *vzalloc_noprof(unsigned long size)
 225{
 226	return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO);
 227}
 228EXPORT_SYMBOL(vzalloc_noprof);
 229
 230/**
 231 * vmalloc_node - allocate memory on a specific node
 232 * @size:	allocation size
 233 * @node:	numa node
 234 *
 235 * Allocate enough pages to cover @size from the page level
 236 * allocator and map them into contiguous kernel virtual space.
 237 *
 238 * For tight control over page level allocator and protection flags
 239 * use __vmalloc() instead.
 240 */
 241void *vmalloc_node_noprof(unsigned long size, int node)
 242{
 243	return vmalloc_noprof(size);
 244}
 245EXPORT_SYMBOL(vmalloc_node_noprof);
 246
 247/**
 248 * vzalloc_node - allocate memory on a specific node with zero fill
 249 * @size:	allocation size
 250 * @node:	numa node
 251 *
 252 * Allocate enough pages to cover @size from the page level
 253 * allocator and map them into contiguous kernel virtual space.
 254 * The memory allocated is set to zero.
 255 *
 256 * For tight control over page level allocator and protection flags
 257 * use __vmalloc() instead.
 258 */
 259void *vzalloc_node_noprof(unsigned long size, int node)
 260{
 261	return vzalloc_noprof(size);
 262}
 263EXPORT_SYMBOL(vzalloc_node_noprof);
 264
 265/**
 266 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 267 *	@size:		allocation size
 268 *
 269 *	Allocate enough 32bit PA addressable pages to cover @size from the
 270 *	page level allocator and map them into contiguous kernel virtual space.
 271 */
 272void *vmalloc_32_noprof(unsigned long size)
 273{
 274	return __vmalloc_noprof(size, GFP_KERNEL);
 275}
 276EXPORT_SYMBOL(vmalloc_32_noprof);
 277
 278/**
 279 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 280 *	@size:		allocation size
 281 *
 282 * The resulting memory area is 32bit addressable and zeroed so it can be
 283 * mapped to userspace without leaking data.
 284 *
 285 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 286 * remap_vmalloc_range() are permissible.
 287 */
 288void *vmalloc_32_user_noprof(unsigned long size)
 289{
 290	/*
 291	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
 292	 * but for now this can simply use vmalloc_user() directly.
 293	 */
 294	return vmalloc_user_noprof(size);
 295}
 296EXPORT_SYMBOL(vmalloc_32_user_noprof);
 297
 298void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 299{
 300	BUG();
 301	return NULL;
 302}
 303EXPORT_SYMBOL(vmap);
 304
 305void vunmap(const void *addr)
 306{
 307	BUG();
 308}
 309EXPORT_SYMBOL(vunmap);
 310
 311void *vm_map_ram(struct page **pages, unsigned int count, int node)
 312{
 313	BUG();
 314	return NULL;
 315}
 316EXPORT_SYMBOL(vm_map_ram);
 317
 318void vm_unmap_ram(const void *mem, unsigned int count)
 319{
 320	BUG();
 321}
 322EXPORT_SYMBOL(vm_unmap_ram);
 323
 324void vm_unmap_aliases(void)
 325{
 326}
 327EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 328
 329void free_vm_area(struct vm_struct *area)
 330{
 331	BUG();
 332}
 333EXPORT_SYMBOL_GPL(free_vm_area);
 334
 335int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 336		   struct page *page)
 337{
 338	return -EINVAL;
 339}
 340EXPORT_SYMBOL(vm_insert_page);
 341
 342int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 343			struct page **pages, unsigned long *num)
 344{
 345	return -EINVAL;
 346}
 347EXPORT_SYMBOL(vm_insert_pages);
 348
 349int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
 350			unsigned long num)
 351{
 352	return -EINVAL;
 353}
 354EXPORT_SYMBOL(vm_map_pages);
 355
 356int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
 357				unsigned long num)
 358{
 359	return -EINVAL;
 360}
 361EXPORT_SYMBOL(vm_map_pages_zero);
 362
 363/*
 364 *  sys_brk() for the most part doesn't need the global kernel
 365 *  lock, except when an application is doing something nasty
 366 *  like trying to un-brk an area that has already been mapped
 367 *  to a regular file.  in this case, the unmapping will need
 368 *  to invoke file system routines that need the global lock.
 369 */
 370SYSCALL_DEFINE1(brk, unsigned long, brk)
 371{
 372	struct mm_struct *mm = current->mm;
 373
 374	if (brk < mm->start_brk || brk > mm->context.end_brk)
 375		return mm->brk;
 376
 377	if (mm->brk == brk)
 378		return mm->brk;
 379
 380	/*
 381	 * Always allow shrinking brk
 382	 */
 383	if (brk <= mm->brk) {
 384		mm->brk = brk;
 385		return brk;
 386	}
 387
 388	/*
 389	 * Ok, looks good - let it rip.
 390	 */
 391	flush_icache_user_range(mm->brk, brk);
 392	return mm->brk = brk;
 393}
 394
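A hedged userspace sketch (assuming a uClinux-style flat binary) of the check against mm->context.end_brk above: the heap can only grow within the region the loader set aside at execve() time.

#include <unistd.h>
#include <stdio.h>

int main(void)
{
	while (sbrk(4096) != (void *)-1)
		;				/* grows until context.end_brk */
	perror("sbrk");				/* ENOMEM once that region is full */
	printf("final program break: %p\n", sbrk(0));
	return 0;
}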
 395/*
 396 * initialise the percpu counter for VM and region record slabs
 397 */
 398void __init mmap_init(void)
 399{
 400	int ret;
 401
 402	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
 403	VM_BUG_ON(ret);
 404	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
 405}
 406
 407/*
 408 * validate the region tree
 409 * - the caller must hold the region lock
 410 */
 411#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 412static noinline void validate_nommu_regions(void)
 413{
 414	struct vm_region *region, *last;
 415	struct rb_node *p, *lastp;
 416
 417	lastp = rb_first(&nommu_region_tree);
 418	if (!lastp)
 419		return;
 420
 421	last = rb_entry(lastp, struct vm_region, vm_rb);
 422	BUG_ON(last->vm_end <= last->vm_start);
 423	BUG_ON(last->vm_top < last->vm_end);
 424
 425	while ((p = rb_next(lastp))) {
 426		region = rb_entry(p, struct vm_region, vm_rb);
 427		last = rb_entry(lastp, struct vm_region, vm_rb);
 428
 429		BUG_ON(region->vm_end <= region->vm_start);
 430		BUG_ON(region->vm_top < region->vm_end);
 431		BUG_ON(region->vm_start < last->vm_top);
 432
 433		lastp = p;
 434	}
 435}
 436#else
 437static void validate_nommu_regions(void)
 438{
 439}
 440#endif
 441
 442/*
 443 * add a region into the global tree
 444 */
 445static void add_nommu_region(struct vm_region *region)
 446{
 447	struct vm_region *pregion;
 448	struct rb_node **p, *parent;
 449
 450	validate_nommu_regions();
 451
 452	parent = NULL;
 453	p = &nommu_region_tree.rb_node;
 454	while (*p) {
 455		parent = *p;
 456		pregion = rb_entry(parent, struct vm_region, vm_rb);
 457		if (region->vm_start < pregion->vm_start)
 458			p = &(*p)->rb_left;
 459		else if (region->vm_start > pregion->vm_start)
 460			p = &(*p)->rb_right;
 461		else if (pregion == region)
 462			return;
 463		else
 464			BUG();
 465	}
 466
 467	rb_link_node(&region->vm_rb, parent, p);
 468	rb_insert_color(&region->vm_rb, &nommu_region_tree);
 469
 470	validate_nommu_regions();
 471}
 472
 473/*
 474 * delete a region from the global tree
 475 */
 476static void delete_nommu_region(struct vm_region *region)
 477{
 478	BUG_ON(!nommu_region_tree.rb_node);
 479
 480	validate_nommu_regions();
 481	rb_erase(&region->vm_rb, &nommu_region_tree);
 482	validate_nommu_regions();
 483}
 484
 485/*
 486 * free a contiguous series of pages
 487 */
 488static void free_page_series(unsigned long from, unsigned long to)
 489{
 490	for (; from < to; from += PAGE_SIZE) {
 491		struct page *page = virt_to_page((void *)from);
 492
 493		atomic_long_dec(&mmap_pages_allocated);
 494		put_page(page);
 495	}
 496}
 497
 498/*
 499 * release a reference to a region
 500 * - the caller must hold the region semaphore for writing, which this releases
 501 * - the region may not have been added to the tree yet, in which case vm_top
 502 *   will equal vm_start
 503 */
 504static void __put_nommu_region(struct vm_region *region)
 505	__releases(nommu_region_sem)
 506{
 507	BUG_ON(!nommu_region_tree.rb_node);
 508
 509	if (--region->vm_usage == 0) {
 510		if (region->vm_top > region->vm_start)
 511			delete_nommu_region(region);
 512		up_write(&nommu_region_sem);
 513
 514		if (region->vm_file)
 515			fput(region->vm_file);
 516
 517		/* IO memory and memory shared directly out of the pagecache
 518		 * from ramfs/tmpfs mustn't be released here */
 519		if (region->vm_flags & VM_MAPPED_COPY)
 520			free_page_series(region->vm_start, region->vm_top);
 521		kmem_cache_free(vm_region_jar, region);
 522	} else {
 523		up_write(&nommu_region_sem);
 524	}
 525}
 526
 527/*
 528 * release a reference to a region
 529 */
 530static void put_nommu_region(struct vm_region *region)
 531{
 532	down_write(&nommu_region_sem);
 533	__put_nommu_region(region);
 534}
 535
 536static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 537{
 538	vma->vm_mm = mm;
 539
 540	/* add the VMA to the mapping */
 541	if (vma->vm_file) {
 542		struct address_space *mapping = vma->vm_file->f_mapping;
 543
 544		i_mmap_lock_write(mapping);
 545		flush_dcache_mmap_lock(mapping);
 546		vma_interval_tree_insert(vma, &mapping->i_mmap);
 547		flush_dcache_mmap_unlock(mapping);
 548		i_mmap_unlock_write(mapping);
 549	}
 550}
 551
 552static void cleanup_vma_from_mm(struct vm_area_struct *vma)
 553{
 554	vma->vm_mm->map_count--;
 555	/* remove the VMA from the mapping */
 556	if (vma->vm_file) {
 557		struct address_space *mapping;
 558		mapping = vma->vm_file->f_mapping;
 559
 560		i_mmap_lock_write(mapping);
 561		flush_dcache_mmap_lock(mapping);
 562		vma_interval_tree_remove(vma, &mapping->i_mmap);
 563		flush_dcache_mmap_unlock(mapping);
 564		i_mmap_unlock_write(mapping);
 565	}
 566}
 567
 568/*
 569 * delete a VMA from its owning mm_struct and address space
 570 */
 571static int delete_vma_from_mm(struct vm_area_struct *vma)
 572{
 573	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);
 574
 575	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
 576	if (vma_iter_prealloc(&vmi, NULL)) {
 577		pr_warn("Allocation of vma tree for process %d failed\n",
 578		       current->pid);
 579		return -ENOMEM;
 580	}
 581	cleanup_vma_from_mm(vma);
 582
 583	/* remove from the MM's tree and list */
 584	vma_iter_clear(&vmi);
 585	return 0;
 586}
 587/*
 588 * destroy a VMA record
 589 */
 590static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 591{
 592	vma_close(vma);
 593	if (vma->vm_file)
 594		fput(vma->vm_file);
 595	put_nommu_region(vma->vm_region);
 596	vm_area_free(vma);
 597}
 598
 599struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
 600					     unsigned long start_addr,
 601					     unsigned long end_addr)
 602{
 603	unsigned long index = start_addr;
 604
 605	mmap_assert_locked(mm);
 606	return mt_find(&mm->mm_mt, &index, end_addr - 1);
 607}
 608EXPORT_SYMBOL(find_vma_intersection);
 609
 610/*
 611 * look up the first VMA in which addr resides, NULL if none
 612 * - should be called with mm->mmap_lock at least held readlocked
 613 */
 614struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 615{
 616	VMA_ITERATOR(vmi, mm, addr);
 617
 618	return vma_iter_load(&vmi);
 619}
 620EXPORT_SYMBOL(find_vma);
 621
 622/*
 623 * At least xtensa ends up having protection faults even with no
  624 * MMU. No stack expansion, at least.
 625 */
 626struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
 627			unsigned long addr, struct pt_regs *regs)
 628{
 629	struct vm_area_struct *vma;
 630
 631	mmap_read_lock(mm);
 632	vma = vma_lookup(mm, addr);
 633	if (!vma)
 634		mmap_read_unlock(mm);
 635	return vma;
 636}
 637
 638/*
 639 * expand a stack to a given address
 640 * - not supported under NOMMU conditions
 641 */
 642int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr)
 643{
 644	return -ENOMEM;
 645}
 646
 647struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
 648{
 649	mmap_read_unlock(mm);
 650	return NULL;
 651}
 652
 653/*
  654 * look up the first VMA that exactly matches addr
 655 * - should be called with mm->mmap_lock at least held readlocked
 656 */
 657static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 658					     unsigned long addr,
 659					     unsigned long len)
 660{
 661	struct vm_area_struct *vma;
 662	unsigned long end = addr + len;
 663	VMA_ITERATOR(vmi, mm, addr);
 664
 665	vma = vma_iter_load(&vmi);
 666	if (!vma)
 667		return NULL;
 668	if (vma->vm_start != addr)
 669		return NULL;
 670	if (vma->vm_end != end)
 671		return NULL;
 672
 673	return vma;
 674}
 675
 676/*
 677 * determine whether a mapping should be permitted and, if so, what sort of
 678 * mapping we're capable of supporting
 679 */
 680static int validate_mmap_request(struct file *file,
 681				 unsigned long addr,
 682				 unsigned long len,
 683				 unsigned long prot,
 684				 unsigned long flags,
 685				 unsigned long pgoff,
 686				 unsigned long *_capabilities)
 687{
 688	unsigned long capabilities, rlen;
 689	int ret;
 690
 691	/* do the simple checks first */
 692	if (flags & MAP_FIXED)
 693		return -EINVAL;
 694
 695	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 696	    (flags & MAP_TYPE) != MAP_SHARED)
 697		return -EINVAL;
 698
 699	if (!len)
 700		return -EINVAL;
 701
 702	/* Careful about overflows.. */
 703	rlen = PAGE_ALIGN(len);
 704	if (!rlen || rlen > TASK_SIZE)
 705		return -ENOMEM;
 706
 707	/* offset overflow? */
 708	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 709		return -EOVERFLOW;
 710
 711	if (file) {
 712		/* files must support mmap */
 713		if (!file->f_op->mmap)
 714			return -ENODEV;
 715
 716		/* work out if what we've got could possibly be shared
 717		 * - we support chardevs that provide their own "memory"
 718		 * - we support files/blockdevs that are memory backed
 719		 */
 720		if (file->f_op->mmap_capabilities) {
 721			capabilities = file->f_op->mmap_capabilities(file);
 722		} else {
 723			/* no explicit capabilities set, so assume some
 724			 * defaults */
 725			switch (file_inode(file)->i_mode & S_IFMT) {
 726			case S_IFREG:
 727			case S_IFBLK:
 728				capabilities = NOMMU_MAP_COPY;
 729				break;
 730
 731			case S_IFCHR:
 732				capabilities =
 733					NOMMU_MAP_DIRECT |
 734					NOMMU_MAP_READ |
 735					NOMMU_MAP_WRITE;
 736				break;
 737
 738			default:
 739				return -EINVAL;
 740			}
 741		}
 742
 743		/* eliminate any capabilities that we can't support on this
 744		 * device */
 745		if (!file->f_op->get_unmapped_area)
 746			capabilities &= ~NOMMU_MAP_DIRECT;
 747		if (!(file->f_mode & FMODE_CAN_READ))
 748			capabilities &= ~NOMMU_MAP_COPY;
 749
 750		/* The file shall have been opened with read permission. */
 751		if (!(file->f_mode & FMODE_READ))
 752			return -EACCES;
 753
 754		if (flags & MAP_SHARED) {
 755			/* do checks for writing, appending and locking */
 756			if ((prot & PROT_WRITE) &&
 757			    !(file->f_mode & FMODE_WRITE))
 758				return -EACCES;
 759
 760			if (IS_APPEND(file_inode(file)) &&
 761			    (file->f_mode & FMODE_WRITE))
 762				return -EACCES;
 763
 764			if (!(capabilities & NOMMU_MAP_DIRECT))
 765				return -ENODEV;
 766
 767			/* we mustn't privatise shared mappings */
 768			capabilities &= ~NOMMU_MAP_COPY;
 769		} else {
 770			/* we're going to read the file into private memory we
 771			 * allocate */
 772			if (!(capabilities & NOMMU_MAP_COPY))
 773				return -ENODEV;
 774
 775			/* we don't permit a private writable mapping to be
 776			 * shared with the backing device */
 777			if (prot & PROT_WRITE)
 778				capabilities &= ~NOMMU_MAP_DIRECT;
 779		}
 780
 781		if (capabilities & NOMMU_MAP_DIRECT) {
 782			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
 783			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
 784			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
 785			    ) {
 786				capabilities &= ~NOMMU_MAP_DIRECT;
 787				if (flags & MAP_SHARED) {
 788					pr_warn("MAP_SHARED not completely supported on !MMU\n");
 789					return -EINVAL;
 790				}
 791			}
 792		}
 793
 794		/* handle executable mappings and implied executable
 795		 * mappings */
 796		if (path_noexec(&file->f_path)) {
 797			if (prot & PROT_EXEC)
 798				return -EPERM;
 799		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
 800			/* handle implication of PROT_EXEC by PROT_READ */
 801			if (current->personality & READ_IMPLIES_EXEC) {
 802				if (capabilities & NOMMU_MAP_EXEC)
 803					prot |= PROT_EXEC;
 804			}
 805		} else if ((prot & PROT_READ) &&
 806			 (prot & PROT_EXEC) &&
 807			 !(capabilities & NOMMU_MAP_EXEC)
 808			 ) {
 809			/* backing file is not executable, try to copy */
 810			capabilities &= ~NOMMU_MAP_DIRECT;
 811		}
 812	} else {
 813		/* anonymous mappings are always memory backed and can be
 814		 * privately mapped
 815		 */
 816		capabilities = NOMMU_MAP_COPY;
 817
 818		/* handle PROT_EXEC implication by PROT_READ */
 819		if ((prot & PROT_READ) &&
 820		    (current->personality & READ_IMPLIES_EXEC))
 821			prot |= PROT_EXEC;
 822	}
 823
 824	/* allow the security API to have its say */
 825	ret = security_mmap_addr(addr);
 826	if (ret < 0)
 827		return ret;
 828
 829	/* looks okay */
 830	*_capabilities = capabilities;
 831	return 0;
 832}
 833
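A hedged userspace sketch of the capability rules above: an ordinary file on a block filesystem only offers NOMMU_MAP_COPY, and MAP_SHARED needs NOMMU_MAP_DIRECT, so the request is refused (typically -ENODEV).

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDWR);	/* any regular file */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (p == MAP_FAILED)
		perror("mmap");		/* no NOMMU_MAP_DIRECT for this file */
	return 0;
}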
 834/*
 835 * we've determined that we can make the mapping, now translate what we
 836 * now know into VMA flags
 837 */
 838static unsigned long determine_vm_flags(struct file *file,
 839					unsigned long prot,
 840					unsigned long flags,
 841					unsigned long capabilities)
 842{
 843	unsigned long vm_flags;
 844
 845	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags);
 846
 847	if (!file) {
 848		/*
 849		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
 850		 * there is no fork().
 851		 */
 852		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 853	} else if (flags & MAP_PRIVATE) {
 854		/* MAP_PRIVATE file mapping */
 855		if (capabilities & NOMMU_MAP_DIRECT)
 856			vm_flags |= (capabilities & NOMMU_VMFLAGS);
 857		else
 858			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 859
 860		if (!(prot & PROT_WRITE) && !current->ptrace)
 861			/*
 862			 * R/O private file mapping which cannot be used to
 863			 * modify memory, especially also not via active ptrace
 864			 * (e.g., set breakpoints) or later by upgrading
 865			 * permissions (no mprotect()). We can try overlaying
 866			 * the file mapping, which will work e.g., on chardevs,
  867			 * ramfs/tmpfs/shmfs and romfs/cramfs.
 868			 */
 869			vm_flags |= VM_MAYOVERLAY;
 870	} else {
 871		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
 872		vm_flags |= VM_SHARED | VM_MAYSHARE |
 873			    (capabilities & NOMMU_VMFLAGS);
 874	}
 875
 876	return vm_flags;
 877}
 878
 879/*
 880 * set up a shared mapping on a file (the driver or filesystem provides and
 881 * pins the storage)
 882 */
 883static int do_mmap_shared_file(struct vm_area_struct *vma)
 884{
 885	int ret;
 886
 887	ret = mmap_file(vma->vm_file, vma);
 888	if (ret == 0) {
 889		vma->vm_region->vm_top = vma->vm_region->vm_end;
 890		return 0;
 891	}
 892	if (ret != -ENOSYS)
 893		return ret;
 894
 895	/* getting -ENOSYS indicates that direct mmap isn't possible (as
 896	 * opposed to tried but failed) so we can only give a suitable error as
 897	 * it's not possible to make a private copy if MAP_SHARED was given */
 898	return -ENODEV;
 899}
 900
 901/*
 902 * set up a private mapping or an anonymous shared mapping
 903 */
 904static int do_mmap_private(struct vm_area_struct *vma,
 905			   struct vm_region *region,
 906			   unsigned long len,
 907			   unsigned long capabilities)
 908{
 909	unsigned long total, point;
 910	void *base;
 911	int ret, order;
 912
 913	/*
 914	 * Invoke the file's mapping function so that it can keep track of
 915	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
 916	 * it may attempt to share, which will make is_nommu_shared_mapping()
 917	 * happy.
 918	 */
 919	if (capabilities & NOMMU_MAP_DIRECT) {
 920		ret = mmap_file(vma->vm_file, vma);
 921		/* shouldn't return success if we're not sharing */
 922		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
 923			ret = -ENOSYS;
 924		if (ret == 0) {
 925			vma->vm_region->vm_top = vma->vm_region->vm_end;
 926			return 0;
 927		}
 928		if (ret != -ENOSYS)
 929			return ret;
 930
 931		/* getting an ENOSYS error indicates that direct mmap isn't
 932		 * possible (as opposed to tried but failed) so we'll try to
 933		 * make a private copy of the data and map that instead */
 934	}
 935
 936
 937	/* allocate some memory to hold the mapping
 938	 * - note that this may not return a page-aligned address if the object
 939	 *   we're allocating is smaller than a page
 940	 */
 941	order = get_order(len);
 942	total = 1 << order;
 943	point = len >> PAGE_SHIFT;
 944
 945	/* we don't want to allocate a power-of-2 sized page set */
 946	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
 947		total = point;
 948
 949	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
 950	if (!base)
 951		goto enomem;
 952
 953	atomic_long_add(total, &mmap_pages_allocated);
 954
 955	vm_flags_set(vma, VM_MAPPED_COPY);
 956	region->vm_flags = vma->vm_flags;
 957	region->vm_start = (unsigned long) base;
 958	region->vm_end   = region->vm_start + len;
 959	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
 960
 961	vma->vm_start = region->vm_start;
 962	vma->vm_end   = region->vm_start + len;
 963
 964	if (vma->vm_file) {
 965		/* read the contents of a file into the copy */
 966		loff_t fpos;
 967
 968		fpos = vma->vm_pgoff;
 969		fpos <<= PAGE_SHIFT;
 970
 971		ret = kernel_read(vma->vm_file, base, len, &fpos);
 972		if (ret < 0)
 973			goto error_free;
 974
 975		/* clear the last little bit */
 976		if (ret < len)
 977			memset(base + ret, 0, len - ret);
 978
 979	} else {
 980		vma_set_anonymous(vma);
 981	}
 982
 983	return 0;
 984
 985error_free:
 986	free_page_series(region->vm_start, region->vm_top);
 987	region->vm_start = vma->vm_start = 0;
 988	region->vm_end   = vma->vm_end = 0;
 989	region->vm_top   = 0;
 990	return ret;
 991
 992enomem:
 993	pr_err("Allocation of length %lu from process %d (%s) failed\n",
 994	       len, current->pid, current->comm);
 995	show_mem();
 996	return -ENOMEM;
 997}
 998
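As a worked example of the sizing above (assuming the default CONFIG_NOMMU_INITIAL_TRIM_EXCESS of 1): a 5-page request gives order = 3, so total = 8 and point = 5; since total - point = 3 >= sysctl_nr_trim_pages, total is trimmed back to 5 and alloc_pages_exact() hands out exactly 5 pages instead of a power-of-two block.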
 999/*
1000 * handle mapping creation for uClinux
1001 */
1002unsigned long do_mmap(struct file *file,
1003			unsigned long addr,
1004			unsigned long len,
1005			unsigned long prot,
1006			unsigned long flags,
1007			vm_flags_t vm_flags,
1008			unsigned long pgoff,
1009			unsigned long *populate,
1010			struct list_head *uf)
1011{
1012	struct vm_area_struct *vma;
1013	struct vm_region *region;
1014	struct rb_node *rb;
1015	unsigned long capabilities, result;
1016	int ret;
1017	VMA_ITERATOR(vmi, current->mm, 0);
1018
1019	*populate = 0;
1020
1021	/* decide whether we should attempt the mapping, and if so what sort of
1022	 * mapping */
1023	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1024				    &capabilities);
1025	if (ret < 0)
1026		return ret;
1027
1028	/* we ignore the address hint */
1029	addr = 0;
1030	len = PAGE_ALIGN(len);
1031
1032	/* we've determined that we can make the mapping, now translate what we
1033	 * now know into VMA flags */
1034	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
1035
1036
1037	/* we're going to need to record the mapping */
1038	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1039	if (!region)
1040		goto error_getting_region;
1041
1042	vma = vm_area_alloc(current->mm);
1043	if (!vma)
1044		goto error_getting_vma;
1045
1046	region->vm_usage = 1;
1047	region->vm_flags = vm_flags;
1048	region->vm_pgoff = pgoff;
1049
1050	vm_flags_init(vma, vm_flags);
1051	vma->vm_pgoff = pgoff;
1052
1053	if (file) {
1054		region->vm_file = get_file(file);
1055		vma->vm_file = get_file(file);
1056	}
1057
1058	down_write(&nommu_region_sem);
1059
1060	/* if we want to share, we need to check for regions created by other
1061	 * mmap() calls that overlap with our proposed mapping
1062	 * - we can only share with a superset match on most regular files
1063	 * - shared mappings on character devices and memory backed files are
1064	 *   permitted to overlap inexactly as far as we are concerned for in
1065	 *   these cases, sharing is handled in the driver or filesystem rather
1066	 *   than here
1067	 */
1068	if (is_nommu_shared_mapping(vm_flags)) {
1069		struct vm_region *pregion;
1070		unsigned long pglen, rpglen, pgend, rpgend, start;
1071
1072		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1073		pgend = pgoff + pglen;
1074
1075		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1076			pregion = rb_entry(rb, struct vm_region, vm_rb);
1077
1078			if (!is_nommu_shared_mapping(pregion->vm_flags))
1079				continue;
1080
1081			/* search for overlapping mappings on the same file */
1082			if (file_inode(pregion->vm_file) !=
1083			    file_inode(file))
1084				continue;
1085
1086			if (pregion->vm_pgoff >= pgend)
1087				continue;
1088
1089			rpglen = pregion->vm_end - pregion->vm_start;
1090			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1091			rpgend = pregion->vm_pgoff + rpglen;
1092			if (pgoff >= rpgend)
1093				continue;
1094
1095			/* handle inexactly overlapping matches between
1096			 * mappings */
1097			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1098			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1099				/* new mapping is not a subset of the region */
1100				if (!(capabilities & NOMMU_MAP_DIRECT))
1101					goto sharing_violation;
1102				continue;
1103			}
1104
1105			/* we've found a region we can share */
1106			pregion->vm_usage++;
1107			vma->vm_region = pregion;
1108			start = pregion->vm_start;
1109			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1110			vma->vm_start = start;
1111			vma->vm_end = start + len;
1112
1113			if (pregion->vm_flags & VM_MAPPED_COPY)
1114				vm_flags_set(vma, VM_MAPPED_COPY);
1115			else {
1116				ret = do_mmap_shared_file(vma);
1117				if (ret < 0) {
1118					vma->vm_region = NULL;
1119					vma->vm_start = 0;
1120					vma->vm_end = 0;
1121					pregion->vm_usage--;
1122					pregion = NULL;
1123					goto error_just_free;
1124				}
1125			}
1126			fput(region->vm_file);
1127			kmem_cache_free(vm_region_jar, region);
1128			region = pregion;
1129			result = start;
1130			goto share;
1131		}
1132
1133		/* obtain the address at which to make a shared mapping
1134		 * - this is the hook for quasi-memory character devices to
1135		 *   tell us the location of a shared mapping
1136		 */
1137		if (capabilities & NOMMU_MAP_DIRECT) {
1138			addr = file->f_op->get_unmapped_area(file, addr, len,
1139							     pgoff, flags);
1140			if (IS_ERR_VALUE(addr)) {
1141				ret = addr;
1142				if (ret != -ENOSYS)
1143					goto error_just_free;
1144
1145				/* the driver refused to tell us where to site
1146				 * the mapping so we'll have to attempt to copy
1147				 * it */
1148				ret = -ENODEV;
1149				if (!(capabilities & NOMMU_MAP_COPY))
1150					goto error_just_free;
1151
1152				capabilities &= ~NOMMU_MAP_DIRECT;
1153			} else {
1154				vma->vm_start = region->vm_start = addr;
1155				vma->vm_end = region->vm_end = addr + len;
1156			}
1157		}
1158	}
1159
1160	vma->vm_region = region;
1161
1162	/* set up the mapping
1163	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
1164	 */
1165	if (file && vma->vm_flags & VM_SHARED)
1166		ret = do_mmap_shared_file(vma);
1167	else
1168		ret = do_mmap_private(vma, region, len, capabilities);
1169	if (ret < 0)
1170		goto error_just_free;
1171	add_nommu_region(region);
1172
1173	/* clear anonymous mappings that don't ask for uninitialized data */
1174	if (!vma->vm_file &&
1175	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
1176	     !(flags & MAP_UNINITIALIZED)))
1177		memset((void *)region->vm_start, 0,
1178		       region->vm_end - region->vm_start);
1179
1180	/* okay... we have a mapping; now we have to register it */
1181	result = vma->vm_start;
1182
1183	current->mm->total_vm += len >> PAGE_SHIFT;
1184
1185share:
1186	BUG_ON(!vma->vm_region);
1187	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
1188	if (vma_iter_prealloc(&vmi, vma))
1189		goto error_just_free;
1190
1191	setup_vma_to_mm(vma, current->mm);
1192	current->mm->map_count++;
1193	/* add the VMA to the tree */
1194	vma_iter_store(&vmi, vma);
1195
1196	/* we flush the region from the icache only when the first executable
1197	 * mapping of it is made  */
1198	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1199		flush_icache_user_range(region->vm_start, region->vm_end);
1200		region->vm_icache_flushed = true;
1201	}
1202
1203	up_write(&nommu_region_sem);
1204
1205	return result;
1206
1207error_just_free:
1208	up_write(&nommu_region_sem);
1209error:
1210	vma_iter_free(&vmi);
1211	if (region->vm_file)
1212		fput(region->vm_file);
1213	kmem_cache_free(vm_region_jar, region);
1214	if (vma->vm_file)
1215		fput(vma->vm_file);
1216	vm_area_free(vma);
1217	return ret;
1218
1219sharing_violation:
1220	up_write(&nommu_region_sem);
1221	pr_warn("Attempt to share mismatched mappings\n");
1222	ret = -EINVAL;
1223	goto error;
1224
1225error_getting_vma:
1226	kmem_cache_free(vm_region_jar, region);
1227	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
1228			len, current->pid);
1229	show_mem();
1230	return -ENOMEM;
1231
1232error_getting_region:
1233	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
1234			len, current->pid);
1235	show_mem();
1236	return -ENOMEM;
1237}
1238
1239unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1240			      unsigned long prot, unsigned long flags,
1241			      unsigned long fd, unsigned long pgoff)
1242{
1243	struct file *file = NULL;
1244	unsigned long retval = -EBADF;
1245
1246	audit_mmap_fd(fd, flags);
1247	if (!(flags & MAP_ANONYMOUS)) {
1248		file = fget(fd);
1249		if (!file)
1250			goto out;
1251	}
1252
1253	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1254
1255	if (file)
1256		fput(file);
1257out:
1258	return retval;
1259}
1260
1261SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1262		unsigned long, prot, unsigned long, flags,
1263		unsigned long, fd, unsigned long, pgoff)
1264{
1265	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1266}
1267
1268#ifdef __ARCH_WANT_SYS_OLD_MMAP
1269struct mmap_arg_struct {
1270	unsigned long addr;
1271	unsigned long len;
1272	unsigned long prot;
1273	unsigned long flags;
1274	unsigned long fd;
1275	unsigned long offset;
1276};
1277
1278SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1279{
1280	struct mmap_arg_struct a;
1281
1282	if (copy_from_user(&a, arg, sizeof(a)))
1283		return -EFAULT;
1284	if (offset_in_page(a.offset))
1285		return -EINVAL;
1286
1287	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1288			       a.offset >> PAGE_SHIFT);
1289}
1290#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1291
1292/*
 1293 * split a vma into two pieces at address 'addr'; a new vma is allocated for
 1294 * either the first part or the tail.
1295 */
1296static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
1297		     unsigned long addr, int new_below)
1298{
1299	struct vm_area_struct *new;
1300	struct vm_region *region;
1301	unsigned long npages;
1302	struct mm_struct *mm;
1303
1304	/* we're only permitted to split anonymous regions (these should have
1305	 * only a single usage on the region) */
1306	if (vma->vm_file)
1307		return -ENOMEM;
1308
1309	mm = vma->vm_mm;
1310	if (mm->map_count >= sysctl_max_map_count)
1311		return -ENOMEM;
1312
1313	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1314	if (!region)
1315		return -ENOMEM;
1316
1317	new = vm_area_dup(vma);
1318	if (!new)
1319		goto err_vma_dup;
1320
1321	/* most fields are the same, copy all, and then fixup */
1322	*region = *vma->vm_region;
1323	new->vm_region = region;
1324
1325	npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1326
1327	if (new_below) {
1328		region->vm_top = region->vm_end = new->vm_end = addr;
1329	} else {
1330		region->vm_start = new->vm_start = addr;
1331		region->vm_pgoff = new->vm_pgoff += npages;
1332	}
1333
1334	vma_iter_config(vmi, new->vm_start, new->vm_end);
1335	if (vma_iter_prealloc(vmi, vma)) {
1336		pr_warn("Allocation of vma tree for process %d failed\n",
1337			current->pid);
1338		goto err_vmi_preallocate;
1339	}
1340
1341	if (new->vm_ops && new->vm_ops->open)
1342		new->vm_ops->open(new);
1343
1344	down_write(&nommu_region_sem);
1345	delete_nommu_region(vma->vm_region);
1346	if (new_below) {
1347		vma->vm_region->vm_start = vma->vm_start = addr;
1348		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1349	} else {
1350		vma->vm_region->vm_end = vma->vm_end = addr;
1351		vma->vm_region->vm_top = addr;
1352	}
1353	add_nommu_region(vma->vm_region);
1354	add_nommu_region(new->vm_region);
1355	up_write(&nommu_region_sem);
1356
1357	setup_vma_to_mm(vma, mm);
1358	setup_vma_to_mm(new, mm);
1359	vma_iter_store(vmi, new);
1360	mm->map_count++;
1361	return 0;
1362
1363err_vmi_preallocate:
1364	vm_area_free(new);
1365err_vma_dup:
1366	kmem_cache_free(vm_region_jar, region);
1367	return -ENOMEM;
1368}
1369
1370/*
1371 * shrink a VMA by removing the specified chunk from either the beginning or
1372 * the end
1373 */
1374static int vmi_shrink_vma(struct vma_iterator *vmi,
1375		      struct vm_area_struct *vma,
1376		      unsigned long from, unsigned long to)
1377{
1378	struct vm_region *region;
1379
1380	/* adjust the VMA's pointers, which may reposition it in the MM's tree
1381	 * and list */
1382	if (from > vma->vm_start) {
1383		if (vma_iter_clear_gfp(vmi, from, vma->vm_end, GFP_KERNEL))
1384			return -ENOMEM;
1385		vma->vm_end = from;
1386	} else {
1387		if (vma_iter_clear_gfp(vmi, vma->vm_start, to, GFP_KERNEL))
1388			return -ENOMEM;
1389		vma->vm_start = to;
1390	}
1391
1392	/* cut the backing region down to size */
1393	region = vma->vm_region;
1394	BUG_ON(region->vm_usage != 1);
1395
1396	down_write(&nommu_region_sem);
1397	delete_nommu_region(region);
1398	if (from > region->vm_start) {
1399		to = region->vm_top;
1400		region->vm_top = region->vm_end = from;
1401	} else {
1402		region->vm_start = to;
1403	}
1404	add_nommu_region(region);
1405	up_write(&nommu_region_sem);
1406
1407	free_page_series(from, to);
1408	return 0;
1409}
1410
1411/*
1412 * release a mapping
1413 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1414 *   VMA, though it need not cover the whole VMA
1415 */
1416int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
1417{
1418	VMA_ITERATOR(vmi, mm, start);
1419	struct vm_area_struct *vma;
1420	unsigned long end;
1421	int ret = 0;
1422
1423	len = PAGE_ALIGN(len);
1424	if (len == 0)
1425		return -EINVAL;
1426
1427	end = start + len;
1428
1429	/* find the first potentially overlapping VMA */
1430	vma = vma_find(&vmi, end);
1431	if (!vma) {
1432		static int limit;
1433		if (limit < 5) {
1434			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
1435					current->pid, current->comm,
1436					start, start + len - 1);
1437			limit++;
1438		}
1439		return -EINVAL;
1440	}
1441
1442	/* we're allowed to split an anonymous VMA but not a file-backed one */
1443	if (vma->vm_file) {
1444		do {
1445			if (start > vma->vm_start)
1446				return -EINVAL;
1447			if (end == vma->vm_end)
1448				goto erase_whole_vma;
1449			vma = vma_find(&vmi, end);
1450		} while (vma);
1451		return -EINVAL;
1452	} else {
1453		/* the chunk must be a subset of the VMA found */
1454		if (start == vma->vm_start && end == vma->vm_end)
1455			goto erase_whole_vma;
1456		if (start < vma->vm_start || end > vma->vm_end)
1457			return -EINVAL;
1458		if (offset_in_page(start))
1459			return -EINVAL;
1460		if (end != vma->vm_end && offset_in_page(end))
1461			return -EINVAL;
1462		if (start != vma->vm_start && end != vma->vm_end) {
1463			ret = split_vma(&vmi, vma, start, 1);
1464			if (ret < 0)
1465				return ret;
1466		}
1467		return vmi_shrink_vma(&vmi, vma, start, end);
1468	}
1469
1470erase_whole_vma:
1471	if (delete_vma_from_mm(vma))
1472		ret = -ENOMEM;
1473	else
1474		delete_vma(mm, vma);
1475	return ret;
1476}
1477
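A hedged userspace sketch of the file-backed restriction above: only a whole file mapping can be unmapped on !MMU, so releasing just its first page is rejected.

#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/bin/sh", O_RDONLY);
	char *p = mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 0);

	if (p != MAP_FAILED && munmap(p, 4096) == -1)
		perror("partial munmap");	/* -EINVAL expected here */
	return 0;
}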
1478int vm_munmap(unsigned long addr, size_t len)
1479{
1480	struct mm_struct *mm = current->mm;
1481	int ret;
1482
1483	mmap_write_lock(mm);
1484	ret = do_munmap(mm, addr, len, NULL);
1485	mmap_write_unlock(mm);
1486	return ret;
1487}
1488EXPORT_SYMBOL(vm_munmap);
1489
1490SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1491{
1492	return vm_munmap(addr, len);
1493}
1494
1495/*
1496 * release all the mappings made in a process's VM space
1497 */
1498void exit_mmap(struct mm_struct *mm)
1499{
1500	VMA_ITERATOR(vmi, mm, 0);
1501	struct vm_area_struct *vma;
1502
1503	if (!mm)
1504		return;
1505
1506	mm->total_vm = 0;
1507
1508	/*
1509	 * Lock the mm to avoid assert complaining even though this is the only
1510	 * user of the mm
1511	 */
1512	mmap_write_lock(mm);
1513	for_each_vma(vmi, vma) {
1514		cleanup_vma_from_mm(vma);
1515		delete_vma(mm, vma);
1516		cond_resched();
1517	}
1518	__mt_destroy(&mm->mm_mt);
1519	mmap_write_unlock(mm);
1520}
1521
1522/*
1523 * expand (or shrink) an existing mapping, potentially moving it at the same
1524 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1525 *
1526 * under NOMMU conditions, we only permit changing a mapping's size, and only
1527 * as long as it stays within the region allocated by do_mmap_private() and the
1528 * block is not shareable
1529 *
1530 * MREMAP_FIXED is not supported under NOMMU conditions
1531 */
1532static unsigned long do_mremap(unsigned long addr,
1533			unsigned long old_len, unsigned long new_len,
1534			unsigned long flags, unsigned long new_addr)
1535{
1536	struct vm_area_struct *vma;
1537
1538	/* insanity checks first */
1539	old_len = PAGE_ALIGN(old_len);
1540	new_len = PAGE_ALIGN(new_len);
1541	if (old_len == 0 || new_len == 0)
1542		return (unsigned long) -EINVAL;
1543
1544	if (offset_in_page(addr))
1545		return -EINVAL;
1546
1547	if (flags & MREMAP_FIXED && new_addr != addr)
1548		return (unsigned long) -EINVAL;
1549
1550	vma = find_vma_exact(current->mm, addr, old_len);
1551	if (!vma)
1552		return (unsigned long) -EINVAL;
1553
1554	if (vma->vm_end != vma->vm_start + old_len)
1555		return (unsigned long) -EFAULT;
1556
1557	if (is_nommu_shared_mapping(vma->vm_flags))
1558		return (unsigned long) -EPERM;
1559
1560	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1561		return (unsigned long) -ENOMEM;
1562
1563	/* all checks complete - do it */
1564	vma->vm_end = vma->vm_start + new_len;
1565	return vma->vm_start;
1566}
1567
1568SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1569		unsigned long, new_len, unsigned long, flags,
1570		unsigned long, new_addr)
1571{
1572	unsigned long ret;
1573
1574	mmap_write_lock(current->mm);
1575	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1576	mmap_write_unlock(current->mm);
1577	return ret;
1578}
1579
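A hedged userspace sketch of the constraints above: MREMAP_MAYMOVE is ignored on !MMU, and growing only succeeds while the new length still fits the region do_mmap_private() originally allocated.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *q = mremap(p, 4096, 65536, MREMAP_MAYMOVE);

	if (q == MAP_FAILED)
		perror("mremap");	/* -ENOMEM: the region cannot hold 64KiB */
	return 0;
}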
1580int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1581		unsigned long pfn, unsigned long size, pgprot_t prot)
1582{
1583	if (addr != (pfn << PAGE_SHIFT))
1584		return -EINVAL;
1585
1586	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1587	return 0;
1588}
1589EXPORT_SYMBOL(remap_pfn_range);
1590
1591int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1592{
1593	unsigned long pfn = start >> PAGE_SHIFT;
1594	unsigned long vm_len = vma->vm_end - vma->vm_start;
1595
1596	pfn += vma->vm_pgoff;
1597	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1598}
1599EXPORT_SYMBOL(vm_iomap_memory);
1600
1601int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1602			unsigned long pgoff)
1603{
1604	unsigned int size = vma->vm_end - vma->vm_start;
1605
1606	if (!(vma->vm_flags & VM_USERMAP))
1607		return -EINVAL;
1608
1609	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1610	vma->vm_end = vma->vm_start + size;
1611
1612	return 0;
1613}
1614EXPORT_SYMBOL(remap_vmalloc_range);
1615
1616vm_fault_t filemap_fault(struct vm_fault *vmf)
1617{
1618	BUG();
1619	return 0;
1620}
1621EXPORT_SYMBOL(filemap_fault);
1622
1623vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1624		pgoff_t start_pgoff, pgoff_t end_pgoff)
1625{
1626	BUG();
1627	return 0;
1628}
1629EXPORT_SYMBOL(filemap_map_pages);
1630
1631static int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
1632			      void *buf, int len, unsigned int gup_flags)
1633{
1634	struct vm_area_struct *vma;
1635	int write = gup_flags & FOLL_WRITE;
1636
1637	if (mmap_read_lock_killable(mm))
1638		return 0;
1639
1640	/* the access must start within one of the target process's mappings */
1641	vma = find_vma(mm, addr);
1642	if (vma) {
1643		/* don't overrun this mapping */
1644		if (addr + len >= vma->vm_end)
1645			len = vma->vm_end - addr;
1646
1647		/* only read or write mappings where it is permitted */
1648		if (write && vma->vm_flags & VM_MAYWRITE)
1649			copy_to_user_page(vma, NULL, addr,
1650					 (void *) addr, buf, len);
1651		else if (!write && vma->vm_flags & VM_MAYREAD)
1652			copy_from_user_page(vma, NULL, addr,
1653					    buf, (void *) addr, len);
1654		else
1655			len = 0;
1656	} else {
1657		len = 0;
1658	}
1659
1660	mmap_read_unlock(mm);
1661
1662	return len;
1663}
1664
1665/**
1666 * access_remote_vm - access another process' address space
1667 * @mm:		the mm_struct of the target address space
1668 * @addr:	start address to access
1669 * @buf:	source or destination buffer
1670 * @len:	number of bytes to transfer
1671 * @gup_flags:	flags modifying lookup behaviour
1672 *
1673 * The caller must hold a reference on @mm.
1674 */
1675int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1676		void *buf, int len, unsigned int gup_flags)
1677{
1678	return __access_remote_vm(mm, addr, buf, len, gup_flags);
1679}
1680
1681/*
1682 * Access another process' address space.
1683 * - source/target buffer must be kernel space
1684 */
1685int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1686		unsigned int gup_flags)
1687{
1688	struct mm_struct *mm;
1689
1690	if (addr + len < addr)
1691		return 0;
1692
1693	mm = get_task_mm(tsk);
1694	if (!mm)
1695		return 0;
1696
1697	len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1698
1699	mmput(mm);
1700	return len;
1701}
1702EXPORT_SYMBOL_GPL(access_process_vm);
1703
1704/**
1705 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1706 * @inode: The inode to check
1707 * @size: The current filesize of the inode
1708 * @newsize: The proposed filesize of the inode
1709 *
1710 * Check the shared mappings on an inode on behalf of a shrinking truncate to
1711 * make sure that any outstanding VMAs aren't broken and then shrink the
 1712 * vm_regions that extend beyond the new size so that do_mmap() doesn't
1713 * automatically grant mappings that are too large.
1714 */
1715int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1716				size_t newsize)
1717{
1718	struct vm_area_struct *vma;
1719	struct vm_region *region;
1720	pgoff_t low, high;
1721	size_t r_size, r_top;
1722
1723	low = newsize >> PAGE_SHIFT;
1724	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1725
1726	down_write(&nommu_region_sem);
1727	i_mmap_lock_read(inode->i_mapping);
1728
1729	/* search for VMAs that fall within the dead zone */
1730	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1731		/* found one - only interested if it's shared out of the page
1732		 * cache */
1733		if (vma->vm_flags & VM_SHARED) {
1734			i_mmap_unlock_read(inode->i_mapping);
1735			up_write(&nommu_region_sem);
1736			return -ETXTBSY; /* not quite true, but near enough */
1737		}
1738	}
1739
1740	/* reduce any regions that overlap the dead zone - if in existence,
1741	 * these will be pointed to by VMAs that don't overlap the dead zone
1742	 *
1743	 * we don't check for any regions that start beyond the EOF as there
1744	 * shouldn't be any
1745	 */
1746	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1747		if (!(vma->vm_flags & VM_SHARED))
1748			continue;
1749
1750		region = vma->vm_region;
1751		r_size = region->vm_top - region->vm_start;
1752		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1753
1754		if (r_top > newsize) {
1755			region->vm_top -= r_top - newsize;
1756			if (region->vm_end > region->vm_top)
1757				region->vm_end = region->vm_top;
1758		}
1759	}
1760
1761	i_mmap_unlock_read(inode->i_mapping);
1762	up_write(&nommu_region_sem);
1763	return 0;
1764}
1765
1766/*
1767 * Initialise sysctl_user_reserve_kbytes.
1768 *
1769 * This is intended to prevent a user from starting a single memory hogging
1770 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1771 * mode.
1772 *
1773 * The default value is min(3% of free memory, 128MB)
1774 * 128MB is enough to recover with sshd/login, bash, and top/kill.
1775 */
1776static int __meminit init_user_reserve(void)
1777{
1778	unsigned long free_kbytes;
1779
1780	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1781
1782	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1783	return 0;
1784}
1785subsys_initcall(init_user_reserve);
1786
1787/*
1788 * Initialise sysctl_admin_reserve_kbytes.
1789 *
1790 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1791 * to log in and kill a memory hogging process.
1792 *
1793 * Systems with more than 256MB will reserve 8MB, enough to recover
1794 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
1795 * only reserve 3% of free pages by default.
1796 */
1797static int __meminit init_admin_reserve(void)
1798{
1799	unsigned long free_kbytes;
1800
1801	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
1802
1803	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1804	return 0;
1805}
1806subsys_initcall(init_admin_reserve);
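For concreteness: free_kbytes / 32 is roughly 3.1% of free memory, 1UL << 17 kbytes is 128 MiB and 1UL << 13 kbytes is 8 MiB. A board with 64 MiB free therefore gets a 2048 kbyte reserve from both initcalls, while the user reserve only hits its 128 MiB cap once free memory exceeds roughly 4 GiB.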