mm/util.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/mm.h>
   3#include <linux/slab.h>
   4#include <linux/string.h>
   5#include <linux/compiler.h>
   6#include <linux/export.h>
   7#include <linux/err.h>
   8#include <linux/sched.h>
   9#include <linux/sched/mm.h>
  10#include <linux/sched/signal.h>
  11#include <linux/sched/task_stack.h>
  12#include <linux/security.h>
  13#include <linux/swap.h>
  14#include <linux/swapops.h>
  15#include <linux/mman.h>
  16#include <linux/hugetlb.h>
  17#include <linux/vmalloc.h>
  18#include <linux/userfaultfd_k.h>
  19#include <linux/elf.h>
  20#include <linux/elf-randomize.h>
  21#include <linux/personality.h>
  22#include <linux/random.h>
  23#include <linux/processor.h>
  24#include <linux/sizes.h>
  25#include <linux/compat.h>
  26
  27#include <linux/uaccess.h>
  28
  29#include "internal.h"
  30#include "swap.h"
  31
  32/**
  33 * kfree_const - conditionally free memory
  34 * @x: pointer to the memory
  35 *
  36 * Function calls kfree only if @x is not in .rodata section.
  37 */
  38void kfree_const(const void *x)
  39{
  40	if (!is_kernel_rodata((unsigned long)x))
  41		kfree(x);
  42}
  43EXPORT_SYMBOL(kfree_const);
  44
  45/**
  46 * kstrdup - allocate space for and copy an existing string
  47 * @s: the string to duplicate
  48 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  49 *
  50 * Return: newly allocated copy of @s or %NULL in case of error
  51 */
  52char *kstrdup(const char *s, gfp_t gfp)
  53{
  54	size_t len;
  55	char *buf;
  56
  57	if (!s)
  58		return NULL;
  59
  60	len = strlen(s) + 1;
  61	buf = kmalloc_track_caller(len, gfp);
  62	if (buf)
  63		memcpy(buf, s, len);
  64	return buf;
  65}
  66EXPORT_SYMBOL(kstrdup);
  67
  68/**
  69 * kstrdup_const - conditionally duplicate an existing const string
  70 * @s: the string to duplicate
  71 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  72 *
  73 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
  74 * must not be passed to krealloc().
  75 *
  76 * Return: source string if it is in .rodata section otherwise
  77 * fallback to kstrdup.
  78 */
  79const char *kstrdup_const(const char *s, gfp_t gfp)
  80{
  81	if (is_kernel_rodata((unsigned long)s))
  82		return s;
  83
  84	return kstrdup(s, gfp);
  85}
  86EXPORT_SYMBOL(kstrdup_const);
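Editor's note (illustration, not part of the file): a minimal sketch of how kstrdup_const() pairs with kfree_const(). The copy is only made when the source string is not in .rodata, so the matching free must also go through kfree_const(). The struct and function names below are hypothetical; the usual headers (<linux/slab.h>, <linux/string.h>) are assumed.

	struct demo_obj {
		const char *name;
	};

	static int demo_set_name(struct demo_obj *obj, const char *name)
	{
		const char *copy = kstrdup_const(name, GFP_KERNEL);

		if (!copy)
			return -ENOMEM;
		kfree_const(obj->name);	/* correct for .rodata and heap pointers alike */
		obj->name = copy;
		return 0;
	}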
  87
  88/**
  89 * kstrndup - allocate space for and copy an existing string
  90 * @s: the string to duplicate
  91 * @max: read at most @max chars from @s
  92 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  93 *
  94 * Note: Use kmemdup_nul() instead if the size is known exactly.
  95 *
  96 * Return: newly allocated copy of @s or %NULL in case of error
  97 */
  98char *kstrndup(const char *s, size_t max, gfp_t gfp)
  99{
 100	size_t len;
 101	char *buf;
 102
 103	if (!s)
 104		return NULL;
 105
 106	len = strnlen(s, max);
 107	buf = kmalloc_track_caller(len+1, gfp);
 108	if (buf) {
 109		memcpy(buf, s, len);
 110		buf[len] = '\0';
 111	}
 112	return buf;
 113}
 114EXPORT_SYMBOL(kstrndup);
 115
 116/**
 117 * kmemdup - duplicate region of memory
 118 *
 119 * @src: memory region to duplicate
 120 * @len: memory region length
 121 * @gfp: GFP mask to use
 122 *
 123 * Return: newly allocated copy of @src or %NULL in case of error
 124 */
 125void *kmemdup(const void *src, size_t len, gfp_t gfp)
 126{
 127	void *p;
 128
 129	p = kmalloc_track_caller(len, gfp);
 130	if (p)
 131		memcpy(p, src, len);
 132	return p;
 133}
 134EXPORT_SYMBOL(kmemdup);
 135
 136/**
 137 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 138 * @s: The data to stringify
 139 * @len: The size of the data
 140 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 141 *
 142 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 143 * case of error
 144 */
 145char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
 146{
 147	char *buf;
 148
 149	if (!s)
 150		return NULL;
 151
 152	buf = kmalloc_track_caller(len + 1, gfp);
 153	if (buf) {
 154		memcpy(buf, s, len);
 155		buf[len] = '\0';
 156	}
 157	return buf;
 158}
 159EXPORT_SYMBOL(kmemdup_nul);
 160
 161/**
 162 * memdup_user - duplicate memory region from user space
 163 *
 164 * @src: source address in user space
 165 * @len: number of bytes to copy
 166 *
 167 * Return: an ERR_PTR() on failure.  Result is physically
 168 * contiguous, to be freed by kfree().
 169 */
 170void *memdup_user(const void __user *src, size_t len)
 171{
 172	void *p;
 173
 174	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
 175	if (!p)
 176		return ERR_PTR(-ENOMEM);
 177
 178	if (copy_from_user(p, src, len)) {
 179		kfree(p);
 180		return ERR_PTR(-EFAULT);
 181	}
 182
 183	return p;
 184}
 185EXPORT_SYMBOL(memdup_user);
 186
 187/**
 188 * vmemdup_user - duplicate memory region from user space
 189 *
 190 * @src: source address in user space
 191 * @len: number of bytes to copy
 192 *
  193 * Return: an ERR_PTR() on failure.  Result may not be
 194 * physically contiguous.  Use kvfree() to free.
 195 */
 196void *vmemdup_user(const void __user *src, size_t len)
 197{
 198	void *p;
 199
 200	p = kvmalloc(len, GFP_USER);
 201	if (!p)
 202		return ERR_PTR(-ENOMEM);
 203
 204	if (copy_from_user(p, src, len)) {
 205		kvfree(p);
 206		return ERR_PTR(-EFAULT);
 207	}
 208
 209	return p;
 210}
 211EXPORT_SYMBOL(vmemdup_user);
 212
 213/**
 214 * strndup_user - duplicate an existing string from user space
 215 * @s: The string to duplicate
 216 * @n: Maximum number of bytes to copy, including the trailing NUL.
 217 *
 218 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 219 */
 220char *strndup_user(const char __user *s, long n)
 221{
 222	char *p;
 223	long length;
 224
 225	length = strnlen_user(s, n);
 226
 227	if (!length)
 228		return ERR_PTR(-EFAULT);
 229
 230	if (length > n)
 231		return ERR_PTR(-EINVAL);
 232
 233	p = memdup_user(s, length);
 234
 235	if (IS_ERR(p))
 236		return p;
 237
 238	p[length - 1] = '\0';
 239
 240	return p;
 241}
 242EXPORT_SYMBOL(strndup_user);
 243
 244/**
 245 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 246 *
 247 * @src: source address in user space
 248 * @len: number of bytes to copy
 249 *
 250 * Return: an ERR_PTR() on failure.
 251 */
 252void *memdup_user_nul(const void __user *src, size_t len)
 253{
 254	char *p;
 255
 256	/*
 257	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
 258	 * cause pagefault, which makes it pointless to use GFP_NOFS
 259	 * or GFP_ATOMIC.
 260	 */
 261	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
 262	if (!p)
 263		return ERR_PTR(-ENOMEM);
 264
 265	if (copy_from_user(p, src, len)) {
 266		kfree(p);
 267		return ERR_PTR(-EFAULT);
 268	}
 269	p[len] = '\0';
 270
 271	return p;
 272}
 273EXPORT_SYMBOL(memdup_user_nul);
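Editor's note (illustration only): memdup_user_nul() is the usual way to bring in a short, unterminated user buffer, e.g. from a procfs/debugfs write handler, and have it NUL-terminated for string parsing. A minimal sketch with hypothetical names:

	static ssize_t demo_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
	{
		char *kbuf = memdup_user_nul(ubuf, count);
		int ret;

		if (IS_ERR(kbuf))
			return PTR_ERR(kbuf);
		ret = demo_parse_command(kbuf);	/* hypothetical parser */
		kfree(kbuf);
		return ret ? ret : count;
	}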
 274
 275/* Check if the vma is being used as a stack by this task */
 276int vma_is_stack_for_current(struct vm_area_struct *vma)
 277{
 278	struct task_struct * __maybe_unused t = current;
 279
 280	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 281}
 282
 283/*
 284 * Change backing file, only valid to use during initial VMA setup.
 285 */
 286void vma_set_file(struct vm_area_struct *vma, struct file *file)
 287{
 288	/* Changing an anonymous vma with this is illegal */
 289	get_file(file);
 290	swap(vma->vm_file, file);
 291	fput(file);
 292}
 293EXPORT_SYMBOL(vma_set_file);
 294
 295#ifndef STACK_RND_MASK
 296#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
 297#endif
 298
 299unsigned long randomize_stack_top(unsigned long stack_top)
 300{
 301	unsigned long random_variable = 0;
 302
 303	if (current->flags & PF_RANDOMIZE) {
 304		random_variable = get_random_long();
 305		random_variable &= STACK_RND_MASK;
 306		random_variable <<= PAGE_SHIFT;
 307	}
 308#ifdef CONFIG_STACK_GROWSUP
 309	return PAGE_ALIGN(stack_top) + random_variable;
 310#else
 311	return PAGE_ALIGN(stack_top) - random_variable;
 312#endif
 313}
 314
 315/**
 316 * randomize_page - Generate a random, page aligned address
 317 * @start:	The smallest acceptable address the caller will take.
 318 * @range:	The size of the area, starting at @start, within which the
 319 *		random address must fall.
 320 *
 321 * If @start + @range would overflow, @range is capped.
 322 *
 323 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 324 * @start was already page aligned.  We now align it regardless.
 325 *
 326 * Return: A page aligned address within [start, start + range).  On error,
 327 * @start is returned.
 328 */
 329unsigned long randomize_page(unsigned long start, unsigned long range)
 330{
 331	if (!PAGE_ALIGNED(start)) {
 332		range -= PAGE_ALIGN(start) - start;
 333		start = PAGE_ALIGN(start);
 334	}
 335
 336	if (start > ULONG_MAX - range)
 337		range = ULONG_MAX - start;
 338
 339	range >>= PAGE_SHIFT;
 340
 341	if (range == 0)
 342		return start;
 343
 344	return start + (get_random_long() % range << PAGE_SHIFT);
 345}
 346
 347#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 348unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
 349{
 350	/* Is the current task 32bit ? */
 351	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
 352		return randomize_page(mm->brk, SZ_32M);
 353
 354	return randomize_page(mm->brk, SZ_1G);
 355}
 356
 357unsigned long arch_mmap_rnd(void)
 358{
 359	unsigned long rnd;
 360
 361#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
 362	if (is_compat_task())
 363		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
 364	else
 365#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
 366		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 367
 368	return rnd << PAGE_SHIFT;
 369}
 370
 371static int mmap_is_legacy(struct rlimit *rlim_stack)
 372{
 373	if (current->personality & ADDR_COMPAT_LAYOUT)
 374		return 1;
 375
 376	if (rlim_stack->rlim_cur == RLIM_INFINITY)
 377		return 1;
 378
 379	return sysctl_legacy_va_layout;
 380}
 381
 382/*
 383 * Leave enough space between the mmap area and the stack to honour ulimit in
 384 * the face of randomisation.
 385 */
 386#define MIN_GAP		(SZ_128M)
 387#define MAX_GAP		(STACK_TOP / 6 * 5)
 388
 389static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 390{
 391	unsigned long gap = rlim_stack->rlim_cur;
 392	unsigned long pad = stack_guard_gap;
 393
 394	/* Account for stack randomization if necessary */
 395	if (current->flags & PF_RANDOMIZE)
 396		pad += (STACK_RND_MASK << PAGE_SHIFT);
 397
 398	/* Values close to RLIM_INFINITY can overflow. */
 399	if (gap + pad > gap)
 400		gap += pad;
 401
 402	if (gap < MIN_GAP)
 403		gap = MIN_GAP;
 404	else if (gap > MAX_GAP)
 405		gap = MAX_GAP;
 406
 407	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 408}
 409
 410void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 411{
 412	unsigned long random_factor = 0UL;
 413
 414	if (current->flags & PF_RANDOMIZE)
 415		random_factor = arch_mmap_rnd();
 416
 417	if (mmap_is_legacy(rlim_stack)) {
 418		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 419		mm->get_unmapped_area = arch_get_unmapped_area;
 420	} else {
 421		mm->mmap_base = mmap_base(random_factor, rlim_stack);
 422		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 423	}
 424}
 425#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 426void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 427{
 428	mm->mmap_base = TASK_UNMAPPED_BASE;
 429	mm->get_unmapped_area = arch_get_unmapped_area;
 430}
 431#endif
 432
 433/**
 434 * __account_locked_vm - account locked pages to an mm's locked_vm
 435 * @mm:          mm to account against
 436 * @pages:       number of pages to account
 437 * @inc:         %true if @pages should be considered positive, %false if not
 438 * @task:        task used to check RLIMIT_MEMLOCK
 439 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 440 *
 441 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 442 * that mmap_lock is held as writer.
 443 *
 444 * Return:
 445 * * 0       on success
 446 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 447 */
 448int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
 449			struct task_struct *task, bool bypass_rlim)
 450{
 451	unsigned long locked_vm, limit;
 452	int ret = 0;
 453
 454	mmap_assert_write_locked(mm);
 455
 456	locked_vm = mm->locked_vm;
 457	if (inc) {
 458		if (!bypass_rlim) {
 459			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 460			if (locked_vm + pages > limit)
 461				ret = -ENOMEM;
 462		}
 463		if (!ret)
 464			mm->locked_vm = locked_vm + pages;
 465	} else {
 466		WARN_ON_ONCE(pages > locked_vm);
 467		mm->locked_vm = locked_vm - pages;
 468	}
 469
 470	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
 471		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
 472		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
 473		 ret ? " - exceeded" : "");
 474
 475	return ret;
 476}
 477EXPORT_SYMBOL_GPL(__account_locked_vm);
 478
 479/**
 480 * account_locked_vm - account locked pages to an mm's locked_vm
 481 * @mm:          mm to account against, may be NULL
 482 * @pages:       number of pages to account
 483 * @inc:         %true if @pages should be considered positive, %false if not
 484 *
 485 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 486 *
 487 * Return:
 488 * * 0       on success, or if mm is NULL
 489 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 490 */
 491int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
 492{
 493	int ret;
 494
 495	if (pages == 0 || !mm)
 496		return 0;
 497
 498	mmap_write_lock(mm);
 499	ret = __account_locked_vm(mm, pages, inc, current,
 500				  capable(CAP_IPC_LOCK));
 501	mmap_write_unlock(mm);
 502
 503	return ret;
 504}
 505EXPORT_SYMBOL_GPL(account_locked_vm);
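Editor's note (illustration only): a driver that pins user pages typically charges them against RLIMIT_MEMLOCK with account_locked_vm() before pinning and uncharges on teardown. A simplified sketch, with the pinning itself elided and hypothetical names:

	static int demo_pin_pages(struct mm_struct *mm, unsigned long npages)
	{
		int ret = account_locked_vm(mm, npages, true);

		if (ret)
			return ret;	/* would exceed RLIMIT_MEMLOCK */
		/* ... pin the pages and use them ... */
		return 0;
	}

	static void demo_unpin_pages(struct mm_struct *mm, unsigned long npages)
	{
		/* ... unpin the pages ... */
		account_locked_vm(mm, npages, false);
	}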
 506
 507unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 508	unsigned long len, unsigned long prot,
 509	unsigned long flag, unsigned long pgoff)
 510{
 511	unsigned long ret;
 512	struct mm_struct *mm = current->mm;
 513	unsigned long populate;
 514	LIST_HEAD(uf);
 515
 516	ret = security_mmap_file(file, prot, flag);
 517	if (!ret) {
 518		if (mmap_write_lock_killable(mm))
 519			return -EINTR;
 520		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
 521			      &uf);
 522		mmap_write_unlock(mm);
 523		userfaultfd_unmap_complete(mm, &uf);
 524		if (populate)
 525			mm_populate(ret, populate);
 526	}
 527	return ret;
 528}
 529
 530unsigned long vm_mmap(struct file *file, unsigned long addr,
 531	unsigned long len, unsigned long prot,
 532	unsigned long flag, unsigned long offset)
 533{
 534	if (unlikely(offset + PAGE_ALIGN(len) < offset))
 535		return -EINVAL;
 536	if (unlikely(offset_in_page(offset)))
 537		return -EINVAL;
 538
 539	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 540}
 541EXPORT_SYMBOL(vm_mmap);
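Editor's note (illustration only): vm_mmap() lets in-kernel callers create a userspace mapping of a file they hold, with mmap()-like semantics; the return value encodes a negative errno on failure. A minimal sketch with a hypothetical caller:

	unsigned long demo_map_file(struct file *filp, size_t size)
	{
		unsigned long addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
					     MAP_SHARED, 0);

		if (IS_ERR_VALUE(addr))
			pr_debug("demo: mapping failed: %ld\n", (long)addr);
		return addr;
	}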
 542
 543/**
 544 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 545 * failure, fall back to non-contiguous (vmalloc) allocation.
 546 * @size: size of the request.
 547 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 548 * @node: numa node to allocate from
 549 *
 550 * Uses kmalloc to get the memory but if the allocation fails then falls back
 551 * to the vmalloc allocator. Use kvfree for freeing the memory.
 552 *
 553 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 554 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 555 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 556 *
  557 * Return: pointer to the allocated memory or %NULL in case of failure
 558 */
 559void *kvmalloc_node(size_t size, gfp_t flags, int node)
 560{
 561	gfp_t kmalloc_flags = flags;
 562	void *ret;
 563
 564	/*
 565	 * We want to attempt a large physically contiguous block first because
 566	 * it is less likely to fragment multiple larger blocks and therefore
 567	 * contribute to a long term fragmentation less than vmalloc fallback.
 568	 * However make sure that larger requests are not too disruptive - no
 569	 * OOM killer and no allocation failure warnings as we have a fallback.
 570	 */
 571	if (size > PAGE_SIZE) {
 572		kmalloc_flags |= __GFP_NOWARN;
 573
 574		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
 575			kmalloc_flags |= __GFP_NORETRY;
 576
 577		/* nofail semantic is implemented by the vmalloc fallback */
 578		kmalloc_flags &= ~__GFP_NOFAIL;
 579	}
 580
 581	ret = kmalloc_node(size, kmalloc_flags, node);
 582
 583	/*
 584	 * It doesn't really make sense to fallback to vmalloc for sub page
 585	 * requests
 586	 */
 587	if (ret || size <= PAGE_SIZE)
 588		return ret;
 589
 590	/* non-sleeping allocations are not supported by vmalloc */
 591	if (!gfpflags_allow_blocking(flags))
 592		return NULL;
 593
 594	/* Don't even allow crazy sizes */
 595	if (unlikely(size > INT_MAX)) {
 596		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
 597		return NULL;
 598	}
 599
 600	/*
 601	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
 602	 * since the callers already cannot assume anything
 603	 * about the resulting pointer, and cannot play
 604	 * protection games.
 605	 */
 606	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 607			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 608			node, __builtin_return_address(0));
 609}
 610EXPORT_SYMBOL(kvmalloc_node);
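Editor's note (illustration only): the common kvmalloc()/kvfree() pattern for a table whose size depends on input that may be large; the allocation is physically contiguous when possible, vmalloc-backed otherwise, and there is a single free path either way. Hypothetical names:

	struct demo_entry {
		u64 key;
		u64 val;
	};

	static struct demo_entry *demo_alloc_table(size_t nr)
	{
		/* overflow-checked; free with kvfree() */
		return kvmalloc_array(nr, sizeof(struct demo_entry),
				      GFP_KERNEL | __GFP_ZERO);
	}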
 611
 612/**
 613 * kvfree() - Free memory.
 614 * @addr: Pointer to allocated memory.
 615 *
 616 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 617 * It is slightly more efficient to use kfree() or vfree() if you are certain
 618 * that you know which one to use.
 619 *
 620 * Context: Either preemptible task context or not-NMI interrupt.
 621 */
 622void kvfree(const void *addr)
 623{
 624	if (is_vmalloc_addr(addr))
 625		vfree(addr);
 626	else
 627		kfree(addr);
 628}
 629EXPORT_SYMBOL(kvfree);
 630
 631/**
 632 * kvfree_sensitive - Free a data object containing sensitive information.
 633 * @addr: address of the data object to be freed.
 634 * @len: length of the data object.
 635 *
 636 * Use the special memzero_explicit() function to clear the content of a
 637 * kvmalloc'ed object containing sensitive data to make sure that the
 638 * compiler won't optimize out the data clearing.
 639 */
 640void kvfree_sensitive(const void *addr, size_t len)
 641{
 642	if (likely(!ZERO_OR_NULL_PTR(addr))) {
 643		memzero_explicit((void *)addr, len);
 644		kvfree(addr);
 645	}
 646}
 647EXPORT_SYMBOL(kvfree_sensitive);
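Editor's note (illustration only): a buffer holding key material can be freed with kvfree_sensitive() so its contents are cleared before the memory is returned, regardless of whether it came from kmalloc() or vmalloc(). A sketch with hypothetical names:

	static int demo_handle_key(const void __user *ukey, size_t len)
	{
		void *key = memdup_user(ukey, len);
		int ret;

		if (IS_ERR(key))
			return PTR_ERR(key);
		ret = demo_use_key(key, len);	/* hypothetical consumer */
		kvfree_sensitive(key, len);	/* zeroed, then freed */
		return ret;
	}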
 648
 649void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 650{
 651	void *newp;
 652
 653	if (oldsize >= newsize)
 654		return (void *)p;
 655	newp = kvmalloc(newsize, flags);
 656	if (!newp)
 657		return NULL;
 658	memcpy(newp, p, oldsize);
 659	kvfree(p);
 660	return newp;
 661}
 662EXPORT_SYMBOL(kvrealloc);
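Editor's note (illustration only): unlike krealloc(), kvrealloc() takes the old size explicitly, and on failure the old buffer is left untouched. A growing-buffer sketch with hypothetical names:

	static void *demo_grow(void *buf, size_t *size, size_t need)
	{
		void *nbuf;

		if (need <= *size)
			return buf;
		nbuf = kvrealloc(buf, *size, need, GFP_KERNEL);
		if (!nbuf)
			return NULL;	/* old buffer still valid and owned by caller */
		*size = need;
		return nbuf;
	}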
 663
 664/**
 665 * __vmalloc_array - allocate memory for a virtually contiguous array.
 666 * @n: number of elements.
 667 * @size: element size.
 668 * @flags: the type of memory to allocate (see kmalloc).
 669 */
 670void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
 671{
 672	size_t bytes;
 673
 674	if (unlikely(check_mul_overflow(n, size, &bytes)))
 675		return NULL;
 676	return __vmalloc(bytes, flags);
 677}
 678EXPORT_SYMBOL(__vmalloc_array);
 679
 680/**
 681 * vmalloc_array - allocate memory for a virtually contiguous array.
 682 * @n: number of elements.
 683 * @size: element size.
 684 */
 685void *vmalloc_array(size_t n, size_t size)
 686{
 687	return __vmalloc_array(n, size, GFP_KERNEL);
 688}
 689EXPORT_SYMBOL(vmalloc_array);
 690
 691/**
 692 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 693 * @n: number of elements.
 694 * @size: element size.
 695 * @flags: the type of memory to allocate (see kmalloc).
 696 */
 697void *__vcalloc(size_t n, size_t size, gfp_t flags)
 698{
 699	return __vmalloc_array(n, size, flags | __GFP_ZERO);
 700}
 701EXPORT_SYMBOL(__vcalloc);
 702
 703/**
 704 * vcalloc - allocate and zero memory for a virtually contiguous array.
 705 * @n: number of elements.
 706 * @size: element size.
 707 */
 708void *vcalloc(size_t n, size_t size)
 709{
 710	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
 711}
 712EXPORT_SYMBOL(vcalloc);
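Editor's note (illustration only): vmalloc_array()/vcalloc() fold the multiplication-overflow check into the allocator, so callers pass element count and size separately instead of open-coding n * size. For example (hypothetical helper, freed with vfree()):

	static u32 *demo_alloc_counters(size_t nr_entries)
	{
		/* nr_entries may be large or untrusted; overflow yields NULL */
		return vcalloc(nr_entries, sizeof(u32));
	}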
 713
 714/* Neutral page->mapping pointer to address_space or anon_vma or other */
 715void *page_rmapping(struct page *page)
 716{
 717	return folio_raw_mapping(page_folio(page));
 718}
 719
 720struct anon_vma *folio_anon_vma(struct folio *folio)
 721{
 722	unsigned long mapping = (unsigned long)folio->mapping;
 723
 724	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 725		return NULL;
 726	return (void *)(mapping - PAGE_MAPPING_ANON);
 727}
 728
 729/**
 730 * folio_mapping - Find the mapping where this folio is stored.
 731 * @folio: The folio.
 732 *
 733 * For folios which are in the page cache, return the mapping that this
 734 * page belongs to.  Folios in the swap cache return the swap mapping
 735 * this page is stored in (which is different from the mapping for the
 736 * swap file or swap device where the data is stored).
 737 *
 738 * You can call this for folios which aren't in the swap cache or page
 739 * cache and it will return NULL.
 740 */
 741struct address_space *folio_mapping(struct folio *folio)
 742{
 743	struct address_space *mapping;
 744
 745	/* This happens if someone calls flush_dcache_page on slab page */
 746	if (unlikely(folio_test_slab(folio)))
 747		return NULL;
 748
 749	if (unlikely(folio_test_swapcache(folio)))
 750		return swap_address_space(folio_swap_entry(folio));
 751
 752	mapping = folio->mapping;
 753	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
 754		return NULL;
 755
 756	return mapping;
 757}
 758EXPORT_SYMBOL(folio_mapping);
 759
 760/**
 761 * folio_copy - Copy the contents of one folio to another.
 762 * @dst: Folio to copy to.
 763 * @src: Folio to copy from.
 764 *
 765 * The bytes in the folio represented by @src are copied to @dst.
 766 * Assumes the caller has validated that @dst is at least as large as @src.
 767 * Can be called in atomic context for order-0 folios, but if the folio is
 768 * larger, it may sleep.
 769 */
 770void folio_copy(struct folio *dst, struct folio *src)
 771{
 772	long i = 0;
 773	long nr = folio_nr_pages(src);
 774
 775	for (;;) {
 776		copy_highpage(folio_page(dst, i), folio_page(src, i));
 777		if (++i == nr)
 778			break;
 779		cond_resched();
 780	}
 781}
 782
 783int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 784int sysctl_overcommit_ratio __read_mostly = 50;
 785unsigned long sysctl_overcommit_kbytes __read_mostly;
 786int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 787unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 788unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 789
 790int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
 791		size_t *lenp, loff_t *ppos)
 792{
 793	int ret;
 794
 795	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 796	if (ret == 0 && write)
 797		sysctl_overcommit_kbytes = 0;
 798	return ret;
 799}
 800
 801static void sync_overcommit_as(struct work_struct *dummy)
 802{
 803	percpu_counter_sync(&vm_committed_as);
 804}
 805
 806int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 807		size_t *lenp, loff_t *ppos)
 808{
 809	struct ctl_table t;
 810	int new_policy = -1;
 811	int ret;
 812
 813	/*
 814	 * The deviation of sync_overcommit_as could be big with loose policy
 815	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
 816	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
  817	 * with the strict "NEVER", and to avoid a possible race condition (even
  818	 * though the user usually won't switch to OVERCOMMIT_NEVER very often),
  819	 * the switch is done in the following order:
 820	 *	1. changing the batch
 821	 *	2. sync percpu count on each CPU
 822	 *	3. switch the policy
 823	 */
 824	if (write) {
 825		t = *table;
 826		t.data = &new_policy;
 827		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
 828		if (ret || new_policy == -1)
 829			return ret;
 830
 831		mm_compute_batch(new_policy);
 832		if (new_policy == OVERCOMMIT_NEVER)
 833			schedule_on_each_cpu(sync_overcommit_as);
 834		sysctl_overcommit_memory = new_policy;
 835	} else {
 836		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 837	}
 838
 839	return ret;
 840}
 841
 842int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
 843		size_t *lenp, loff_t *ppos)
 844{
 845	int ret;
 846
 847	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 848	if (ret == 0 && write)
 849		sysctl_overcommit_ratio = 0;
 850	return ret;
 851}
 852
 853/*
 854 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 855 */
 856unsigned long vm_commit_limit(void)
 857{
 858	unsigned long allowed;
 859
 860	if (sysctl_overcommit_kbytes)
 861		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 862	else
 863		allowed = ((totalram_pages() - hugetlb_total_pages())
 864			   * sysctl_overcommit_ratio / 100);
 865	allowed += total_swap_pages;
 866
 867	return allowed;
 868}
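Worked example (editor's illustration): with 16 GiB of RAM, no hugetlb pages, the default overcommit_ratio of 50 and 8 GiB of swap, OVERCOMMIT_NEVER permits roughly 16 GiB * 50 / 100 + 8 GiB = 16 GiB of committed address space. Setting overcommit_kbytes replaces the ratio-based term with an absolute value.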
 869
 870/*
  871 * Make sure vm_committed_as is in its own cacheline, not shared with other
  872 * variables, since it can be updated frequently by several CPUs.
 873 */
 874struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
 875
 876/*
 877 * The global memory commitment made in the system can be a metric
 878 * that can be used to drive ballooning decisions when Linux is hosted
 879 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 880 * balancing memory across competing virtual machines that are hosted.
 881 * Several metrics drive this policy engine including the guest reported
 882 * memory commitment.
 883 *
 884 * The time cost of this is very low for small platforms, and for big
 885 * platform like a 2S/36C/72T Skylake server, in worst case where
 886 * vm_committed_as's spinlock is under severe contention, the time cost
 887 * could be about 30~40 microseconds.
 888 */
 889unsigned long vm_memory_committed(void)
 890{
 891	return percpu_counter_sum_positive(&vm_committed_as);
 892}
 893EXPORT_SYMBOL_GPL(vm_memory_committed);
 894
 895/*
 896 * Check that a process has enough memory to allocate a new virtual
 897 * mapping. 0 means there is enough memory for the allocation to
 898 * succeed and -ENOMEM implies there is not.
 899 *
 900 * We currently support three overcommit policies, which are set via the
 901 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 902 *
 903 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 904 * Additional code 2002 Jul 20 by Robert Love.
 905 *
 906 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 907 *
 908 * Note this is a helper function intended to be used by LSMs which
 909 * wish to use this logic.
 910 */
 911int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 912{
 913	long allowed;
 914
 915	vm_acct_memory(pages);
 916
 917	/*
 918	 * Sometimes we want to use more memory than we have
 919	 */
 920	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
 921		return 0;
 922
 923	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 924		if (pages > totalram_pages() + total_swap_pages)
 925			goto error;
 926		return 0;
 927	}
 928
 929	allowed = vm_commit_limit();
 930	/*
 931	 * Reserve some for root
 932	 */
 933	if (!cap_sys_admin)
 934		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
 935
 936	/*
 937	 * Don't let a single process grow so big a user can't recover
 938	 */
 939	if (mm) {
 940		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
 941
 942		allowed -= min_t(long, mm->total_vm / 32, reserve);
 943	}
 944
 945	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
 946		return 0;
 947error:
  948	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
 949			    __func__, current->pid, current->comm);
 950	vm_unacct_memory(pages);
 951
 952	return -ENOMEM;
 953}
 954
 955/**
 956 * get_cmdline() - copy the cmdline value to a buffer.
 957 * @task:     the task whose cmdline value to copy.
 958 * @buffer:   the buffer to copy to.
 959 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 960 *            to this length.
 961 *
 962 * Return: the size of the cmdline field copied. Note that the copy does
 963 * not guarantee an ending NULL byte.
 964 */
 965int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 966{
 967	int res = 0;
 968	unsigned int len;
 969	struct mm_struct *mm = get_task_mm(task);
 970	unsigned long arg_start, arg_end, env_start, env_end;
 971	if (!mm)
 972		goto out;
 973	if (!mm->arg_end)
 974		goto out_mm;	/* Shh! No looking before we're done */
 975
 976	spin_lock(&mm->arg_lock);
 977	arg_start = mm->arg_start;
 978	arg_end = mm->arg_end;
 979	env_start = mm->env_start;
 980	env_end = mm->env_end;
 981	spin_unlock(&mm->arg_lock);
 982
 983	len = arg_end - arg_start;
 984
 985	if (len > buflen)
 986		len = buflen;
 987
 988	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 989
 990	/*
 991	 * If the nul at the end of args has been overwritten, then
 992	 * assume application is using setproctitle(3).
 993	 */
 994	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
 995		len = strnlen(buffer, res);
 996		if (len < res) {
 997			res = len;
 998		} else {
 999			len = env_end - env_start;
1000			if (len > buflen - res)
1001				len = buflen - res;
1002			res += access_process_vm(task, env_start,
1003						 buffer+res, len,
1004						 FOLL_FORCE);
1005			res = strnlen(buffer, res);
1006		}
1007	}
1008out_mm:
1009	mmput(mm);
1010out:
1011	return res;
1012}
1013
1014int __weak memcmp_pages(struct page *page1, struct page *page2)
1015{
1016	char *addr1, *addr2;
1017	int ret;
1018
1019	addr1 = kmap_atomic(page1);
1020	addr2 = kmap_atomic(page2);
1021	ret = memcmp(addr1, addr2, PAGE_SIZE);
1022	kunmap_atomic(addr2);
1023	kunmap_atomic(addr1);
1024	return ret;
1025}
1026
1027#ifdef CONFIG_PRINTK
1028/**
1029 * mem_dump_obj - Print available provenance information
1030 * @object: object for which to find provenance information.
1031 *
1032 * This function uses pr_cont(), so that the caller is expected to have
1033 * printed out whatever preamble is appropriate.  The provenance information
1034 * depends on the type of object and on how much debugging is enabled.
1035 * For example, for a slab-cache object, the slab name is printed, and,
1036 * if available, the return address and stack trace from the allocation
1037 * and last free path of that object.
1038 */
1039void mem_dump_obj(void *object)
1040{
1041	const char *type;
1042
1043	if (kmem_valid_obj(object)) {
1044		kmem_dump_obj(object);
1045		return;
1046	}
1047
1048	if (vmalloc_dump_obj(object))
1049		return;
1050
1051	if (virt_addr_valid(object))
1052		type = "non-slab/vmalloc memory";
1053	else if (object == NULL)
1054		type = "NULL pointer";
1055	else if (object == ZERO_SIZE_PTR)
1056		type = "zero-size pointer";
1057	else
1058		type = "non-paged memory";
1059
1060	pr_cont(" %s\n", type);
1061}
1062EXPORT_SYMBOL_GPL(mem_dump_obj);
1063#endif
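Editor's note (illustration only): mem_dump_obj() continues output on the caller's line via pr_cont(), so a debug site prints its own preamble first (no trailing newline). A hypothetical helper:

	static void demo_report_bad_object(void *obj)
	{
		pr_err("demo: unexpected object %px:", obj);
		mem_dump_obj(obj);	/* appends provenance via pr_cont() */
	}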
1064
1065/*
1066 * A driver might set a page logically offline -- PageOffline() -- and
1067 * turn the page inaccessible in the hypervisor; after that, access to page
1068 * content can be fatal.
1069 *
1070 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
1071 * pages after checking PageOffline(); however, these PFN walkers can race
1072 * with drivers that set PageOffline().
1073 *
1074 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
1075 * synchronize with such drivers, achieving that a page cannot be set
1076 * PageOffline() while frozen.
1077 *
1078 * page_offline_begin()/page_offline_end() is used by drivers that care about
1079 * such races when setting a page PageOffline().
1080 */
1081static DECLARE_RWSEM(page_offline_rwsem);
1082
1083void page_offline_freeze(void)
1084{
1085	down_read(&page_offline_rwsem);
1086}
1087
1088void page_offline_thaw(void)
1089{
1090	up_read(&page_offline_rwsem);
1091}
1092
1093void page_offline_begin(void)
1094{
1095	down_write(&page_offline_rwsem);
1096}
1097EXPORT_SYMBOL(page_offline_begin);
1098
1099void page_offline_end(void)
1100{
1101	up_write(&page_offline_rwsem);
1102}
1103EXPORT_SYMBOL(page_offline_end);
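Editor's note (illustration only): the two halves of this API pair up: a PFN walker brackets its reads with page_offline_freeze()/page_offline_thaw(), while a driver marking pages offline brackets the transition with page_offline_begin()/page_offline_end(). A simplified reader-side sketch with a hypothetical helper:

	static void demo_inspect_page(struct page *page)
	{
		page_offline_freeze();	/* no page can newly become PageOffline() */
		if (!PageOffline(page))
			demo_read_page_contents(page);	/* hypothetical */
		page_offline_thaw();
	}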
1104
1105#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
1106void flush_dcache_folio(struct folio *folio)
1107{
1108	long i, nr = folio_nr_pages(folio);
1109
1110	for (i = 0; i < nr; i++)
1111		flush_dcache_page(folio_page(folio, i));
1112}
1113EXPORT_SYMBOL(flush_dcache_folio);
1114#endif
mm/util.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include <linux/mm.h>
  3#include <linux/slab.h>
  4#include <linux/string.h>
  5#include <linux/compiler.h>
  6#include <linux/export.h>
  7#include <linux/err.h>
  8#include <linux/sched.h>
  9#include <linux/sched/mm.h>
 10#include <linux/sched/signal.h>
 11#include <linux/sched/task_stack.h>
 12#include <linux/security.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/mman.h>
 16#include <linux/hugetlb.h>
 17#include <linux/vmalloc.h>
 18#include <linux/userfaultfd_k.h>
 19#include <linux/elf.h>
 20#include <linux/elf-randomize.h>
 21#include <linux/personality.h>
 22#include <linux/random.h>
 23#include <linux/processor.h>
 24#include <linux/sizes.h>
 25#include <linux/compat.h>
 26
 27#include <linux/uaccess.h>
 28
 29#include "internal.h"
 30
 31/**
 32 * kfree_const - conditionally free memory
 33 * @x: pointer to the memory
 34 *
 35 * Function calls kfree only if @x is not in .rodata section.
 36 */
 37void kfree_const(const void *x)
 38{
 39	if (!is_kernel_rodata((unsigned long)x))
 40		kfree(x);
 41}
 42EXPORT_SYMBOL(kfree_const);
 43
 44/**
 45 * kstrdup - allocate space for and copy an existing string
 46 * @s: the string to duplicate
 47 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 48 *
 49 * Return: newly allocated copy of @s or %NULL in case of error
 50 */
 51char *kstrdup(const char *s, gfp_t gfp)
 52{
 53	size_t len;
 54	char *buf;
 55
 56	if (!s)
 57		return NULL;
 58
 59	len = strlen(s) + 1;
 60	buf = kmalloc_track_caller(len, gfp);
 61	if (buf)
 62		memcpy(buf, s, len);
 63	return buf;
 64}
 65EXPORT_SYMBOL(kstrdup);
 66
 67/**
 68 * kstrdup_const - conditionally duplicate an existing const string
 69 * @s: the string to duplicate
 70 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 71 *
 72 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 73 *
 74 * Return: source string if it is in .rodata section otherwise
 75 * fallback to kstrdup.
 76 */
 77const char *kstrdup_const(const char *s, gfp_t gfp)
 78{
 79	if (is_kernel_rodata((unsigned long)s))
 80		return s;
 81
 82	return kstrdup(s, gfp);
 83}
 84EXPORT_SYMBOL(kstrdup_const);
 85
 86/**
 87 * kstrndup - allocate space for and copy an existing string
 88 * @s: the string to duplicate
 89 * @max: read at most @max chars from @s
 90 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 91 *
 92 * Note: Use kmemdup_nul() instead if the size is known exactly.
 93 *
 94 * Return: newly allocated copy of @s or %NULL in case of error
 95 */
 96char *kstrndup(const char *s, size_t max, gfp_t gfp)
 97{
 98	size_t len;
 99	char *buf;
100
101	if (!s)
102		return NULL;
103
104	len = strnlen(s, max);
105	buf = kmalloc_track_caller(len+1, gfp);
106	if (buf) {
107		memcpy(buf, s, len);
108		buf[len] = '\0';
109	}
110	return buf;
111}
112EXPORT_SYMBOL(kstrndup);
113
114/**
115 * kmemdup - duplicate region of memory
116 *
117 * @src: memory region to duplicate
118 * @len: memory region length
119 * @gfp: GFP mask to use
120 *
121 * Return: newly allocated copy of @src or %NULL in case of error
122 */
123void *kmemdup(const void *src, size_t len, gfp_t gfp)
124{
125	void *p;
126
127	p = kmalloc_track_caller(len, gfp);
128	if (p)
129		memcpy(p, src, len);
130	return p;
131}
132EXPORT_SYMBOL(kmemdup);
133
134/**
135 * kmemdup_nul - Create a NUL-terminated string from unterminated data
136 * @s: The data to stringify
137 * @len: The size of the data
138 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
139 *
140 * Return: newly allocated copy of @s with NUL-termination or %NULL in
141 * case of error
142 */
143char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
144{
145	char *buf;
146
147	if (!s)
148		return NULL;
149
150	buf = kmalloc_track_caller(len + 1, gfp);
151	if (buf) {
152		memcpy(buf, s, len);
153		buf[len] = '\0';
154	}
155	return buf;
156}
157EXPORT_SYMBOL(kmemdup_nul);
158
159/**
160 * memdup_user - duplicate memory region from user space
161 *
162 * @src: source address in user space
163 * @len: number of bytes to copy
164 *
165 * Return: an ERR_PTR() on failure.  Result is physically
166 * contiguous, to be freed by kfree().
167 */
168void *memdup_user(const void __user *src, size_t len)
169{
170	void *p;
171
172	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
173	if (!p)
174		return ERR_PTR(-ENOMEM);
175
176	if (copy_from_user(p, src, len)) {
177		kfree(p);
178		return ERR_PTR(-EFAULT);
179	}
180
181	return p;
182}
183EXPORT_SYMBOL(memdup_user);
184
185/**
186 * vmemdup_user - duplicate memory region from user space
187 *
188 * @src: source address in user space
189 * @len: number of bytes to copy
190 *
 191 * Return: an ERR_PTR() on failure.  Result may not be
192 * physically contiguous.  Use kvfree() to free.
193 */
194void *vmemdup_user(const void __user *src, size_t len)
195{
196	void *p;
197
198	p = kvmalloc(len, GFP_USER);
199	if (!p)
200		return ERR_PTR(-ENOMEM);
201
202	if (copy_from_user(p, src, len)) {
203		kvfree(p);
204		return ERR_PTR(-EFAULT);
205	}
206
207	return p;
208}
209EXPORT_SYMBOL(vmemdup_user);
210
211/**
212 * strndup_user - duplicate an existing string from user space
213 * @s: The string to duplicate
214 * @n: Maximum number of bytes to copy, including the trailing NUL.
215 *
216 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
217 */
218char *strndup_user(const char __user *s, long n)
219{
220	char *p;
221	long length;
222
223	length = strnlen_user(s, n);
224
225	if (!length)
226		return ERR_PTR(-EFAULT);
227
228	if (length > n)
229		return ERR_PTR(-EINVAL);
230
231	p = memdup_user(s, length);
232
233	if (IS_ERR(p))
234		return p;
235
236	p[length - 1] = '\0';
237
238	return p;
239}
240EXPORT_SYMBOL(strndup_user);
241
242/**
243 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
244 *
245 * @src: source address in user space
246 * @len: number of bytes to copy
247 *
248 * Return: an ERR_PTR() on failure.
249 */
250void *memdup_user_nul(const void __user *src, size_t len)
251{
252	char *p;
253
254	/*
255	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
256	 * cause pagefault, which makes it pointless to use GFP_NOFS
257	 * or GFP_ATOMIC.
258	 */
259	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
260	if (!p)
261		return ERR_PTR(-ENOMEM);
262
263	if (copy_from_user(p, src, len)) {
264		kfree(p);
265		return ERR_PTR(-EFAULT);
266	}
267	p[len] = '\0';
268
269	return p;
270}
271EXPORT_SYMBOL(memdup_user_nul);
272
273void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
274		struct vm_area_struct *prev)
275{
276	struct vm_area_struct *next;
277
278	vma->vm_prev = prev;
279	if (prev) {
280		next = prev->vm_next;
281		prev->vm_next = vma;
282	} else {
283		next = mm->mmap;
284		mm->mmap = vma;
285	}
286	vma->vm_next = next;
287	if (next)
288		next->vm_prev = vma;
289}
290
291void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
292{
293	struct vm_area_struct *prev, *next;
294
295	next = vma->vm_next;
296	prev = vma->vm_prev;
297	if (prev)
298		prev->vm_next = next;
299	else
300		mm->mmap = next;
301	if (next)
302		next->vm_prev = prev;
303}
304
305/* Check if the vma is being used as a stack by this task */
306int vma_is_stack_for_current(struct vm_area_struct *vma)
307{
308	struct task_struct * __maybe_unused t = current;
309
310	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
311}
312
313#ifndef STACK_RND_MASK
314#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
315#endif
316
317unsigned long randomize_stack_top(unsigned long stack_top)
318{
319	unsigned long random_variable = 0;
320
321	if (current->flags & PF_RANDOMIZE) {
322		random_variable = get_random_long();
323		random_variable &= STACK_RND_MASK;
324		random_variable <<= PAGE_SHIFT;
325	}
326#ifdef CONFIG_STACK_GROWSUP
327	return PAGE_ALIGN(stack_top) + random_variable;
328#else
329	return PAGE_ALIGN(stack_top) - random_variable;
330#endif
331}
332
333#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
334unsigned long arch_randomize_brk(struct mm_struct *mm)
335{
336	/* Is the current task 32bit ? */
337	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
338		return randomize_page(mm->brk, SZ_32M);
339
340	return randomize_page(mm->brk, SZ_1G);
341}
342
343unsigned long arch_mmap_rnd(void)
344{
345	unsigned long rnd;
346
347#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
348	if (is_compat_task())
349		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
350	else
351#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
352		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
353
354	return rnd << PAGE_SHIFT;
355}
356
357static int mmap_is_legacy(struct rlimit *rlim_stack)
358{
359	if (current->personality & ADDR_COMPAT_LAYOUT)
360		return 1;
361
362	if (rlim_stack->rlim_cur == RLIM_INFINITY)
363		return 1;
364
365	return sysctl_legacy_va_layout;
366}
367
368/*
369 * Leave enough space between the mmap area and the stack to honour ulimit in
370 * the face of randomisation.
371 */
372#define MIN_GAP		(SZ_128M)
373#define MAX_GAP		(STACK_TOP / 6 * 5)
374
375static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
376{
377	unsigned long gap = rlim_stack->rlim_cur;
378	unsigned long pad = stack_guard_gap;
379
380	/* Account for stack randomization if necessary */
381	if (current->flags & PF_RANDOMIZE)
382		pad += (STACK_RND_MASK << PAGE_SHIFT);
383
384	/* Values close to RLIM_INFINITY can overflow. */
385	if (gap + pad > gap)
386		gap += pad;
387
388	if (gap < MIN_GAP)
389		gap = MIN_GAP;
390	else if (gap > MAX_GAP)
391		gap = MAX_GAP;
392
393	return PAGE_ALIGN(STACK_TOP - gap - rnd);
394}
395
396void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
397{
398	unsigned long random_factor = 0UL;
399
400	if (current->flags & PF_RANDOMIZE)
401		random_factor = arch_mmap_rnd();
402
403	if (mmap_is_legacy(rlim_stack)) {
404		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
405		mm->get_unmapped_area = arch_get_unmapped_area;
406	} else {
407		mm->mmap_base = mmap_base(random_factor, rlim_stack);
408		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
409	}
410}
411#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
412void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
413{
414	mm->mmap_base = TASK_UNMAPPED_BASE;
415	mm->get_unmapped_area = arch_get_unmapped_area;
416}
417#endif
418
419/**
420 * __account_locked_vm - account locked pages to an mm's locked_vm
421 * @mm:          mm to account against
422 * @pages:       number of pages to account
423 * @inc:         %true if @pages should be considered positive, %false if not
424 * @task:        task used to check RLIMIT_MEMLOCK
425 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
426 *
427 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
428 * that mmap_lock is held as writer.
429 *
430 * Return:
431 * * 0       on success
432 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
433 */
434int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
435			struct task_struct *task, bool bypass_rlim)
436{
437	unsigned long locked_vm, limit;
438	int ret = 0;
439
440	mmap_assert_write_locked(mm);
441
442	locked_vm = mm->locked_vm;
443	if (inc) {
444		if (!bypass_rlim) {
445			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
446			if (locked_vm + pages > limit)
447				ret = -ENOMEM;
448		}
449		if (!ret)
450			mm->locked_vm = locked_vm + pages;
451	} else {
452		WARN_ON_ONCE(pages > locked_vm);
453		mm->locked_vm = locked_vm - pages;
454	}
455
456	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
457		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
458		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
459		 ret ? " - exceeded" : "");
460
461	return ret;
462}
463EXPORT_SYMBOL_GPL(__account_locked_vm);
464
465/**
466 * account_locked_vm - account locked pages to an mm's locked_vm
467 * @mm:          mm to account against, may be NULL
468 * @pages:       number of pages to account
469 * @inc:         %true if @pages should be considered positive, %false if not
470 *
471 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
472 *
473 * Return:
474 * * 0       on success, or if mm is NULL
475 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
476 */
477int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
478{
479	int ret;
480
481	if (pages == 0 || !mm)
482		return 0;
483
484	mmap_write_lock(mm);
485	ret = __account_locked_vm(mm, pages, inc, current,
486				  capable(CAP_IPC_LOCK));
487	mmap_write_unlock(mm);
488
489	return ret;
490}
491EXPORT_SYMBOL_GPL(account_locked_vm);
492
493unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
494	unsigned long len, unsigned long prot,
495	unsigned long flag, unsigned long pgoff)
496{
497	unsigned long ret;
498	struct mm_struct *mm = current->mm;
499	unsigned long populate;
500	LIST_HEAD(uf);
501
502	ret = security_mmap_file(file, prot, flag);
503	if (!ret) {
504		if (mmap_write_lock_killable(mm))
505			return -EINTR;
506		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
507			      &uf);
508		mmap_write_unlock(mm);
509		userfaultfd_unmap_complete(mm, &uf);
510		if (populate)
511			mm_populate(ret, populate);
512	}
513	return ret;
514}
515
516unsigned long vm_mmap(struct file *file, unsigned long addr,
517	unsigned long len, unsigned long prot,
518	unsigned long flag, unsigned long offset)
519{
520	if (unlikely(offset + PAGE_ALIGN(len) < offset))
521		return -EINVAL;
522	if (unlikely(offset_in_page(offset)))
523		return -EINVAL;
524
525	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
526}
527EXPORT_SYMBOL(vm_mmap);
528
529/**
530 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
531 * failure, fall back to non-contiguous (vmalloc) allocation.
532 * @size: size of the request.
533 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
534 * @node: numa node to allocate from
535 *
536 * Uses kmalloc to get the memory but if the allocation fails then falls back
537 * to the vmalloc allocator. Use kvfree for freeing the memory.
538 *
539 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
540 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
541 * preferable to the vmalloc fallback, due to visible performance drawbacks.
542 *
543 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
544 * fall back to vmalloc.
545 *
 546 * Return: pointer to the allocated memory or %NULL in case of failure
547 */
548void *kvmalloc_node(size_t size, gfp_t flags, int node)
549{
550	gfp_t kmalloc_flags = flags;
551	void *ret;
552
553	/*
554	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
555	 * so the given set of flags has to be compatible.
556	 */
557	if ((flags & GFP_KERNEL) != GFP_KERNEL)
558		return kmalloc_node(size, flags, node);
559
560	/*
561	 * We want to attempt a large physically contiguous block first because
562	 * it is less likely to fragment multiple larger blocks and therefore
563	 * contribute to a long term fragmentation less than vmalloc fallback.
564	 * However make sure that larger requests are not too disruptive - no
565	 * OOM killer and no allocation failure warnings as we have a fallback.
566	 */
567	if (size > PAGE_SIZE) {
568		kmalloc_flags |= __GFP_NOWARN;
569
570		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
571			kmalloc_flags |= __GFP_NORETRY;
572	}
573
574	ret = kmalloc_node(size, kmalloc_flags, node);
575
576	/*
577	 * It doesn't really make sense to fallback to vmalloc for sub page
578	 * requests
579	 */
580	if (ret || size <= PAGE_SIZE)
581		return ret;
582
583	return __vmalloc_node(size, 1, flags, node,
584			__builtin_return_address(0));
585}
586EXPORT_SYMBOL(kvmalloc_node);
587
588/**
589 * kvfree() - Free memory.
590 * @addr: Pointer to allocated memory.
591 *
592 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
593 * It is slightly more efficient to use kfree() or vfree() if you are certain
594 * that you know which one to use.
595 *
596 * Context: Either preemptible task context or not-NMI interrupt.
597 */
598void kvfree(const void *addr)
599{
600	if (is_vmalloc_addr(addr))
601		vfree(addr);
602	else
603		kfree(addr);
604}
605EXPORT_SYMBOL(kvfree);
606
607/**
608 * kvfree_sensitive - Free a data object containing sensitive information.
609 * @addr: address of the data object to be freed.
610 * @len: length of the data object.
611 *
612 * Use the special memzero_explicit() function to clear the content of a
613 * kvmalloc'ed object containing sensitive data to make sure that the
614 * compiler won't optimize out the data clearing.
615 */
616void kvfree_sensitive(const void *addr, size_t len)
617{
618	if (likely(!ZERO_OR_NULL_PTR(addr))) {
619		memzero_explicit((void *)addr, len);
620		kvfree(addr);
621	}
622}
623EXPORT_SYMBOL(kvfree_sensitive);
624
625static inline void *__page_rmapping(struct page *page)
626{
627	unsigned long mapping;
628
629	mapping = (unsigned long)page->mapping;
630	mapping &= ~PAGE_MAPPING_FLAGS;
631
632	return (void *)mapping;
633}
634
635/* Neutral page->mapping pointer to address_space or anon_vma or other */
636void *page_rmapping(struct page *page)
637{
638	page = compound_head(page);
639	return __page_rmapping(page);
640}
641
642/*
643 * Return true if this page is mapped into pagetables.
644 * For compound page it returns true if any subpage of compound page is mapped.
645 */
646bool page_mapped(struct page *page)
647{
648	int i;
649
650	if (likely(!PageCompound(page)))
651		return atomic_read(&page->_mapcount) >= 0;
652	page = compound_head(page);
653	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
654		return true;
655	if (PageHuge(page))
656		return false;
657	for (i = 0; i < compound_nr(page); i++) {
658		if (atomic_read(&page[i]._mapcount) >= 0)
659			return true;
660	}
661	return false;
662}
663EXPORT_SYMBOL(page_mapped);
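Editor's note (illustration only, against the v5.9 API above): page_mapped() reports whether any part of the page, including any subpage of a compound page, is mapped into page tables; a caller doing teardown-style work might simply bail out while that is the case:

	if (page_mapped(page))
		return -EBUSY;	/* still mapped by some process */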
664
665struct anon_vma *page_anon_vma(struct page *page)
666{
667	unsigned long mapping;
668
669	page = compound_head(page);
670	mapping = (unsigned long)page->mapping;
671	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
672		return NULL;
673	return __page_rmapping(page);
674}
675
676struct address_space *page_mapping(struct page *page)
677{
678	struct address_space *mapping;
679
680	page = compound_head(page);
681
682	/* This happens if someone calls flush_dcache_page on slab page */
683	if (unlikely(PageSlab(page)))
684		return NULL;
685
686	if (unlikely(PageSwapCache(page))) {
687		swp_entry_t entry;
688
689		entry.val = page_private(page);
690		return swap_address_space(entry);
691	}
692
693	mapping = page->mapping;
694	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
695		return NULL;
696
697	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
698}
699EXPORT_SYMBOL(page_mapping);
700
701/*
702 * For file cache pages, return the address_space, otherwise return NULL
703 */
704struct address_space *page_mapping_file(struct page *page)
705{
706	if (unlikely(PageSwapCache(page)))
707		return NULL;
708	return page_mapping(page);
709}
710
711/* Slow path of page_mapcount() for compound pages */
712int __page_mapcount(struct page *page)
713{
714	int ret;
715
716	ret = atomic_read(&page->_mapcount) + 1;
717	/*
718	 * For file THP page->_mapcount contains total number of mapping
719	 * of the page: no need to look into compound_mapcount.
720	 */
721	if (!PageAnon(page) && !PageHuge(page))
722		return ret;
723	page = compound_head(page);
724	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
725	if (PageDoubleMap(page))
726		ret--;
727	return ret;
728}
729EXPORT_SYMBOL_GPL(__page_mapcount);
730
731int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
732int sysctl_overcommit_ratio __read_mostly = 50;
733unsigned long sysctl_overcommit_kbytes __read_mostly;
734int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
735unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
736unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
737
738int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
739		size_t *lenp, loff_t *ppos)
740{
741	int ret;
742
743	ret = proc_dointvec(table, write, buffer, lenp, ppos);
744	if (ret == 0 && write)
745		sysctl_overcommit_kbytes = 0;
746	return ret;
747}
748
749static void sync_overcommit_as(struct work_struct *dummy)
750{
751	percpu_counter_sync(&vm_committed_as);
752}
753
754int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
755		size_t *lenp, loff_t *ppos)
756{
757	struct ctl_table t;
758	int new_policy;
759	int ret;
760
761	/*
762	 * The deviation of sync_overcommit_as could be big with loose policy
763	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
764	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
 765	 * with the strict "NEVER", and to avoid a possible race condition (even
 766	 * though the user usually won't switch to OVERCOMMIT_NEVER very often),
 767	 * the switch is done in the following order:
768	 *	1. changing the batch
769	 *	2. sync percpu count on each CPU
770	 *	3. switch the policy
771	 */
772	if (write) {
773		t = *table;
774		t.data = &new_policy;
775		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
776		if (ret)
777			return ret;
778
779		mm_compute_batch(new_policy);
780		if (new_policy == OVERCOMMIT_NEVER)
781			schedule_on_each_cpu(sync_overcommit_as);
782		sysctl_overcommit_memory = new_policy;
783	} else {
784		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
785	}
786
787	return ret;
788}
789
790int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
791		size_t *lenp, loff_t *ppos)
792{
793	int ret;
794
795	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
796	if (ret == 0 && write)
797		sysctl_overcommit_ratio = 0;
798	return ret;
799}
800
801/*
802 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
803 */
804unsigned long vm_commit_limit(void)
805{
806	unsigned long allowed;
807
808	if (sysctl_overcommit_kbytes)
809		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
810	else
811		allowed = ((totalram_pages() - hugetlb_total_pages())
812			   * sysctl_overcommit_ratio / 100);
813	allowed += total_swap_pages;
814
815	return allowed;
816}
817
818/*
 819 * Make sure vm_committed_as is in its own cacheline, not shared with other
 820 * variables, since it can be updated frequently by several CPUs.
821 */
822struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
823
824/*
825 * The global memory commitment made in the system can be a metric
826 * that can be used to drive ballooning decisions when Linux is hosted
827 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
828 * balancing memory across competing virtual machines that are hosted.
829 * Several metrics drive this policy engine including the guest reported
830 * memory commitment.
831 *
832 * The time cost of this is very low for small platforms, and for big
833 * platform like a 2S/36C/72T Skylake server, in worst case where
834 * vm_committed_as's spinlock is under severe contention, the time cost
835 * could be about 30~40 microseconds.
836 */
837unsigned long vm_memory_committed(void)
838{
839	return percpu_counter_sum_positive(&vm_committed_as);
840}
841EXPORT_SYMBOL_GPL(vm_memory_committed);
842
843/*
844 * Check that a process has enough memory to allocate a new virtual
845 * mapping. 0 means there is enough memory for the allocation to
846 * succeed and -ENOMEM implies there is not.
847 *
848 * We currently support three overcommit policies, which are set via the
849 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
850 *
851 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
852 * Additional code 2002 Jul 20 by Robert Love.
853 *
854 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
855 *
856 * Note this is a helper function intended to be used by LSMs which
857 * wish to use this logic.
858 */
859int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
860{
861	long allowed;
862
863	vm_acct_memory(pages);
864
865	/*
866	 * Sometimes we want to use more memory than we have
867	 */
868	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
869		return 0;
870
871	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
872		if (pages > totalram_pages() + total_swap_pages)
873			goto error;
874		return 0;
875	}
876
877	allowed = vm_commit_limit();
878	/*
879	 * Reserve some for root
880	 */
881	if (!cap_sys_admin)
882		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
883
884	/*
885	 * Don't let a single process grow so big a user can't recover
886	 */
887	if (mm) {
888		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
889
890		allowed -= min_t(long, mm->total_vm / 32, reserve);
891	}
892
893	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
894		return 0;
895error:
896	vm_unacct_memory(pages);
897
898	return -ENOMEM;
899}
900
901/**
902 * get_cmdline() - copy the cmdline value to a buffer.
903 * @task:     the task whose cmdline value to copy.
904 * @buffer:   the buffer to copy to.
905 * @buflen:   the length of the buffer. Larger cmdline values are truncated
906 *            to this length.
907 *
908 * Return: the size of the cmdline field copied. Note that the copy does
909 * not guarantee an ending NULL byte.
910 */
911int get_cmdline(struct task_struct *task, char *buffer, int buflen)
912{
913	int res = 0;
914	unsigned int len;
915	struct mm_struct *mm = get_task_mm(task);
916	unsigned long arg_start, arg_end, env_start, env_end;
917	if (!mm)
918		goto out;
919	if (!mm->arg_end)
920		goto out_mm;	/* Shh! No looking before we're done */
921
922	spin_lock(&mm->arg_lock);
923	arg_start = mm->arg_start;
924	arg_end = mm->arg_end;
925	env_start = mm->env_start;
926	env_end = mm->env_end;
927	spin_unlock(&mm->arg_lock);
928
929	len = arg_end - arg_start;
930
931	if (len > buflen)
932		len = buflen;
933
934	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
935
936	/*
937	 * If the nul at the end of args has been overwritten, then
938	 * assume application is using setproctitle(3).
939	 */
940	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
941		len = strnlen(buffer, res);
942		if (len < res) {
943			res = len;
944		} else {
945			len = env_end - env_start;
946			if (len > buflen - res)
947				len = buflen - res;
948			res += access_process_vm(task, env_start,
949						 buffer+res, len,
950						 FOLL_FORCE);
951			res = strnlen(buffer, res);
952		}
953	}
954out_mm:
955	mmput(mm);
956out:
957	return res;
958}
959
960int memcmp_pages(struct page *page1, struct page *page2)
961{
962	char *addr1, *addr2;
963	int ret;
964
965	addr1 = kmap_atomic(page1);
966	addr2 = kmap_atomic(page2);
967	ret = memcmp(addr1, addr2, PAGE_SIZE);
968	kunmap_atomic(addr2);
969	kunmap_atomic(addr1);
970	return ret;
971}