mm/util.c (v6.2)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise a
 * kstrdup() copy of it.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
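
/*
 * Usage sketch (editor's addition, not part of mm/util.c): pairing
 * kstrdup_const() with kfree_const().  The "demo_attr" structure and
 * function names are hypothetical.
 */
struct demo_attr {
	const char *name;
};

static int demo_attr_set_name(struct demo_attr *attr, const char *name)
{
	/* May return @name itself if it lives in .rodata */
	attr->name = kstrdup_const(name, GFP_KERNEL);
	return attr->name ? 0 : -ENOMEM;
}

static void demo_attr_release(struct demo_attr *attr)
{
	/* Must be kfree_const(), never kfree() or krealloc() */
	kfree_const(attr->name);
}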

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
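
/*
 * Usage sketch (editor's addition): copying a fixed-size ioctl argument with
 * memdup_user().  "demo_config" and the handler are hypothetical; note the
 * ERR_PTR() based error handling and the plain kfree() on the result.
 */
struct demo_config {
	u32 flags;
	u32 depth;
};

static int demo_ioctl_set_config(const void __user *arg)
{
	struct demo_config *cfg;

	cfg = memdup_user(arg, sizeof(*cfg));
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);	/* -ENOMEM or -EFAULT */

	/* ... validate and apply cfg ... */
	kfree(cfg);
	return 0;
}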

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
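
/*
 * Usage sketch (editor's addition): vmemdup_user() for a buffer whose size is
 * caller-controlled and may exceed what kmalloc() can serve contiguously.
 * The size cap and names are hypothetical; the result must go to kvfree().
 */
static int demo_load_table(const void __user *ubuf, size_t len)
{
	void *table;

	if (len > SZ_1M)
		return -E2BIG;

	table = vmemdup_user(ubuf, len);
	if (IS_ERR(table))
		return PTR_ERR(table);

	/* ... consume table ... */
	kvfree(table);		/* not kfree(): memory may be vmalloc'ed */
	return 0;
}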

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
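
/*
 * Usage sketch (editor's addition): the common write-handler pattern built on
 * memdup_user_nul(), which guarantees the copied buffer is NUL-terminated
 * before it is parsed.  Names are hypothetical.
 */
static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	unsigned int val;
	char *kbuf;
	int ret;

	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	ret = kstrtouint(kbuf, 0, &val);	/* safe: kbuf is NUL-terminated */
	kfree(kbuf);
	if (ret)
		return ret;

	/* ... apply val ... */
	return count;
}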

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
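
/*
 * Usage sketch (editor's addition): a driver charging pinned pages against
 * RLIMIT_MEMLOCK before pinning them, and undoing the charge on teardown.
 * Names are hypothetical.
 */
static int demo_charge_pinned(struct mm_struct *mm, unsigned long npages)
{
	int ret;

	ret = account_locked_vm(mm, npages, true);	/* inc */
	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin the pages ... */
	return 0;
}

static void demo_uncharge_pinned(struct mm_struct *mm, unsigned long npages)
{
	/* ... unpin the pages ... */
	account_locked_vm(mm, npages, false);	/* dec */
}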

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
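
/*
 * Usage sketch (editor's addition): kvmalloc() (kvmalloc_node() with
 * NUMA_NO_NODE) for a table that is usually small but may be too large for
 * the page allocator to serve contiguously.  "demo_entry" is hypothetical.
 */
struct demo_entry {
	u64 key;
	u64 value;
};

static struct demo_entry *demo_alloc_table(size_t nr)
{
	/* array_size() from <linux/overflow.h> saturates on overflow */
	struct demo_entry *tbl = kvmalloc(array_size(nr, sizeof(*tbl)),
					  GFP_KERNEL);

	/* ... on success, free later with kvfree(tbl), never kfree() ... */
	return tbl;
}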

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
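
/*
 * Usage sketch (editor's addition): releasing key material with
 * kvfree_sensitive() so the bytes are wiped before the memory is returned.
 * Names are hypothetical.
 */
static void demo_drop_key(void *key, size_t keylen)
{
	/* the memzero_explicit() inside cannot be elided by the compiler */
	kvfree_sensitive(key, keylen);
}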

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
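
/*
 * Usage sketch (editor's addition): growing a kvmalloc'ed buffer.  Unlike
 * krealloc(), the caller must pass the old size; on failure the old buffer
 * is left intact and still owned by the caller.  Names are hypothetical.
 */
static void *demo_grow_buffer(void *buf, size_t oldsize, size_t newsize)
{
	void *nbuf = kvrealloc(buf, oldsize, newsize, GFP_KERNEL);

	if (!nbuf)
		return NULL;	/* @buf is untouched and must still be freed */
	return nbuf;
}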

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
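
/*
 * Usage sketch (editor's addition): vcalloc() for a zeroed, possibly large
 * array; the n * size multiplication is overflow-checked inside
 * __vmalloc_array().  Free the result with vfree().
 */
static u64 *demo_alloc_stats(size_t nr)
{
	return vcalloc(nr, sizeof(u64));
}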

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users rarely switch to OVERCOMMIT_NEVER), the switch
	 * is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
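
/*
 * Worked example (editor's addition): with 16 GiB of RAM, no hugetlb pages,
 * the default overcommit_ratio of 50 and 8 GiB of swap, the OVERCOMMIT_NEVER
 * limit is 16 GiB * 50 / 100 + 8 GiB = 16 GiB of committable memory.  A
 * non-zero overcommit_kbytes replaces the ratio term with an absolute value.
 */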

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for big
 * platforms like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
			    __func__, current->pid, current->comm);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
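
/*
 * Usage sketch (editor's addition): mem_dump_obj() continues the current
 * printk() line via pr_cont(), so the caller prints the preamble without a
 * trailing newline.  Names are hypothetical.
 */
static void demo_report_object(void *obj)
{
	pr_err("demo: unexpected object at %px:", obj);
	mem_dump_obj(obj);	/* appends the provenance information */
}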

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
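
/*
 * Usage sketch (editor's addition, modelled on how a ballooning driver might
 * use this): taking page_offline_begin()/end() around the PageOffline()
 * transition so that frozen PFN walkers never observe it mid-way.
 */
static void demo_mark_page_offline(struct page *page)
{
	page_offline_begin();
	__SetPageOffline(page);
	/* ... make the page inaccessible in the hypervisor ... */
	page_offline_end();
}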

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif
mm/util.c (v3.5.6)

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	if (p)
		ks = ksize(p);

	if (ks >= new_size)
		return (void *)p;

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memset(mem, 0, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);
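
/*
 * Usage sketch (editor's addition, against this v3.5.6 API): wiping a
 * temporary copy of key material with kzfree().  In later kernels kzfree()
 * was renamed kfree_sensitive().  Names are hypothetical.
 */
static int demo_use_key(const u8 *key, size_t keylen)
{
	u8 *tmp = kmemdup(key, keylen, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;
	/* ... use tmp ... */
	kzfree(tmp);	/* zeroes the whole allocation before freeing */
	return 0;
}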

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check in the entire thread group or else
 * just check in the current task. Returns the pid of the task that
 * the vma is stack for.
 */
pid_t vm_is_stack(struct task_struct *task,
		  struct vm_area_struct *vma, int in_group)
{
	pid_t ret = 0;

	if (vm_is_stack_for_task(task, vma))
		return task->pid;

	if (in_group) {
		struct task_struct *t;

		rcu_read_lock();
		if (!pid_alive(task))
			goto done;

		t = task;
		do {
			if (vm_is_stack_for_task(t, vma)) {
				ret = t->pid;
				goto done;
			}
		} while_each_thread(task, t);
done:
		rcu_read_unlock();
	}

	return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
					write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
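
/*
 * Usage sketch (editor's addition, against this v3.5.6 signature): pinning
 * user pages for I/O and dropping the references afterwards.  Names are
 * hypothetical.
 */
static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages,
				struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned < 0)
		return pinned;

	/* ... operate on pages[0 .. pinned-1] ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned == nr_pages ? 0 : -EFAULT;
}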

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
		up_write(&mm->mmap_sem);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);