   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/mm.h>
   3#include <linux/slab.h>
   4#include <linux/string.h>
   5#include <linux/compiler.h>
   6#include <linux/export.h>
   7#include <linux/err.h>
   8#include <linux/sched.h>
   9#include <linux/sched/mm.h>
  10#include <linux/sched/signal.h>
  11#include <linux/sched/task_stack.h>
  12#include <linux/security.h>
  13#include <linux/swap.h>
  14#include <linux/swapops.h>
  15#include <linux/mman.h>
  16#include <linux/hugetlb.h>
  17#include <linux/vmalloc.h>
  18#include <linux/userfaultfd_k.h>
  19#include <linux/elf.h>
  20#include <linux/elf-randomize.h>
  21#include <linux/personality.h>
  22#include <linux/random.h>
  23#include <linux/processor.h>
  24#include <linux/sizes.h>
  25#include <linux/compat.h>
  26
  27#include <linux/uaccess.h>
  28
  29#include "internal.h"
  30#include "swap.h"
  31
  32/**
  33 * kfree_const - conditionally free memory
  34 * @x: pointer to the memory
  35 *
  36 * Function calls kfree only if @x is not in .rodata section.
  37 */
  38void kfree_const(const void *x)
  39{
  40	if (!is_kernel_rodata((unsigned long)x))
  41		kfree(x);
  42}
  43EXPORT_SYMBOL(kfree_const);
  44
  45/**
  46 * kstrdup - allocate space for and copy an existing string
  47 * @s: the string to duplicate
  48 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  49 *
  50 * Return: newly allocated copy of @s or %NULL in case of error
  51 */
  52noinline
  53char *kstrdup(const char *s, gfp_t gfp)
  54{
  55	size_t len;
  56	char *buf;
  57
  58	if (!s)
  59		return NULL;
  60
  61	len = strlen(s) + 1;
  62	buf = kmalloc_track_caller(len, gfp);
  63	if (buf)
  64		memcpy(buf, s, len);
  65	return buf;
  66}
  67EXPORT_SYMBOL(kstrdup);
  68
  69/**
  70 * kstrdup_const - conditionally duplicate an existing const string
  71 * @s: the string to duplicate
  72 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  73 *
  74 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
  75 * must not be passed to krealloc().
  76 *
  77 * Return: source string if it is in the .rodata section, otherwise a
  78 * copy newly allocated with kstrdup().
  79 */
  80const char *kstrdup_const(const char *s, gfp_t gfp)
  81{
  82	if (is_kernel_rodata((unsigned long)s))
  83		return s;
  84
  85	return kstrdup(s, gfp);
  86}
  87EXPORT_SYMBOL(kstrdup_const);
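/*
 * Illustrative sketch (not part of this file): a typical kstrdup_const()/
 * kfree_const() pairing for a name that is often a string literal in
 * .rodata. struct example_attr and example_attr_set_name() are made-up
 * names for the sake of the sketch.
 *
 *	struct example_attr {
 *		const char *name;
 *	};
 *
 *	static int example_attr_set_name(struct example_attr *attr,
 *					 const char *name)
 *	{
 *		const char *copy = kstrdup_const(name, GFP_KERNEL);
 *
 *		if (!copy)
 *			return -ENOMEM;
 *		kfree_const(attr->name);
 *		attr->name = copy;
 *		return 0;
 *	}
 */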
  88
  89/**
  90 * kstrndup - allocate space for and copy an existing string
  91 * @s: the string to duplicate
  92 * @max: read at most @max chars from @s
  93 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  94 *
  95 * Note: Use kmemdup_nul() instead if the size is known exactly.
  96 *
  97 * Return: newly allocated copy of @s or %NULL in case of error
  98 */
  99char *kstrndup(const char *s, size_t max, gfp_t gfp)
 100{
 101	size_t len;
 102	char *buf;
 103
 104	if (!s)
 105		return NULL;
 106
 107	len = strnlen(s, max);
 108	buf = kmalloc_track_caller(len+1, gfp);
 109	if (buf) {
 110		memcpy(buf, s, len);
 111		buf[len] = '\0';
 112	}
 113	return buf;
 114}
 115EXPORT_SYMBOL(kstrndup);
 116
 117/**
 118 * kmemdup - duplicate region of memory
 119 *
 120 * @src: memory region to duplicate
 121 * @len: memory region length
 122 * @gfp: GFP mask to use
 123 *
 124 * Return: newly allocated copy of @src or %NULL in case of error,
 125 * result is physically contiguous. Use kfree() to free.
 126 */
 127void *kmemdup(const void *src, size_t len, gfp_t gfp)
 128{
 129	void *p;
 130
 131	p = kmalloc_track_caller(len, gfp);
 132	if (p)
 133		memcpy(p, src, len);
 134	return p;
 135}
 136EXPORT_SYMBOL(kmemdup);
 137
 138/**
 139 * kmemdup_array - duplicate a given array.
 140 *
 141 * @src: array to duplicate.
 142 * @element_size: size of each element of array.
 143 * @count: number of elements to duplicate from array.
 144 * @gfp: GFP mask to use.
 145 *
 146 * Return: duplicated array of @src or %NULL in case of error,
 147 * result is physically contiguous. Use kfree() to free.
 148 */
 149void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp)
 150{
 151	return kmemdup(src, size_mul(element_size, count), gfp);
 152}
 153EXPORT_SYMBOL(kmemdup_array);
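/*
 * Illustrative sketch (not part of this file): duplicating a caller-supplied
 * array with kmemdup_array(), which guards the element_size * count
 * multiplication against overflow. struct example_entry and
 * example_clone_entries() are made-up names; the copy is freed with kfree().
 *
 *	static struct example_entry *
 *	example_clone_entries(const struct example_entry *src, size_t count)
 *	{
 *		return kmemdup_array(src, sizeof(*src), count, GFP_KERNEL);
 *	}
 */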
 154
 155/**
 156 * kvmemdup - duplicate region of memory
 157 *
 158 * @src: memory region to duplicate
 159 * @len: memory region length
 160 * @gfp: GFP mask to use
 161 *
 162 * Return: newly allocated copy of @src or %NULL in case of error,
 163 * result may be not physically contiguous. Use kvfree() to free.
 164 */
 165void *kvmemdup(const void *src, size_t len, gfp_t gfp)
 166{
 167	void *p;
 168
 169	p = kvmalloc(len, gfp);
 170	if (p)
 171		memcpy(p, src, len);
 172	return p;
 173}
 174EXPORT_SYMBOL(kvmemdup);
 175
 176/**
 177 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 178 * @s: The data to stringify
 179 * @len: The size of the data
 180 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 181 *
 182 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 183 * case of error
 184 */
 185char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
 186{
 187	char *buf;
 188
 189	if (!s)
 190		return NULL;
 191
 192	buf = kmalloc_track_caller(len + 1, gfp);
 193	if (buf) {
 194		memcpy(buf, s, len);
 195		buf[len] = '\0';
 196	}
 197	return buf;
 198}
 199EXPORT_SYMBOL(kmemdup_nul);
 200
 201/**
 202 * memdup_user - duplicate memory region from user space
 203 *
 204 * @src: source address in user space
 205 * @len: number of bytes to copy
 206 *
 207 * Return: an ERR_PTR() on failure.  Result is physically
 208 * contiguous, to be freed by kfree().
 209 */
 210void *memdup_user(const void __user *src, size_t len)
 211{
 212	void *p;
 213
 214	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
 215	if (!p)
 216		return ERR_PTR(-ENOMEM);
 217
 218	if (copy_from_user(p, src, len)) {
 219		kfree(p);
 220		return ERR_PTR(-EFAULT);
 221	}
 222
 223	return p;
 224}
 225EXPORT_SYMBOL(memdup_user);
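/*
 * Illustrative sketch (not part of this file): the usual memdup_user()
 * pattern in an ioctl-style handler. example_ioctl_set_blob() and
 * example_consume_blob() are made-up names; the points shown are the
 * IS_ERR()/PTR_ERR() handling and the kfree() of the copy.
 *
 *	static long example_ioctl_set_blob(const void __user *ubuf, size_t len)
 *	{
 *		void *buf;
 *		long ret;
 *
 *		buf = memdup_user(ubuf, len);
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *
 *		ret = example_consume_blob(buf, len);
 *		kfree(buf);
 *		return ret;
 *	}
 */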
 226
 227/**
 228 * vmemdup_user - duplicate memory region from user space
 229 *
 230 * @src: source address in user space
 231 * @len: number of bytes to copy
 232 *
 233 * Return: an ERR_PTR() on failure.  Result may be not
 234 * physically contiguous.  Use kvfree() to free.
 235 */
 236void *vmemdup_user(const void __user *src, size_t len)
 237{
 238	void *p;
 239
 240	p = kvmalloc(len, GFP_USER);
 241	if (!p)
 242		return ERR_PTR(-ENOMEM);
 243
 244	if (copy_from_user(p, src, len)) {
 245		kvfree(p);
 246		return ERR_PTR(-EFAULT);
 247	}
 248
 249	return p;
 250}
 251EXPORT_SYMBOL(vmemdup_user);
 252
 253/**
 254 * strndup_user - duplicate an existing string from user space
 255 * @s: The string to duplicate
 256 * @n: Maximum number of bytes to copy, including the trailing NUL.
 257 *
 258 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 259 */
 260char *strndup_user(const char __user *s, long n)
 261{
 262	char *p;
 263	long length;
 264
 265	length = strnlen_user(s, n);
 266
 267	if (!length)
 268		return ERR_PTR(-EFAULT);
 269
 270	if (length > n)
 271		return ERR_PTR(-EINVAL);
 272
 273	p = memdup_user(s, length);
 274
 275	if (IS_ERR(p))
 276		return p;
 277
 278	p[length - 1] = '\0';
 279
 280	return p;
 281}
 282EXPORT_SYMBOL(strndup_user);
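/*
 * Illustrative sketch (not part of this file): copying a short,
 * NUL-terminated user string with an upper bound on its length.
 * example_set_tag() and EXAMPLE_TAG_MAX are made-up names for the sketch.
 *
 *	#define EXAMPLE_TAG_MAX	64
 *
 *	static int example_set_tag(const char __user *utag)
 *	{
 *		char *tag = strndup_user(utag, EXAMPLE_TAG_MAX);
 *
 *		if (IS_ERR(tag))
 *			return PTR_ERR(tag);
 *
 *		pr_debug("new tag: %s\n", tag);
 *		kfree(tag);
 *		return 0;
 *	}
 */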
 283
 284/**
 285 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 286 *
 287 * @src: source address in user space
 288 * @len: number of bytes to copy
 289 *
 290 * Return: an ERR_PTR() on failure.
 291 */
 292void *memdup_user_nul(const void __user *src, size_t len)
 293{
 294	char *p;
 295
 296	/*
 297	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
 298	 * cause pagefault, which makes it pointless to use GFP_NOFS
 299	 * or GFP_ATOMIC.
 300	 */
 301	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
 302	if (!p)
 303		return ERR_PTR(-ENOMEM);
 304
 305	if (copy_from_user(p, src, len)) {
 306		kfree(p);
 307		return ERR_PTR(-EFAULT);
 308	}
 309	p[len] = '\0';
 310
 311	return p;
 312}
 313EXPORT_SYMBOL(memdup_user_nul);
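/*
 * Illustrative sketch (not part of this file): memdup_user_nul() in a
 * write() handler that needs a NUL-terminated copy of the user buffer
 * before parsing it. example_write() and example_parse() are made-up
 * names for the sketch.
 *
 *	static ssize_t example_write(struct file *file, const char __user *ubuf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *		int err;
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *
 *		err = example_parse(kbuf);
 *		kfree(kbuf);
 *		return err ? err : count;
 *	}
 */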
 314
 315/* Check if the vma is being used as a stack by this task */
 316int vma_is_stack_for_current(struct vm_area_struct *vma)
 317{
 318	struct task_struct * __maybe_unused t = current;
 319
 320	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 321}
 322
 323/*
 324 * Change backing file, only valid to use during initial VMA setup.
 325 */
 326void vma_set_file(struct vm_area_struct *vma, struct file *file)
 327{
 328	/* Changing an anonymous vma with this is illegal */
 329	get_file(file);
 330	swap(vma->vm_file, file);
 331	fput(file);
 332}
 333EXPORT_SYMBOL(vma_set_file);
 334
 335#ifndef STACK_RND_MASK
 336#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
 337#endif
 338
 339unsigned long randomize_stack_top(unsigned long stack_top)
 340{
 341	unsigned long random_variable = 0;
 342
 343	if (current->flags & PF_RANDOMIZE) {
 344		random_variable = get_random_long();
 345		random_variable &= STACK_RND_MASK;
 346		random_variable <<= PAGE_SHIFT;
 347	}
 348#ifdef CONFIG_STACK_GROWSUP
 349	return PAGE_ALIGN(stack_top) + random_variable;
 350#else
 351	return PAGE_ALIGN(stack_top) - random_variable;
 352#endif
 353}
 354
 355/**
 356 * randomize_page - Generate a random, page aligned address
 357 * @start:	The smallest acceptable address the caller will take.
 358 * @range:	The size of the area, starting at @start, within which the
 359 *		random address must fall.
 360 *
 361 * If @start + @range would overflow, @range is capped.
 362 *
 363 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 364 * @start was already page aligned.  We now align it regardless.
 365 *
 366 * Return: A page aligned address within [start, start + range).  On error,
 367 * @start is returned.
 368 */
 369unsigned long randomize_page(unsigned long start, unsigned long range)
 370{
 371	if (!PAGE_ALIGNED(start)) {
 372		range -= PAGE_ALIGN(start) - start;
 373		start = PAGE_ALIGN(start);
 374	}
 375
 376	if (start > ULONG_MAX - range)
 377		range = ULONG_MAX - start;
 378
 379	range >>= PAGE_SHIFT;
 380
 381	if (range == 0)
 382		return start;
 383
 384	return start + (get_random_long() % range << PAGE_SHIFT);
 385}
 386
 387#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 388unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
 389{
 390	/* Is the current task 32bit ? */
 391	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
 392		return randomize_page(mm->brk, SZ_32M);
 393
 394	return randomize_page(mm->brk, SZ_1G);
 395}
 396
 397unsigned long arch_mmap_rnd(void)
 398{
 399	unsigned long rnd;
 400
 401#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
 402	if (is_compat_task())
 403		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
 404	else
 405#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
 406		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 407
 408	return rnd << PAGE_SHIFT;
 409}
 410
 411static int mmap_is_legacy(struct rlimit *rlim_stack)
 412{
 413	if (current->personality & ADDR_COMPAT_LAYOUT)
 414		return 1;
 415
 416	/* On parisc the stack always grows up - so an unlimited stack should
 417	 * not be an indicator to use the legacy memory layout. */
 418	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
 419		!IS_ENABLED(CONFIG_STACK_GROWSUP))
 420		return 1;
 421
 422	return sysctl_legacy_va_layout;
 423}
 424
 425/*
 426 * Leave enough space between the mmap area and the stack to honour ulimit in
 427 * the face of randomisation.
 428 */
 429#define MIN_GAP		(SZ_128M)
 430#define MAX_GAP		(STACK_TOP / 6 * 5)
 431
 432static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 433{
 434#ifdef CONFIG_STACK_GROWSUP
 435	/*
 436	 * For an upwards growing stack the calculation is much simpler.
 437	 * Memory for the maximum stack size is reserved at the top of the
 438	 * task. mmap_base starts directly below the stack and grows
 439	 * downwards.
 440	 */
 441	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
 442#else
 443	unsigned long gap = rlim_stack->rlim_cur;
 444	unsigned long pad = stack_guard_gap;
 445
 446	/* Account for stack randomization if necessary */
 447	if (current->flags & PF_RANDOMIZE)
 448		pad += (STACK_RND_MASK << PAGE_SHIFT);
 449
 450	/* Values close to RLIM_INFINITY can overflow. */
 451	if (gap + pad > gap)
 452		gap += pad;
 453
 454	if (gap < MIN_GAP)
 455		gap = MIN_GAP;
 456	else if (gap > MAX_GAP)
 457		gap = MAX_GAP;
 458
 459	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 460#endif
 461}
 462
 463void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 464{
 465	unsigned long random_factor = 0UL;
 466
 467	if (current->flags & PF_RANDOMIZE)
 468		random_factor = arch_mmap_rnd();
 469
 470	if (mmap_is_legacy(rlim_stack)) {
 471		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 472		mm->get_unmapped_area = arch_get_unmapped_area;
 473	} else {
 474		mm->mmap_base = mmap_base(random_factor, rlim_stack);
 475		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 476	}
 477}
 478#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 479void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 480{
 481	mm->mmap_base = TASK_UNMAPPED_BASE;
 482	mm->get_unmapped_area = arch_get_unmapped_area;
 483}
 484#endif
 485
 486/**
 487 * __account_locked_vm - account locked pages to an mm's locked_vm
 488 * @mm:          mm to account against
 489 * @pages:       number of pages to account
 490 * @inc:         %true if @pages should be considered positive, %false if not
 491 * @task:        task used to check RLIMIT_MEMLOCK
 492 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 493 *
 494 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 495 * that mmap_lock is held as writer.
 496 *
 497 * Return:
 498 * * 0       on success
 499 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 500 */
 501int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
 502			struct task_struct *task, bool bypass_rlim)
 503{
 504	unsigned long locked_vm, limit;
 505	int ret = 0;
 506
 507	mmap_assert_write_locked(mm);
 508
 509	locked_vm = mm->locked_vm;
 510	if (inc) {
 511		if (!bypass_rlim) {
 512			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 513			if (locked_vm + pages > limit)
 514				ret = -ENOMEM;
 515		}
 516		if (!ret)
 517			mm->locked_vm = locked_vm + pages;
 518	} else {
 519		WARN_ON_ONCE(pages > locked_vm);
 520		mm->locked_vm = locked_vm - pages;
 521	}
 522
 523	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
 524		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
 525		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
 526		 ret ? " - exceeded" : "");
 527
 528	return ret;
 529}
 530EXPORT_SYMBOL_GPL(__account_locked_vm);
 531
 532/**
 533 * account_locked_vm - account locked pages to an mm's locked_vm
 534 * @mm:          mm to account against, may be NULL
 535 * @pages:       number of pages to account
 536 * @inc:         %true if @pages should be considered positive, %false if not
 537 *
 538 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 539 *
 540 * Return:
 541 * * 0       on success, or if mm is NULL
 542 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 543 */
 544int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
 545{
 546	int ret;
 547
 548	if (pages == 0 || !mm)
 549		return 0;
 550
 551	mmap_write_lock(mm);
 552	ret = __account_locked_vm(mm, pages, inc, current,
 553				  capable(CAP_IPC_LOCK));
 554	mmap_write_unlock(mm);
 555
 556	return ret;
 557}
 558EXPORT_SYMBOL_GPL(account_locked_vm);
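/*
 * Illustrative sketch (not part of this file): pairing account_locked_vm()
 * calls around a pinning operation so that RLIMIT_MEMLOCK accounting is
 * rolled back on failure. example_pin_pages() and example_do_pin() are
 * made-up names for the sketch.
 *
 *	static int example_pin_pages(struct mm_struct *mm, unsigned long npages)
 *	{
 *		int ret = account_locked_vm(mm, npages, true);
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = example_do_pin(npages);
 *		if (ret)
 *			account_locked_vm(mm, npages, false);
 *		return ret;
 *	}
 */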
 559
 560unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 561	unsigned long len, unsigned long prot,
 562	unsigned long flag, unsigned long pgoff)
 563{
 564	unsigned long ret;
 565	struct mm_struct *mm = current->mm;
 566	unsigned long populate;
 567	LIST_HEAD(uf);
 568
 569	ret = security_mmap_file(file, prot, flag);
 570	if (!ret) {
 571		if (mmap_write_lock_killable(mm))
 572			return -EINTR;
 573		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
 574			      &uf);
 575		mmap_write_unlock(mm);
 576		userfaultfd_unmap_complete(mm, &uf);
 577		if (populate)
 578			mm_populate(ret, populate);
 579	}
 580	return ret;
 581}
 582
 583unsigned long vm_mmap(struct file *file, unsigned long addr,
 584	unsigned long len, unsigned long prot,
 585	unsigned long flag, unsigned long offset)
 586{
 587	if (unlikely(offset + PAGE_ALIGN(len) < offset))
 588		return -EINVAL;
 589	if (unlikely(offset_in_page(offset)))
 590		return -EINVAL;
 591
 592	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 593}
 594EXPORT_SYMBOL(vm_mmap);
 595
 596/**
 597 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 598 * failure, fall back to non-contiguous (vmalloc) allocation.
 599 * @size: size of the request.
 600 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 601 * @node: numa node to allocate from
 602 *
 603 * Uses kmalloc to get the memory but if the allocation fails then falls back
 604 * to the vmalloc allocator. Use kvfree for freeing the memory.
 605 *
 606 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
 607 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 608 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 609 *
 610 * Return: pointer to the allocated memory or %NULL in case of failure
 611 */
 612void *kvmalloc_node(size_t size, gfp_t flags, int node)
 613{
 614	gfp_t kmalloc_flags = flags;
 615	void *ret;
 616
 617	/*
 618	 * We want to attempt a large physically contiguous block first because
 619	 * it is less likely to fragment multiple larger blocks and therefore
 620	 * contributes less to long-term fragmentation than the vmalloc fallback.
 621	 * However make sure that larger requests are not too disruptive - no
 622	 * OOM killer and no allocation failure warnings as we have a fallback.
 623	 */
 624	if (size > PAGE_SIZE) {
 625		kmalloc_flags |= __GFP_NOWARN;
 626
 627		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
 628			kmalloc_flags |= __GFP_NORETRY;
 629
 630		/* nofail semantic is implemented by the vmalloc fallback */
 631		kmalloc_flags &= ~__GFP_NOFAIL;
 632	}
 633
 634	ret = kmalloc_node(size, kmalloc_flags, node);
 635
 636	/*
 637	 * It doesn't really make sense to fall back to vmalloc for sub-page
 638	 * requests.
 639	 */
 640	if (ret || size <= PAGE_SIZE)
 641		return ret;
 642
 643	/* non-sleeping allocations are not supported by vmalloc */
 644	if (!gfpflags_allow_blocking(flags))
 645		return NULL;
 646
 647	/* Don't even allow crazy sizes */
 648	if (unlikely(size > INT_MAX)) {
 649		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
 650		return NULL;
 651	}
 652
 653	/*
 654	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
 655	 * since the callers already cannot assume anything
 656	 * about the resulting pointer, and cannot play
 657	 * protection games.
 658	 */
 659	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 660			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 661			node, __builtin_return_address(0));
 662}
 663EXPORT_SYMBOL(kvmalloc_node);
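/*
 * Illustrative sketch (not part of this file): a large, possibly
 * non-contiguous table allocated with kvcalloc() and released with
 * kvfree(), as the kernel-doc above recommends. struct example_table and
 * struct example_entry are made-up names for the sketch.
 *
 *	struct example_table {
 *		size_t nr;
 *		struct example_entry *entries;
 *	};
 *
 *	static int example_table_alloc(struct example_table *t, size_t nr)
 *	{
 *		t->entries = kvcalloc(nr, sizeof(*t->entries), GFP_KERNEL);
 *		if (!t->entries)
 *			return -ENOMEM;
 *		t->nr = nr;
 *		return 0;
 *	}
 *
 *	static void example_table_free(struct example_table *t)
 *	{
 *		kvfree(t->entries);
 *		t->entries = NULL;
 *		t->nr = 0;
 *	}
 */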
 664
 665/**
 666 * kvfree() - Free memory.
 667 * @addr: Pointer to allocated memory.
 668 *
 669 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 670 * It is slightly more efficient to use kfree() or vfree() if you are certain
 671 * that you know which one to use.
 672 *
 673 * Context: Either preemptible task context or not-NMI interrupt.
 674 */
 675void kvfree(const void *addr)
 676{
 677	if (is_vmalloc_addr(addr))
 678		vfree(addr);
 679	else
 680		kfree(addr);
 681}
 682EXPORT_SYMBOL(kvfree);
 683
 684/**
 685 * kvfree_sensitive - Free a data object containing sensitive information.
 686 * @addr: address of the data object to be freed.
 687 * @len: length of the data object.
 688 *
 689 * Use the special memzero_explicit() function to clear the content of a
 690 * kvmalloc'ed object containing sensitive data to make sure that the
 691 * compiler won't optimize out the data clearing.
 692 */
 693void kvfree_sensitive(const void *addr, size_t len)
 694{
 695	if (likely(!ZERO_OR_NULL_PTR(addr))) {
 696		memzero_explicit((void *)addr, len);
 697		kvfree(addr);
 698	}
 699}
 700EXPORT_SYMBOL(kvfree_sensitive);
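/*
 * Illustrative sketch (not part of this file): releasing key material with
 * kvfree_sensitive() so the buffer is zeroed before being freed.
 * example_load_key() and example_program_key() are made-up names for the
 * sketch.
 *
 *	static int example_load_key(const void __user *ukey, size_t len)
 *	{
 *		void *key = memdup_user(ukey, len);
 *		int ret;
 *
 *		if (IS_ERR(key))
 *			return PTR_ERR(key);
 *
 *		ret = example_program_key(key, len);
 *		kvfree_sensitive(key, len);
 *		return ret;
 *	}
 */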
 701
 702void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 703{
 704	void *newp;
 705
 706	if (oldsize >= newsize)
 707		return (void *)p;
 708	newp = kvmalloc(newsize, flags);
 709	if (!newp)
 710		return NULL;
 711	memcpy(newp, p, oldsize);
 712	kvfree(p);
 713	return newp;
 714}
 715EXPORT_SYMBOL(kvrealloc);
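/*
 * Illustrative sketch (not part of this file): growing a kvmalloc'ed buffer
 * with kvrealloc(). On failure the old buffer is left untouched, so the
 * caller keeps the original pointer. example_grow() is a made-up name for
 * the sketch.
 *
 *	static int example_grow(void **bufp, size_t oldsize, size_t newsize)
 *	{
 *		void *new = kvrealloc(*bufp, oldsize, newsize, GFP_KERNEL);
 *
 *		if (!new)
 *			return -ENOMEM;
 *		*bufp = new;
 *		return 0;
 *	}
 */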
 716
 717/**
 718 * __vmalloc_array - allocate memory for a virtually contiguous array.
 719 * @n: number of elements.
 720 * @size: element size.
 721 * @flags: the type of memory to allocate (see kmalloc).
 722 */
 723void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
 724{
 725	size_t bytes;
 726
 727	if (unlikely(check_mul_overflow(n, size, &bytes)))
 728		return NULL;
 729	return __vmalloc(bytes, flags);
 730}
 731EXPORT_SYMBOL(__vmalloc_array);
 732
 733/**
 734 * vmalloc_array - allocate memory for a virtually contiguous array.
 735 * @n: number of elements.
 736 * @size: element size.
 737 */
 738void *vmalloc_array(size_t n, size_t size)
 739{
 740	return __vmalloc_array(n, size, GFP_KERNEL);
 741}
 742EXPORT_SYMBOL(vmalloc_array);
 743
 744/**
 745 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 746 * @n: number of elements.
 747 * @size: element size.
 748 * @flags: the type of memory to allocate (see kmalloc).
 749 */
 750void *__vcalloc(size_t n, size_t size, gfp_t flags)
 751{
 752	return __vmalloc_array(n, size, flags | __GFP_ZERO);
 753}
 754EXPORT_SYMBOL(__vcalloc);
 755
 756/**
 757 * vcalloc - allocate and zero memory for a virtually contiguous array.
 758 * @n: number of elements.
 759 * @size: element size.
 760 */
 761void *vcalloc(size_t n, size_t size)
 762{
 763	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
 764}
 765EXPORT_SYMBOL(vcalloc);
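/*
 * Illustrative sketch (not part of this file): a zeroed, virtually
 * contiguous array allocated with vcalloc() and freed with vfree().
 * example_alloc_stats() and example_free_stats() are made-up names for the
 * sketch.
 *
 *	static u64 *example_alloc_stats(size_t nr)
 *	{
 *		return vcalloc(nr, sizeof(u64));
 *	}
 *
 *	static void example_free_stats(u64 *stats)
 *	{
 *		vfree(stats);
 *	}
 */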
 766
 767struct anon_vma *folio_anon_vma(struct folio *folio)
 768{
 769	unsigned long mapping = (unsigned long)folio->mapping;
 770
 771	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 772		return NULL;
 773	return (void *)(mapping - PAGE_MAPPING_ANON);
 774}
 775
 776/**
 777 * folio_mapping - Find the mapping where this folio is stored.
 778 * @folio: The folio.
 779 *
 780 * For folios which are in the page cache, return the mapping that this
 781 * page belongs to.  Folios in the swap cache return the swap mapping
 782 * this page is stored in (which is different from the mapping for the
 783 * swap file or swap device where the data is stored).
 784 *
 785 * You can call this for folios which aren't in the swap cache or page
 786 * cache and it will return NULL.
 787 */
 788struct address_space *folio_mapping(struct folio *folio)
 789{
 790	struct address_space *mapping;
 791
 792	/* This happens if someone calls flush_dcache_page on slab page */
 793	if (unlikely(folio_test_slab(folio)))
 794		return NULL;
 795
 796	if (unlikely(folio_test_swapcache(folio)))
 797		return swap_address_space(folio->swap);
 798
 799	mapping = folio->mapping;
 800	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
 801		return NULL;
 802
 803	return mapping;
 804}
 805EXPORT_SYMBOL(folio_mapping);
 806
 807/**
 808 * folio_copy - Copy the contents of one folio to another.
 809 * @dst: Folio to copy to.
 810 * @src: Folio to copy from.
 811 *
 812 * The bytes in the folio represented by @src are copied to @dst.
 813 * Assumes the caller has validated that @dst is at least as large as @src.
 814 * Can be called in atomic context for order-0 folios, but if the folio is
 815 * larger, it may sleep.
 816 */
 817void folio_copy(struct folio *dst, struct folio *src)
 818{
 819	long i = 0;
 820	long nr = folio_nr_pages(src);
 821
 822	for (;;) {
 823		copy_highpage(folio_page(dst, i), folio_page(src, i));
 824		if (++i == nr)
 825			break;
 826		cond_resched();
 827	}
 828}
 829EXPORT_SYMBOL(folio_copy);
 830
 831int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 832int sysctl_overcommit_ratio __read_mostly = 50;
 833unsigned long sysctl_overcommit_kbytes __read_mostly;
 834int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 835unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 836unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 837
 838int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
 839		size_t *lenp, loff_t *ppos)
 840{
 841	int ret;
 842
 843	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 844	if (ret == 0 && write)
 845		sysctl_overcommit_kbytes = 0;
 846	return ret;
 847}
 848
 849static void sync_overcommit_as(struct work_struct *dummy)
 850{
 851	percpu_counter_sync(&vm_committed_as);
 852}
 853
 854int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 855		size_t *lenp, loff_t *ppos)
 856{
 857	struct ctl_table t;
 858	int new_policy = -1;
 859	int ret;
 860
 861	/*
 862	 * The deviation of sync_overcommit_as could be big with a loose policy
 863	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing the policy to
 864	 * the strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
 865	 * with the strict "NEVER", and to avoid a possible race condition (even
 866	 * though users usually won't switch to OVERCOMMIT_NEVER very often),
 867	 * the switch is done in the following order:
 868	 *	1. changing the batch
 869	 *	2. sync percpu count on each CPU
 870	 *	3. switch the policy
 871	 */
 872	if (write) {
 873		t = *table;
 874		t.data = &new_policy;
 875		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
 876		if (ret || new_policy == -1)
 877			return ret;
 878
 879		mm_compute_batch(new_policy);
 880		if (new_policy == OVERCOMMIT_NEVER)
 881			schedule_on_each_cpu(sync_overcommit_as);
 882		sysctl_overcommit_memory = new_policy;
 883	} else {
 884		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 885	}
 886
 887	return ret;
 888}
 889
 890int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
 891		size_t *lenp, loff_t *ppos)
 892{
 893	int ret;
 894
 895	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 896	if (ret == 0 && write)
 897		sysctl_overcommit_ratio = 0;
 898	return ret;
 899}
 900
 901/*
 902 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 903 */
 904unsigned long vm_commit_limit(void)
 905{
 906	unsigned long allowed;
 907
 908	if (sysctl_overcommit_kbytes)
 909		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 910	else
 911		allowed = ((totalram_pages() - hugetlb_total_pages())
 912			   * sysctl_overcommit_ratio / 100);
 913	allowed += total_swap_pages;
 914
 915	return allowed;
 916}
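/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): the
 * "kbytes >> (PAGE_SHIFT - 10)" conversion above turns kilobytes into
 * pages by shifting right by 2, so sysctl_overcommit_kbytes == 1048576
 * (1 GiB) yields 262144 pages of commit limit before swap is added.
 */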
 917
 918/*
 919 * Make sure vm_committed_as sits in its own cacheline and is not shared with
 920 * other variables. It can be updated by several CPUs frequently.
 921 */
 922struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
 923
 924/*
 925 * The global memory commitment made in the system can be a metric
 926 * that can be used to drive ballooning decisions when Linux is hosted
 927 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 928 * balancing memory across competing virtual machines that are hosted.
 929 * Several metrics drive this policy engine including the guest reported
 930 * memory commitment.
 931 *
 932 * The time cost of this is very low for small platforms, and for a big
 933 * platform like a 2S/36C/72T Skylake server, in the worst case where
 934 * vm_committed_as's spinlock is under severe contention, the time cost
 935 * could be about 30~40 microseconds.
 936 */
 937unsigned long vm_memory_committed(void)
 938{
 939	return percpu_counter_sum_positive(&vm_committed_as);
 940}
 941EXPORT_SYMBOL_GPL(vm_memory_committed);
 942
 943/*
 944 * Check that a process has enough memory to allocate a new virtual
 945 * mapping. 0 means there is enough memory for the allocation to
 946 * succeed and -ENOMEM implies there is not.
 947 *
 948 * We currently support three overcommit policies, which are set via the
 949 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 950 *
 951 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 952 * Additional code 2002 Jul 20 by Robert Love.
 953 *
 954 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 955 *
 956 * Note this is a helper function intended to be used by LSMs which
 957 * wish to use this logic.
 958 */
 959int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 960{
 961	long allowed;
 962	unsigned long bytes_failed;
 963
 964	vm_acct_memory(pages);
 965
 966	/*
 967	 * Sometimes we want to use more memory than we have
 968	 */
 969	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
 970		return 0;
 971
 972	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 973		if (pages > totalram_pages() + total_swap_pages)
 974			goto error;
 975		return 0;
 976	}
 977
 978	allowed = vm_commit_limit();
 979	/*
 980	 * Reserve some for root
 981	 */
 982	if (!cap_sys_admin)
 983		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
 984
 985	/*
 986	 * Don't let a single process grow so big a user can't recover
 987	 */
 988	if (mm) {
 989		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
 990
 991		allowed -= min_t(long, mm->total_vm / 32, reserve);
 992	}
 993
 994	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
 995		return 0;
 996error:
 997	bytes_failed = pages << PAGE_SHIFT;
 998	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
 999			    __func__, current->pid, current->comm, bytes_failed);
1000	vm_unacct_memory(pages);
1001
1002	return -ENOMEM;
1003}
1004
1005/**
1006 * get_cmdline() - copy the cmdline value to a buffer.
1007 * @task:     the task whose cmdline value to copy.
1008 * @buffer:   the buffer to copy to.
1009 * @buflen:   the length of the buffer. Larger cmdline values are truncated
1010 *            to this length.
1011 *
1012 * Return: the size of the cmdline field copied. Note that the copy does
1013 * not guarantee an ending NULL byte.
1014 */
1015int get_cmdline(struct task_struct *task, char *buffer, int buflen)
1016{
1017	int res = 0;
1018	unsigned int len;
1019	struct mm_struct *mm = get_task_mm(task);
1020	unsigned long arg_start, arg_end, env_start, env_end;
1021	if (!mm)
1022		goto out;
1023	if (!mm->arg_end)
1024		goto out_mm;	/* Shh! No looking before we're done */
1025
1026	spin_lock(&mm->arg_lock);
1027	arg_start = mm->arg_start;
1028	arg_end = mm->arg_end;
1029	env_start = mm->env_start;
1030	env_end = mm->env_end;
1031	spin_unlock(&mm->arg_lock);
1032
1033	len = arg_end - arg_start;
1034
1035	if (len > buflen)
1036		len = buflen;
1037
1038	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
1039
1040	/*
1041	 * If the nul at the end of args has been overwritten, then
1042	 * assume application is using setproctitle(3).
1043	 */
1044	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
1045		len = strnlen(buffer, res);
1046		if (len < res) {
1047			res = len;
1048		} else {
1049			len = env_end - env_start;
1050			if (len > buflen - res)
1051				len = buflen - res;
1052			res += access_process_vm(task, env_start,
1053						 buffer+res, len,
1054						 FOLL_FORCE);
1055			res = strnlen(buffer, res);
1056		}
1057	}
1058out_mm:
1059	mmput(mm);
1060out:
1061	return res;
1062}
1063
1064int __weak memcmp_pages(struct page *page1, struct page *page2)
1065{
1066	char *addr1, *addr2;
1067	int ret;
1068
1069	addr1 = kmap_local_page(page1);
1070	addr2 = kmap_local_page(page2);
1071	ret = memcmp(addr1, addr2, PAGE_SIZE);
1072	kunmap_local(addr2);
1073	kunmap_local(addr1);
1074	return ret;
1075}
1076
1077#ifdef CONFIG_PRINTK
1078/**
1079 * mem_dump_obj - Print available provenance information
1080 * @object: object for which to find provenance information.
1081 *
1082 * This function uses pr_cont(), so that the caller is expected to have
1083 * printed out whatever preamble is appropriate.  The provenance information
1084 * depends on the type of object and on how much debugging is enabled.
1085 * For example, for a slab-cache object, the slab name is printed, and,
1086 * if available, the return address and stack trace from the allocation
1087 * and last free path of that object.
1088 */
1089void mem_dump_obj(void *object)
1090{
1091	const char *type;
1092
1093	if (kmem_dump_obj(object))
1094		return;
1095
1096	if (vmalloc_dump_obj(object))
1097		return;
1098
1099	if (is_vmalloc_addr(object))
1100		type = "vmalloc memory";
1101	else if (virt_addr_valid(object))
1102		type = "non-slab/vmalloc memory";
1103	else if (object == NULL)
1104		type = "NULL pointer";
1105	else if (object == ZERO_SIZE_PTR)
1106		type = "zero-size pointer";
1107	else
1108		type = "non-paged memory";
1109
1110	pr_cont(" %s\n", type);
1111}
1112EXPORT_SYMBOL_GPL(mem_dump_obj);
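/*
 * Illustrative sketch (not part of this file): mem_dump_obj() completes a
 * line started by the caller, since it prints with pr_cont(). example_obj
 * is a made-up name for the sketch.
 *
 *	pr_err("example: stale pointer %px", example_obj);
 *	mem_dump_obj(example_obj);
 */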
1113#endif
1114
1115/*
1116 * A driver might set a page logically offline -- PageOffline() -- and
1117 * turn the page inaccessible in the hypervisor; after that, access to page
1118 * content can be fatal.
1119 *
1120 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
1121 * pages after checking PageOffline(); however, these PFN walkers can race
1122 * with drivers that set PageOffline().
1123 *
1124 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
1125 * synchronize with such drivers, achieving that a page cannot be set
1126 * PageOffline() while frozen.
1127 *
1128 * page_offline_begin()/page_offline_end() is used by drivers that care about
1129 * such races when setting a page PageOffline().
1130 */
1131static DECLARE_RWSEM(page_offline_rwsem);
1132
1133void page_offline_freeze(void)
1134{
1135	down_read(&page_offline_rwsem);
1136}
1137
1138void page_offline_thaw(void)
1139{
1140	up_read(&page_offline_rwsem);
1141}
1142
1143void page_offline_begin(void)
1144{
1145	down_write(&page_offline_rwsem);
1146}
1147EXPORT_SYMBOL(page_offline_begin);
1148
1149void page_offline_end(void)
1150{
1151	up_write(&page_offline_rwsem);
1152}
1153EXPORT_SYMBOL(page_offline_end);
1154
1155#ifndef flush_dcache_folio
1156void flush_dcache_folio(struct folio *folio)
1157{
1158	long i, nr = folio_nr_pages(folio);
1159
1160	for (i = 0; i < nr; i++)
1161		flush_dcache_page(folio_page(folio, i));
1162}
1163EXPORT_SYMBOL(flush_dcache_folio);
1164#endif