  1#include <linux/mm.h>
  2#include <linux/slab.h>
  3#include <linux/string.h>
  4#include <linux/compiler.h>
  5#include <linux/export.h>
  6#include <linux/err.h>
  7#include <linux/sched.h>
  8#include <linux/sched/mm.h>
  9#include <linux/sched/task_stack.h>
 10#include <linux/security.h>
 11#include <linux/swap.h>
 12#include <linux/swapops.h>
 13#include <linux/mman.h>
 14#include <linux/hugetlb.h>
 15#include <linux/vmalloc.h>
 16#include <linux/userfaultfd_k.h>
 17
 18#include <asm/sections.h>
 19#include <linux/uaccess.h>
 20
 21#include "internal.h"
 22
 23static inline int is_kernel_rodata(unsigned long addr)
 24{
 25	return addr >= (unsigned long)__start_rodata &&
 26		addr < (unsigned long)__end_rodata;
 27}
 28
 29/**
 30 * kfree_const - conditionally free memory
 31 * @x: pointer to the memory
 32 *
 33 * This function calls kfree() only if @x is not in the .rodata section.
 34 */
 35void kfree_const(const void *x)
 36{
 37	if (!is_kernel_rodata((unsigned long)x))
 38		kfree(x);
 39}
 40EXPORT_SYMBOL(kfree_const);
 41
 42/**
 43 * kstrdup - allocate space for and copy an existing string
 44 * @s: the string to duplicate
 45 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 46 */
 47char *kstrdup(const char *s, gfp_t gfp)
 48{
 49	size_t len;
 50	char *buf;
 51
 52	if (!s)
 53		return NULL;
 54
 55	len = strlen(s) + 1;
 56	buf = kmalloc_track_caller(len, gfp);
 57	if (buf)
 58		memcpy(buf, s, len);
 59	return buf;
 60}
 61EXPORT_SYMBOL(kstrdup);
 62
 63/**
 64 * kstrdup_const - conditionally duplicate an existing const string
 65 * @s: the string to duplicate
 66 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 67 *
 68 * Returns the source string if it is in the .rodata section; otherwise it
 69 * falls back to kstrdup().
 70 * Strings allocated by kstrdup_const should be freed by kfree_const.
 71 */
 72const char *kstrdup_const(const char *s, gfp_t gfp)
 73{
 74	if (is_kernel_rodata((unsigned long)s))
 75		return s;
 76
 77	return kstrdup(s, gfp);
 78}
 79EXPORT_SYMBOL(kstrdup_const);
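/*
 * Editor's note: an illustrative sketch (not part of mm/util.c) of the
 * kstrdup_const()/kfree_const() pairing.  struct demo_attr and its helper are
 * hypothetical.  The point is that a name living in .rodata (e.g. a string
 * literal) is not copied, so the matching free must go through kfree_const().
 */
struct demo_attr {
	const char *name;	/* NULL or a previous kstrdup_const() result */
};

static int demo_attr_set_name(struct demo_attr *attr, const char *name)
{
	const char *n = kstrdup_const(name, GFP_KERNEL);

	if (!n)
		return -ENOMEM;
	kfree_const(attr->name);	/* safe for both .rodata and heap copies */
	attr->name = n;
	return 0;
}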
 80
 81/**
 82 * kstrndup - allocate space for and copy an existing string
 83 * @s: the string to duplicate
 84 * @max: read at most @max chars from @s
 85 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 86 *
 87 * Note: Use kmemdup_nul() instead if the size is known exactly.
 88 */
 89char *kstrndup(const char *s, size_t max, gfp_t gfp)
 90{
 91	size_t len;
 92	char *buf;
 93
 94	if (!s)
 95		return NULL;
 96
 97	len = strnlen(s, max);
 98	buf = kmalloc_track_caller(len+1, gfp);
 99	if (buf) {
100		memcpy(buf, s, len);
101		buf[len] = '\0';
102	}
103	return buf;
104}
105EXPORT_SYMBOL(kstrndup);
106
107/**
108 * kmemdup - duplicate region of memory
109 *
110 * @src: memory region to duplicate
111 * @len: memory region length
112 * @gfp: GFP mask to use
113 */
114void *kmemdup(const void *src, size_t len, gfp_t gfp)
115{
116	void *p;
117
118	p = kmalloc_track_caller(len, gfp);
119	if (p)
120		memcpy(p, src, len);
121	return p;
122}
123EXPORT_SYMBOL(kmemdup);
124
125/**
126 * kmemdup_nul - Create a NUL-terminated string from unterminated data
127 * @s: The data to stringify
128 * @len: The size of the data
129 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
130 */
131char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
132{
133	char *buf;
134
135	if (!s)
136		return NULL;
137
138	buf = kmalloc_track_caller(len + 1, gfp);
139	if (buf) {
140		memcpy(buf, s, len);
141		buf[len] = '\0';
142	}
143	return buf;
144}
145EXPORT_SYMBOL(kmemdup_nul);
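/*
 * Editor's note: a minimal sketch (not part of mm/util.c) of when
 * kmemdup_nul() is preferred over kstrndup(): the exact length of the source
 * data is already known, so the extra strnlen() pass is unnecessary.  The
 * parsing helper below is hypothetical.
 */
static char *demo_copy_token(const char *buf, size_t token_len)
{
	/* buf need not be NUL-terminated; only token_len bytes are valid */
	return kmemdup_nul(buf, token_len, GFP_KERNEL);	/* free with kfree() */
}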
146
147/**
148 * memdup_user - duplicate memory region from user space
149 *
150 * @src: source address in user space
151 * @len: number of bytes to copy
152 *
153 * Returns an ERR_PTR() on failure.  Result is physically
154 * contiguous, to be freed by kfree().
155 */
156void *memdup_user(const void __user *src, size_t len)
157{
158	void *p;
159
160	p = kmalloc_track_caller(len, GFP_USER);
161	if (!p)
162		return ERR_PTR(-ENOMEM);
163
164	if (copy_from_user(p, src, len)) {
165		kfree(p);
166		return ERR_PTR(-EFAULT);
167	}
168
169	return p;
170}
171EXPORT_SYMBOL(memdup_user);
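/*
 * Editor's note: an illustrative sketch (not part of mm/util.c) of a typical
 * memdup_user() caller, e.g. an ioctl handler copying in a fixed-size
 * argument block.  struct demo_args and demo_ioctl_set() are hypothetical.
 */
struct demo_args {
	__u64 addr;
	__u32 flags;
	__u32 pad;
};

static long demo_ioctl_set(const void __user *uptr)
{
	struct demo_args *args;

	args = memdup_user(uptr, sizeof(*args));
	if (IS_ERR(args))
		return PTR_ERR(args);	/* -ENOMEM or -EFAULT */

	/* ... validate and use args ... */

	kfree(args);			/* result is physically contiguous */
	return 0;
}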
172
173/**
174 * vmemdup_user - duplicate memory region from user space
175 *
176 * @src: source address in user space
177 * @len: number of bytes to copy
178 *
 179 * Returns an ERR_PTR() on failure.  Result may not be
 180 * physically contiguous.  Use kvfree() to free.
181 */
182void *vmemdup_user(const void __user *src, size_t len)
183{
184	void *p;
185
186	p = kvmalloc(len, GFP_USER);
187	if (!p)
188		return ERR_PTR(-ENOMEM);
189
190	if (copy_from_user(p, src, len)) {
191		kvfree(p);
192		return ERR_PTR(-EFAULT);
193	}
194
195	return p;
196}
197EXPORT_SYMBOL(vmemdup_user);
198
 199/**
200 * strndup_user - duplicate an existing string from user space
201 * @s: The string to duplicate
202 * @n: Maximum number of bytes to copy, including the trailing NUL.
203 */
204char *strndup_user(const char __user *s, long n)
205{
206	char *p;
207	long length;
208
209	length = strnlen_user(s, n);
210
211	if (!length)
212		return ERR_PTR(-EFAULT);
213
214	if (length > n)
215		return ERR_PTR(-EINVAL);
216
217	p = memdup_user(s, length);
218
219	if (IS_ERR(p))
220		return p;
221
222	p[length - 1] = '\0';
223
224	return p;
225}
226EXPORT_SYMBOL(strndup_user);
227
228/**
229 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
230 *
231 * @src: source address in user space
232 * @len: number of bytes to copy
233 *
234 * Returns an ERR_PTR() on failure.
235 */
236void *memdup_user_nul(const void __user *src, size_t len)
237{
238	char *p;
239
240	/*
241	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
 242	 * cause a page fault, which makes it pointless to use GFP_NOFS
243	 * or GFP_ATOMIC.
244	 */
245	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
246	if (!p)
247		return ERR_PTR(-ENOMEM);
248
249	if (copy_from_user(p, src, len)) {
250		kfree(p);
251		return ERR_PTR(-EFAULT);
252	}
253	p[len] = '\0';
254
255	return p;
256}
257EXPORT_SYMBOL(memdup_user_nul);
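/*
 * Editor's note: a minimal sketch (not part of mm/util.c) of the classic
 * memdup_user_nul() use case - a procfs/debugfs-style write handler that
 * wants the user buffer as a NUL-terminated string.  demo_write() is
 * hypothetical.
 */
static ssize_t demo_write(const char __user *ubuf, size_t count)
{
	char *kbuf;
	int val, ret;

	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	ret = kstrtoint(kbuf, 0, &val);	/* safe: kbuf is NUL-terminated */
	kfree(kbuf);
	return ret ? ret : count;
}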
258
259void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
260		struct vm_area_struct *prev, struct rb_node *rb_parent)
261{
262	struct vm_area_struct *next;
263
264	vma->vm_prev = prev;
265	if (prev) {
266		next = prev->vm_next;
267		prev->vm_next = vma;
268	} else {
269		mm->mmap = vma;
270		if (rb_parent)
271			next = rb_entry(rb_parent,
272					struct vm_area_struct, vm_rb);
273		else
274			next = NULL;
275	}
276	vma->vm_next = next;
277	if (next)
278		next->vm_prev = vma;
279}
280
281/* Check if the vma is being used as a stack by this task */
282int vma_is_stack_for_current(struct vm_area_struct *vma)
283{
284	struct task_struct * __maybe_unused t = current;
285
286	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
287}
288
289#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
290void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
291{
292	mm->mmap_base = TASK_UNMAPPED_BASE;
293	mm->get_unmapped_area = arch_get_unmapped_area;
294}
295#endif
296
297/*
 298 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 299 * back to the regular GUP.
 300 * Note a difference from get_user_pages_fast(): this always returns the
 301 * number of pages pinned, or 0 if no pages were pinned.
302 * If the architecture does not support this function, simply return with no
303 * pages pinned.
304 */
305int __weak __get_user_pages_fast(unsigned long start,
306				 int nr_pages, int write, struct page **pages)
307{
308	return 0;
309}
310EXPORT_SYMBOL_GPL(__get_user_pages_fast);
311
312/**
313 * get_user_pages_fast() - pin user pages in memory
314 * @start:	starting user address
315 * @nr_pages:	number of pages from start to pin
316 * @write:	whether pages will be written to
317 * @pages:	array that receives pointers to the pages pinned.
318 *		Should be at least nr_pages long.
319 *
320 * Returns number of pages pinned. This may be fewer than the number
321 * requested. If nr_pages is 0 or negative, returns 0. If no pages
322 * were pinned, returns -errno.
323 *
324 * get_user_pages_fast provides equivalent functionality to get_user_pages,
325 * operating on current and current->mm, with force=0 and vma=NULL. However
326 * unlike get_user_pages, it must be called without mmap_sem held.
327 *
328 * get_user_pages_fast may take mmap_sem and page table locks, so no
329 * assumptions can be made about lack of locking. get_user_pages_fast is to be
330 * implemented in a way that is advantageous (vs get_user_pages()) when the
331 * user memory area is already faulted in and present in ptes. However if the
332 * pages have to be faulted in, it may turn out to be slightly slower so
333 * callers need to carefully consider what to use. On many architectures,
334 * get_user_pages_fast simply falls back to get_user_pages.
335 */
336int __weak get_user_pages_fast(unsigned long start,
337				int nr_pages, int write, struct page **pages)
338{
339	return get_user_pages_unlocked(start, nr_pages, pages,
340				       write ? FOLL_WRITE : 0);
341}
342EXPORT_SYMBOL_GPL(get_user_pages_fast);
343
344unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
345	unsigned long len, unsigned long prot,
346	unsigned long flag, unsigned long pgoff)
347{
348	unsigned long ret;
349	struct mm_struct *mm = current->mm;
350	unsigned long populate;
351	LIST_HEAD(uf);
352
353	ret = security_mmap_file(file, prot, flag);
354	if (!ret) {
355		if (down_write_killable(&mm->mmap_sem))
356			return -EINTR;
357		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
358				    &populate, &uf);
359		up_write(&mm->mmap_sem);
360		userfaultfd_unmap_complete(mm, &uf);
361		if (populate)
362			mm_populate(ret, populate);
363	}
364	return ret;
365}
366
367unsigned long vm_mmap(struct file *file, unsigned long addr,
368	unsigned long len, unsigned long prot,
369	unsigned long flag, unsigned long offset)
370{
371	if (unlikely(offset + PAGE_ALIGN(len) < offset))
372		return -EINVAL;
373	if (unlikely(offset_in_page(offset)))
374		return -EINVAL;
375
376	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
377}
378EXPORT_SYMBOL(vm_mmap);
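/*
 * Editor's note: an illustrative sketch (not part of mm/util.c).  vm_mmap()
 * returns the mapped address as an unsigned long, so failures are detected
 * with IS_ERR_VALUE() rather than IS_ERR(), and @offset must be page-aligned
 * as the checks above enforce.  demo_map_file() is hypothetical.
 */
static int demo_map_file(struct file *file, unsigned long len,
			 unsigned long *out_addr)
{
	unsigned long addr;

	addr = vm_mmap(file, 0, len, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
	if (IS_ERR_VALUE(addr))
		return (int)addr;	/* negative errno encoded in the value */

	*out_addr = addr;
	return 0;
}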
379
380/**
381 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
382 * failure, fall back to non-contiguous (vmalloc) allocation.
383 * @size: size of the request.
384 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
385 * @node: numa node to allocate from
386 *
387 * Uses kmalloc to get the memory but if the allocation fails then falls back
388 * to the vmalloc allocator. Use kvfree for freeing the memory.
389 *
390 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
391 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
392 * preferable to the vmalloc fallback, due to visible performance drawbacks.
393 *
 394 * Any use of gfp flags outside of GFP_KERNEL should be discussed with the mm people first.
395 */
396void *kvmalloc_node(size_t size, gfp_t flags, int node)
397{
398	gfp_t kmalloc_flags = flags;
399	void *ret;
400
401	/*
402	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
403	 * so the given set of flags has to be compatible.
404	 */
405	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
406
407	/*
408	 * We want to attempt a large physically contiguous block first because
 409	 * it is less likely to fragment multiple larger blocks and therefore
 410	 * contributes less to long-term fragmentation than the vmalloc fallback.
 411	 * However, make sure that larger requests are not too disruptive - no
412	 * OOM killer and no allocation failure warnings as we have a fallback.
413	 */
414	if (size > PAGE_SIZE) {
415		kmalloc_flags |= __GFP_NOWARN;
416
417		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
418			kmalloc_flags |= __GFP_NORETRY;
419	}
420
421	ret = kmalloc_node(size, kmalloc_flags, node);
422
423	/*
 424	 * It doesn't really make sense to fall back to vmalloc for sub-page
 425	 * requests.
426	 */
427	if (ret || size <= PAGE_SIZE)
428		return ret;
429
430	return __vmalloc_node_flags_caller(size, node, flags,
431			__builtin_return_address(0));
432}
433EXPORT_SYMBOL(kvmalloc_node);
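/*
 * Editor's note: a minimal sketch (not part of mm/util.c) of the intended
 * usage pattern: a possibly large table is allocated with kvmalloc_array()
 * (which ends up in kvmalloc_node() with NUMA_NO_NODE) and always released
 * with kvfree(), since the memory may be either kmalloc- or vmalloc-backed.
 * The table type and helpers are hypothetical.
 */
static u64 *demo_alloc_table(size_t nr_entries)
{
	return kvmalloc_array(nr_entries, sizeof(u64), GFP_KERNEL);
}

static void demo_free_table(u64 *table)
{
	kvfree(table);	/* correct regardless of which allocator satisfied it */
}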
434
435void kvfree(const void *addr)
436{
437	if (is_vmalloc_addr(addr))
438		vfree(addr);
439	else
440		kfree(addr);
441}
442EXPORT_SYMBOL(kvfree);
443
444static inline void *__page_rmapping(struct page *page)
445{
446	unsigned long mapping;
447
448	mapping = (unsigned long)page->mapping;
449	mapping &= ~PAGE_MAPPING_FLAGS;
450
451	return (void *)mapping;
452}
453
454/* Neutral page->mapping pointer to address_space or anon_vma or other */
455void *page_rmapping(struct page *page)
456{
457	page = compound_head(page);
458	return __page_rmapping(page);
459}
460
461/*
462 * Return true if this page is mapped into pagetables.
463 * For compound page it returns true if any subpage of compound page is mapped.
464 */
465bool page_mapped(struct page *page)
466{
467	int i;
468
469	if (likely(!PageCompound(page)))
470		return atomic_read(&page->_mapcount) >= 0;
471	page = compound_head(page);
472	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
473		return true;
474	if (PageHuge(page))
475		return false;
476	for (i = 0; i < hpage_nr_pages(page); i++) {
477		if (atomic_read(&page[i]._mapcount) >= 0)
478			return true;
479	}
480	return false;
481}
482EXPORT_SYMBOL(page_mapped);
483
484struct anon_vma *page_anon_vma(struct page *page)
485{
486	unsigned long mapping;
487
488	page = compound_head(page);
489	mapping = (unsigned long)page->mapping;
490	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
491		return NULL;
492	return __page_rmapping(page);
493}
494
495struct address_space *page_mapping(struct page *page)
496{
497	struct address_space *mapping;
498
499	page = compound_head(page);
500
501	/* This happens if someone calls flush_dcache_page on slab page */
502	if (unlikely(PageSlab(page)))
503		return NULL;
504
505	if (unlikely(PageSwapCache(page))) {
506		swp_entry_t entry;
507
508		entry.val = page_private(page);
509		return swap_address_space(entry);
510	}
511
512	mapping = page->mapping;
513	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
514		return NULL;
515
516	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
517}
518EXPORT_SYMBOL(page_mapping);
519
520/*
521 * For file cache pages, return the address_space, otherwise return NULL
522 */
523struct address_space *page_mapping_file(struct page *page)
524{
525	if (unlikely(PageSwapCache(page)))
526		return NULL;
527	return page_mapping(page);
528}
529
530/* Slow path of page_mapcount() for compound pages */
531int __page_mapcount(struct page *page)
532{
533	int ret;
534
535	ret = atomic_read(&page->_mapcount) + 1;
536	/*
 537	 * For file THP, page->_mapcount contains the total number of mappings
 538	 * of the page: no need to look into compound_mapcount.
539	 */
540	if (!PageAnon(page) && !PageHuge(page))
541		return ret;
542	page = compound_head(page);
543	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
544	if (PageDoubleMap(page))
545		ret--;
546	return ret;
547}
548EXPORT_SYMBOL_GPL(__page_mapcount);
549
550int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
551int sysctl_overcommit_ratio __read_mostly = 50;
552unsigned long sysctl_overcommit_kbytes __read_mostly;
553int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
554unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
555unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
556
557int overcommit_ratio_handler(struct ctl_table *table, int write,
558			     void __user *buffer, size_t *lenp,
559			     loff_t *ppos)
560{
561	int ret;
562
563	ret = proc_dointvec(table, write, buffer, lenp, ppos);
564	if (ret == 0 && write)
565		sysctl_overcommit_kbytes = 0;
566	return ret;
567}
568
569int overcommit_kbytes_handler(struct ctl_table *table, int write,
570			     void __user *buffer, size_t *lenp,
571			     loff_t *ppos)
572{
573	int ret;
574
575	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
576	if (ret == 0 && write)
577		sysctl_overcommit_ratio = 0;
578	return ret;
579}
580
581/*
582 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
583 */
584unsigned long vm_commit_limit(void)
585{
586	unsigned long allowed;
587
588	if (sysctl_overcommit_kbytes)
589		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
590	else
591		allowed = ((totalram_pages - hugetlb_total_pages())
592			   * sysctl_overcommit_ratio / 100);
593	allowed += total_swap_pages;
594
595	return allowed;
596}
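/*
 * Editor's note: a worked example of the limit computed above (the numbers
 * are made up).  With vm.overcommit_kbytes == 0, vm.overcommit_ratio == 50,
 * 8 GiB of RAM, no hugetlb pages and 2 GiB of swap:
 *
 *	allowed = 8 GiB * 50 / 100 + 2 GiB = 6 GiB (expressed in pages)
 *
 * so under OVERCOMMIT_NEVER at most about 6 GiB may be committed system-wide.
 * A non-zero vm.overcommit_kbytes replaces the ratio-based term entirely.
 */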
597
598/*
 599 * Make sure vm_committed_as sits in its own cacheline and does not share
 600 * one with other variables, as it can be updated frequently by several CPUs.
601 */
602struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
603
604/*
605 * The global memory commitment made in the system can be a metric
606 * that can be used to drive ballooning decisions when Linux is hosted
607 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
608 * balancing memory across competing virtual machines that are hosted.
609 * Several metrics drive this policy engine including the guest reported
610 * memory commitment.
611 */
612unsigned long vm_memory_committed(void)
613{
614	return percpu_counter_read_positive(&vm_committed_as);
615}
616EXPORT_SYMBOL_GPL(vm_memory_committed);
617
618/*
619 * Check that a process has enough memory to allocate a new virtual
620 * mapping. 0 means there is enough memory for the allocation to
621 * succeed and -ENOMEM implies there is not.
622 *
623 * We currently support three overcommit policies, which are set via the
624 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
625 *
626 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
627 * Additional code 2002 Jul 20 by Robert Love.
628 *
629 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
630 *
631 * Note this is a helper function intended to be used by LSMs which
632 * wish to use this logic.
633 */
634int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
635{
636	long free, allowed, reserve;
637
638	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
639			-(s64)vm_committed_as_batch * num_online_cpus(),
640			"memory commitment underflow");
641
642	vm_acct_memory(pages);
643
644	/*
645	 * Sometimes we want to use more memory than we have
646	 */
647	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
648		return 0;
649
650	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
651		free = global_zone_page_state(NR_FREE_PAGES);
652		free += global_node_page_state(NR_FILE_PAGES);
653
654		/*
 655		 * shmem pages shouldn't be counted as free in this
 656		 * case: they can't be purged, only swapped out, and
657		 * that won't affect the overall amount of available
658		 * memory in the system.
659		 */
660		free -= global_node_page_state(NR_SHMEM);
661
662		free += get_nr_swap_pages();
663
664		/*
665		 * Any slabs which are created with the
666		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
667		 * which are reclaimable, under pressure.  The dentry
 668		 * cache and most inode caches should fall into this category.
669		 */
670		free += global_node_page_state(NR_SLAB_RECLAIMABLE);
671
672		/*
673		 * Part of the kernel memory, which can be released
674		 * under memory pressure.
675		 */
676		free += global_node_page_state(
677			NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;
678
679		/*
 680		 * Leave out reserved pages; they are not available for anonymous pages.
681		 */
682		if (free <= totalreserve_pages)
683			goto error;
684		else
685			free -= totalreserve_pages;
686
687		/*
688		 * Reserve some for root
689		 */
690		if (!cap_sys_admin)
691			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
692
693		if (free > pages)
694			return 0;
695
696		goto error;
697	}
698
699	allowed = vm_commit_limit();
700	/*
701	 * Reserve some for root
702	 */
703	if (!cap_sys_admin)
704		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
705
706	/*
707	 * Don't let a single process grow so big a user can't recover
708	 */
709	if (mm) {
710		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
711		allowed -= min_t(long, mm->total_vm / 32, reserve);
712	}
713
714	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
715		return 0;
716error:
717	vm_unacct_memory(pages);
718
719	return -ENOMEM;
720}
721
722/**
723 * get_cmdline() - copy the cmdline value to a buffer.
724 * @task:     the task whose cmdline value to copy.
725 * @buffer:   the buffer to copy to.
726 * @buflen:   the length of the buffer. Larger cmdline values are truncated
727 *            to this length.
728 * Returns the size of the cmdline field copied. Note that the copy does
729 * not guarantee an ending NULL byte.
730 */
731int get_cmdline(struct task_struct *task, char *buffer, int buflen)
732{
733	int res = 0;
734	unsigned int len;
735	struct mm_struct *mm = get_task_mm(task);
736	unsigned long arg_start, arg_end, env_start, env_end;
737	if (!mm)
738		goto out;
739	if (!mm->arg_end)
740		goto out_mm;	/* Shh! No looking before we're done */
741
742	down_read(&mm->mmap_sem);
743	arg_start = mm->arg_start;
744	arg_end = mm->arg_end;
745	env_start = mm->env_start;
746	env_end = mm->env_end;
747	up_read(&mm->mmap_sem);
748
749	len = arg_end - arg_start;
750
751	if (len > buflen)
752		len = buflen;
753
754	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
755
756	/*
757	 * If the nul at the end of args has been overwritten, then
 758	 * assume the application is using setproctitle(3).
759	 */
760	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
761		len = strnlen(buffer, res);
762		if (len < res) {
763			res = len;
764		} else {
765			len = env_end - env_start;
766			if (len > buflen - res)
767				len = buflen - res;
768			res += access_process_vm(task, env_start,
769						 buffer+res, len,
770						 FOLL_FORCE);
771			res = strnlen(buffer, res);
772		}
773	}
774out_mm:
775	mmput(mm);
776out:
777	return res;
778}
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/mm.h>
   3#include <linux/slab.h>
   4#include <linux/string.h>
   5#include <linux/compiler.h>
   6#include <linux/export.h>
   7#include <linux/err.h>
   8#include <linux/sched.h>
   9#include <linux/sched/mm.h>
  10#include <linux/sched/signal.h>
  11#include <linux/sched/task_stack.h>
  12#include <linux/security.h>
  13#include <linux/swap.h>
  14#include <linux/swapops.h>
  15#include <linux/mman.h>
  16#include <linux/hugetlb.h>
  17#include <linux/vmalloc.h>
  18#include <linux/userfaultfd_k.h>
  19#include <linux/elf.h>
  20#include <linux/elf-randomize.h>
  21#include <linux/personality.h>
  22#include <linux/random.h>
  23#include <linux/processor.h>
  24#include <linux/sizes.h>
  25#include <linux/compat.h>
  26
  27#include <linux/uaccess.h>
  28
  29#include "internal.h"
  30
  31/**
  32 * kfree_const - conditionally free memory
  33 * @x: pointer to the memory
  34 *
   35 * This function calls kfree() only if @x is not in the .rodata section.
  36 */
  37void kfree_const(const void *x)
  38{
  39	if (!is_kernel_rodata((unsigned long)x))
  40		kfree(x);
  41}
  42EXPORT_SYMBOL(kfree_const);
  43
  44/**
  45 * kstrdup - allocate space for and copy an existing string
  46 * @s: the string to duplicate
  47 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  48 *
  49 * Return: newly allocated copy of @s or %NULL in case of error
  50 */
  51char *kstrdup(const char *s, gfp_t gfp)
  52{
  53	size_t len;
  54	char *buf;
  55
  56	if (!s)
  57		return NULL;
  58
  59	len = strlen(s) + 1;
  60	buf = kmalloc_track_caller(len, gfp);
  61	if (buf)
  62		memcpy(buf, s, len);
  63	return buf;
  64}
  65EXPORT_SYMBOL(kstrdup);
  66
  67/**
  68 * kstrdup_const - conditionally duplicate an existing const string
  69 * @s: the string to duplicate
  70 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  71 *
  72 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
  73 * must not be passed to krealloc().
  74 *
   75 * Return: the source string if it is in the .rodata section; otherwise the
   76 * result of a kstrdup() call (%NULL on allocation failure).
  77 */
  78const char *kstrdup_const(const char *s, gfp_t gfp)
  79{
  80	if (is_kernel_rodata((unsigned long)s))
  81		return s;
  82
  83	return kstrdup(s, gfp);
  84}
  85EXPORT_SYMBOL(kstrdup_const);
  86
  87/**
  88 * kstrndup - allocate space for and copy an existing string
  89 * @s: the string to duplicate
  90 * @max: read at most @max chars from @s
  91 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
  92 *
  93 * Note: Use kmemdup_nul() instead if the size is known exactly.
  94 *
  95 * Return: newly allocated copy of @s or %NULL in case of error
  96 */
  97char *kstrndup(const char *s, size_t max, gfp_t gfp)
  98{
  99	size_t len;
 100	char *buf;
 101
 102	if (!s)
 103		return NULL;
 104
 105	len = strnlen(s, max);
 106	buf = kmalloc_track_caller(len+1, gfp);
 107	if (buf) {
 108		memcpy(buf, s, len);
 109		buf[len] = '\0';
 110	}
 111	return buf;
 112}
 113EXPORT_SYMBOL(kstrndup);
 114
 115/**
 116 * kmemdup - duplicate region of memory
 117 *
 118 * @src: memory region to duplicate
 119 * @len: memory region length
 120 * @gfp: GFP mask to use
 121 *
 122 * Return: newly allocated copy of @src or %NULL in case of error
 123 */
 124void *kmemdup(const void *src, size_t len, gfp_t gfp)
 125{
 126	void *p;
 127
 128	p = kmalloc_track_caller(len, gfp);
 129	if (p)
 130		memcpy(p, src, len);
 131	return p;
 132}
 133EXPORT_SYMBOL(kmemdup);
 134
 135/**
 136 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 137 * @s: The data to stringify
 138 * @len: The size of the data
 139 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 140 *
 141 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 142 * case of error
 143 */
 144char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
 145{
 146	char *buf;
 147
 148	if (!s)
 149		return NULL;
 150
 151	buf = kmalloc_track_caller(len + 1, gfp);
 152	if (buf) {
 153		memcpy(buf, s, len);
 154		buf[len] = '\0';
 155	}
 156	return buf;
 157}
 158EXPORT_SYMBOL(kmemdup_nul);
 159
 160/**
 161 * memdup_user - duplicate memory region from user space
 162 *
 163 * @src: source address in user space
 164 * @len: number of bytes to copy
 165 *
 166 * Return: an ERR_PTR() on failure.  Result is physically
 167 * contiguous, to be freed by kfree().
 168 */
 169void *memdup_user(const void __user *src, size_t len)
 170{
 171	void *p;
 172
 173	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
 174	if (!p)
 175		return ERR_PTR(-ENOMEM);
 176
 177	if (copy_from_user(p, src, len)) {
 178		kfree(p);
 179		return ERR_PTR(-EFAULT);
 180	}
 181
 182	return p;
 183}
 184EXPORT_SYMBOL(memdup_user);
 185
 186/**
 187 * vmemdup_user - duplicate memory region from user space
 188 *
 189 * @src: source address in user space
 190 * @len: number of bytes to copy
 191 *
  192 * Return: an ERR_PTR() on failure.  Result may not be
  193 * physically contiguous.  Use kvfree() to free.
 194 */
 195void *vmemdup_user(const void __user *src, size_t len)
 196{
 197	void *p;
 198
 199	p = kvmalloc(len, GFP_USER);
 200	if (!p)
 201		return ERR_PTR(-ENOMEM);
 202
 203	if (copy_from_user(p, src, len)) {
 204		kvfree(p);
 205		return ERR_PTR(-EFAULT);
 206	}
 207
 208	return p;
 209}
 210EXPORT_SYMBOL(vmemdup_user);
 211
 212/**
 213 * strndup_user - duplicate an existing string from user space
 214 * @s: The string to duplicate
 215 * @n: Maximum number of bytes to copy, including the trailing NUL.
 216 *
 217 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 218 */
 219char *strndup_user(const char __user *s, long n)
 220{
 221	char *p;
 222	long length;
 223
 224	length = strnlen_user(s, n);
 225
 226	if (!length)
 227		return ERR_PTR(-EFAULT);
 228
 229	if (length > n)
 230		return ERR_PTR(-EINVAL);
 231
 232	p = memdup_user(s, length);
 233
 234	if (IS_ERR(p))
 235		return p;
 236
 237	p[length - 1] = '\0';
 238
 239	return p;
 240}
 241EXPORT_SYMBOL(strndup_user);
 242
 243/**
 244 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 245 *
 246 * @src: source address in user space
 247 * @len: number of bytes to copy
 248 *
 249 * Return: an ERR_PTR() on failure.
 250 */
 251void *memdup_user_nul(const void __user *src, size_t len)
 252{
 253	char *p;
 254
 255	/*
 256	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
  257	 * cause a page fault, which makes it pointless to use GFP_NOFS
 258	 * or GFP_ATOMIC.
 259	 */
 260	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
 261	if (!p)
 262		return ERR_PTR(-ENOMEM);
 263
 264	if (copy_from_user(p, src, len)) {
 265		kfree(p);
 266		return ERR_PTR(-EFAULT);
 267	}
 268	p[len] = '\0';
 269
 270	return p;
 271}
 272EXPORT_SYMBOL(memdup_user_nul);
 273
 274void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 275		struct vm_area_struct *prev)
 276{
 277	struct vm_area_struct *next;
 278
 279	vma->vm_prev = prev;
 280	if (prev) {
 281		next = prev->vm_next;
 282		prev->vm_next = vma;
 283	} else {
 284		next = mm->mmap;
 285		mm->mmap = vma;
 286	}
 287	vma->vm_next = next;
 288	if (next)
 289		next->vm_prev = vma;
 290}
 291
 292void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
 293{
 294	struct vm_area_struct *prev, *next;
 295
 296	next = vma->vm_next;
 297	prev = vma->vm_prev;
 298	if (prev)
 299		prev->vm_next = next;
 300	else
 301		mm->mmap = next;
 302	if (next)
 303		next->vm_prev = prev;
 304}
 305
 306/* Check if the vma is being used as a stack by this task */
 307int vma_is_stack_for_current(struct vm_area_struct *vma)
 308{
 309	struct task_struct * __maybe_unused t = current;
 310
 311	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 312}
 313
 314/*
 315 * Change backing file, only valid to use during initial VMA setup.
 316 */
 317void vma_set_file(struct vm_area_struct *vma, struct file *file)
 318{
 319	/* Changing an anonymous vma with this is illegal */
 320	get_file(file);
 321	swap(vma->vm_file, file);
 322	fput(file);
 323}
 324EXPORT_SYMBOL(vma_set_file);
 325
 326#ifndef STACK_RND_MASK
 327#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
 328#endif
 329
 330unsigned long randomize_stack_top(unsigned long stack_top)
 331{
 332	unsigned long random_variable = 0;
 333
 334	if (current->flags & PF_RANDOMIZE) {
 335		random_variable = get_random_long();
 336		random_variable &= STACK_RND_MASK;
 337		random_variable <<= PAGE_SHIFT;
 338	}
 339#ifdef CONFIG_STACK_GROWSUP
 340	return PAGE_ALIGN(stack_top) + random_variable;
 341#else
 342	return PAGE_ALIGN(stack_top) - random_variable;
 343#endif
 344}
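/*
 * Editor's note: a worked example of the default mask above.  With 4 KiB
 * pages (PAGE_SHIFT == 12), STACK_RND_MASK is 0x7ff, so random_variable is a
 * page-aligned offset in the range [0, 0x7ff000], i.e. just under 8 MiB of
 * stack-top randomisation, applied downwards unless the stack grows up.
 */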
 345
 346#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 347unsigned long arch_randomize_brk(struct mm_struct *mm)
 348{
 349	/* Is the current task 32bit ? */
 350	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
 351		return randomize_page(mm->brk, SZ_32M);
 352
 353	return randomize_page(mm->brk, SZ_1G);
 354}
 355
 356unsigned long arch_mmap_rnd(void)
 357{
 358	unsigned long rnd;
 359
 360#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
 361	if (is_compat_task())
 362		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
 363	else
 364#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
 365		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
 366
 367	return rnd << PAGE_SHIFT;
 368}
 369
 370static int mmap_is_legacy(struct rlimit *rlim_stack)
 371{
 372	if (current->personality & ADDR_COMPAT_LAYOUT)
 373		return 1;
 374
 375	if (rlim_stack->rlim_cur == RLIM_INFINITY)
 376		return 1;
 377
 378	return sysctl_legacy_va_layout;
 379}
 380
 381/*
 382 * Leave enough space between the mmap area and the stack to honour ulimit in
 383 * the face of randomisation.
 384 */
 385#define MIN_GAP		(SZ_128M)
 386#define MAX_GAP		(STACK_TOP / 6 * 5)
 387
 388static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 389{
 390	unsigned long gap = rlim_stack->rlim_cur;
 391	unsigned long pad = stack_guard_gap;
 392
 393	/* Account for stack randomization if necessary */
 394	if (current->flags & PF_RANDOMIZE)
 395		pad += (STACK_RND_MASK << PAGE_SHIFT);
 396
 397	/* Values close to RLIM_INFINITY can overflow. */
 398	if (gap + pad > gap)
 399		gap += pad;
 400
 401	if (gap < MIN_GAP)
 402		gap = MIN_GAP;
 403	else if (gap > MAX_GAP)
 404		gap = MAX_GAP;
 405
 406	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 407}
 408
 409void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 410{
 411	unsigned long random_factor = 0UL;
 412
 413	if (current->flags & PF_RANDOMIZE)
 414		random_factor = arch_mmap_rnd();
 415
 416	if (mmap_is_legacy(rlim_stack)) {
 417		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 418		mm->get_unmapped_area = arch_get_unmapped_area;
 419	} else {
 420		mm->mmap_base = mmap_base(random_factor, rlim_stack);
 421		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 422	}
 423}
 424#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 425void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
 426{
 427	mm->mmap_base = TASK_UNMAPPED_BASE;
 428	mm->get_unmapped_area = arch_get_unmapped_area;
 429}
 430#endif
 431
 432/**
 433 * __account_locked_vm - account locked pages to an mm's locked_vm
 434 * @mm:          mm to account against
 435 * @pages:       number of pages to account
 436 * @inc:         %true if @pages should be considered positive, %false if not
 437 * @task:        task used to check RLIMIT_MEMLOCK
 438 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 439 *
 440 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 441 * that mmap_lock is held as writer.
 442 *
 443 * Return:
 444 * * 0       on success
 445 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 446 */
 447int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
 448			struct task_struct *task, bool bypass_rlim)
 449{
 450	unsigned long locked_vm, limit;
 451	int ret = 0;
 452
 453	mmap_assert_write_locked(mm);
 454
 455	locked_vm = mm->locked_vm;
 456	if (inc) {
 457		if (!bypass_rlim) {
 458			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 459			if (locked_vm + pages > limit)
 460				ret = -ENOMEM;
 461		}
 462		if (!ret)
 463			mm->locked_vm = locked_vm + pages;
 464	} else {
 465		WARN_ON_ONCE(pages > locked_vm);
 466		mm->locked_vm = locked_vm - pages;
 467	}
 468
 469	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
 470		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
 471		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
 472		 ret ? " - exceeded" : "");
 473
 474	return ret;
 475}
 476EXPORT_SYMBOL_GPL(__account_locked_vm);
 477
 478/**
 479 * account_locked_vm - account locked pages to an mm's locked_vm
 480 * @mm:          mm to account against, may be NULL
 481 * @pages:       number of pages to account
 482 * @inc:         %true if @pages should be considered positive, %false if not
 483 *
 484 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 485 *
 486 * Return:
 487 * * 0       on success, or if mm is NULL
 488 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 489 */
 490int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
 491{
 492	int ret;
 493
 494	if (pages == 0 || !mm)
 495		return 0;
 496
 497	mmap_write_lock(mm);
 498	ret = __account_locked_vm(mm, pages, inc, current,
 499				  capable(CAP_IPC_LOCK));
 500	mmap_write_unlock(mm);
 501
 502	return ret;
 503}
 504EXPORT_SYMBOL_GPL(account_locked_vm);
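/*
 * Editor's note: an illustrative sketch (not part of mm/util.c) of how a
 * driver that pins user pages might charge them against RLIMIT_MEMLOCK and
 * later uncharge them.  demo_pin()/demo_unpin() and the page count are
 * hypothetical.
 */
static int demo_pin(unsigned long npages)
{
	int ret;

	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin the pages, e.g. with pin_user_pages() ... */
	return 0;
}

static void demo_unpin(unsigned long npages)
{
	/* ... unpin the pages first ... */
	account_locked_vm(current->mm, npages, false);
}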
 505
 506unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 507	unsigned long len, unsigned long prot,
 508	unsigned long flag, unsigned long pgoff)
 509{
 510	unsigned long ret;
 511	struct mm_struct *mm = current->mm;
 512	unsigned long populate;
 513	LIST_HEAD(uf);
 514
 515	ret = security_mmap_file(file, prot, flag);
 516	if (!ret) {
 517		if (mmap_write_lock_killable(mm))
 518			return -EINTR;
 519		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
 520			      &uf);
 521		mmap_write_unlock(mm);
 522		userfaultfd_unmap_complete(mm, &uf);
 523		if (populate)
 524			mm_populate(ret, populate);
 525	}
 526	return ret;
 527}
 528
 529unsigned long vm_mmap(struct file *file, unsigned long addr,
 530	unsigned long len, unsigned long prot,
 531	unsigned long flag, unsigned long offset)
 532{
 533	if (unlikely(offset + PAGE_ALIGN(len) < offset))
 534		return -EINVAL;
 535	if (unlikely(offset_in_page(offset)))
 536		return -EINVAL;
 537
 538	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 539}
 540EXPORT_SYMBOL(vm_mmap);
 541
 542/**
 543 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 544 * failure, fall back to non-contiguous (vmalloc) allocation.
 545 * @size: size of the request.
 546 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 547 * @node: numa node to allocate from
 548 *
 549 * Uses kmalloc to get the memory but if the allocation fails then falls back
 550 * to the vmalloc allocator. Use kvfree for freeing the memory.
 551 *
 552 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 553 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 554 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 555 *
  556 * Please note that for gfp flags that are not compatible with GFP_KERNEL the
  557 * allocation does not fall back to vmalloc; plain kmalloc is used instead.
 558 *
  559 * Return: pointer to the allocated memory or %NULL in case of failure
 560 */
 561void *kvmalloc_node(size_t size, gfp_t flags, int node)
 562{
 563	gfp_t kmalloc_flags = flags;
 564	void *ret;
 565
 566	/*
 567	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
 568	 * so the given set of flags has to be compatible.
 569	 */
 570	if ((flags & GFP_KERNEL) != GFP_KERNEL)
 571		return kmalloc_node(size, flags, node);
 572
 573	/*
 574	 * We want to attempt a large physically contiguous block first because
  575	 * it is less likely to fragment multiple larger blocks and therefore
  576	 * contributes less to long-term fragmentation than the vmalloc fallback.
  577	 * However, make sure that larger requests are not too disruptive - no
 578	 * OOM killer and no allocation failure warnings as we have a fallback.
 579	 */
 580	if (size > PAGE_SIZE) {
 581		kmalloc_flags |= __GFP_NOWARN;
 582
 583		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
 584			kmalloc_flags |= __GFP_NORETRY;
 585	}
 586
 587	ret = kmalloc_node(size, kmalloc_flags, node);
 588
 589	/*
  590	 * It doesn't really make sense to fall back to vmalloc for sub-page
  591	 * requests.
 592	 */
 593	if (ret || size <= PAGE_SIZE)
 594		return ret;
 595
 596	/* Don't even allow crazy sizes */
 597	if (WARN_ON_ONCE(size > INT_MAX))
 598		return NULL;
 599
 600	return __vmalloc_node(size, 1, flags, node,
 601			__builtin_return_address(0));
 602}
 603EXPORT_SYMBOL(kvmalloc_node);
 604
 605/**
 606 * kvfree() - Free memory.
 607 * @addr: Pointer to allocated memory.
 608 *
 609 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 610 * It is slightly more efficient to use kfree() or vfree() if you are certain
 611 * that you know which one to use.
 612 *
 613 * Context: Either preemptible task context or not-NMI interrupt.
 614 */
 615void kvfree(const void *addr)
 616{
 617	if (is_vmalloc_addr(addr))
 618		vfree(addr);
 619	else
 620		kfree(addr);
 621}
 622EXPORT_SYMBOL(kvfree);
 623
 624/**
 625 * kvfree_sensitive - Free a data object containing sensitive information.
 626 * @addr: address of the data object to be freed.
 627 * @len: length of the data object.
 628 *
 629 * Use the special memzero_explicit() function to clear the content of a
 630 * kvmalloc'ed object containing sensitive data to make sure that the
 631 * compiler won't optimize out the data clearing.
 632 */
 633void kvfree_sensitive(const void *addr, size_t len)
 634{
 635	if (likely(!ZERO_OR_NULL_PTR(addr))) {
 636		memzero_explicit((void *)addr, len);
 637		kvfree(addr);
 638	}
 639}
 640EXPORT_SYMBOL(kvfree_sensitive);
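/*
 * Editor's note: a minimal sketch (not part of mm/util.c) of the intended
 * pairing: key material is duplicated with kvmalloc() and released with
 * kvfree_sensitive(), so the buffer is zeroed with memzero_explicit() before
 * it is freed.  The "secret" helpers below are hypothetical.
 */
static void *demo_copy_secret(const void *src, size_t len)
{
	void *secret = kvmalloc(len, GFP_KERNEL);

	if (secret)
		memcpy(secret, src, len);
	return secret;
}

static void demo_drop_secret(void *secret, size_t len)
{
	kvfree_sensitive(secret, len);	/* zeroes the buffer before freeing */
}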
 641
 642static inline void *__page_rmapping(struct page *page)
 643{
 644	unsigned long mapping;
 645
 646	mapping = (unsigned long)page->mapping;
 647	mapping &= ~PAGE_MAPPING_FLAGS;
 648
 649	return (void *)mapping;
 650}
 651
 652/* Neutral page->mapping pointer to address_space or anon_vma or other */
 653void *page_rmapping(struct page *page)
 654{
 655	page = compound_head(page);
 656	return __page_rmapping(page);
 657}
 658
 659/*
 660 * Return true if this page is mapped into pagetables.
 661 * For compound page it returns true if any subpage of compound page is mapped.
 662 */
 663bool page_mapped(struct page *page)
 664{
 665	int i;
 666
 667	if (likely(!PageCompound(page)))
 668		return atomic_read(&page->_mapcount) >= 0;
 669	page = compound_head(page);
 670	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
 671		return true;
 672	if (PageHuge(page))
 673		return false;
 674	for (i = 0; i < compound_nr(page); i++) {
 675		if (atomic_read(&page[i]._mapcount) >= 0)
 676			return true;
 677	}
 678	return false;
 679}
 680EXPORT_SYMBOL(page_mapped);
 681
 682struct anon_vma *page_anon_vma(struct page *page)
 683{
 684	unsigned long mapping;
 685
 686	page = compound_head(page);
 687	mapping = (unsigned long)page->mapping;
 688	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
 689		return NULL;
 690	return __page_rmapping(page);
 691}
 692
 693struct address_space *page_mapping(struct page *page)
 694{
 695	struct address_space *mapping;
 696
 697	page = compound_head(page);
 698
 699	/* This happens if someone calls flush_dcache_page on slab page */
 700	if (unlikely(PageSlab(page)))
 701		return NULL;
 702
 703	if (unlikely(PageSwapCache(page))) {
 704		swp_entry_t entry;
 705
 706		entry.val = page_private(page);
 707		return swap_address_space(entry);
 708	}
 709
 710	mapping = page->mapping;
 711	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 712		return NULL;
 713
 714	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
 715}
 716EXPORT_SYMBOL(page_mapping);
 717
 718/* Slow path of page_mapcount() for compound pages */
 719int __page_mapcount(struct page *page)
 720{
 721	int ret;
 722
 723	ret = atomic_read(&page->_mapcount) + 1;
 724	/*
  725	 * For file THP, page->_mapcount contains the total number of mappings
  726	 * of the page: no need to look into compound_mapcount.
 727	 */
 728	if (!PageAnon(page) && !PageHuge(page))
 729		return ret;
 730	page = compound_head(page);
 731	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
 732	if (PageDoubleMap(page))
 733		ret--;
 734	return ret;
 735}
 736EXPORT_SYMBOL_GPL(__page_mapcount);
 737
 738void copy_huge_page(struct page *dst, struct page *src)
 739{
 740	unsigned i, nr = compound_nr(src);
 741
 742	for (i = 0; i < nr; i++) {
 743		cond_resched();
 744		copy_highpage(nth_page(dst, i), nth_page(src, i));
 745	}
 746}
 747
 748int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 749int sysctl_overcommit_ratio __read_mostly = 50;
 750unsigned long sysctl_overcommit_kbytes __read_mostly;
 751int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 752unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 753unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 754
 755int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
 756		size_t *lenp, loff_t *ppos)
 757{
 758	int ret;
 759
 760	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 761	if (ret == 0 && write)
 762		sysctl_overcommit_kbytes = 0;
 763	return ret;
 764}
 765
 766static void sync_overcommit_as(struct work_struct *dummy)
 767{
 768	percpu_counter_sync(&vm_committed_as);
 769}
 770
 771int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 772		size_t *lenp, loff_t *ppos)
 773{
 774	struct ctl_table t;
 775	int new_policy = -1;
 776	int ret;
 777
 778	/*
  779	 * The deviation of the vm_committed_as percpu counter can be big with a
  780	 * loose policy like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When switching to
  781	 * the strict OVERCOMMIT_NEVER policy, we need to reduce that deviation to
  782	 * comply with the strict "NEVER" accounting and to avoid a possible race
  783	 * (even though users rarely switch to OVERCOMMIT_NEVER), so the switch is
  784	 * done in the following order:
 785	 *	1. changing the batch
 786	 *	2. sync percpu count on each CPU
 787	 *	3. switch the policy
 788	 */
 789	if (write) {
 790		t = *table;
 791		t.data = &new_policy;
 792		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
 793		if (ret || new_policy == -1)
 794			return ret;
 795
 796		mm_compute_batch(new_policy);
 797		if (new_policy == OVERCOMMIT_NEVER)
 798			schedule_on_each_cpu(sync_overcommit_as);
 799		sysctl_overcommit_memory = new_policy;
 800	} else {
 801		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 802	}
 803
 804	return ret;
 805}
 806
 807int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
 808		size_t *lenp, loff_t *ppos)
 809{
 810	int ret;
 811
 812	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 813	if (ret == 0 && write)
 814		sysctl_overcommit_ratio = 0;
 815	return ret;
 816}
 817
 818/*
 819 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 820 */
 821unsigned long vm_commit_limit(void)
 822{
 823	unsigned long allowed;
 824
 825	if (sysctl_overcommit_kbytes)
 826		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
 827	else
 828		allowed = ((totalram_pages() - hugetlb_total_pages())
 829			   * sysctl_overcommit_ratio / 100);
 830	allowed += total_swap_pages;
 831
 832	return allowed;
 833}
 834
 835/*
  836 * Make sure vm_committed_as sits in its own cacheline and does not share
  837 * one with other variables, as it can be updated frequently by several CPUs.
 838 */
 839struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
 840
 841/*
 842 * The global memory commitment made in the system can be a metric
 843 * that can be used to drive ballooning decisions when Linux is hosted
 844 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 845 * balancing memory across competing virtual machines that are hosted.
 846 * Several metrics drive this policy engine including the guest reported
 847 * memory commitment.
 848 *
  849 * The time cost of this is very low for small platforms; for a big
  850 * platform like a 2S/36C/72T Skylake server, in the worst case where
  851 * vm_committed_as's spinlock is under severe contention, the time cost
  852 * could be about 30-40 microseconds.
 853 */
 854unsigned long vm_memory_committed(void)
 855{
 856	return percpu_counter_sum_positive(&vm_committed_as);
 857}
 858EXPORT_SYMBOL_GPL(vm_memory_committed);
 859
 860/*
 861 * Check that a process has enough memory to allocate a new virtual
 862 * mapping. 0 means there is enough memory for the allocation to
 863 * succeed and -ENOMEM implies there is not.
 864 *
 865 * We currently support three overcommit policies, which are set via the
 866 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 867 *
 868 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 869 * Additional code 2002 Jul 20 by Robert Love.
 870 *
 871 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 872 *
 873 * Note this is a helper function intended to be used by LSMs which
 874 * wish to use this logic.
 875 */
 876int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
 877{
 878	long allowed;
 879
 880	vm_acct_memory(pages);
 881
 882	/*
 883	 * Sometimes we want to use more memory than we have
 884	 */
 885	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
 886		return 0;
 887
 888	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
 889		if (pages > totalram_pages() + total_swap_pages)
 890			goto error;
 891		return 0;
 892	}
 893
 894	allowed = vm_commit_limit();
 895	/*
 896	 * Reserve some for root
 897	 */
 898	if (!cap_sys_admin)
 899		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
 900
 901	/*
 902	 * Don't let a single process grow so big a user can't recover
 903	 */
 904	if (mm) {
 905		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
 906
 907		allowed -= min_t(long, mm->total_vm / 32, reserve);
 908	}
 909
 910	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
 911		return 0;
 912error:
 913	vm_unacct_memory(pages);
 914
 915	return -ENOMEM;
 916}
 917
 918/**
 919 * get_cmdline() - copy the cmdline value to a buffer.
 920 * @task:     the task whose cmdline value to copy.
 921 * @buffer:   the buffer to copy to.
 922 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 923 *            to this length.
 924 *
 925 * Return: the size of the cmdline field copied. Note that the copy does
 926 * not guarantee an ending NULL byte.
 927 */
 928int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 929{
 930	int res = 0;
 931	unsigned int len;
 932	struct mm_struct *mm = get_task_mm(task);
 933	unsigned long arg_start, arg_end, env_start, env_end;
 934	if (!mm)
 935		goto out;
 936	if (!mm->arg_end)
 937		goto out_mm;	/* Shh! No looking before we're done */
 938
 939	spin_lock(&mm->arg_lock);
 940	arg_start = mm->arg_start;
 941	arg_end = mm->arg_end;
 942	env_start = mm->env_start;
 943	env_end = mm->env_end;
 944	spin_unlock(&mm->arg_lock);
 945
 946	len = arg_end - arg_start;
 947
 948	if (len > buflen)
 949		len = buflen;
 950
 951	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
 952
 953	/*
 954	 * If the nul at the end of args has been overwritten, then
  955	 * assume the application is using setproctitle(3).
 956	 */
 957	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
 958		len = strnlen(buffer, res);
 959		if (len < res) {
 960			res = len;
 961		} else {
 962			len = env_end - env_start;
 963			if (len > buflen - res)
 964				len = buflen - res;
 965			res += access_process_vm(task, env_start,
 966						 buffer+res, len,
 967						 FOLL_FORCE);
 968			res = strnlen(buffer, res);
 969		}
 970	}
 971out_mm:
 972	mmput(mm);
 973out:
 974	return res;
 975}
 976
 977int __weak memcmp_pages(struct page *page1, struct page *page2)
 978{
 979	char *addr1, *addr2;
 980	int ret;
 981
 982	addr1 = kmap_atomic(page1);
 983	addr2 = kmap_atomic(page2);
 984	ret = memcmp(addr1, addr2, PAGE_SIZE);
 985	kunmap_atomic(addr2);
 986	kunmap_atomic(addr1);
 987	return ret;
 988}
 989
 990#ifdef CONFIG_PRINTK
 991/**
 992 * mem_dump_obj - Print available provenance information
 993 * @object: object for which to find provenance information.
 994 *
 995 * This function uses pr_cont(), so that the caller is expected to have
 996 * printed out whatever preamble is appropriate.  The provenance information
 997 * depends on the type of object and on how much debugging is enabled.
 998 * For example, for a slab-cache object, the slab name is printed, and,
 999 * if available, the return address and stack trace from the allocation
1000 * and last free path of that object.
1001 */
1002void mem_dump_obj(void *object)
1003{
1004	const char *type;
1005
1006	if (kmem_valid_obj(object)) {
1007		kmem_dump_obj(object);
1008		return;
1009	}
1010
1011	if (vmalloc_dump_obj(object))
1012		return;
1013
1014	if (virt_addr_valid(object))
1015		type = "non-slab/vmalloc memory";
1016	else if (object == NULL)
1017		type = "NULL pointer";
1018	else if (object == ZERO_SIZE_PTR)
1019		type = "zero-size pointer";
1020	else
1021		type = "non-paged memory";
1022
1023	pr_cont(" %s\n", type);
1024}
1025EXPORT_SYMBOL_GPL(mem_dump_obj);
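/*
 * Editor's note: an illustrative sketch (not part of mm/util.c).  As the
 * comment above says, mem_dump_obj() continues the current log line with
 * pr_cont(), so the caller prints a preamble without a trailing newline.
 * demo_report_object() is hypothetical.
 */
static void demo_report_object(void *obj)
{
	pr_info("suspect object %px:", obj);
	mem_dump_obj(obj);	/* appends slab/vmalloc provenance, if known */
}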
1026#endif
1027
1028/*
1029 * A driver might set a page logically offline -- PageOffline() -- and
 1030 * make the page inaccessible in the hypervisor; after that, access to the page
1031 * content can be fatal.
1032 *
1033 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
1034 * pages after checking PageOffline(); however, these PFN walkers can race
1035 * with drivers that set PageOffline().
1036 *
1037 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
1038 * synchronize with such drivers, achieving that a page cannot be set
1039 * PageOffline() while frozen.
1040 *
1041 * page_offline_begin()/page_offline_end() is used by drivers that care about
1042 * such races when setting a page PageOffline().
1043 */
1044static DECLARE_RWSEM(page_offline_rwsem);
1045
1046void page_offline_freeze(void)
1047{
1048	down_read(&page_offline_rwsem);
1049}
1050
1051void page_offline_thaw(void)
1052{
1053	up_read(&page_offline_rwsem);
1054}
1055
1056void page_offline_begin(void)
1057{
1058	down_write(&page_offline_rwsem);
1059}
1060EXPORT_SYMBOL(page_offline_begin);
1061
1062void page_offline_end(void)
1063{
1064	up_write(&page_offline_rwsem);
1065}
1066EXPORT_SYMBOL(page_offline_end);
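/*
 * Editor's note: a minimal sketch (not part of mm/util.c) of the pairing
 * described above - a PFN walker freezes PageOffline() transitions while it
 * decides whether a page's content may be read, so it cannot race with a
 * driver that is concurrently marking pages offline.  demo_pfn_readable() is
 * hypothetical.
 */
static bool demo_pfn_readable(struct page *page)
{
	bool readable;

	page_offline_freeze();		/* blocks page_offline_begin() callers */
	readable = !PageOffline(page);
	/* ... read the page content here, while still frozen ... */
	page_offline_thaw();

	return readable;
}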