Linux Audio

Check our new training course

Loading...
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include <linux/mm.h>
  3#include <linux/slab.h>
  4#include <linux/string.h>
  5#include <linux/compiler.h>
  6#include <linux/export.h>
  7#include <linux/err.h>
  8#include <linux/sched.h>
  9#include <linux/sched/mm.h>
 10#include <linux/sched/signal.h>
 11#include <linux/sched/task_stack.h>
 12#include <linux/security.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/mman.h>
 16#include <linux/hugetlb.h>
 17#include <linux/vmalloc.h>
 18#include <linux/userfaultfd_k.h>
 19#include <linux/elf.h>
 20#include <linux/elf-randomize.h>
 21#include <linux/personality.h>
 22#include <linux/random.h>
 23#include <linux/processor.h>
 24#include <linux/sizes.h>
 25#include <linux/compat.h>
 26
 27#include <linux/uaccess.h>
 
 28
 29#include "internal.h"
 30
 
 
 
 
 
 
 31/**
 32 * kfree_const - conditionally free memory
 33 * @x: pointer to the memory
 34 *
 35 * Function calls kfree only if @x is not in .rodata section.
 36 */
 37void kfree_const(const void *x)
 38{
 39	if (!is_kernel_rodata((unsigned long)x))
 40		kfree(x);
 41}
 42EXPORT_SYMBOL(kfree_const);
 43
 44/**
 45 * kstrdup - allocate space for and copy an existing string
 46 * @s: the string to duplicate
 47 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 48 *
 49 * Return: newly allocated copy of @s or %NULL in case of error
 50 */
 51char *kstrdup(const char *s, gfp_t gfp)
 52{
 53	size_t len;
 54	char *buf;
 55
 56	if (!s)
 57		return NULL;
 58
 59	len = strlen(s) + 1;
 60	buf = kmalloc_track_caller(len, gfp);
 61	if (buf)
 62		memcpy(buf, s, len);
 63	return buf;
 64}
 65EXPORT_SYMBOL(kstrdup);
 66
 67/**
 68 * kstrdup_const - conditionally duplicate an existing const string
 69 * @s: the string to duplicate
 70 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 71 *
 72 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 73 *
 74 * Return: source string if it is in .rodata section otherwise
 75 * fallback to kstrdup.
 76 */
 77const char *kstrdup_const(const char *s, gfp_t gfp)
 78{
 79	if (is_kernel_rodata((unsigned long)s))
 80		return s;
 81
 82	return kstrdup(s, gfp);
 83}
 84EXPORT_SYMBOL(kstrdup_const);
 85
 86/**
 87 * kstrndup - allocate space for and copy an existing string
 88 * @s: the string to duplicate
 89 * @max: read at most @max chars from @s
 90 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 91 *
 92 * Note: Use kmemdup_nul() instead if the size is known exactly.
 93 *
 94 * Return: newly allocated copy of @s or %NULL in case of error
 95 */
 96char *kstrndup(const char *s, size_t max, gfp_t gfp)
 97{
 98	size_t len;
 99	char *buf;
100
101	if (!s)
102		return NULL;
103
104	len = strnlen(s, max);
105	buf = kmalloc_track_caller(len+1, gfp);
106	if (buf) {
107		memcpy(buf, s, len);
108		buf[len] = '\0';
109	}
110	return buf;
111}
112EXPORT_SYMBOL(kstrndup);
113
114/**
115 * kmemdup - duplicate region of memory
116 *
117 * @src: memory region to duplicate
118 * @len: memory region length
119 * @gfp: GFP mask to use
120 *
121 * Return: newly allocated copy of @src or %NULL in case of error
122 */
123void *kmemdup(const void *src, size_t len, gfp_t gfp)
124{
125	void *p;
126
127	p = kmalloc_track_caller(len, gfp);
128	if (p)
129		memcpy(p, src, len);
130	return p;
131}
132EXPORT_SYMBOL(kmemdup);
133
134/**
135 * kmemdup_nul - Create a NUL-terminated string from unterminated data
136 * @s: The data to stringify
137 * @len: The size of the data
138 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
139 *
140 * Return: newly allocated copy of @s with NUL-termination or %NULL in
141 * case of error
142 */
143char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
144{
145	char *buf;
146
147	if (!s)
148		return NULL;
149
150	buf = kmalloc_track_caller(len + 1, gfp);
151	if (buf) {
152		memcpy(buf, s, len);
153		buf[len] = '\0';
154	}
155	return buf;
156}
157EXPORT_SYMBOL(kmemdup_nul);
158
159/**
160 * memdup_user - duplicate memory region from user space
161 *
162 * @src: source address in user space
163 * @len: number of bytes to copy
164 *
165 * Return: an ERR_PTR() on failure.  Result is physically
166 * contiguous, to be freed by kfree().
167 */
168void *memdup_user(const void __user *src, size_t len)
169{
170	void *p;
171
172	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
 
 
 
 
 
173	if (!p)
174		return ERR_PTR(-ENOMEM);
175
176	if (copy_from_user(p, src, len)) {
177		kfree(p);
178		return ERR_PTR(-EFAULT);
179	}
180
181	return p;
182}
183EXPORT_SYMBOL(memdup_user);
184
185/**
186 * vmemdup_user - duplicate memory region from user space
187 *
188 * @src: source address in user space
189 * @len: number of bytes to copy
190 *
191 * Return: an ERR_PTR() on failure.  Result may be not
192 * physically contiguous.  Use kvfree() to free.
193 */
194void *vmemdup_user(const void __user *src, size_t len)
195{
196	void *p;
197
198	p = kvmalloc(len, GFP_USER);
199	if (!p)
200		return ERR_PTR(-ENOMEM);
201
202	if (copy_from_user(p, src, len)) {
203		kvfree(p);
204		return ERR_PTR(-EFAULT);
205	}
206
207	return p;
208}
209EXPORT_SYMBOL(vmemdup_user);
210
211/**
212 * strndup_user - duplicate an existing string from user space
213 * @s: The string to duplicate
214 * @n: Maximum number of bytes to copy, including the trailing NUL.
215 *
216 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
217 */
218char *strndup_user(const char __user *s, long n)
219{
220	char *p;
221	long length;
222
223	length = strnlen_user(s, n);
224
225	if (!length)
226		return ERR_PTR(-EFAULT);
227
228	if (length > n)
229		return ERR_PTR(-EINVAL);
230
231	p = memdup_user(s, length);
232
233	if (IS_ERR(p))
234		return p;
235
236	p[length - 1] = '\0';
237
238	return p;
239}
240EXPORT_SYMBOL(strndup_user);
241
242/**
243 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
244 *
245 * @src: source address in user space
246 * @len: number of bytes to copy
247 *
248 * Return: an ERR_PTR() on failure.
249 */
250void *memdup_user_nul(const void __user *src, size_t len)
251{
252	char *p;
253
254	/*
255	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
256	 * cause pagefault, which makes it pointless to use GFP_NOFS
257	 * or GFP_ATOMIC.
258	 */
259	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
260	if (!p)
261		return ERR_PTR(-ENOMEM);
262
263	if (copy_from_user(p, src, len)) {
264		kfree(p);
265		return ERR_PTR(-EFAULT);
266	}
267	p[len] = '\0';
268
269	return p;
270}
271EXPORT_SYMBOL(memdup_user_nul);
272
273void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
274		struct vm_area_struct *prev)
275{
276	struct vm_area_struct *next;
277
278	vma->vm_prev = prev;
279	if (prev) {
280		next = prev->vm_next;
281		prev->vm_next = vma;
282	} else {
283		next = mm->mmap;
284		mm->mmap = vma;
 
 
 
 
 
285	}
286	vma->vm_next = next;
287	if (next)
288		next->vm_prev = vma;
289}
290
291void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
292{
293	struct vm_area_struct *prev, *next;
294
295	next = vma->vm_next;
296	prev = vma->vm_prev;
297	if (prev)
298		prev->vm_next = next;
299	else
300		mm->mmap = next;
301	if (next)
302		next->vm_prev = prev;
303}
304
305/* Check if the vma is being used as a stack by this task */
306int vma_is_stack_for_current(struct vm_area_struct *vma)
307{
308	struct task_struct * __maybe_unused t = current;
309
310	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
311}
312
313#ifndef STACK_RND_MASK
314#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
315#endif
316
317unsigned long randomize_stack_top(unsigned long stack_top)
318{
319	unsigned long random_variable = 0;
320
321	if (current->flags & PF_RANDOMIZE) {
322		random_variable = get_random_long();
323		random_variable &= STACK_RND_MASK;
324		random_variable <<= PAGE_SHIFT;
325	}
326#ifdef CONFIG_STACK_GROWSUP
327	return PAGE_ALIGN(stack_top) + random_variable;
328#else
329	return PAGE_ALIGN(stack_top) - random_variable;
330#endif
331}
332
333#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
334unsigned long arch_randomize_brk(struct mm_struct *mm)
335{
336	/* Is the current task 32bit ? */
337	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
338		return randomize_page(mm->brk, SZ_32M);
339
340	return randomize_page(mm->brk, SZ_1G);
341}
342
343unsigned long arch_mmap_rnd(void)
344{
345	unsigned long rnd;
346
347#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
348	if (is_compat_task())
349		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
350	else
351#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
352		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
353
354	return rnd << PAGE_SHIFT;
355}
356
357static int mmap_is_legacy(struct rlimit *rlim_stack)
358{
359	if (current->personality & ADDR_COMPAT_LAYOUT)
360		return 1;
361
362	if (rlim_stack->rlim_cur == RLIM_INFINITY)
363		return 1;
364
365	return sysctl_legacy_va_layout;
366}
367
368/*
369 * Leave enough space between the mmap area and the stack to honour ulimit in
370 * the face of randomisation.
371 */
372#define MIN_GAP		(SZ_128M)
373#define MAX_GAP		(STACK_TOP / 6 * 5)
374
375static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
376{
377	unsigned long gap = rlim_stack->rlim_cur;
378	unsigned long pad = stack_guard_gap;
379
380	/* Account for stack randomization if necessary */
381	if (current->flags & PF_RANDOMIZE)
382		pad += (STACK_RND_MASK << PAGE_SHIFT);
383
384	/* Values close to RLIM_INFINITY can overflow. */
385	if (gap + pad > gap)
386		gap += pad;
387
388	if (gap < MIN_GAP)
389		gap = MIN_GAP;
390	else if (gap > MAX_GAP)
391		gap = MAX_GAP;
392
393	return PAGE_ALIGN(STACK_TOP - gap - rnd);
394}
395
396void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
397{
398	unsigned long random_factor = 0UL;
399
400	if (current->flags & PF_RANDOMIZE)
401		random_factor = arch_mmap_rnd();
402
403	if (mmap_is_legacy(rlim_stack)) {
404		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
405		mm->get_unmapped_area = arch_get_unmapped_area;
406	} else {
407		mm->mmap_base = mmap_base(random_factor, rlim_stack);
408		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
409	}
410}
411#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
412void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
413{
414	mm->mmap_base = TASK_UNMAPPED_BASE;
415	mm->get_unmapped_area = arch_get_unmapped_area;
416}
417#endif
418
419/**
420 * __account_locked_vm - account locked pages to an mm's locked_vm
421 * @mm:          mm to account against
422 * @pages:       number of pages to account
423 * @inc:         %true if @pages should be considered positive, %false if not
424 * @task:        task used to check RLIMIT_MEMLOCK
425 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
426 *
427 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
428 * that mmap_lock is held as writer.
429 *
430 * Return:
431 * * 0       on success
432 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
433 */
434int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
435			struct task_struct *task, bool bypass_rlim)
436{
437	unsigned long locked_vm, limit;
438	int ret = 0;
439
440	mmap_assert_write_locked(mm);
441
442	locked_vm = mm->locked_vm;
443	if (inc) {
444		if (!bypass_rlim) {
445			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
446			if (locked_vm + pages > limit)
447				ret = -ENOMEM;
448		}
449		if (!ret)
450			mm->locked_vm = locked_vm + pages;
451	} else {
452		WARN_ON_ONCE(pages > locked_vm);
453		mm->locked_vm = locked_vm - pages;
454	}
455
456	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
457		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
458		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
459		 ret ? " - exceeded" : "");
460
461	return ret;
462}
463EXPORT_SYMBOL_GPL(__account_locked_vm);
464
465/**
466 * account_locked_vm - account locked pages to an mm's locked_vm
467 * @mm:          mm to account against, may be NULL
468 * @pages:       number of pages to account
469 * @inc:         %true if @pages should be considered positive, %false if not
470 *
471 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
472 *
473 * Return:
474 * * 0       on success, or if mm is NULL
475 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 
 
 
 
 
 
 
 
 
 
 
 
476 */
477int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
 
478{
479	int ret;
480
481	if (pages == 0 || !mm)
482		return 0;
483
484	mmap_write_lock(mm);
485	ret = __account_locked_vm(mm, pages, inc, current,
486				  capable(CAP_IPC_LOCK));
487	mmap_write_unlock(mm);
488
489	return ret;
490}
491EXPORT_SYMBOL_GPL(account_locked_vm);
492
493unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
494	unsigned long len, unsigned long prot,
495	unsigned long flag, unsigned long pgoff)
496{
497	unsigned long ret;
498	struct mm_struct *mm = current->mm;
499	unsigned long populate;
500	LIST_HEAD(uf);
501
502	ret = security_mmap_file(file, prot, flag);
503	if (!ret) {
504		if (mmap_write_lock_killable(mm))
505			return -EINTR;
506		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
507			      &uf);
508		mmap_write_unlock(mm);
509		userfaultfd_unmap_complete(mm, &uf);
510		if (populate)
511			mm_populate(ret, populate);
512	}
513	return ret;
514}
515
516unsigned long vm_mmap(struct file *file, unsigned long addr,
517	unsigned long len, unsigned long prot,
518	unsigned long flag, unsigned long offset)
519{
520	if (unlikely(offset + PAGE_ALIGN(len) < offset))
521		return -EINVAL;
522	if (unlikely(offset_in_page(offset)))
523		return -EINVAL;
524
525	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
526}
527EXPORT_SYMBOL(vm_mmap);
528
529/**
530 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
531 * failure, fall back to non-contiguous (vmalloc) allocation.
532 * @size: size of the request.
533 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
534 * @node: numa node to allocate from
535 *
536 * Uses kmalloc to get the memory but if the allocation fails then falls back
537 * to the vmalloc allocator. Use kvfree for freeing the memory.
538 *
539 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
540 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
541 * preferable to the vmalloc fallback, due to visible performance drawbacks.
542 *
543 * Please note that any use of gfp flags outside of GFP_KERNEL is careful to not
544 * fall back to vmalloc.
545 *
546 * Return: pointer to the allocated memory of %NULL in case of failure
547 */
548void *kvmalloc_node(size_t size, gfp_t flags, int node)
549{
550	gfp_t kmalloc_flags = flags;
551	void *ret;
552
553	/*
554	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
555	 * so the given set of flags has to be compatible.
556	 */
557	if ((flags & GFP_KERNEL) != GFP_KERNEL)
558		return kmalloc_node(size, flags, node);
559
560	/*
561	 * We want to attempt a large physically contiguous block first because
562	 * it is less likely to fragment multiple larger blocks and therefore
563	 * contribute to a long term fragmentation less than vmalloc fallback.
564	 * However make sure that larger requests are not too disruptive - no
565	 * OOM killer and no allocation failure warnings as we have a fallback.
566	 */
567	if (size > PAGE_SIZE) {
568		kmalloc_flags |= __GFP_NOWARN;
569
570		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
571			kmalloc_flags |= __GFP_NORETRY;
572	}
573
574	ret = kmalloc_node(size, kmalloc_flags, node);
575
576	/*
577	 * It doesn't really make sense to fallback to vmalloc for sub page
578	 * requests
579	 */
580	if (ret || size <= PAGE_SIZE)
581		return ret;
582
583	return __vmalloc_node(size, 1, flags, node,
584			__builtin_return_address(0));
585}
586EXPORT_SYMBOL(kvmalloc_node);
587
588/**
589 * kvfree() - Free memory.
590 * @addr: Pointer to allocated memory.
591 *
592 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
593 * It is slightly more efficient to use kfree() or vfree() if you are certain
594 * that you know which one to use.
595 *
596 * Context: Either preemptible task context or not-NMI interrupt.
597 */
598void kvfree(const void *addr)
599{
600	if (is_vmalloc_addr(addr))
601		vfree(addr);
602	else
603		kfree(addr);
604}
605EXPORT_SYMBOL(kvfree);
606
607/**
608 * kvfree_sensitive - Free a data object containing sensitive information.
609 * @addr: address of the data object to be freed.
610 * @len: length of the data object.
611 *
612 * Use the special memzero_explicit() function to clear the content of a
613 * kvmalloc'ed object containing sensitive data to make sure that the
614 * compiler won't optimize out the data clearing.
615 */
616void kvfree_sensitive(const void *addr, size_t len)
617{
618	if (likely(!ZERO_OR_NULL_PTR(addr))) {
619		memzero_explicit((void *)addr, len);
620		kvfree(addr);
621	}
622}
623EXPORT_SYMBOL(kvfree_sensitive);
624
625static inline void *__page_rmapping(struct page *page)
626{
627	unsigned long mapping;
628
629	mapping = (unsigned long)page->mapping;
630	mapping &= ~PAGE_MAPPING_FLAGS;
631
632	return (void *)mapping;
633}
634
635/* Neutral page->mapping pointer to address_space or anon_vma or other */
636void *page_rmapping(struct page *page)
637{
638	page = compound_head(page);
639	return __page_rmapping(page);
640}
641
642/*
643 * Return true if this page is mapped into pagetables.
644 * For compound page it returns true if any subpage of compound page is mapped.
645 */
646bool page_mapped(struct page *page)
647{
648	int i;
649
650	if (likely(!PageCompound(page)))
651		return atomic_read(&page->_mapcount) >= 0;
652	page = compound_head(page);
653	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
654		return true;
655	if (PageHuge(page))
656		return false;
657	for (i = 0; i < compound_nr(page); i++) {
658		if (atomic_read(&page[i]._mapcount) >= 0)
659			return true;
660	}
661	return false;
662}
663EXPORT_SYMBOL(page_mapped);
664
665struct anon_vma *page_anon_vma(struct page *page)
666{
667	unsigned long mapping;
668
669	page = compound_head(page);
670	mapping = (unsigned long)page->mapping;
671	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
672		return NULL;
673	return __page_rmapping(page);
674}
675
676struct address_space *page_mapping(struct page *page)
677{
678	struct address_space *mapping;
679
680	page = compound_head(page);
681
682	/* This happens if someone calls flush_dcache_page on slab page */
683	if (unlikely(PageSlab(page)))
684		return NULL;
685
686	if (unlikely(PageSwapCache(page))) {
687		swp_entry_t entry;
688
689		entry.val = page_private(page);
690		return swap_address_space(entry);
691	}
692
693	mapping = page->mapping;
694	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
695		return NULL;
696
697	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
698}
699EXPORT_SYMBOL(page_mapping);
700
701/*
702 * For file cache pages, return the address_space, otherwise return NULL
703 */
704struct address_space *page_mapping_file(struct page *page)
705{
706	if (unlikely(PageSwapCache(page)))
707		return NULL;
708	return page_mapping(page);
709}
710
711/* Slow path of page_mapcount() for compound pages */
712int __page_mapcount(struct page *page)
713{
714	int ret;
715
716	ret = atomic_read(&page->_mapcount) + 1;
717	/*
718	 * For file THP page->_mapcount contains total number of mapping
719	 * of the page: no need to look into compound_mapcount.
720	 */
721	if (!PageAnon(page) && !PageHuge(page))
722		return ret;
723	page = compound_head(page);
724	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
725	if (PageDoubleMap(page))
726		ret--;
727	return ret;
728}
729EXPORT_SYMBOL_GPL(__page_mapcount);
730
731int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
732int sysctl_overcommit_ratio __read_mostly = 50;
733unsigned long sysctl_overcommit_kbytes __read_mostly;
734int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
735unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
736unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
737
738int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
739		size_t *lenp, loff_t *ppos)
 
740{
741	int ret;
742
743	ret = proc_dointvec(table, write, buffer, lenp, ppos);
744	if (ret == 0 && write)
745		sysctl_overcommit_kbytes = 0;
746	return ret;
747}
748
749static void sync_overcommit_as(struct work_struct *dummy)
750{
751	percpu_counter_sync(&vm_committed_as);
752}
753
754int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
755		size_t *lenp, loff_t *ppos)
756{
757	struct ctl_table t;
758	int new_policy;
759	int ret;
760
761	/*
762	 * The deviation of sync_overcommit_as could be big with loose policy
763	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
764	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
765	 * with the strict "NEVER", and to avoid possible race condtion (even
766	 * though user usually won't too frequently do the switching to policy
767	 * OVERCOMMIT_NEVER), the switch is done in the following order:
768	 *	1. changing the batch
769	 *	2. sync percpu count on each CPU
770	 *	3. switch the policy
771	 */
772	if (write) {
773		t = *table;
774		t.data = &new_policy;
775		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
776		if (ret)
777			return ret;
778
779		mm_compute_batch(new_policy);
780		if (new_policy == OVERCOMMIT_NEVER)
781			schedule_on_each_cpu(sync_overcommit_as);
782		sysctl_overcommit_memory = new_policy;
783	} else {
784		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
785	}
786
787	return ret;
788}
789
790int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
791		size_t *lenp, loff_t *ppos)
792{
793	int ret;
794
795	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
796	if (ret == 0 && write)
797		sysctl_overcommit_ratio = 0;
798	return ret;
799}
800
801/*
802 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
803 */
804unsigned long vm_commit_limit(void)
805{
806	unsigned long allowed;
807
808	if (sysctl_overcommit_kbytes)
809		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
810	else
811		allowed = ((totalram_pages() - hugetlb_total_pages())
812			   * sysctl_overcommit_ratio / 100);
813	allowed += total_swap_pages;
814
815	return allowed;
816}
817
818/*
819 * Make sure vm_committed_as in one cacheline and not cacheline shared with
820 * other variables. It can be updated by several CPUs frequently.
821 */
822struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
823
824/*
825 * The global memory commitment made in the system can be a metric
826 * that can be used to drive ballooning decisions when Linux is hosted
827 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
828 * balancing memory across competing virtual machines that are hosted.
829 * Several metrics drive this policy engine including the guest reported
830 * memory commitment.
831 *
832 * The time cost of this is very low for small platforms, and for big
833 * platform like a 2S/36C/72T Skylake server, in worst case where
834 * vm_committed_as's spinlock is under severe contention, the time cost
835 * could be about 30~40 microseconds.
836 */
837unsigned long vm_memory_committed(void)
838{
839	return percpu_counter_sum_positive(&vm_committed_as);
840}
841EXPORT_SYMBOL_GPL(vm_memory_committed);
842
843/*
844 * Check that a process has enough memory to allocate a new virtual
845 * mapping. 0 means there is enough memory for the allocation to
846 * succeed and -ENOMEM implies there is not.
847 *
848 * We currently support three overcommit policies, which are set via the
849 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
850 *
851 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
852 * Additional code 2002 Jul 20 by Robert Love.
853 *
854 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
855 *
856 * Note this is a helper function intended to be used by LSMs which
857 * wish to use this logic.
858 */
859int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
860{
861	long allowed;
 
 
 
 
862
863	vm_acct_memory(pages);
864
865	/*
866	 * Sometimes we want to use more memory than we have
867	 */
868	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
869		return 0;
870
871	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
872		if (pages > totalram_pages() + total_swap_pages)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
873			goto error;
874		return 0;
 
 
 
 
 
 
 
 
 
 
 
 
875	}
876
877	allowed = vm_commit_limit();
878	/*
879	 * Reserve some for root
880	 */
881	if (!cap_sys_admin)
882		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
883
884	/*
885	 * Don't let a single process grow so big a user can't recover
886	 */
887	if (mm) {
888		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
889
890		allowed -= min_t(long, mm->total_vm / 32, reserve);
891	}
892
893	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
894		return 0;
895error:
896	vm_unacct_memory(pages);
897
898	return -ENOMEM;
899}
900
901/**
902 * get_cmdline() - copy the cmdline value to a buffer.
903 * @task:     the task whose cmdline value to copy.
904 * @buffer:   the buffer to copy to.
905 * @buflen:   the length of the buffer. Larger cmdline values are truncated
906 *            to this length.
907 *
908 * Return: the size of the cmdline field copied. Note that the copy does
909 * not guarantee an ending NULL byte.
910 */
911int get_cmdline(struct task_struct *task, char *buffer, int buflen)
912{
913	int res = 0;
914	unsigned int len;
915	struct mm_struct *mm = get_task_mm(task);
916	unsigned long arg_start, arg_end, env_start, env_end;
917	if (!mm)
918		goto out;
919	if (!mm->arg_end)
920		goto out_mm;	/* Shh! No looking before we're done */
921
922	spin_lock(&mm->arg_lock);
923	arg_start = mm->arg_start;
924	arg_end = mm->arg_end;
925	env_start = mm->env_start;
926	env_end = mm->env_end;
927	spin_unlock(&mm->arg_lock);
928
929	len = arg_end - arg_start;
930
931	if (len > buflen)
932		len = buflen;
933
934	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
935
936	/*
937	 * If the nul at the end of args has been overwritten, then
938	 * assume application is using setproctitle(3).
939	 */
940	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
941		len = strnlen(buffer, res);
942		if (len < res) {
943			res = len;
944		} else {
945			len = env_end - env_start;
946			if (len > buflen - res)
947				len = buflen - res;
948			res += access_process_vm(task, env_start,
949						 buffer+res, len,
950						 FOLL_FORCE);
951			res = strnlen(buffer, res);
952		}
953	}
954out_mm:
955	mmput(mm);
956out:
957	return res;
958}
959
960int memcmp_pages(struct page *page1, struct page *page2)
961{
962	char *addr1, *addr2;
963	int ret;
964
965	addr1 = kmap_atomic(page1);
966	addr2 = kmap_atomic(page2);
967	ret = memcmp(addr1, addr2, PAGE_SIZE);
968	kunmap_atomic(addr2);
969	kunmap_atomic(addr1);
970	return ret;
971}
v4.6
 
  1#include <linux/mm.h>
  2#include <linux/slab.h>
  3#include <linux/string.h>
  4#include <linux/compiler.h>
  5#include <linux/export.h>
  6#include <linux/err.h>
  7#include <linux/sched.h>
 
 
 
  8#include <linux/security.h>
  9#include <linux/swap.h>
 10#include <linux/swapops.h>
 11#include <linux/mman.h>
 12#include <linux/hugetlb.h>
 13#include <linux/vmalloc.h>
 
 
 
 
 
 
 
 
 14
 15#include <asm/sections.h>
 16#include <asm/uaccess.h>
 17
 18#include "internal.h"
 19
 20static inline int is_kernel_rodata(unsigned long addr)
 21{
 22	return addr >= (unsigned long)__start_rodata &&
 23		addr < (unsigned long)__end_rodata;
 24}
 25
 26/**
 27 * kfree_const - conditionally free memory
 28 * @x: pointer to the memory
 29 *
 30 * Function calls kfree only if @x is not in .rodata section.
 31 */
 32void kfree_const(const void *x)
 33{
 34	if (!is_kernel_rodata((unsigned long)x))
 35		kfree(x);
 36}
 37EXPORT_SYMBOL(kfree_const);
 38
 39/**
 40 * kstrdup - allocate space for and copy an existing string
 41 * @s: the string to duplicate
 42 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 
 
 43 */
 44char *kstrdup(const char *s, gfp_t gfp)
 45{
 46	size_t len;
 47	char *buf;
 48
 49	if (!s)
 50		return NULL;
 51
 52	len = strlen(s) + 1;
 53	buf = kmalloc_track_caller(len, gfp);
 54	if (buf)
 55		memcpy(buf, s, len);
 56	return buf;
 57}
 58EXPORT_SYMBOL(kstrdup);
 59
 60/**
 61 * kstrdup_const - conditionally duplicate an existing const string
 62 * @s: the string to duplicate
 63 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 64 *
 65 * Function returns source string if it is in .rodata section otherwise it
 66 * fallbacks to kstrdup.
 67 * Strings allocated by kstrdup_const should be freed by kfree_const.
 
 68 */
 69const char *kstrdup_const(const char *s, gfp_t gfp)
 70{
 71	if (is_kernel_rodata((unsigned long)s))
 72		return s;
 73
 74	return kstrdup(s, gfp);
 75}
 76EXPORT_SYMBOL(kstrdup_const);
 77
 78/**
 79 * kstrndup - allocate space for and copy an existing string
 80 * @s: the string to duplicate
 81 * @max: read at most @max chars from @s
 82 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 
 
 
 
 83 */
 84char *kstrndup(const char *s, size_t max, gfp_t gfp)
 85{
 86	size_t len;
 87	char *buf;
 88
 89	if (!s)
 90		return NULL;
 91
 92	len = strnlen(s, max);
 93	buf = kmalloc_track_caller(len+1, gfp);
 94	if (buf) {
 95		memcpy(buf, s, len);
 96		buf[len] = '\0';
 97	}
 98	return buf;
 99}
100EXPORT_SYMBOL(kstrndup);
101
102/**
103 * kmemdup - duplicate region of memory
104 *
105 * @src: memory region to duplicate
106 * @len: memory region length
107 * @gfp: GFP mask to use
 
 
108 */
109void *kmemdup(const void *src, size_t len, gfp_t gfp)
110{
111	void *p;
112
113	p = kmalloc_track_caller(len, gfp);
114	if (p)
115		memcpy(p, src, len);
116	return p;
117}
118EXPORT_SYMBOL(kmemdup);
119
120/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121 * memdup_user - duplicate memory region from user space
122 *
123 * @src: source address in user space
124 * @len: number of bytes to copy
125 *
126 * Returns an ERR_PTR() on failure.
 
127 */
128void *memdup_user(const void __user *src, size_t len)
129{
130	void *p;
131
132	/*
133	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
134	 * cause pagefault, which makes it pointless to use GFP_NOFS
135	 * or GFP_ATOMIC.
136	 */
137	p = kmalloc_track_caller(len, GFP_KERNEL);
138	if (!p)
139		return ERR_PTR(-ENOMEM);
140
141	if (copy_from_user(p, src, len)) {
142		kfree(p);
143		return ERR_PTR(-EFAULT);
144	}
145
146	return p;
147}
148EXPORT_SYMBOL(memdup_user);
149
150/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
151 * strndup_user - duplicate an existing string from user space
152 * @s: The string to duplicate
153 * @n: Maximum number of bytes to copy, including the trailing NUL.
 
 
154 */
155char *strndup_user(const char __user *s, long n)
156{
157	char *p;
158	long length;
159
160	length = strnlen_user(s, n);
161
162	if (!length)
163		return ERR_PTR(-EFAULT);
164
165	if (length > n)
166		return ERR_PTR(-EINVAL);
167
168	p = memdup_user(s, length);
169
170	if (IS_ERR(p))
171		return p;
172
173	p[length - 1] = '\0';
174
175	return p;
176}
177EXPORT_SYMBOL(strndup_user);
178
179/**
180 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
181 *
182 * @src: source address in user space
183 * @len: number of bytes to copy
184 *
185 * Returns an ERR_PTR() on failure.
186 */
187void *memdup_user_nul(const void __user *src, size_t len)
188{
189	char *p;
190
191	/*
192	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
193	 * cause pagefault, which makes it pointless to use GFP_NOFS
194	 * or GFP_ATOMIC.
195	 */
196	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
197	if (!p)
198		return ERR_PTR(-ENOMEM);
199
200	if (copy_from_user(p, src, len)) {
201		kfree(p);
202		return ERR_PTR(-EFAULT);
203	}
204	p[len] = '\0';
205
206	return p;
207}
208EXPORT_SYMBOL(memdup_user_nul);
209
210void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
211		struct vm_area_struct *prev, struct rb_node *rb_parent)
212{
213	struct vm_area_struct *next;
214
215	vma->vm_prev = prev;
216	if (prev) {
217		next = prev->vm_next;
218		prev->vm_next = vma;
219	} else {
 
220		mm->mmap = vma;
221		if (rb_parent)
222			next = rb_entry(rb_parent,
223					struct vm_area_struct, vm_rb);
224		else
225			next = NULL;
226	}
227	vma->vm_next = next;
228	if (next)
229		next->vm_prev = vma;
230}
231
 
 
 
 
 
 
 
 
 
 
 
 
 
 
232/* Check if the vma is being used as a stack by this task */
233int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
234{
 
 
235	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
236}
237
238#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
239void arch_pick_mmap_layout(struct mm_struct *mm)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
240{
241	mm->mmap_base = TASK_UNMAPPED_BASE;
242	mm->get_unmapped_area = arch_get_unmapped_area;
243}
244#endif
245
246/*
247 * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
248 * back to the regular GUP.
249 * If the architecture not support this function, simply return with no
250 * page pinned
251 */
252int __weak __get_user_pages_fast(unsigned long start,
253				 int nr_pages, int write, struct page **pages)
254{
255	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
256}
257EXPORT_SYMBOL_GPL(__get_user_pages_fast);
258
259/**
260 * get_user_pages_fast() - pin user pages in memory
261 * @start:	starting user address
262 * @nr_pages:	number of pages from start to pin
263 * @write:	whether pages will be written to
264 * @pages:	array that receives pointers to the pages pinned.
265 *		Should be at least nr_pages long.
266 *
267 * Returns number of pages pinned. This may be fewer than the number
268 * requested. If nr_pages is 0 or negative, returns 0. If no pages
269 * were pinned, returns -errno.
270 *
271 * get_user_pages_fast provides equivalent functionality to get_user_pages,
272 * operating on current and current->mm, with force=0 and vma=NULL. However
273 * unlike get_user_pages, it must be called without mmap_sem held.
274 *
275 * get_user_pages_fast may take mmap_sem and page table locks, so no
276 * assumptions can be made about lack of locking. get_user_pages_fast is to be
277 * implemented in a way that is advantageous (vs get_user_pages()) when the
278 * user memory area is already faulted in and present in ptes. However if the
279 * pages have to be faulted in, it may turn out to be slightly slower so
280 * callers need to carefully consider what to use. On many architectures,
281 * get_user_pages_fast simply falls back to get_user_pages.
282 */
283int __weak get_user_pages_fast(unsigned long start,
284				int nr_pages, int write, struct page **pages)
285{
286	return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
 
 
 
 
 
 
 
 
 
 
287}
288EXPORT_SYMBOL_GPL(get_user_pages_fast);
289
290unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
291	unsigned long len, unsigned long prot,
292	unsigned long flag, unsigned long pgoff)
293{
294	unsigned long ret;
295	struct mm_struct *mm = current->mm;
296	unsigned long populate;
 
297
298	ret = security_mmap_file(file, prot, flag);
299	if (!ret) {
300		down_write(&mm->mmap_sem);
301		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
302				    &populate);
303		up_write(&mm->mmap_sem);
 
 
304		if (populate)
305			mm_populate(ret, populate);
306	}
307	return ret;
308}
309
310unsigned long vm_mmap(struct file *file, unsigned long addr,
311	unsigned long len, unsigned long prot,
312	unsigned long flag, unsigned long offset)
313{
314	if (unlikely(offset + PAGE_ALIGN(len) < offset))
315		return -EINVAL;
316	if (unlikely(offset_in_page(offset)))
317		return -EINVAL;
318
319	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
320}
321EXPORT_SYMBOL(vm_mmap);
322
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323void kvfree(const void *addr)
324{
325	if (is_vmalloc_addr(addr))
326		vfree(addr);
327	else
328		kfree(addr);
329}
330EXPORT_SYMBOL(kvfree);
331
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
332static inline void *__page_rmapping(struct page *page)
333{
334	unsigned long mapping;
335
336	mapping = (unsigned long)page->mapping;
337	mapping &= ~PAGE_MAPPING_FLAGS;
338
339	return (void *)mapping;
340}
341
342/* Neutral page->mapping pointer to address_space or anon_vma or other */
343void *page_rmapping(struct page *page)
344{
345	page = compound_head(page);
346	return __page_rmapping(page);
347}
348
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
349struct anon_vma *page_anon_vma(struct page *page)
350{
351	unsigned long mapping;
352
353	page = compound_head(page);
354	mapping = (unsigned long)page->mapping;
355	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
356		return NULL;
357	return __page_rmapping(page);
358}
359
360struct address_space *page_mapping(struct page *page)
361{
362	struct address_space *mapping;
363
364	page = compound_head(page);
365
366	/* This happens if someone calls flush_dcache_page on slab page */
367	if (unlikely(PageSlab(page)))
368		return NULL;
369
370	if (unlikely(PageSwapCache(page))) {
371		swp_entry_t entry;
372
373		entry.val = page_private(page);
374		return swap_address_space(entry);
375	}
376
377	mapping = page->mapping;
378	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
 
 
 
 
 
 
 
 
 
 
 
 
379		return NULL;
380	return mapping;
381}
382
383/* Slow path of page_mapcount() for compound pages */
384int __page_mapcount(struct page *page)
385{
386	int ret;
387
388	ret = atomic_read(&page->_mapcount) + 1;
 
 
 
 
 
 
389	page = compound_head(page);
390	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
391	if (PageDoubleMap(page))
392		ret--;
393	return ret;
394}
395EXPORT_SYMBOL_GPL(__page_mapcount);
396
397int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
398int sysctl_overcommit_ratio __read_mostly = 50;
399unsigned long sysctl_overcommit_kbytes __read_mostly;
400int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
401unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
402unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
403
404int overcommit_ratio_handler(struct ctl_table *table, int write,
405			     void __user *buffer, size_t *lenp,
406			     loff_t *ppos)
407{
408	int ret;
409
410	ret = proc_dointvec(table, write, buffer, lenp, ppos);
411	if (ret == 0 && write)
412		sysctl_overcommit_kbytes = 0;
413	return ret;
414}
415
416int overcommit_kbytes_handler(struct ctl_table *table, int write,
417			     void __user *buffer, size_t *lenp,
418			     loff_t *ppos)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419{
420	int ret;
421
422	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
423	if (ret == 0 && write)
424		sysctl_overcommit_ratio = 0;
425	return ret;
426}
427
428/*
429 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
430 */
431unsigned long vm_commit_limit(void)
432{
433	unsigned long allowed;
434
435	if (sysctl_overcommit_kbytes)
436		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
437	else
438		allowed = ((totalram_pages - hugetlb_total_pages())
439			   * sysctl_overcommit_ratio / 100);
440	allowed += total_swap_pages;
441
442	return allowed;
443}
444
445/*
446 * Make sure vm_committed_as in one cacheline and not cacheline shared with
447 * other variables. It can be updated by several CPUs frequently.
448 */
449struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
450
451/*
452 * The global memory commitment made in the system can be a metric
453 * that can be used to drive ballooning decisions when Linux is hosted
454 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
455 * balancing memory across competing virtual machines that are hosted.
456 * Several metrics drive this policy engine including the guest reported
457 * memory commitment.
 
 
 
 
 
458 */
459unsigned long vm_memory_committed(void)
460{
461	return percpu_counter_read_positive(&vm_committed_as);
462}
463EXPORT_SYMBOL_GPL(vm_memory_committed);
464
465/*
466 * Check that a process has enough memory to allocate a new virtual
467 * mapping. 0 means there is enough memory for the allocation to
468 * succeed and -ENOMEM implies there is not.
469 *
470 * We currently support three overcommit policies, which are set via the
471 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
472 *
473 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
474 * Additional code 2002 Jul 20 by Robert Love.
475 *
476 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
477 *
478 * Note this is a helper function intended to be used by LSMs which
479 * wish to use this logic.
480 */
481int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
482{
483	long free, allowed, reserve;
484
485	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
486			-(s64)vm_committed_as_batch * num_online_cpus(),
487			"memory commitment underflow");
488
489	vm_acct_memory(pages);
490
491	/*
492	 * Sometimes we want to use more memory than we have
493	 */
494	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
495		return 0;
496
497	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
498		free = global_page_state(NR_FREE_PAGES);
499		free += global_page_state(NR_FILE_PAGES);
500
501		/*
502		 * shmem pages shouldn't be counted as free in this
503		 * case, they can't be purged, only swapped out, and
504		 * that won't affect the overall amount of available
505		 * memory in the system.
506		 */
507		free -= global_page_state(NR_SHMEM);
508
509		free += get_nr_swap_pages();
510
511		/*
512		 * Any slabs which are created with the
513		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
514		 * which are reclaimable, under pressure.  The dentry
515		 * cache and most inode caches should fall into this
516		 */
517		free += global_page_state(NR_SLAB_RECLAIMABLE);
518
519		/*
520		 * Leave reserved pages. The pages are not for anonymous pages.
521		 */
522		if (free <= totalreserve_pages)
523			goto error;
524		else
525			free -= totalreserve_pages;
526
527		/*
528		 * Reserve some for root
529		 */
530		if (!cap_sys_admin)
531			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
532
533		if (free > pages)
534			return 0;
535
536		goto error;
537	}
538
539	allowed = vm_commit_limit();
540	/*
541	 * Reserve some for root
542	 */
543	if (!cap_sys_admin)
544		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
545
546	/*
547	 * Don't let a single process grow so big a user can't recover
548	 */
549	if (mm) {
550		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
 
551		allowed -= min_t(long, mm->total_vm / 32, reserve);
552	}
553
554	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
555		return 0;
556error:
557	vm_unacct_memory(pages);
558
559	return -ENOMEM;
560}
561
562/**
563 * get_cmdline() - copy the cmdline value to a buffer.
564 * @task:     the task whose cmdline value to copy.
565 * @buffer:   the buffer to copy to.
566 * @buflen:   the length of the buffer. Larger cmdline values are truncated
567 *            to this length.
568 * Returns the size of the cmdline field copied. Note that the copy does
 
569 * not guarantee an ending NULL byte.
570 */
571int get_cmdline(struct task_struct *task, char *buffer, int buflen)
572{
573	int res = 0;
574	unsigned int len;
575	struct mm_struct *mm = get_task_mm(task);
576	unsigned long arg_start, arg_end, env_start, env_end;
577	if (!mm)
578		goto out;
579	if (!mm->arg_end)
580		goto out_mm;	/* Shh! No looking before we're done */
581
582	down_read(&mm->mmap_sem);
583	arg_start = mm->arg_start;
584	arg_end = mm->arg_end;
585	env_start = mm->env_start;
586	env_end = mm->env_end;
587	up_read(&mm->mmap_sem);
588
589	len = arg_end - arg_start;
590
591	if (len > buflen)
592		len = buflen;
593
594	res = access_process_vm(task, arg_start, buffer, len, 0);
595
596	/*
597	 * If the nul at the end of args has been overwritten, then
598	 * assume application is using setproctitle(3).
599	 */
600	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
601		len = strnlen(buffer, res);
602		if (len < res) {
603			res = len;
604		} else {
605			len = env_end - env_start;
606			if (len > buflen - res)
607				len = buflen - res;
608			res += access_process_vm(task, env_start,
609						 buffer+res, len, 0);
 
610			res = strnlen(buffer, res);
611		}
612	}
613out_mm:
614	mmput(mm);
615out:
616	return res;
 
 
 
 
 
 
 
 
 
 
 
 
 
617}