v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	linux/mm/mlock.c
  4 *
  5 *  (C) Copyright 1995 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 */
  8
  9#include <linux/capability.h>
 10#include <linux/mman.h>
 11#include <linux/mm.h>
 12#include <linux/sched/user.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/pagemap.h>
 16#include <linux/pagevec.h>
 17#include <linux/pagewalk.h>
 18#include <linux/mempolicy.h>
 19#include <linux/syscalls.h>
 20#include <linux/sched.h>
 21#include <linux/export.h>
 22#include <linux/rmap.h>
 23#include <linux/mmzone.h>
 24#include <linux/hugetlb.h>
 25#include <linux/memcontrol.h>
 26#include <linux/mm_inline.h>
 27#include <linux/secretmem.h>
 28
 29#include "internal.h"
 30
 31struct mlock_fbatch {
 32	local_lock_t lock;
 33	struct folio_batch fbatch;
 34};
 35
 36static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
 37	.lock = INIT_LOCAL_LOCK(lock),
 38};
 39
 40bool can_do_mlock(void)
 41{
 42	if (rlimit(RLIMIT_MEMLOCK) != 0)
 43		return true;
 44	if (capable(CAP_IPC_LOCK))
 45		return true;
 46	return false;
 47}
 48EXPORT_SYMBOL(can_do_mlock);
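/*
 * Illustrative userspace sketch, not part of mlock.c: can_do_mlock() above is
 * why an unprivileged caller with RLIMIT_MEMLOCK set to 0 gets EPERM from
 * mlock(); with a non-zero limit, exceeding it instead fails the resource
 * check later in do_mlock() and is reported as ENOMEM.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	size_t len = 4096;
	void *buf = NULL;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
		printf("RLIMIT_MEMLOCK: soft %llu, hard %llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);

	if (posix_memalign(&buf, 4096, len))
		return 1;
	if (mlock(buf, len) != 0)		/* EPERM or ENOMEM, see above */
		fprintf(stderr, "mlock: %s\n", strerror(errno));
	else
		munlock(buf, len);
	free(buf);
	return 0;
}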
 49
 50/*
 51 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 52 * in vmscan and, possibly, the fault path; and to support semi-accurate
 53 * statistics.
 54 *
 55 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 56 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 57 * list exists), rather than the [in]active lists. PG_unevictable is set to
 58 * indicate the unevictable state.
 59 */
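/*
 * Illustrative userspace sketch, not part of mlock.c: the "semi-accurate
 * statistics" mentioned above surface as the Mlocked: and Unevictable: lines
 * of /proc/meminfo, and the UNEVICTABLE_PG* event counters used below appear
 * in /proc/vmstat (e.g. unevictable_pgs_mlocked, unevictable_pgs_culled).
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Mlocked:", 8) ||
		    !strncmp(line, "Unevictable:", 12))
			fputs(line, stdout);
	fclose(f);
	return 0;
}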
 60
 61static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
 62{
 63	/* There is nothing more we can do while it's off LRU */
 64	if (!folio_test_clear_lru(folio))
 65		return lruvec;
 66
 67	lruvec = folio_lruvec_relock_irq(folio, lruvec);
 68
 69	if (unlikely(folio_evictable(folio))) {
 70		/*
 71		 * This is a little surprising, but quite possible: PG_mlocked
 72		 * must have got cleared already by another CPU.  Could this
 73		 * folio be unevictable?  I'm not sure, but move it now if so.
 74		 */
 75		if (folio_test_unevictable(folio)) {
 76			lruvec_del_folio(lruvec, folio);
 77			folio_clear_unevictable(folio);
 78			lruvec_add_folio(lruvec, folio);
 79
 80			__count_vm_events(UNEVICTABLE_PGRESCUED,
 81					  folio_nr_pages(folio));
 82		}
 83		goto out;
 84	}
 85
 86	if (folio_test_unevictable(folio)) {
 87		if (folio_test_mlocked(folio))
 88			folio->mlock_count++;
 89		goto out;
 90	}
 91
 92	lruvec_del_folio(lruvec, folio);
 93	folio_clear_active(folio);
 94	folio_set_unevictable(folio);
 95	folio->mlock_count = !!folio_test_mlocked(folio);
 96	lruvec_add_folio(lruvec, folio);
 97	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 98out:
 99	folio_set_lru(folio);
100	return lruvec;
101}
102
103static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
104{
105	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
106
107	lruvec = folio_lruvec_relock_irq(folio, lruvec);
108
109	/* As above, this is a little surprising, but possible */
110	if (unlikely(folio_evictable(folio)))
111		goto out;
112
113	folio_set_unevictable(folio);
114	folio->mlock_count = !!folio_test_mlocked(folio);
115	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
116out:
117	lruvec_add_folio(lruvec, folio);
118	folio_set_lru(folio);
119	return lruvec;
120}
121
122static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
123{
124	int nr_pages = folio_nr_pages(folio);
125	bool isolated = false;
126
127	if (!folio_test_clear_lru(folio))
128		goto munlock;
129
130	isolated = true;
131	lruvec = folio_lruvec_relock_irq(folio, lruvec);
132
133	if (folio_test_unevictable(folio)) {
134		/* Then mlock_count is maintained, but might undercount */
135		if (folio->mlock_count)
136			folio->mlock_count--;
137		if (folio->mlock_count)
138			goto out;
139	}
140	/* else assume that was the last mlock: reclaim will fix it if not */
141
142munlock:
143	if (folio_test_clear_mlocked(folio)) {
144		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
145		if (isolated || !folio_test_unevictable(folio))
146			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
147		else
148			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
149	}
150
151	/* folio_evictable() has to be checked *after* clearing Mlocked */
152	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
153		lruvec_del_folio(lruvec, folio);
154		folio_clear_unevictable(folio);
155		lruvec_add_folio(lruvec, folio);
156		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
157	}
158out:
159	if (isolated)
160		folio_set_lru(folio);
161	return lruvec;
162}
163
164/*
165 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
166 */
167#define LRU_FOLIO 0x1
168#define NEW_FOLIO 0x2
169static inline struct folio *mlock_lru(struct folio *folio)
170{
171	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
172}
173
174static inline struct folio *mlock_new(struct folio *folio)
175{
176	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
177}
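/*
 * Illustrative sketch, not part of mlock.c: mlock_lru()/mlock_new() above rely
 * on struct folio pointers being aligned, so the two low bits are free to
 * carry LRU_FOLIO/NEW_FOLIO; mlock_folio_batch() below masks them off again
 * before using the pointer.  A generic, userspace-style version of the same
 * pointer-tagging trick (names here are made up for illustration):
 */
#include <assert.h>
#include <stdint.h>

#define TAG_MASK	0x3UL	/* low two bits of a sufficiently aligned pointer */

static inline void *tag_ptr(void *p, unsigned long tag)
{
	assert(((uintptr_t)p & TAG_MASK) == 0);	/* must start out untagged */
	return (void *)((uintptr_t)p | (tag & TAG_MASK));
}

static inline void *untag_ptr(void *p, unsigned long *tag)
{
	*tag = (uintptr_t)p & TAG_MASK;		/* recover the flag bits */
	return (void *)((uintptr_t)p & ~TAG_MASK);
}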
178
179/*
180 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
181 * make use of such folio pointer flags in future, but for now just keep it for
182 * mlock.  We could use three separate folio batches instead, but one feels
183 * better (munlocking a full folio batch does not need to drain mlocking folio
184 * batches first).
185 */
186static void mlock_folio_batch(struct folio_batch *fbatch)
187{
188	struct lruvec *lruvec = NULL;
189	unsigned long mlock;
190	struct folio *folio;
191	int i;
192
193	for (i = 0; i < folio_batch_count(fbatch); i++) {
194		folio = fbatch->folios[i];
195		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196		folio = (struct folio *)((unsigned long)folio - mlock);
197		fbatch->folios[i] = folio;
198
199		if (mlock & LRU_FOLIO)
200			lruvec = __mlock_folio(folio, lruvec);
201		else if (mlock & NEW_FOLIO)
202			lruvec = __mlock_new_folio(folio, lruvec);
203		else
204			lruvec = __munlock_folio(folio, lruvec);
205	}
206
207	if (lruvec)
208		unlock_page_lruvec_irq(lruvec);
209	folios_put(fbatch->folios, folio_batch_count(fbatch));
210	folio_batch_reinit(fbatch);
211}
212
213void mlock_drain_local(void)
214{
215	struct folio_batch *fbatch;
216
217	local_lock(&mlock_fbatch.lock);
218	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
219	if (folio_batch_count(fbatch))
220		mlock_folio_batch(fbatch);
221	local_unlock(&mlock_fbatch.lock);
222}
223
224void mlock_drain_remote(int cpu)
225{
226	struct folio_batch *fbatch;
227
228	WARN_ON_ONCE(cpu_online(cpu));
229	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
230	if (folio_batch_count(fbatch))
231		mlock_folio_batch(fbatch);
232}
233
234bool need_mlock_drain(int cpu)
235{
236	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
237}
238
239/**
240 * mlock_folio - mlock a folio already on (or temporarily off) LRU
241 * @folio: folio to be mlocked.
242 */
243void mlock_folio(struct folio *folio)
244{
245	struct folio_batch *fbatch;
246
247	local_lock(&mlock_fbatch.lock);
248	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
249
250	if (!folio_test_set_mlocked(folio)) {
251		int nr_pages = folio_nr_pages(folio);
252
253		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
254		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
255	}
256
257	folio_get(folio);
258	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
259	    folio_test_large(folio) || lru_cache_disabled())
260		mlock_folio_batch(fbatch);
261	local_unlock(&mlock_fbatch.lock);
262}
263
264/**
265 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
266 * @folio: folio to be mlocked, either normal or a THP head.
267 */
268void mlock_new_folio(struct folio *folio)
269{
270	struct folio_batch *fbatch;
271	int nr_pages = folio_nr_pages(folio);
272
273	local_lock(&mlock_fbatch.lock);
274	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
275	folio_set_mlocked(folio);
276
277	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
278	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
279
280	folio_get(folio);
281	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
282	    folio_test_large(folio) || lru_cache_disabled())
283		mlock_folio_batch(fbatch);
284	local_unlock(&mlock_fbatch.lock);
285}
286
287/**
288 * munlock_folio - munlock a folio
289 * @folio: folio to be munlocked, either normal or a THP head.
290 */
291void munlock_folio(struct folio *folio)
292{
293	struct folio_batch *fbatch;
294
295	local_lock(&mlock_fbatch.lock);
296	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
297	/*
298	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
299	 * which will check whether the folio is multiply mlocked.
300	 */
301	folio_get(folio);
302	if (!folio_batch_add(fbatch, folio) ||
303	    folio_test_large(folio) || lru_cache_disabled())
304		mlock_folio_batch(fbatch);
305	local_unlock(&mlock_fbatch.lock);
306}
307
308static inline unsigned int folio_mlock_step(struct folio *folio,
309		pte_t *pte, unsigned long addr, unsigned long end)
310{
311	unsigned int count, i, nr = folio_nr_pages(folio);
312	unsigned long pfn = folio_pfn(folio);
313	pte_t ptent = ptep_get(pte);
314
315	if (!folio_test_large(folio))
316		return 1;
317
318	count = pfn + nr - pte_pfn(ptent);
319	count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT);
320
321	for (i = 0; i < count; i++, pte++) {
322		pte_t entry = ptep_get(pte);
323
324		if (!pte_present(entry))
325			break;
326		if (pte_pfn(entry) - pfn >= nr)
327			break;
328	}
329
330	return i;
331}
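/*
 * Worked example, not part of mlock.c: for a 16-page folio whose first page
 * has pfn 1000, with the current pte mapping pfn 1003, the computation above
 * gives count = 1000 + 16 - 1003 = 13 pages left in the folio; if only 8 ptes
 * remain before "end", count is clamped to 8.  The loop then stops early at
 * the first pte that is not present or that maps outside this folio, so the
 * returned step never runs past either the folio or the address range.
 */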
332
333static inline bool allow_mlock_munlock(struct folio *folio,
334		struct vm_area_struct *vma, unsigned long start,
335		unsigned long end, unsigned int step)
336{
337	/*
 338	 * For munlock, allow munlocking a large folio that is only
 339	 * partially mapped to the VMA: the folio may have been mlocked
 340	 * while fully mapped and the VMA split afterwards.
 341	 *
 342	 * Under memory pressure such a large folio can be split, and
 343	 * the pages that then lie outside any VM_LOCKED VMA can be
 344	 * reclaimed.
345	 */
346	if (!(vma->vm_flags & VM_LOCKED))
347		return true;
348
349	/* folio_within_range() cannot take KSM, but any small folio is OK */
350	if (!folio_test_large(folio))
351		return true;
352
353	/* folio not in range [start, end), skip mlock */
354	if (!folio_within_range(folio, vma, start, end))
355		return false;
356
357	/* folio is not fully mapped, skip mlock */
358	if (step != folio_nr_pages(folio))
359		return false;
360
361	return true;
362}
363
364static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
365			   unsigned long end, struct mm_walk *walk)
366
367{
368	struct vm_area_struct *vma = walk->vma;
369	spinlock_t *ptl;
370	pte_t *start_pte, *pte;
371	pte_t ptent;
372	struct folio *folio;
373	unsigned int step = 1;
374	unsigned long start = addr;
375
376	ptl = pmd_trans_huge_lock(pmd, vma);
377	if (ptl) {
378		if (!pmd_present(*pmd))
379			goto out;
380		if (is_huge_zero_pmd(*pmd))
381			goto out;
382		folio = page_folio(pmd_page(*pmd));
383		if (vma->vm_flags & VM_LOCKED)
384			mlock_folio(folio);
385		else
386			munlock_folio(folio);
387		goto out;
388	}
389
390	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
391	if (!start_pte) {
392		walk->action = ACTION_AGAIN;
393		return 0;
394	}
395
396	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
397		ptent = ptep_get(pte);
398		if (!pte_present(ptent))
399			continue;
400		folio = vm_normal_folio(vma, addr, ptent);
401		if (!folio || folio_is_zone_device(folio))
402			continue;
403
404		step = folio_mlock_step(folio, pte, addr, end);
405		if (!allow_mlock_munlock(folio, vma, start, end, step))
406			goto next_entry;
407
408		if (vma->vm_flags & VM_LOCKED)
409			mlock_folio(folio);
410		else
411			munlock_folio(folio);
412
413next_entry:
414		pte += step - 1;
415		addr += (step - 1) << PAGE_SHIFT;
416	}
417	pte_unmap(start_pte);
418out:
419	spin_unlock(ptl);
420	cond_resched();
421	return 0;
422}
423
424/*
425 * mlock_vma_pages_range() - mlock any pages already in the range,
426 *                           or munlock all pages in the range.
427 * @vma - vma containing range to be mlock()ed or munlock()ed
428 * @start - start address in @vma of the range
429 * @end - end of range in @vma
430 * @newflags - the new set of flags for @vma.
431 *
432 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
433 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
434 */
435static void mlock_vma_pages_range(struct vm_area_struct *vma,
436	unsigned long start, unsigned long end, vm_flags_t newflags)
437{
438	static const struct mm_walk_ops mlock_walk_ops = {
439		.pmd_entry = mlock_pte_range,
440		.walk_lock = PGWALK_WRLOCK_VERIFY,
441	};
442
443	/*
444	 * There is a slight chance that concurrent page migration,
445	 * or page reclaim finding a page of this now-VM_LOCKED vma,
446	 * will call mlock_vma_folio() and raise page's mlock_count:
447	 * double counting, leaving the page unevictable indefinitely.
448	 * Communicate this danger to mlock_vma_folio() with VM_IO,
449	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
450	 * mmap_lock is held in write mode here, so this weird
451	 * combination should not be visible to other mmap_lock users;
452	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
453	 */
454	if (newflags & VM_LOCKED)
455		newflags |= VM_IO;
456	vma_start_write(vma);
457	vm_flags_reset_once(vma, newflags);
458
459	lru_add_drain();
460	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
461	lru_add_drain();
462
463	if (newflags & VM_IO) {
464		newflags &= ~VM_IO;
465		vm_flags_reset_once(vma, newflags);
466	}
467}
468
469/*
470 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
471 *
472 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
473 * munlock is a no-op.  However, for some special vmas, we go ahead and
474 * populate the ptes.
475 *
476 * For vmas that pass the filters, merge/split as appropriate.
477 */
478static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
479	       struct vm_area_struct **prev, unsigned long start,
480	       unsigned long end, vm_flags_t newflags)
481{
482	struct mm_struct *mm = vma->vm_mm;
483	int nr_pages;
484	int ret = 0;
485	vm_flags_t oldflags = vma->vm_flags;
486
487	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
488	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
489	    vma_is_dax(vma) || vma_is_secretmem(vma))
490		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
491		goto out;
492
493	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
494	if (IS_ERR(vma)) {
495		ret = PTR_ERR(vma);
496		goto out;
497	}
498
499	/*
500	 * Keep track of amount of locked VM.
501	 */
502	nr_pages = (end - start) >> PAGE_SHIFT;
503	if (!(newflags & VM_LOCKED))
504		nr_pages = -nr_pages;
505	else if (oldflags & VM_LOCKED)
506		nr_pages = 0;
507	mm->locked_vm += nr_pages;
508
509	/*
510	 * vm_flags is protected by the mmap_lock held in write mode.
511	 * It's okay if try_to_unmap_one unmaps a page just after we
512	 * set VM_LOCKED, populate_vma_page_range will bring it back.
513	 */
514	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
515		/* No work to do, and mlocking twice would be wrong */
516		vma_start_write(vma);
517		vm_flags_reset(vma, newflags);
518	} else {
519		mlock_vma_pages_range(vma, start, end, newflags);
520	}
521out:
522	*prev = vma;
523	return ret;
524}
525
526static int apply_vma_lock_flags(unsigned long start, size_t len,
527				vm_flags_t flags)
528{
529	unsigned long nstart, end, tmp;
530	struct vm_area_struct *vma, *prev;
531	VMA_ITERATOR(vmi, current->mm, start);
532
533	VM_BUG_ON(offset_in_page(start));
534	VM_BUG_ON(len != PAGE_ALIGN(len));
535	end = start + len;
536	if (end < start)
537		return -EINVAL;
538	if (end == start)
539		return 0;
540	vma = vma_iter_load(&vmi);
541	if (!vma)
542		return -ENOMEM;
543
544	prev = vma_prev(&vmi);
545	if (start > vma->vm_start)
546		prev = vma;
547
548	nstart = start;
549	tmp = vma->vm_start;
550	for_each_vma_range(vmi, vma, end) {
551		int error;
552		vm_flags_t newflags;
553
554		if (vma->vm_start != tmp)
555			return -ENOMEM;
556
557		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
558		newflags |= flags;
559		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
560		tmp = vma->vm_end;
561		if (tmp > end)
562			tmp = end;
563		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
564		if (error)
565			return error;
566		tmp = vma_iter_end(&vmi);
567		nstart = tmp;
568	}
569
570	if (tmp < end)
571		return -ENOMEM;
572
573	return 0;
574}
575
576/*
 577 * Go through the vmas in the range and sum up the size of the
 578 * already-mlocked vma pages.
 579 * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
 580 * is also counted.
 581 * Return value: the number of previously mlocked pages
582 */
583static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
584		unsigned long start, size_t len)
585{
586	struct vm_area_struct *vma;
587	unsigned long count = 0;
588	unsigned long end;
589	VMA_ITERATOR(vmi, mm, start);
590
591	/* Don't overflow past ULONG_MAX */
592	if (unlikely(ULONG_MAX - len < start))
593		end = ULONG_MAX;
594	else
595		end = start + len;
596
597	for_each_vma_range(vmi, vma, end) {
598		if (vma->vm_flags & VM_LOCKED) {
599			if (start > vma->vm_start)
600				count -= (start - vma->vm_start);
601			if (end < vma->vm_end) {
602				count += end - vma->vm_start;
603				break;
604			}
605			count += vma->vm_end - vma->vm_start;
606		}
607	}
608
609	return count >> PAGE_SHIFT;
610}
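/*
 * Illustrative sketch, not part of mlock.c: the loop above accumulates, in
 * bytes, how much of [start, end) is already covered by VM_LOCKED vmas, and
 * converts to pages only once at the end.  Since for_each_vma_range() only
 * visits vmas intersecting the range, each vma's net contribution is just the
 * size of the intersection, which could be written as this hypothetical
 * helper (for illustration only):
 */
static unsigned long locked_overlap(unsigned long start, unsigned long end,
				    unsigned long vm_start, unsigned long vm_end)
{
	unsigned long lo = start > vm_start ? start : vm_start;
	unsigned long hi = end < vm_end ? end : vm_end;

	return hi > lo ? hi - lo : 0;	/* bytes of [start, end) inside the vma */
}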
611
612/*
613 * convert get_user_pages() return value to posix mlock() error
614 */
615static int __mlock_posix_error_return(long retval)
616{
617	if (retval == -EFAULT)
618		retval = -ENOMEM;
619	else if (retval == -ENOMEM)
620		retval = -EAGAIN;
621	return retval;
622}
623
624static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
625{
626	unsigned long locked;
627	unsigned long lock_limit;
628	int error = -ENOMEM;
629
630	start = untagged_addr(start);
631
632	if (!can_do_mlock())
633		return -EPERM;
634
635	len = PAGE_ALIGN(len + (offset_in_page(start)));
636	start &= PAGE_MASK;
637
638	lock_limit = rlimit(RLIMIT_MEMLOCK);
639	lock_limit >>= PAGE_SHIFT;
640	locked = len >> PAGE_SHIFT;
641
642	if (mmap_write_lock_killable(current->mm))
643		return -EINTR;
644
645	locked += current->mm->locked_vm;
646	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
647		/*
 648		 * It is possible that the requested region intersects with
 649		 * previously mlocked areas; that part is already accounted in
 650		 * "mm->locked_vm" and must not be counted again towards the new
 651		 * mlock increment, so check and adjust the locked count here.
652		 */
653		locked -= count_mm_mlocked_page_nr(current->mm,
654				start, len);
655	}
656
657	/* check against resource limits */
658	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
659		error = apply_vma_lock_flags(start, len, flags);
660
661	mmap_write_unlock(current->mm);
662	if (error)
663		return error;
664
665	error = __mm_populate(start, len, 0);
666	if (error)
667		return __mlock_posix_error_return(error);
668	return 0;
669}
670
671SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
672{
673	return do_mlock(start, len, VM_LOCKED);
674}
675
676SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
677{
678	vm_flags_t vm_flags = VM_LOCKED;
679
680	if (flags & ~MLOCK_ONFAULT)
681		return -EINVAL;
682
683	if (flags & MLOCK_ONFAULT)
684		vm_flags |= VM_LOCKONFAULT;
685
686	return do_mlock(start, len, vm_flags);
687}
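/*
 * Illustrative userspace sketch, not part of mlock.c: mlock2() with
 * MLOCK_ONFAULT sets VM_LOCKED | VM_LOCKONFAULT, so pages are locked as they
 * are faulted in instead of being populated up front.  glibc has provided an
 * mlock2() wrapper since 2.27; the MLOCK_ONFAULT fallback define below is
 * only for older headers.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT	0x01	/* matches the uapi definition */
#endif

int main(void)
{
	size_t len = 1 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (mlock2(buf, len, MLOCK_ONFAULT) != 0)
		fprintf(stderr, "mlock2: %s\n", strerror(errno));
	else
		buf[0] = 1;	/* this first touch locks just one page */
	munmap(buf, len);
	return 0;
}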
688
689SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
690{
691	int ret;
692
693	start = untagged_addr(start);
694
695	len = PAGE_ALIGN(len + (offset_in_page(start)));
696	start &= PAGE_MASK;
697
698	if (mmap_write_lock_killable(current->mm))
699		return -EINTR;
700	ret = apply_vma_lock_flags(start, len, 0);
701	mmap_write_unlock(current->mm);
702
703	return ret;
704}
705
706/*
707 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
708 * and translate into the appropriate modifications to mm->def_flags and/or the
709 * flags for all current VMAs.
710 *
711 * There are a couple of subtleties with this.  If mlockall() is called multiple
712 * times with different flags, the values do not necessarily stack.  If mlockall
713 * is called once including the MCL_FUTURE flag and then a second time without
714 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
715 */
716static int apply_mlockall_flags(int flags)
717{
718	VMA_ITERATOR(vmi, current->mm, 0);
719	struct vm_area_struct *vma, *prev = NULL;
720	vm_flags_t to_add = 0;
721
722	current->mm->def_flags &= ~VM_LOCKED_MASK;
723	if (flags & MCL_FUTURE) {
724		current->mm->def_flags |= VM_LOCKED;
725
726		if (flags & MCL_ONFAULT)
727			current->mm->def_flags |= VM_LOCKONFAULT;
728
729		if (!(flags & MCL_CURRENT))
730			goto out;
731	}
732
733	if (flags & MCL_CURRENT) {
734		to_add |= VM_LOCKED;
735		if (flags & MCL_ONFAULT)
736			to_add |= VM_LOCKONFAULT;
737	}
738
739	for_each_vma(vmi, vma) {
740		vm_flags_t newflags;
741
742		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
743		newflags |= to_add;
744
745		/* Ignore errors */
746		mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
747			    newflags);
748		cond_resched();
749	}
750out:
751	return 0;
752}
753
754SYSCALL_DEFINE1(mlockall, int, flags)
755{
756	unsigned long lock_limit;
757	int ret;
758
759	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
760	    flags == MCL_ONFAULT)
761		return -EINVAL;
762
763	if (!can_do_mlock())
764		return -EPERM;
765
766	lock_limit = rlimit(RLIMIT_MEMLOCK);
767	lock_limit >>= PAGE_SHIFT;
768
769	if (mmap_write_lock_killable(current->mm))
770		return -EINTR;
771
772	ret = -ENOMEM;
773	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
774	    capable(CAP_IPC_LOCK))
775		ret = apply_mlockall_flags(flags);
776	mmap_write_unlock(current->mm);
777	if (!ret && (flags & MCL_CURRENT))
778		mm_populate(0, TASK_SIZE);
779
780	return ret;
781}
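/*
 * Illustrative userspace sketch, not part of mlock.c: a common real-time
 * pattern is mlockall(MCL_CURRENT | MCL_FUTURE), which locks everything
 * currently mapped and, via mm->def_flags, everything mapped later; adding
 * MCL_ONFAULT defers the actual population to first touch.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		/* EPERM without CAP_IPC_LOCK and a zero RLIMIT_MEMLOCK,
		 * ENOMEM when the locked size would exceed the limit */
		fprintf(stderr, "mlockall: %s\n", strerror(errno));
		return 1;
	}
	/* ... latency-sensitive work runs here without major faults ... */
	munlockall();
	return 0;
}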
782
783SYSCALL_DEFINE0(munlockall)
784{
785	int ret;
786
787	if (mmap_write_lock_killable(current->mm))
788		return -EINTR;
789	ret = apply_mlockall_flags(0);
790	mmap_write_unlock(current->mm);
791	return ret;
792}
793
794/*
795 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
796 * shm segments) get accounted against the user_struct instead.
797 */
798static DEFINE_SPINLOCK(shmlock_user_lock);
799
800int user_shm_lock(size_t size, struct ucounts *ucounts)
801{
802	unsigned long lock_limit, locked;
803	long memlock;
804	int allowed = 0;
805
806	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
807	lock_limit = rlimit(RLIMIT_MEMLOCK);
808	if (lock_limit != RLIM_INFINITY)
809		lock_limit >>= PAGE_SHIFT;
810	spin_lock(&shmlock_user_lock);
811	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
812
813	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
814		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
815		goto out;
816	}
817	if (!get_ucounts(ucounts)) {
818		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
819		allowed = 0;
820		goto out;
821	}
822	allowed = 1;
823out:
824	spin_unlock(&shmlock_user_lock);
825	return allowed;
826}
827
828void user_shm_unlock(size_t size, struct ucounts *ucounts)
829{
830	spin_lock(&shmlock_user_lock);
831	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
832	spin_unlock(&shmlock_user_lock);
833	put_ucounts(ucounts);
834}
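/*
 * Illustrative userspace sketch, not part of mlock.c: user_shm_lock() and
 * user_shm_unlock() above are invoked on behalf of SysV shared memory when a
 * segment is locked with shmctl(SHM_LOCK), so the accounting is charged to
 * the owning ucounts rather than to any single process's mm->locked_vm.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	if (shmctl(id, SHM_LOCK, NULL) != 0)	/* pins the segment's pages */
		fprintf(stderr, "shmctl(SHM_LOCK): %s\n", strerror(errno));
	else
		shmctl(id, SHM_UNLOCK, NULL);
	shmctl(id, IPC_RMID, NULL);		/* clean up the segment */
	return 0;
}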
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	linux/mm/mlock.c
  4 *
  5 *  (C) Copyright 1995 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 */
  8
  9#include <linux/capability.h>
 10#include <linux/mman.h>
 11#include <linux/mm.h>
 12#include <linux/sched/user.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/pagemap.h>
 16#include <linux/pagevec.h>
 17#include <linux/pagewalk.h>
 18#include <linux/mempolicy.h>
 19#include <linux/syscalls.h>
 20#include <linux/sched.h>
 21#include <linux/export.h>
 22#include <linux/rmap.h>
 23#include <linux/mmzone.h>
 24#include <linux/hugetlb.h>
 25#include <linux/memcontrol.h>
 26#include <linux/mm_inline.h>
 27#include <linux/secretmem.h>
 28
 29#include "internal.h"
 30
 31struct mlock_pvec {
 32	local_lock_t lock;
 33	struct pagevec vec;
 34};
 35
 36static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
 37	.lock = INIT_LOCAL_LOCK(lock),
 38};
 39
 40bool can_do_mlock(void)
 41{
 42	if (rlimit(RLIMIT_MEMLOCK) != 0)
 43		return true;
 44	if (capable(CAP_IPC_LOCK))
 45		return true;
 46	return false;
 47}
 48EXPORT_SYMBOL(can_do_mlock);
 49
 50/*
 51 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 52 * in vmscan and, possibly, the fault path; and to support semi-accurate
 53 * statistics.
 54 *
 55 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 56 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 57 * The unevictable list is an LRU sibling list to the [in]active lists.
 58 * PageUnevictable is set to indicate the unevictable state.
 59 */
 60
 61static struct lruvec *__mlock_page(struct page *page, struct lruvec *lruvec)
 62{
 63	/* There is nothing more we can do while it's off LRU */
 64	if (!TestClearPageLRU(page))
 65		return lruvec;
 66
 67	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
 68
 69	if (unlikely(page_evictable(page))) {
 70		/*
 71		 * This is a little surprising, but quite possible:
 72		 * PageMlocked must have got cleared already by another CPU.
 73		 * Could this page be on the Unevictable LRU?  I'm not sure,
 74		 * but move it now if so.
 75		 */
 76		if (PageUnevictable(page)) {
 77			del_page_from_lru_list(page, lruvec);
 78			ClearPageUnevictable(page);
 79			add_page_to_lru_list(page, lruvec);
 80			__count_vm_events(UNEVICTABLE_PGRESCUED,
 81					  thp_nr_pages(page));
 82		}
 83		goto out;
 84	}
 85
 86	if (PageUnevictable(page)) {
 87		if (PageMlocked(page))
 88			page->mlock_count++;
 89		goto out;
 90	}
 91
 92	del_page_from_lru_list(page, lruvec);
 93	ClearPageActive(page);
 94	SetPageUnevictable(page);
 95	page->mlock_count = !!PageMlocked(page);
 96	add_page_to_lru_list(page, lruvec);
 97	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
 98out:
 99	SetPageLRU(page);
100	return lruvec;
101}
102
103static struct lruvec *__mlock_new_page(struct page *page, struct lruvec *lruvec)
104{
105	VM_BUG_ON_PAGE(PageLRU(page), page);
106
107	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
108
109	/* As above, this is a little surprising, but possible */
110	if (unlikely(page_evictable(page)))
111		goto out;
112
113	SetPageUnevictable(page);
114	page->mlock_count = !!PageMlocked(page);
115	__count_vm_events(UNEVICTABLE_PGCULLED, thp_nr_pages(page));
116out:
117	add_page_to_lru_list(page, lruvec);
118	SetPageLRU(page);
119	return lruvec;
120}
121
122static struct lruvec *__munlock_page(struct page *page, struct lruvec *lruvec)
123{
124	int nr_pages = thp_nr_pages(page);
125	bool isolated = false;
126
127	if (!TestClearPageLRU(page))
128		goto munlock;
129
130	isolated = true;
131	lruvec = folio_lruvec_relock_irq(page_folio(page), lruvec);
132
133	if (PageUnevictable(page)) {
134		/* Then mlock_count is maintained, but might undercount */
135		if (page->mlock_count)
136			page->mlock_count--;
137		if (page->mlock_count)
138			goto out;
139	}
140	/* else assume that was the last mlock: reclaim will fix it if not */
141
142munlock:
143	if (TestClearPageMlocked(page)) {
144		__mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
145		if (isolated || !PageUnevictable(page))
146			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
147		else
148			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
149	}
150
151	/* page_evictable() has to be checked *after* clearing Mlocked */
152	if (isolated && PageUnevictable(page) && page_evictable(page)) {
153		del_page_from_lru_list(page, lruvec);
154		ClearPageUnevictable(page);
155		add_page_to_lru_list(page, lruvec);
156		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
157	}
158out:
159	if (isolated)
160		SetPageLRU(page);
161	return lruvec;
162}
163
164/*
165 * Flags held in the low bits of a struct page pointer on the mlock_pvec.
166 */
167#define LRU_PAGE 0x1
168#define NEW_PAGE 0x2
169static inline struct page *mlock_lru(struct page *page)
170{
171	return (struct page *)((unsigned long)page + LRU_PAGE);
172}
173
174static inline struct page *mlock_new(struct page *page)
175{
176	return (struct page *)((unsigned long)page + NEW_PAGE);
177}
178
179/*
180 * mlock_pagevec() is derived from pagevec_lru_move_fn():
181 * perhaps that can make use of such page pointer flags in future,
182 * but for now just keep it for mlock.  We could use three separate
183 * pagevecs instead, but one feels better (munlocking a full pagevec
184 * does not need to drain mlocking pagevecs first).
185 */
186static void mlock_pagevec(struct pagevec *pvec)
187{
188	struct lruvec *lruvec = NULL;
189	unsigned long mlock;
190	struct page *page;
191	int i;
192
193	for (i = 0; i < pagevec_count(pvec); i++) {
194		page = pvec->pages[i];
195		mlock = (unsigned long)page & (LRU_PAGE | NEW_PAGE);
196		page = (struct page *)((unsigned long)page - mlock);
197		pvec->pages[i] = page;
198
199		if (mlock & LRU_PAGE)
200			lruvec = __mlock_page(page, lruvec);
201		else if (mlock & NEW_PAGE)
202			lruvec = __mlock_new_page(page, lruvec);
203		else
204			lruvec = __munlock_page(page, lruvec);
205	}
206
207	if (lruvec)
208		unlock_page_lruvec_irq(lruvec);
209	release_pages(pvec->pages, pvec->nr);
210	pagevec_reinit(pvec);
211}
212
213void mlock_page_drain_local(void)
214{
215	struct pagevec *pvec;
216
217	local_lock(&mlock_pvec.lock);
218	pvec = this_cpu_ptr(&mlock_pvec.vec);
219	if (pagevec_count(pvec))
220		mlock_pagevec(pvec);
221	local_unlock(&mlock_pvec.lock);
222}
223
224void mlock_page_drain_remote(int cpu)
225{
226	struct pagevec *pvec;
227
228	WARN_ON_ONCE(cpu_online(cpu));
229	pvec = &per_cpu(mlock_pvec.vec, cpu);
230	if (pagevec_count(pvec))
231		mlock_pagevec(pvec);
232}
233
234bool need_mlock_page_drain(int cpu)
235{
236	return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
237}
238
239/**
240 * mlock_folio - mlock a folio already on (or temporarily off) LRU
241 * @folio: folio to be mlocked.
242 */
243void mlock_folio(struct folio *folio)
244{
245	struct pagevec *pvec;
246
247	local_lock(&mlock_pvec.lock);
248	pvec = this_cpu_ptr(&mlock_pvec.vec);
249
250	if (!folio_test_set_mlocked(folio)) {
251		int nr_pages = folio_nr_pages(folio);
252
253		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
254		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
255	}
256
257	folio_get(folio);
258	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
259	    folio_test_large(folio) || lru_cache_disabled())
260		mlock_pagevec(pvec);
261	local_unlock(&mlock_pvec.lock);
262}
263
264/**
265 * mlock_new_page - mlock a newly allocated page not yet on LRU
266 * @page: page to be mlocked, either a normal page or a THP head.
267 */
268void mlock_new_page(struct page *page)
269{
270	struct pagevec *pvec;
271	int nr_pages = thp_nr_pages(page);
272
273	local_lock(&mlock_pvec.lock);
274	pvec = this_cpu_ptr(&mlock_pvec.vec);
275	SetPageMlocked(page);
276	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
277	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
278
279	get_page(page);
280	if (!pagevec_add(pvec, mlock_new(page)) ||
281	    PageHead(page) || lru_cache_disabled())
282		mlock_pagevec(pvec);
283	local_unlock(&mlock_pvec.lock);
284}
285
286/**
287 * munlock_page - munlock a page
288 * @page: page to be munlocked, either a normal page or a THP head.
289 */
290void munlock_page(struct page *page)
291{
292	struct pagevec *pvec;
293
294	local_lock(&mlock_pvec.lock);
295	pvec = this_cpu_ptr(&mlock_pvec.vec);
296	/*
297	 * TestClearPageMlocked(page) must be left to __munlock_page(),
298	 * which will check whether the page is multiply mlocked.
299	 */
300
301	get_page(page);
302	if (!pagevec_add(pvec, page) ||
303	    PageHead(page) || lru_cache_disabled())
304		mlock_pagevec(pvec);
305	local_unlock(&mlock_pvec.lock);
306}
307
308static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
309			   unsigned long end, struct mm_walk *walk)
310
311{
312	struct vm_area_struct *vma = walk->vma;
313	spinlock_t *ptl;
314	pte_t *start_pte, *pte;
315	struct page *page;
316
317	ptl = pmd_trans_huge_lock(pmd, vma);
318	if (ptl) {
319		if (!pmd_present(*pmd))
320			goto out;
321		if (is_huge_zero_pmd(*pmd))
322			goto out;
323		page = pmd_page(*pmd);
324		if (vma->vm_flags & VM_LOCKED)
325			mlock_folio(page_folio(page));
326		else
327			munlock_page(page);
328		goto out;
329	}
330
331	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
332	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
333		if (!pte_present(*pte))
334			continue;
335		page = vm_normal_page(vma, addr, *pte);
336		if (!page || is_zone_device_page(page))
337			continue;
338		if (PageTransCompound(page))
339			continue;
340		if (vma->vm_flags & VM_LOCKED)
341			mlock_folio(page_folio(page));
342		else
343			munlock_page(page);
344	}
345	pte_unmap(start_pte);
346out:
347	spin_unlock(ptl);
348	cond_resched();
349	return 0;
350}
351
352/*
353 * mlock_vma_pages_range() - mlock any pages already in the range,
354 *                           or munlock all pages in the range.
355 * @vma - vma containing range to be mlock()ed or munlock()ed
356 * @start - start address in @vma of the range
357 * @end - end of range in @vma
358 * @newflags - the new set of flags for @vma.
359 *
360 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
361 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
362 */
363static void mlock_vma_pages_range(struct vm_area_struct *vma,
364	unsigned long start, unsigned long end, vm_flags_t newflags)
365{
366	static const struct mm_walk_ops mlock_walk_ops = {
367		.pmd_entry = mlock_pte_range,
368	};
369
370	/*
371	 * There is a slight chance that concurrent page migration,
372	 * or page reclaim finding a page of this now-VM_LOCKED vma,
373	 * will call mlock_vma_page() and raise page's mlock_count:
374	 * double counting, leaving the page unevictable indefinitely.
375	 * Communicate this danger to mlock_vma_page() with VM_IO,
376	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
377	 * mmap_lock is held in write mode here, so this weird
378	 * combination should not be visible to other mmap_lock users;
379	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
380	 */
381	if (newflags & VM_LOCKED)
382		newflags |= VM_IO;
383	WRITE_ONCE(vma->vm_flags, newflags);
384
385	lru_add_drain();
386	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
387	lru_add_drain();
388
389	if (newflags & VM_IO) {
390		newflags &= ~VM_IO;
391		WRITE_ONCE(vma->vm_flags, newflags);
392	}
393}
394
395/*
396 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
397 *
398 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
399 * munlock is a no-op.  However, for some special vmas, we go ahead and
400 * populate the ptes.
401 *
402 * For vmas that pass the filters, merge/split as appropriate.
403 */
404static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
405	unsigned long start, unsigned long end, vm_flags_t newflags)
406{
407	struct mm_struct *mm = vma->vm_mm;
408	pgoff_t pgoff;
409	int nr_pages;
410	int ret = 0;
411	vm_flags_t oldflags = vma->vm_flags;
412
413	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
414	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
415	    vma_is_dax(vma) || vma_is_secretmem(vma))
416		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
417		goto out;
418
419	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
420	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
421			  vma->vm_file, pgoff, vma_policy(vma),
422			  vma->vm_userfaultfd_ctx, anon_vma_name(vma));
423	if (*prev) {
424		vma = *prev;
425		goto success;
426	}
427
428	if (start != vma->vm_start) {
429		ret = split_vma(mm, vma, start, 1);
430		if (ret)
431			goto out;
432	}
433
434	if (end != vma->vm_end) {
435		ret = split_vma(mm, vma, end, 0);
436		if (ret)
437			goto out;
438	}
439
440success:
441	/*
442	 * Keep track of amount of locked VM.
443	 */
444	nr_pages = (end - start) >> PAGE_SHIFT;
445	if (!(newflags & VM_LOCKED))
446		nr_pages = -nr_pages;
447	else if (oldflags & VM_LOCKED)
448		nr_pages = 0;
449	mm->locked_vm += nr_pages;
450
451	/*
452	 * vm_flags is protected by the mmap_lock held in write mode.
453	 * It's okay if try_to_unmap_one unmaps a page just after we
454	 * set VM_LOCKED, populate_vma_page_range will bring it back.
455	 */
456
457	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
458		/* No work to do, and mlocking twice would be wrong */
459		vma->vm_flags = newflags;
460	} else {
461		mlock_vma_pages_range(vma, start, end, newflags);
462	}
463out:
464	*prev = vma;
465	return ret;
466}
467
468static int apply_vma_lock_flags(unsigned long start, size_t len,
469				vm_flags_t flags)
470{
471	unsigned long nstart, end, tmp;
472	struct vm_area_struct *vma, *prev;
473	int error;
474	MA_STATE(mas, &current->mm->mm_mt, start, start);
475
476	VM_BUG_ON(offset_in_page(start));
477	VM_BUG_ON(len != PAGE_ALIGN(len));
478	end = start + len;
479	if (end < start)
480		return -EINVAL;
481	if (end == start)
482		return 0;
483	vma = mas_walk(&mas);
484	if (!vma)
485		return -ENOMEM;
486
487	if (start > vma->vm_start)
488		prev = vma;
489	else
490		prev = mas_prev(&mas, 0);
491
492	for (nstart = start ; ; ) {
493		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
494
495		newflags |= flags;
496
497		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
498		tmp = vma->vm_end;
499		if (tmp > end)
500			tmp = end;
501		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
502		if (error)
503			break;
504		nstart = tmp;
505		if (nstart < prev->vm_end)
506			nstart = prev->vm_end;
507		if (nstart >= end)
508			break;
509
510		vma = find_vma(prev->vm_mm, prev->vm_end);
511		if (!vma || vma->vm_start != nstart) {
512			error = -ENOMEM;
513			break;
514		}
515	}
516	return error;
517}
518
519/*
 520 * Go through the vmas in the range and sum up the size of the
 521 * already-mlocked vma pages.
 522 * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
 523 * is also counted.
 524 * Return value: the number of previously mlocked pages
525 */
526static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
527		unsigned long start, size_t len)
528{
529	struct vm_area_struct *vma;
530	unsigned long count = 0;
531	unsigned long end;
532	VMA_ITERATOR(vmi, mm, start);
533
534	/* Don't overflow past ULONG_MAX */
535	if (unlikely(ULONG_MAX - len < start))
536		end = ULONG_MAX;
537	else
538		end = start + len;
539
540	for_each_vma_range(vmi, vma, end) {
541		if (vma->vm_flags & VM_LOCKED) {
542			if (start > vma->vm_start)
543				count -= (start - vma->vm_start);
544			if (end < vma->vm_end) {
545				count += end - vma->vm_start;
546				break;
547			}
548			count += vma->vm_end - vma->vm_start;
549		}
550	}
551
552	return count >> PAGE_SHIFT;
553}
554
555/*
556 * convert get_user_pages() return value to posix mlock() error
557 */
558static int __mlock_posix_error_return(long retval)
559{
560	if (retval == -EFAULT)
561		retval = -ENOMEM;
562	else if (retval == -ENOMEM)
563		retval = -EAGAIN;
564	return retval;
565}
566
567static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
568{
569	unsigned long locked;
570	unsigned long lock_limit;
571	int error = -ENOMEM;
572
573	start = untagged_addr(start);
574
575	if (!can_do_mlock())
576		return -EPERM;
577
578	len = PAGE_ALIGN(len + (offset_in_page(start)));
579	start &= PAGE_MASK;
580
581	lock_limit = rlimit(RLIMIT_MEMLOCK);
582	lock_limit >>= PAGE_SHIFT;
583	locked = len >> PAGE_SHIFT;
584
585	if (mmap_write_lock_killable(current->mm))
586		return -EINTR;
587
588	locked += current->mm->locked_vm;
589	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
590		/*
 591		 * It is possible that the requested region intersects with
 592		 * previously mlocked areas; that part is already accounted in
 593		 * "mm->locked_vm" and must not be counted again towards the new
 594		 * mlock increment, so check and adjust the locked count here.
595		 */
596		locked -= count_mm_mlocked_page_nr(current->mm,
597				start, len);
598	}
599
600	/* check against resource limits */
601	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
602		error = apply_vma_lock_flags(start, len, flags);
603
604	mmap_write_unlock(current->mm);
605	if (error)
606		return error;
607
608	error = __mm_populate(start, len, 0);
609	if (error)
610		return __mlock_posix_error_return(error);
611	return 0;
612}
613
614SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
615{
616	return do_mlock(start, len, VM_LOCKED);
617}
618
619SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
620{
621	vm_flags_t vm_flags = VM_LOCKED;
622
623	if (flags & ~MLOCK_ONFAULT)
624		return -EINVAL;
625
626	if (flags & MLOCK_ONFAULT)
627		vm_flags |= VM_LOCKONFAULT;
628
629	return do_mlock(start, len, vm_flags);
630}
631
632SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
633{
634	int ret;
635
636	start = untagged_addr(start);
637
638	len = PAGE_ALIGN(len + (offset_in_page(start)));
639	start &= PAGE_MASK;
640
641	if (mmap_write_lock_killable(current->mm))
642		return -EINTR;
643	ret = apply_vma_lock_flags(start, len, 0);
644	mmap_write_unlock(current->mm);
645
646	return ret;
647}
648
649/*
650 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
651 * and translate into the appropriate modifications to mm->def_flags and/or the
652 * flags for all current VMAs.
653 *
654 * There are a couple of subtleties with this.  If mlockall() is called multiple
655 * times with different flags, the values do not necessarily stack.  If mlockall
656 * is called once including the MCL_FUTURE flag and then a second time without
657 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
658 */
659static int apply_mlockall_flags(int flags)
660{
661	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
662	struct vm_area_struct *vma, *prev = NULL;
663	vm_flags_t to_add = 0;
664
665	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
666	if (flags & MCL_FUTURE) {
667		current->mm->def_flags |= VM_LOCKED;
668
669		if (flags & MCL_ONFAULT)
670			current->mm->def_flags |= VM_LOCKONFAULT;
671
672		if (!(flags & MCL_CURRENT))
673			goto out;
674	}
675
676	if (flags & MCL_CURRENT) {
677		to_add |= VM_LOCKED;
678		if (flags & MCL_ONFAULT)
679			to_add |= VM_LOCKONFAULT;
680	}
681
682	mas_for_each(&mas, vma, ULONG_MAX) {
683		vm_flags_t newflags;
684
685		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
686		newflags |= to_add;
687
688		/* Ignore errors */
689		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
690		mas_pause(&mas);
691		cond_resched();
692	}
693out:
694	return 0;
695}
696
697SYSCALL_DEFINE1(mlockall, int, flags)
698{
699	unsigned long lock_limit;
700	int ret;
701
702	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
703	    flags == MCL_ONFAULT)
704		return -EINVAL;
705
706	if (!can_do_mlock())
707		return -EPERM;
708
709	lock_limit = rlimit(RLIMIT_MEMLOCK);
710	lock_limit >>= PAGE_SHIFT;
711
712	if (mmap_write_lock_killable(current->mm))
713		return -EINTR;
714
715	ret = -ENOMEM;
716	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
717	    capable(CAP_IPC_LOCK))
718		ret = apply_mlockall_flags(flags);
719	mmap_write_unlock(current->mm);
720	if (!ret && (flags & MCL_CURRENT))
721		mm_populate(0, TASK_SIZE);
722
723	return ret;
724}
725
726SYSCALL_DEFINE0(munlockall)
727{
728	int ret;
729
730	if (mmap_write_lock_killable(current->mm))
731		return -EINTR;
732	ret = apply_mlockall_flags(0);
733	mmap_write_unlock(current->mm);
734	return ret;
735}
736
737/*
738 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
739 * shm segments) get accounted against the user_struct instead.
740 */
741static DEFINE_SPINLOCK(shmlock_user_lock);
742
743int user_shm_lock(size_t size, struct ucounts *ucounts)
744{
745	unsigned long lock_limit, locked;
746	long memlock;
747	int allowed = 0;
748
749	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
750	lock_limit = rlimit(RLIMIT_MEMLOCK);
751	if (lock_limit != RLIM_INFINITY)
752		lock_limit >>= PAGE_SHIFT;
753	spin_lock(&shmlock_user_lock);
754	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
755
756	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
757		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
758		goto out;
759	}
760	if (!get_ucounts(ucounts)) {
761		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
762		allowed = 0;
763		goto out;
764	}
765	allowed = 1;
766out:
767	spin_unlock(&shmlock_user_lock);
768	return allowed;
769}
770
771void user_shm_unlock(size_t size, struct ucounts *ucounts)
772{
773	spin_lock(&shmlock_user_lock);
774	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
775	spin_unlock(&shmlock_user_lock);
776	put_ucounts(ucounts);
777}