v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	linux/mm/mlock.c
  4 *
  5 *  (C) Copyright 1995 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 */
  8
  9#include <linux/capability.h>
 10#include <linux/mman.h>
 11#include <linux/mm.h>
 12#include <linux/sched/user.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/pagemap.h>
 16#include <linux/pagevec.h>
 17#include <linux/pagewalk.h>
 18#include <linux/mempolicy.h>
 19#include <linux/syscalls.h>
 20#include <linux/sched.h>
 21#include <linux/export.h>
 22#include <linux/rmap.h>
 23#include <linux/mmzone.h>
 24#include <linux/hugetlb.h>
 25#include <linux/memcontrol.h>
 26#include <linux/mm_inline.h>
 27#include <linux/secretmem.h>
 28
 29#include "internal.h"
 30
 31struct mlock_fbatch {
 32	local_lock_t lock;
 33	struct folio_batch fbatch;
 34};
 35
 36static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = {
 37	.lock = INIT_LOCAL_LOCK(lock),
 38};
 39
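/*
 * mlock is permitted when the caller either has a non-zero
 * RLIMIT_MEMLOCK limit or holds CAP_IPC_LOCK; do_mlock() and
 * mlockall() return -EPERM when this says no.
 */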
 40bool can_do_mlock(void)
 41{
 42	if (rlimit(RLIMIT_MEMLOCK) != 0)
 43		return true;
 44	if (capable(CAP_IPC_LOCK))
 45		return true;
 46	return false;
 47}
 48EXPORT_SYMBOL(can_do_mlock);
 49
 50/*
 51 * Mlocked folios are marked with the PG_mlocked flag for efficient testing
 52 * in vmscan and, possibly, the fault path; and to support semi-accurate
 53 * statistics.
 54 *
 55 * An mlocked folio [folio_test_mlocked(folio)] is unevictable.  As such, it
 56 * will be ostensibly placed on the LRU "unevictable" list (actually no such
 57 * list exists), rather than the [in]active lists. PG_unevictable is set to
 58 * indicate the unevictable state.
 59 */
 60
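/*
 * Called from mlock_folio_batch() for a folio that was already on (or
 * temporarily off) the LRU: move it to the unevictable state, or just
 * bump its mlock_count if it is already unevictable and mlocked.
 */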
 61static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec)
 62{
 63	/* There is nothing more we can do while it's off LRU */
 64	if (!folio_test_clear_lru(folio))
 65		return lruvec;
 66
 67	lruvec = folio_lruvec_relock_irq(folio, lruvec);
 68
 69	if (unlikely(folio_evictable(folio))) {
 70		/*
 71		 * This is a little surprising, but quite possible: PG_mlocked
 72		 * must have got cleared already by another CPU.  Could this
 73		 * folio be unevictable?  I'm not sure, but move it now if so.
 74		 */
 75		if (folio_test_unevictable(folio)) {
 76			lruvec_del_folio(lruvec, folio);
 77			folio_clear_unevictable(folio);
 78			lruvec_add_folio(lruvec, folio);
 79
 80			__count_vm_events(UNEVICTABLE_PGRESCUED,
 81					  folio_nr_pages(folio));
 82		}
 83		goto out;
 84	}
 85
 86	if (folio_test_unevictable(folio)) {
 87		if (folio_test_mlocked(folio))
 88			folio->mlock_count++;
 89		goto out;
 90	}
 91
 92	lruvec_del_folio(lruvec, folio);
 93	folio_clear_active(folio);
 94	folio_set_unevictable(folio);
 95	folio->mlock_count = !!folio_test_mlocked(folio);
 96	lruvec_add_folio(lruvec, folio);
 97	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
 98out:
 99	folio_set_lru(folio);
100	return lruvec;
101}
102
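/*
 * Called from mlock_folio_batch() for a folio that has never been on the
 * LRU: mark it unevictable (unless it turns out to be evictable after
 * all) and add it to the LRU under the lruvec lock.
 */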
103static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec)
104{
105	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
106
107	lruvec = folio_lruvec_relock_irq(folio, lruvec);
108
109	/* As above, this is a little surprising, but possible */
110	if (unlikely(folio_evictable(folio)))
111		goto out;
112
113	folio_set_unevictable(folio);
114	folio->mlock_count = !!folio_test_mlocked(folio);
115	__count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio));
116out:
117	lruvec_add_folio(lruvec, folio);
118	folio_set_lru(folio);
119	return lruvec;
120}
121
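/*
 * Called from mlock_folio_batch() to undo one mlock: drop mlock_count
 * and, once the last mlock is gone, clear PG_mlocked, fix up NR_MLOCK
 * and the vm events, and move the folio back to an evictable list when
 * it can now be reclaimed.
 */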
122static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec)
123{
124	int nr_pages = folio_nr_pages(folio);
125	bool isolated = false;
126
127	if (!folio_test_clear_lru(folio))
128		goto munlock;
129
130	isolated = true;
131	lruvec = folio_lruvec_relock_irq(folio, lruvec);
132
133	if (folio_test_unevictable(folio)) {
134		/* Then mlock_count is maintained, but might undercount */
135		if (folio->mlock_count)
136			folio->mlock_count--;
137		if (folio->mlock_count)
138			goto out;
139	}
140	/* else assume that was the last mlock: reclaim will fix it if not */
141
142munlock:
143	if (folio_test_clear_mlocked(folio)) {
144		__zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
145		if (isolated || !folio_test_unevictable(folio))
146			__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
147		else
148			__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
149	}
150
151	/* folio_evictable() has to be checked *after* clearing Mlocked */
152	if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) {
153		lruvec_del_folio(lruvec, folio);
154		folio_clear_unevictable(folio);
155		lruvec_add_folio(lruvec, folio);
156		__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
157	}
158out:
159	if (isolated)
160		folio_set_lru(folio);
161	return lruvec;
162}
163
164/*
165 * Flags held in the low bits of a struct folio pointer on the mlock_fbatch.
166 */
167#define LRU_FOLIO 0x1
168#define NEW_FOLIO 0x2
169static inline struct folio *mlock_lru(struct folio *folio)
170{
171	return (struct folio *)((unsigned long)folio + LRU_FOLIO);
172}
173
174static inline struct folio *mlock_new(struct folio *folio)
175{
176	return (struct folio *)((unsigned long)folio + NEW_FOLIO);
177}
178
179/*
180 * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can
181 * make use of such folio pointer flags in future, but for now just keep it for
182 * mlock.  We could use three separate folio batches instead, but one feels
183 * better (munlocking a full folio batch does not need to drain mlocking folio
184 * batches first).
185 */
186static void mlock_folio_batch(struct folio_batch *fbatch)
187{
188	struct lruvec *lruvec = NULL;
189	unsigned long mlock;
190	struct folio *folio;
191	int i;
192
193	for (i = 0; i < folio_batch_count(fbatch); i++) {
194		folio = fbatch->folios[i];
195		mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO);
196		folio = (struct folio *)((unsigned long)folio - mlock);
197		fbatch->folios[i] = folio;
198
199		if (mlock & LRU_FOLIO)
200			lruvec = __mlock_folio(folio, lruvec);
201		else if (mlock & NEW_FOLIO)
202			lruvec = __mlock_new_folio(folio, lruvec);
203		else
204			lruvec = __munlock_folio(folio, lruvec);
205	}
206
207	if (lruvec)
208		unlock_page_lruvec_irq(lruvec);
209	folios_put(fbatch);
210}
211
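/*
 * Flush this CPU's pending mlock/munlock folio batch; pairs with the
 * batching done in mlock_folio(), mlock_new_folio() and munlock_folio()
 * below.
 */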
212void mlock_drain_local(void)
213{
214	struct folio_batch *fbatch;
215
216	local_lock(&mlock_fbatch.lock);
217	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
218	if (folio_batch_count(fbatch))
219		mlock_folio_batch(fbatch);
220	local_unlock(&mlock_fbatch.lock);
221}
222
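/*
 * Drain another CPU's batch without taking its local_lock; the
 * WARN_ON_ONCE documents that this is only expected for an offline CPU.
 */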
223void mlock_drain_remote(int cpu)
224{
225	struct folio_batch *fbatch;
226
227	WARN_ON_ONCE(cpu_online(cpu));
228	fbatch = &per_cpu(mlock_fbatch.fbatch, cpu);
229	if (folio_batch_count(fbatch))
230		mlock_folio_batch(fbatch);
231}
232
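/* Does @cpu still have batched mlock/munlock work to drain? */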
233bool need_mlock_drain(int cpu)
234{
235	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
236}
237
238/**
239 * mlock_folio - mlock a folio already on (or temporarily off) LRU
240 * @folio: folio to be mlocked.
241 */
242void mlock_folio(struct folio *folio)
243{
244	struct folio_batch *fbatch;
245
246	local_lock(&mlock_fbatch.lock);
247	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
248
249	if (!folio_test_set_mlocked(folio)) {
250		int nr_pages = folio_nr_pages(folio);
251
252		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
253		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
254	}
255
256	folio_get(folio);
257	if (!folio_batch_add(fbatch, mlock_lru(folio)) ||
258	    folio_test_large(folio) || lru_cache_disabled())
259		mlock_folio_batch(fbatch);
260	local_unlock(&mlock_fbatch.lock);
261}
262
263/**
264 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
265 * @folio: folio to be mlocked, either normal or a THP head.
266 */
267void mlock_new_folio(struct folio *folio)
268{
269	struct folio_batch *fbatch;
270	int nr_pages = folio_nr_pages(folio);
271
272	local_lock(&mlock_fbatch.lock);
273	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
274	folio_set_mlocked(folio);
275
276	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
277	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
278
279	folio_get(folio);
280	if (!folio_batch_add(fbatch, mlock_new(folio)) ||
281	    folio_test_large(folio) || lru_cache_disabled())
282		mlock_folio_batch(fbatch);
283	local_unlock(&mlock_fbatch.lock);
284}
285
286/**
287 * munlock_folio - munlock a folio
288 * @folio: folio to be munlocked, either normal or a THP head.
289 */
290void munlock_folio(struct folio *folio)
291{
292	struct folio_batch *fbatch;
293
294	local_lock(&mlock_fbatch.lock);
295	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
296	/*
297	 * folio_test_clear_mlocked(folio) must be left to __munlock_folio(),
298	 * which will check whether the folio is multiply mlocked.
299	 */
300	folio_get(folio);
301	if (!folio_batch_add(fbatch, folio) ||
302	    folio_test_large(folio) || lru_cache_disabled())
303		mlock_folio_batch(fbatch);
304	local_unlock(&mlock_fbatch.lock);
305}
306
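/*
 * Return how many consecutive PTEs, starting at @pte, map this folio so
 * that the whole batch can be mlocked or munlocked in one step; small
 * folios always report a step of 1.
 */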
307static inline unsigned int folio_mlock_step(struct folio *folio,
308		pte_t *pte, unsigned long addr, unsigned long end)
309{
310	const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY;
311	unsigned int count = (end - addr) >> PAGE_SHIFT;
312	pte_t ptent = ptep_get(pte);
313
314	if (!folio_test_large(folio))
315		return 1;
316
317	return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL,
318			       NULL, NULL);
319}
320
321static inline bool allow_mlock_munlock(struct folio *folio,
322		struct vm_area_struct *vma, unsigned long start,
323		unsigned long end, unsigned int step)
324{
 325	/*
 326	 * For munlock, allow munlocking a large folio that is only
 327	 * partially mapped into the VMA: the folio may have been
 328	 * mlocked before the VMA was later split.
 329	 *
 330	 * Under memory pressure such a large folio can be split, and
 331	 * the pages that no longer sit in a VM_LOCKED VMA can then
 332	 * be reclaimed.
 333	 */
334	if (!(vma->vm_flags & VM_LOCKED))
335		return true;
336
337	/* folio_within_range() cannot take KSM, but any small folio is OK */
338	if (!folio_test_large(folio))
339		return true;
340
341	/* folio not in range [start, end), skip mlock */
342	if (!folio_within_range(folio, vma, start, end))
343		return false;
344
345	/* folio is not fully mapped, skip mlock */
346	if (step != folio_nr_pages(folio))
347		return false;
348
349	return true;
350}
351
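/*
 * Page table walk callback: mlock or munlock (depending on VM_LOCKED)
 * every present, normal folio in the range, treating a PMD-mapped THP as
 * a single folio and stepping over PTE-mapped large folios with the help
 * of folio_mlock_step().
 */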
352static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
353			   unsigned long end, struct mm_walk *walk)
354
355{
356	struct vm_area_struct *vma = walk->vma;
357	spinlock_t *ptl;
358	pte_t *start_pte, *pte;
359	pte_t ptent;
360	struct folio *folio;
361	unsigned int step = 1;
362	unsigned long start = addr;
363
364	ptl = pmd_trans_huge_lock(pmd, vma);
365	if (ptl) {
366		if (!pmd_present(*pmd))
367			goto out;
368		if (is_huge_zero_pmd(*pmd))
369			goto out;
370		folio = pmd_folio(*pmd);
371		if (vma->vm_flags & VM_LOCKED)
372			mlock_folio(folio);
373		else
374			munlock_folio(folio);
375		goto out;
376	}
377
378	start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
379	if (!start_pte) {
380		walk->action = ACTION_AGAIN;
381		return 0;
382	}
383
384	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
385		ptent = ptep_get(pte);
386		if (!pte_present(ptent))
387			continue;
388		folio = vm_normal_folio(vma, addr, ptent);
389		if (!folio || folio_is_zone_device(folio))
390			continue;
391
392		step = folio_mlock_step(folio, pte, addr, end);
393		if (!allow_mlock_munlock(folio, vma, start, end, step))
394			goto next_entry;
395
396		if (vma->vm_flags & VM_LOCKED)
397			mlock_folio(folio);
398		else
399			munlock_folio(folio);
400
401next_entry:
402		pte += step - 1;
403		addr += (step - 1) << PAGE_SHIFT;
404	}
405	pte_unmap(start_pte);
406out:
407	spin_unlock(ptl);
408	cond_resched();
409	return 0;
410}
411
412/*
413 * mlock_vma_pages_range() - mlock any pages already in the range,
414 *                           or munlock all pages in the range.
415 * @vma - vma containing range to be mlock()ed or munlock()ed
416 * @start - start address in @vma of the range
417 * @end - end of range in @vma
418 * @newflags - the new set of flags for @vma.
419 *
420 * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
421 * called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
422 */
423static void mlock_vma_pages_range(struct vm_area_struct *vma,
424	unsigned long start, unsigned long end, vm_flags_t newflags)
425{
426	static const struct mm_walk_ops mlock_walk_ops = {
427		.pmd_entry = mlock_pte_range,
428		.walk_lock = PGWALK_WRLOCK_VERIFY,
429	};
430
431	/*
432	 * There is a slight chance that concurrent page migration,
433	 * or page reclaim finding a page of this now-VM_LOCKED vma,
434	 * will call mlock_vma_folio() and raise page's mlock_count:
435	 * double counting, leaving the page unevictable indefinitely.
436	 * Communicate this danger to mlock_vma_folio() with VM_IO,
437	 * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
438	 * mmap_lock is held in write mode here, so this weird
439	 * combination should not be visible to other mmap_lock users;
440	 * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED.
441	 */
442	if (newflags & VM_LOCKED)
443		newflags |= VM_IO;
444	vma_start_write(vma);
445	vm_flags_reset_once(vma, newflags);
446
447	lru_add_drain();
448	walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL);
449	lru_add_drain();
450
451	if (newflags & VM_IO) {
452		newflags &= ~VM_IO;
453		vm_flags_reset_once(vma, newflags);
454	}
455}
456
457/*
458 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
459 *
460 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
461 * munlock is a no-op.  However, for some special vmas, we go ahead and
462 * populate the ptes.
463 *
464 * For vmas that pass the filters, merge/split as appropriate.
465 */
466static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
467	       struct vm_area_struct **prev, unsigned long start,
468	       unsigned long end, vm_flags_t newflags)
469{
470	struct mm_struct *mm = vma->vm_mm;
471	int nr_pages;
472	int ret = 0;
473	vm_flags_t oldflags = vma->vm_flags;
474
475	if (newflags == oldflags || (oldflags & VM_SPECIAL) ||
476	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
477	    vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE))
478		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
479		goto out;
480
481	vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags);
482	if (IS_ERR(vma)) {
483		ret = PTR_ERR(vma);
484		goto out;
485	}
486
487	/*
488	 * Keep track of amount of locked VM.
489	 */
490	nr_pages = (end - start) >> PAGE_SHIFT;
491	if (!(newflags & VM_LOCKED))
492		nr_pages = -nr_pages;
493	else if (oldflags & VM_LOCKED)
494		nr_pages = 0;
495	mm->locked_vm += nr_pages;
496
497	/*
498	 * vm_flags is protected by the mmap_lock held in write mode.
499	 * It's okay if try_to_unmap_one unmaps a page just after we
500	 * set VM_LOCKED, populate_vma_page_range will bring it back.
501	 */
502	if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
503		/* No work to do, and mlocking twice would be wrong */
504		vma_start_write(vma);
505		vm_flags_reset(vma, newflags);
506	} else {
507		mlock_vma_pages_range(vma, start, end, newflags);
508	}
509out:
510	*prev = vma;
511	return ret;
512}
513
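/*
 * Apply the VM_LOCKED/VM_LOCKONFAULT bits in @flags (or clear them when
 * @flags is zero) to every VMA overlapping [start, start + len), letting
 * mlock_fixup() split or merge VMAs as required.
 */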
514static int apply_vma_lock_flags(unsigned long start, size_t len,
515				vm_flags_t flags)
516{
517	unsigned long nstart, end, tmp;
518	struct vm_area_struct *vma, *prev;
519	VMA_ITERATOR(vmi, current->mm, start);
520
521	VM_BUG_ON(offset_in_page(start));
522	VM_BUG_ON(len != PAGE_ALIGN(len));
523	end = start + len;
524	if (end < start)
525		return -EINVAL;
526	if (end == start)
527		return 0;
528	vma = vma_iter_load(&vmi);
529	if (!vma)
530		return -ENOMEM;
531
532	prev = vma_prev(&vmi);
533	if (start > vma->vm_start)
534		prev = vma;
535
536	nstart = start;
537	tmp = vma->vm_start;
538	for_each_vma_range(vmi, vma, end) {
539		int error;
540		vm_flags_t newflags;
541
542		if (vma->vm_start != tmp)
543			return -ENOMEM;
544
545		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
546		newflags |= flags;
547		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
548		tmp = vma->vm_end;
549		if (tmp > end)
550			tmp = end;
551		error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);
552		if (error)
553			return error;
554		tmp = vma_iter_end(&vmi);
555		nstart = tmp;
556	}
557
558	if (tmp < end)
559		return -ENOMEM;
560
561	return 0;
562}
563
564/*
 565 * Go through the vma areas and sum the size of the mlocked
 566 * vma pages.
 567 * Note that the deferred memory locking case (mlock2(,,MLOCK_ONFAULT))
 568 * is also counted.
 569 * Return value: the number of previously mlocked pages.
570 */
571static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
572		unsigned long start, size_t len)
573{
574	struct vm_area_struct *vma;
575	unsigned long count = 0;
576	unsigned long end;
577	VMA_ITERATOR(vmi, mm, start);
578
579	/* Don't overflow past ULONG_MAX */
580	if (unlikely(ULONG_MAX - len < start))
581		end = ULONG_MAX;
582	else
583		end = start + len;
584
585	for_each_vma_range(vmi, vma, end) {
586		if (vma->vm_flags & VM_LOCKED) {
587			if (start > vma->vm_start)
588				count -= (start - vma->vm_start);
589			if (end < vma->vm_end) {
590				count += end - vma->vm_start;
591				break;
592			}
593			count += vma->vm_end - vma->vm_start;
594		}
595	}
596
597	return count >> PAGE_SHIFT;
598}
599
600/*
601 * convert get_user_pages() return value to posix mlock() error
602 */
603static int __mlock_posix_error_return(long retval)
604{
605	if (retval == -EFAULT)
606		retval = -ENOMEM;
607	else if (retval == -ENOMEM)
608		retval = -EAGAIN;
609	return retval;
610}
611
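/*
 * Common implementation of mlock() and mlock2(): page-align the request,
 * check it against RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK,
 * apply the VMA flags, and finally fault the pages in via __mm_populate().
 */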
612static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
613{
614	unsigned long locked;
615	unsigned long lock_limit;
616	int error = -ENOMEM;
617
618	start = untagged_addr(start);
619
620	if (!can_do_mlock())
621		return -EPERM;
622
623	len = PAGE_ALIGN(len + (offset_in_page(start)));
624	start &= PAGE_MASK;
625
626	lock_limit = rlimit(RLIMIT_MEMLOCK);
627	lock_limit >>= PAGE_SHIFT;
628	locked = len >> PAGE_SHIFT;
629
630	if (mmap_write_lock_killable(current->mm))
631		return -EINTR;
632
633	locked += current->mm->locked_vm;
634	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
635		/*
636		 * It is possible that the regions requested intersect with
637		 * previously mlocked areas, that part area in "mm->locked_vm"
638		 * should not be counted to new mlock increment count. So check
639		 * and adjust locked count if necessary.
640		 */
641		locked -= count_mm_mlocked_page_nr(current->mm,
642				start, len);
643	}
644
645	/* check against resource limits */
646	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
647		error = apply_vma_lock_flags(start, len, flags);
648
649	mmap_write_unlock(current->mm);
650	if (error)
651		return error;
652
653	error = __mm_populate(start, len, 0);
654	if (error)
655		return __mlock_posix_error_return(error);
656	return 0;
657}
658
659SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
660{
661	return do_mlock(start, len, VM_LOCKED);
662}
663
664SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
665{
666	vm_flags_t vm_flags = VM_LOCKED;
667
668	if (flags & ~MLOCK_ONFAULT)
669		return -EINVAL;
670
671	if (flags & MLOCK_ONFAULT)
672		vm_flags |= VM_LOCKONFAULT;
673
674	return do_mlock(start, len, vm_flags);
675}
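
/*
 * Example (userspace, not part of this file): with a libc that exposes
 * mlock2() and MLOCK_ONFAULT (glibc 2.27 and later do), a mapping can be
 * locked lazily so that pages are pinned only as they are first touched:
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED && mlock2(buf, len, MLOCK_ONFAULT))
 *		perror("mlock2");
 *
 * Plain mlock(buf, len) would instead fault everything in up front via
 * the __mm_populate() call at the end of do_mlock() above.
 */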
676
677SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
678{
679	int ret;
680
681	start = untagged_addr(start);
682
683	len = PAGE_ALIGN(len + (offset_in_page(start)));
684	start &= PAGE_MASK;
685
686	if (mmap_write_lock_killable(current->mm))
687		return -EINTR;
688	ret = apply_vma_lock_flags(start, len, 0);
689	mmap_write_unlock(current->mm);
690
691	return ret;
692}
693
694/*
695 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
696 * and translate into the appropriate modifications to mm->def_flags and/or the
697 * flags for all current VMAs.
698 *
699 * There are a couple of subtleties with this.  If mlockall() is called multiple
700 * times with different flags, the values do not necessarily stack.  If mlockall
701 * is called once including the MCL_FUTURE flag and then a second time without
702 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
703 */
704static int apply_mlockall_flags(int flags)
705{
706	VMA_ITERATOR(vmi, current->mm, 0);
707	struct vm_area_struct *vma, *prev = NULL;
708	vm_flags_t to_add = 0;
709
710	current->mm->def_flags &= ~VM_LOCKED_MASK;
711	if (flags & MCL_FUTURE) {
712		current->mm->def_flags |= VM_LOCKED;
713
714		if (flags & MCL_ONFAULT)
715			current->mm->def_flags |= VM_LOCKONFAULT;
716
717		if (!(flags & MCL_CURRENT))
718			goto out;
719	}
720
721	if (flags & MCL_CURRENT) {
722		to_add |= VM_LOCKED;
723		if (flags & MCL_ONFAULT)
724			to_add |= VM_LOCKONFAULT;
725	}
726
727	for_each_vma(vmi, vma) {
728		int error;
729		vm_flags_t newflags;
730
731		newflags = vma->vm_flags & ~VM_LOCKED_MASK;
732		newflags |= to_add;
733
734		error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
735				    newflags);
736		/* Ignore errors, but prev needs fixing up. */
737		if (error)
738			prev = vma;
739		cond_resched();
740	}
741out:
742	return 0;
743}
744
745SYSCALL_DEFINE1(mlockall, int, flags)
746{
747	unsigned long lock_limit;
748	int ret;
749
750	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
751	    flags == MCL_ONFAULT)
752		return -EINVAL;
753
754	if (!can_do_mlock())
755		return -EPERM;
756
757	lock_limit = rlimit(RLIMIT_MEMLOCK);
758	lock_limit >>= PAGE_SHIFT;
759
760	if (mmap_write_lock_killable(current->mm))
761		return -EINTR;
762
763	ret = -ENOMEM;
764	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
765	    capable(CAP_IPC_LOCK))
766		ret = apply_mlockall_flags(flags);
767	mmap_write_unlock(current->mm);
768	if (!ret && (flags & MCL_CURRENT))
769		mm_populate(0, TASK_SIZE);
770
771	return ret;
772}
773
774SYSCALL_DEFINE0(munlockall)
775{
776	int ret;
777
778	if (mmap_write_lock_killable(current->mm))
779		return -EINTR;
780	ret = apply_mlockall_flags(0);
781	mmap_write_unlock(current->mm);
782	return ret;
783}
784
785/*
786 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
787 * shm segments) get accounted against the user_struct instead.
788 */
789static DEFINE_SPINLOCK(shmlock_user_lock);
790
791int user_shm_lock(size_t size, struct ucounts *ucounts)
792{
793	unsigned long lock_limit, locked;
794	long memlock;
795	int allowed = 0;
796
797	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
798	lock_limit = rlimit(RLIMIT_MEMLOCK);
799	if (lock_limit != RLIM_INFINITY)
800		lock_limit >>= PAGE_SHIFT;
801	spin_lock(&shmlock_user_lock);
802	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
803
804	if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
805		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
806		goto out;
807	}
808	if (!get_ucounts(ucounts)) {
809		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
810		allowed = 0;
811		goto out;
812	}
813	allowed = 1;
814out:
815	spin_unlock(&shmlock_user_lock);
816	return allowed;
817}
818
819void user_shm_unlock(size_t size, struct ucounts *ucounts)
820{
821	spin_lock(&shmlock_user_lock);
822	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
823	spin_unlock(&shmlock_user_lock);
824	put_ucounts(ucounts);
825}
v3.15
 
  1/*
  2 *	linux/mm/mlock.c
  3 *
  4 *  (C) Copyright 1995 Linus Torvalds
  5 *  (C) Copyright 2002 Christoph Hellwig
  6 */
  7
  8#include <linux/capability.h>
  9#include <linux/mman.h>
 10#include <linux/mm.h>
 11#include <linux/swap.h>
 12#include <linux/swapops.h>
 13#include <linux/pagemap.h>
 14#include <linux/pagevec.h>
 15#include <linux/mempolicy.h>
 16#include <linux/syscalls.h>
 17#include <linux/sched.h>
 18#include <linux/export.h>
 19#include <linux/rmap.h>
 20#include <linux/mmzone.h>
 21#include <linux/hugetlb.h>
 22#include <linux/memcontrol.h>
 23#include <linux/mm_inline.h>
 24
 25#include "internal.h"
 26
 27int can_do_mlock(void)
 28{
 29	if (capable(CAP_IPC_LOCK))
 30		return 1;
 31	if (rlimit(RLIMIT_MEMLOCK) != 0)
 32		return 1;
 33	return 0;
 34}
 35EXPORT_SYMBOL(can_do_mlock);
 36
 37/*
 38 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 39 * in vmscan and, possibly, the fault path; and to support semi-accurate
 40 * statistics.
 41 *
 42 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 43 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 44 * The unevictable list is an LRU sibling list to the [in]active lists.
 45 * PageUnevictable is set to indicate the unevictable state.
 46 *
 47 * When lazy mlocking via vmscan, it is important to ensure that the
 48 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 49 * may have mlocked a page that is being munlocked. So lazy mlock must take
 50 * the mmap_sem for read, and verify that the vma really is locked
 51 * (see mm/rmap.c).
 52 */
 53
 54/*
 55 *  LRU accounting for clear_page_mlock()
 56 */
 57void clear_page_mlock(struct page *page)
 58{
 59	if (!TestClearPageMlocked(page))
 60		return;
 61
 62	mod_zone_page_state(page_zone(page), NR_MLOCK,
 63			    -hpage_nr_pages(page));
 64	count_vm_event(UNEVICTABLE_PGCLEARED);
 65	if (!isolate_lru_page(page)) {
 66		putback_lru_page(page);
 67	} else {
 68		/*
  69		 * We lost the race; the page has already moved to the evictable list.
 70		 */
 71		if (PageUnevictable(page))
 72			count_vm_event(UNEVICTABLE_PGSTRANDED);
 73	}
 74}
 75
 76/*
 77 * Mark page as mlocked if not already.
 78 * If page on LRU, isolate and putback to move to unevictable list.
 79 */
 80void mlock_vma_page(struct page *page)
 81{
 82	/* Serialize with page migration */
 83	BUG_ON(!PageLocked(page));
 84
 85	if (!TestSetPageMlocked(page)) {
 86		mod_zone_page_state(page_zone(page), NR_MLOCK,
 87				    hpage_nr_pages(page));
 88		count_vm_event(UNEVICTABLE_PGMLOCKED);
 89		if (!isolate_lru_page(page))
 90			putback_lru_page(page);
 91	}
 92}
 93
 94/*
 95 * Isolate a page from LRU with optional get_page() pin.
 96 * Assumes lru_lock already held and page already pinned.
 97 */
 98static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
 99{
100	if (PageLRU(page)) {
101		struct lruvec *lruvec;
102
103		lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
104		if (getpage)
105			get_page(page);
106		ClearPageLRU(page);
107		del_page_from_lru_list(page, lruvec, page_lru(page));
108		return true;
109	}
110
111	return false;
112}
113
114/*
115 * Finish munlock after successful page isolation
116 *
117 * Page must be locked. This is a wrapper for try_to_munlock()
118 * and putback_lru_page() with munlock accounting.
119 */
120static void __munlock_isolated_page(struct page *page)
121{
122	int ret = SWAP_AGAIN;
123
124	/*
125	 * Optimization: if the page was mapped just once, that's our mapping
126	 * and we don't need to check all the other vmas.
127	 */
128	if (page_mapcount(page) > 1)
129		ret = try_to_munlock(page);
130
 131	/* Did try_to_munlock() succeed or punt? */
132	if (ret != SWAP_MLOCK)
133		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
134
135	putback_lru_page(page);
136}
137
138/*
139 * Accounting for page isolation fail during munlock
140 *
141 * Performs accounting when page isolation fails in munlock. There is nothing
142 * else to do because it means some other task has already removed the page
143 * from the LRU. putback_lru_page() will take care of removing the page from
144 * the unevictable list, if necessary. vmscan [page_referenced()] will move
145 * the page back to the unevictable list if some other vma has it mlocked.
146 */
147static void __munlock_isolation_failed(struct page *page)
148{
149	if (PageUnevictable(page))
150		__count_vm_event(UNEVICTABLE_PGSTRANDED);
151	else
152		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
153}
154
155/**
156 * munlock_vma_page - munlock a vma page
157 * @page - page to be unlocked, either a normal page or THP page head
158 *
159 * returns the size of the page as a page mask (0 for normal page,
160 *         HPAGE_PMD_NR - 1 for THP head page)
161 *
162 * called from munlock()/munmap() path with page supposedly on the LRU.
163 * When we munlock a page, because the vma where we found the page is being
164 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
165 * page locked so that we can leave it on the unevictable lru list and not
166 * bother vmscan with it.  However, to walk the page's rmap list in
167 * try_to_munlock() we must isolate the page from the LRU.  If some other
168 * task has removed the page from the LRU, we won't be able to do that.
169 * So we clear the PageMlocked as we might not get another chance.  If we
170 * can't isolate the page, we leave it for putback_lru_page() and vmscan
171 * [page_referenced()/try_to_unmap()] to deal with.
172 */
173unsigned int munlock_vma_page(struct page *page)
174{
175	unsigned int nr_pages;
176	struct zone *zone = page_zone(page);
177
178	/* For try_to_munlock() and to serialize with page migration */
179	BUG_ON(!PageLocked(page));
180
181	/*
182	 * Serialize with any parallel __split_huge_page_refcount() which
183	 * might otherwise copy PageMlocked to part of the tail pages before
184	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
185	 */
186	spin_lock_irq(&zone->lru_lock);
187
188	nr_pages = hpage_nr_pages(page);
189	if (!TestClearPageMlocked(page))
190		goto unlock_out;
191
192	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
193
194	if (__munlock_isolate_lru_page(page, true)) {
195		spin_unlock_irq(&zone->lru_lock);
196		__munlock_isolated_page(page);
197		goto out;
198	}
199	__munlock_isolation_failed(page);
200
201unlock_out:
202	spin_unlock_irq(&zone->lru_lock);
203
204out:
205	return nr_pages - 1;
206}
207
208/**
209 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
210 * @vma:   target vma
211 * @start: start address
212 * @end:   end address
213 *
214 * This takes care of making the pages present too.
215 *
216 * return 0 on success, negative error code on error.
217 *
218 * vma->vm_mm->mmap_sem must be held for at least read.
219 */
220long __mlock_vma_pages_range(struct vm_area_struct *vma,
221		unsigned long start, unsigned long end, int *nonblocking)
222{
223	struct mm_struct *mm = vma->vm_mm;
224	unsigned long nr_pages = (end - start) / PAGE_SIZE;
225	int gup_flags;
226
227	VM_BUG_ON(start & ~PAGE_MASK);
228	VM_BUG_ON(end   & ~PAGE_MASK);
229	VM_BUG_ON(start < vma->vm_start);
230	VM_BUG_ON(end   > vma->vm_end);
231	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
232
233	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
234	/*
235	 * We want to touch writable mappings with a write fault in order
236	 * to break COW, except for shared mappings because these don't COW
237	 * and we would not want to dirty them for nothing.
238	 */
239	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
240		gup_flags |= FOLL_WRITE;
241
242	/*
243	 * We want mlock to succeed for regions that have any permissions
244	 * other than PROT_NONE.
245	 */
246	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
247		gup_flags |= FOLL_FORCE;
248
249	/*
250	 * We made sure addr is within a VMA, so the following will
251	 * not result in a stack expansion that recurses back here.
252	 */
253	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
254				NULL, NULL, nonblocking);
255}
256
257/*
258 * convert get_user_pages() return value to posix mlock() error
259 */
260static int __mlock_posix_error_return(long retval)
261{
262	if (retval == -EFAULT)
263		retval = -ENOMEM;
264	else if (retval == -ENOMEM)
265		retval = -EAGAIN;
266	return retval;
267}
268
269/*
270 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
271 *
272 * The fast path is available only for evictable pages with single mapping.
273 * Then we can bypass the per-cpu pvec and get better performance.
274 * when mapcount > 1 we need try_to_munlock() which can fail.
275 * when !page_evictable(), we need the full redo logic of putback_lru_page to
276 * avoid leaving evictable page in unevictable list.
277 *
278 * In case of success, @page is added to @pvec and @pgrescued is incremented
279 * in case that the page was previously unevictable. @page is also unlocked.
280 */
281static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
282		int *pgrescued)
283{
284	VM_BUG_ON_PAGE(PageLRU(page), page);
285	VM_BUG_ON_PAGE(!PageLocked(page), page);
286
287	if (page_mapcount(page) <= 1 && page_evictable(page)) {
288		pagevec_add(pvec, page);
289		if (TestClearPageUnevictable(page))
290			(*pgrescued)++;
291		unlock_page(page);
292		return true;
293	}
294
295	return false;
296}
297
298/*
299 * Putback multiple evictable pages to the LRU
300 *
301 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
302 * the pages might have meanwhile become unevictable but that is OK.
303 */
304static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
305{
306	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
307	/*
 308	 * __pagevec_lru_add() calls release_pages() so we don't call
 309	 * put_page() explicitly
310	 */
311	__pagevec_lru_add(pvec);
312	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
313}
314
315/*
316 * Munlock a batch of pages from the same zone
317 *
318 * The work is split to two main phases. First phase clears the Mlocked flag
319 * and attempts to isolate the pages, all under a single zone lru lock.
320 * The second phase finishes the munlock only for pages where isolation
321 * succeeded.
322 *
323 * Note that the pagevec may be modified during the process.
324 */
325static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
326{
327	int i;
328	int nr = pagevec_count(pvec);
329	int delta_munlocked;
330	struct pagevec pvec_putback;
331	int pgrescued = 0;
332
333	pagevec_init(&pvec_putback, 0);
334
335	/* Phase 1: page isolation */
336	spin_lock_irq(&zone->lru_lock);
337	for (i = 0; i < nr; i++) {
338		struct page *page = pvec->pages[i];
339
340		if (TestClearPageMlocked(page)) {
341			/*
342			 * We already have pin from follow_page_mask()
343			 * so we can spare the get_page() here.
344			 */
345			if (__munlock_isolate_lru_page(page, false))
346				continue;
347			else
348				__munlock_isolation_failed(page);
349		}
350
351		/*
352		 * We won't be munlocking this page in the next phase
353		 * but we still need to release the follow_page_mask()
354		 * pin. We cannot do it under lru_lock however. If it's
355		 * the last pin, __page_cache_release() would deadlock.
356		 */
357		pagevec_add(&pvec_putback, pvec->pages[i]);
358		pvec->pages[i] = NULL;
359	}
360	delta_munlocked = -nr + pagevec_count(&pvec_putback);
361	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
362	spin_unlock_irq(&zone->lru_lock);
363
364	/* Now we can release pins of pages that we are not munlocking */
365	pagevec_release(&pvec_putback);
366
367	/* Phase 2: page munlock */
368	for (i = 0; i < nr; i++) {
369		struct page *page = pvec->pages[i];
370
371		if (page) {
372			lock_page(page);
373			if (!__putback_lru_fast_prepare(page, &pvec_putback,
374					&pgrescued)) {
375				/*
376				 * Slow path. We don't want to lose the last
377				 * pin before unlock_page()
378				 */
379				get_page(page); /* for putback_lru_page() */
380				__munlock_isolated_page(page);
381				unlock_page(page);
382				put_page(page); /* from follow_page_mask() */
383			}
384		}
385	}
386
387	/*
388	 * Phase 3: page putback for pages that qualified for the fast path
389	 * This will also call put_page() to return pin from follow_page_mask()
390	 */
391	if (pagevec_count(&pvec_putback))
392		__putback_lru_fast(&pvec_putback, pgrescued);
393}
394
395/*
396 * Fill up pagevec for __munlock_pagevec using pte walk
397 *
398 * The function expects that the struct page corresponding to @start address is
 399 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
400 *
401 * The rest of @pvec is filled by subsequent pages within the same pmd and same
402 * zone, as long as the pte's are present and vm_normal_page() succeeds. These
403 * pages also get pinned.
404 *
405 * Returns the address of the next page that should be scanned. This equals
406 * @start + PAGE_SIZE when no page could be added by the pte walk.
407 */
408static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
409		struct vm_area_struct *vma, int zoneid,	unsigned long start,
410		unsigned long end)
411{
412	pte_t *pte;
413	spinlock_t *ptl;
414
415	/*
416	 * Initialize pte walk starting at the already pinned page where we
417	 * are sure that there is a pte, as it was pinned under the same
418	 * mmap_sem write op.
419	 */
420	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
421	/* Make sure we do not cross the page table boundary */
422	end = pgd_addr_end(start, end);
423	end = pud_addr_end(start, end);
424	end = pmd_addr_end(start, end);
425
426	/* The page next to the pinned page is the first we will try to get */
427	start += PAGE_SIZE;
428	while (start < end) {
429		struct page *page = NULL;
430		pte++;
431		if (pte_present(*pte))
432			page = vm_normal_page(vma, start, *pte);
433		/*
434		 * Break if page could not be obtained or the page's node+zone does not
435		 * match
436		 */
437		if (!page || page_zone_id(page) != zoneid)
438			break;
439
440		get_page(page);
441		/*
442		 * Increase the address that will be returned *before* the
443		 * eventual break due to pvec becoming full by adding the page
444		 */
445		start += PAGE_SIZE;
446		if (pagevec_add(pvec, page) == 0)
447			break;
448	}
449	pte_unmap_unlock(pte, ptl);
450	return start;
451}
452
453/*
 454 * munlock_vma_pages_range() - munlock all pages in the vma range.
 455 * @vma - vma containing range to be munlock()ed.
 456 * @start - start address in @vma of the range
 457 * @end - end of range in @vma.
458 *
459 *  For mremap(), munmap() and exit().
460 *
461 * Called with @vma VM_LOCKED.
462 *
463 * Returns with VM_LOCKED cleared.  Callers must be prepared to
464 * deal with this.
465 *
466 * We don't save and restore VM_LOCKED here because pages are
467 * still on lru.  In unmap path, pages might be scanned by reclaim
468 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
469 * free them.  This will result in freeing mlocked pages.
470 */
471void munlock_vma_pages_range(struct vm_area_struct *vma,
472			     unsigned long start, unsigned long end)
473{
474	vma->vm_flags &= ~VM_LOCKED;
475
476	while (start < end) {
477		struct page *page = NULL;
478		unsigned int page_mask;
479		unsigned long page_increm;
480		struct pagevec pvec;
481		struct zone *zone;
482		int zoneid;
483
484		pagevec_init(&pvec, 0);
485		/*
486		 * Although FOLL_DUMP is intended for get_dump_page(),
487		 * it just so happens that its special treatment of the
488		 * ZERO_PAGE (returning an error instead of doing get_page)
489		 * suits munlock very well (and if somehow an abnormal page
490		 * has sneaked into the range, we won't oops here: great).
491		 */
492		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
493				&page_mask);
494
495		if (page && !IS_ERR(page)) {
496			if (PageTransHuge(page)) {
497				lock_page(page);
498				/*
499				 * Any THP page found by follow_page_mask() may
500				 * have gotten split before reaching
501				 * munlock_vma_page(), so we need to recompute
502				 * the page_mask here.
503				 */
504				page_mask = munlock_vma_page(page);
505				unlock_page(page);
506				put_page(page); /* follow_page_mask() */
507			} else {
508				/*
509				 * Non-huge pages are handled in batches via
510				 * pagevec. The pin from follow_page_mask()
511				 * prevents them from collapsing by THP.
512				 */
513				pagevec_add(&pvec, page);
514				zone = page_zone(page);
515				zoneid = page_zone_id(page);
516
517				/*
518				 * Try to fill the rest of pagevec using fast
519				 * pte walk. This will also update start to
520				 * the next page to process. Then munlock the
521				 * pagevec.
522				 */
523				start = __munlock_pagevec_fill(&pvec, vma,
524						zoneid, start, end);
525				__munlock_pagevec(&pvec, zone);
526				goto next;
527			}
528		}
529		/* It's a bug to munlock in the middle of a THP page */
530		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
531		page_increm = 1 + page_mask;
532		start += page_increm * PAGE_SIZE;
533next:
534		cond_resched();
535	}
536}
537
538/*
539 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
540 *
541 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
542 * munlock is a no-op.  However, for some special vmas, we go ahead and
543 * populate the ptes.
544 *
545 * For vmas that pass the filters, merge/split as appropriate.
546 */
547static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
548	unsigned long start, unsigned long end, vm_flags_t newflags)
549{
550	struct mm_struct *mm = vma->vm_mm;
551	pgoff_t pgoff;
552	int nr_pages;
553	int ret = 0;
554	int lock = !!(newflags & VM_LOCKED);
555
556	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
557	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
558		goto out;	/* don't set VM_LOCKED,  don't count */
559
560	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
561	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
562			  vma->vm_file, pgoff, vma_policy(vma));
563	if (*prev) {
564		vma = *prev;
565		goto success;
566	}
567
568	if (start != vma->vm_start) {
569		ret = split_vma(mm, vma, start, 1);
570		if (ret)
571			goto out;
572	}
573
574	if (end != vma->vm_end) {
575		ret = split_vma(mm, vma, end, 0);
576		if (ret)
577			goto out;
578	}
579
580success:
581	/*
582	 * Keep track of amount of locked VM.
583	 */
584	nr_pages = (end - start) >> PAGE_SHIFT;
585	if (!lock)
586		nr_pages = -nr_pages;
587	mm->locked_vm += nr_pages;
588
589	/*
590	 * vm_flags is protected by the mmap_sem held in write mode.
591	 * It's okay if try_to_unmap_one unmaps a page just after we
592	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
593	 */
594
595	if (lock)
596		vma->vm_flags = newflags;
597	else
598		munlock_vma_pages_range(vma, start, end);
599
600out:
601	*prev = vma;
602	return ret;
603}
604
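/*
 * Walk the VMAs covering [start, start + len) and set or clear VM_LOCKED
 * on each via mlock_fixup(); callers hold mmap_sem for write.
 */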
605static int do_mlock(unsigned long start, size_t len, int on)
606{
607	unsigned long nstart, end, tmp;
608	struct vm_area_struct * vma, * prev;
609	int error;
610
611	VM_BUG_ON(start & ~PAGE_MASK);
612	VM_BUG_ON(len != PAGE_ALIGN(len));
613	end = start + len;
614	if (end < start)
615		return -EINVAL;
616	if (end == start)
617		return 0;
618	vma = find_vma(current->mm, start);
619	if (!vma || vma->vm_start > start)
620		return -ENOMEM;
621
622	prev = vma->vm_prev;
623	if (start > vma->vm_start)
624		prev = vma;
625
626	for (nstart = start ; ; ) {
627		vm_flags_t newflags;
628
629		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
630
631		newflags = vma->vm_flags & ~VM_LOCKED;
632		if (on)
633			newflags |= VM_LOCKED;
634
635		tmp = vma->vm_end;
636		if (tmp > end)
637			tmp = end;
638		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
639		if (error)
640			break;
641		nstart = tmp;
642		if (nstart < prev->vm_end)
643			nstart = prev->vm_end;
644		if (nstart >= end)
645			break;
646
647		vma = prev->vm_next;
648		if (!vma || vma->vm_start != nstart) {
649			error = -ENOMEM;
650			break;
651		}
652	}
653	return error;
654}
655
656/*
657 * __mm_populate - populate and/or mlock pages within a range of address space.
658 *
659 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
660 * flags. VMAs must be already marked with the desired vm_flags, and
661 * mmap_sem must not be held.
662 */
663int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
664{
665	struct mm_struct *mm = current->mm;
666	unsigned long end, nstart, nend;
667	struct vm_area_struct *vma = NULL;
668	int locked = 0;
669	long ret = 0;
670
671	VM_BUG_ON(start & ~PAGE_MASK);
672	VM_BUG_ON(len != PAGE_ALIGN(len));
673	end = start + len;
674
675	for (nstart = start; nstart < end; nstart = nend) {
676		/*
677		 * We want to fault in pages for [nstart; end) address range.
678		 * Find first corresponding VMA.
679		 */
680		if (!locked) {
681			locked = 1;
682			down_read(&mm->mmap_sem);
683			vma = find_vma(mm, nstart);
684		} else if (nstart >= vma->vm_end)
685			vma = vma->vm_next;
686		if (!vma || vma->vm_start >= end)
687			break;
688		/*
689		 * Set [nstart; nend) to intersection of desired address
690		 * range with the first VMA. Also, skip undesirable VMA types.
691		 */
692		nend = min(end, vma->vm_end);
693		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
694			continue;
695		if (nstart < vma->vm_start)
696			nstart = vma->vm_start;
697		/*
698		 * Now fault in a range of pages. __mlock_vma_pages_range()
699		 * double checks the vma flags, so that it won't mlock pages
700		 * if the vma was already munlocked.
701		 */
702		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
703		if (ret < 0) {
704			if (ignore_errors) {
705				ret = 0;
706				continue;	/* continue at next VMA */
707			}
708			ret = __mlock_posix_error_return(ret);
709			break;
710		}
711		nend = nstart + ret * PAGE_SIZE;
712		ret = 0;
713	}
714	if (locked)
715		up_read(&mm->mmap_sem);
716	return ret;	/* 0 or negative error code */
717}
718
719SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
720{
721	unsigned long locked;
722	unsigned long lock_limit;
723	int error = -ENOMEM;
724
725	if (!can_do_mlock())
726		return -EPERM;
727
728	lru_add_drain_all();	/* flush pagevec */
729
730	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
731	start &= PAGE_MASK;
732
733	lock_limit = rlimit(RLIMIT_MEMLOCK);
734	lock_limit >>= PAGE_SHIFT;
735	locked = len >> PAGE_SHIFT;
736
737	down_write(&current->mm->mmap_sem);
738
739	locked += current->mm->locked_vm;
740
741	/* check against resource limits */
742	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
743		error = do_mlock(start, len, 1);
744
745	up_write(&current->mm->mmap_sem);
746	if (!error)
747		error = __mm_populate(start, len, 0);
748	return error;
749}
750
751SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
752{
753	int ret;
754
755	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
756	start &= PAGE_MASK;
757
758	down_write(&current->mm->mmap_sem);
759	ret = do_mlock(start, len, 0);
760	up_write(&current->mm->mmap_sem);
761
762	return ret;
763}
764
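/*
 * Apply MCL_CURRENT/MCL_FUTURE for mlockall(): adjust mm->def_flags for
 * future mappings and, for MCL_CURRENT, push VM_LOCKED into every
 * existing VMA through mlock_fixup(); munlockall() calls this with
 * flags == 0 to clear everything.
 */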
765static int do_mlockall(int flags)
766{
767	struct vm_area_struct * vma, * prev = NULL;
768
769	if (flags & MCL_FUTURE)
770		current->mm->def_flags |= VM_LOCKED;
771	else
772		current->mm->def_flags &= ~VM_LOCKED;
773	if (flags == MCL_FUTURE)
774		goto out;
775
776	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
777		vm_flags_t newflags;
778
779		newflags = vma->vm_flags & ~VM_LOCKED;
780		if (flags & MCL_CURRENT)
781			newflags |= VM_LOCKED;
782
783		/* Ignore errors */
784		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
785		cond_resched();
786	}
787out:
788	return 0;
789}
790
791SYSCALL_DEFINE1(mlockall, int, flags)
792{
793	unsigned long lock_limit;
794	int ret = -EINVAL;
795
796	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
797		goto out;
798
799	ret = -EPERM;
800	if (!can_do_mlock())
801		goto out;
802
803	if (flags & MCL_CURRENT)
804		lru_add_drain_all();	/* flush pagevec */
805
806	lock_limit = rlimit(RLIMIT_MEMLOCK);
807	lock_limit >>= PAGE_SHIFT;
808
809	ret = -ENOMEM;
810	down_write(&current->mm->mmap_sem);
811
812	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
813	    capable(CAP_IPC_LOCK))
814		ret = do_mlockall(flags);
815	up_write(&current->mm->mmap_sem);
816	if (!ret && (flags & MCL_CURRENT))
817		mm_populate(0, TASK_SIZE);
818out:
819	return ret;
820}
821
822SYSCALL_DEFINE0(munlockall)
823{
824	int ret;
825
826	down_write(&current->mm->mmap_sem);
827	ret = do_mlockall(0);
828	up_write(&current->mm->mmap_sem);
829	return ret;
830}
831
832/*
833 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
834 * shm segments) get accounted against the user_struct instead.
835 */
836static DEFINE_SPINLOCK(shmlock_user_lock);
837
838int user_shm_lock(size_t size, struct user_struct *user)
839{
840	unsigned long lock_limit, locked;
841	int allowed = 0;
842
843	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
844	lock_limit = rlimit(RLIMIT_MEMLOCK);
845	if (lock_limit == RLIM_INFINITY)
846		allowed = 1;
847	lock_limit >>= PAGE_SHIFT;
848	spin_lock(&shmlock_user_lock);
849	if (!allowed &&
 850	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
851		goto out;
852	get_uid(user);
853	user->locked_shm += locked;
854	allowed = 1;
855out:
856	spin_unlock(&shmlock_user_lock);
857	return allowed;
858}
859
860void user_shm_unlock(size_t size, struct user_struct *user)
861{
862	spin_lock(&shmlock_user_lock);
863	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
864	spin_unlock(&shmlock_user_lock);
865	free_uid(user);
866}