v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *	linux/mm/mlock.c
  4 *
  5 *  (C) Copyright 1995 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 */
  8
  9#include <linux/capability.h>
 10#include <linux/mman.h>
 11#include <linux/mm.h>
 12#include <linux/sched/user.h>
 13#include <linux/swap.h>
 14#include <linux/swapops.h>
 15#include <linux/pagemap.h>
 16#include <linux/pagevec.h>
 17#include <linux/mempolicy.h>
 18#include <linux/syscalls.h>
 19#include <linux/sched.h>
 20#include <linux/export.h>
 21#include <linux/rmap.h>
 22#include <linux/mmzone.h>
 23#include <linux/hugetlb.h>
 24#include <linux/memcontrol.h>
 25#include <linux/mm_inline.h>
 26#include <linux/secretmem.h>
 27
 28#include "internal.h"
 29
 30bool can_do_mlock(void)
 31{
 32	if (rlimit(RLIMIT_MEMLOCK) != 0)
 33		return true;
 34	if (capable(CAP_IPC_LOCK))
 35		return true;
 36	return false;
 37}
 38EXPORT_SYMBOL(can_do_mlock);
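
/*
 * Editor's aside -- a hypothetical userspace sketch, not part of mlock.c:
 * it mirrors the can_do_mlock() policy above from the caller's side. A
 * non-zero RLIMIT_MEMLOCK soft limit or CAP_IPC_LOCK permits locking;
 * geteuid() == 0 stands in here for a real capability check (libcap).
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static bool can_probably_mlock(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0 && rl.rlim_cur != 0)
		return true;
	return geteuid() == 0;	/* stand-in for capable(CAP_IPC_LOCK) */
}

int main(void)
{
	printf("mlock() likely %s\n",
	       can_probably_mlock() ? "permitted" : "denied");
	return 0;
}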
 39
 40/*
 41 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 42 * in vmscan and, possibly, the fault path; and to support semi-accurate
 43 * statistics.
 44 *
 45 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 46 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 47 * The unevictable list is an LRU sibling list to the [in]active lists.
 48 * PageUnevictable is set to indicate the unevictable state.
 49 *
 50 * When lazy mlocking via vmscan, it is important to ensure that the
 51 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 52 * may have mlocked a page that is being munlocked. So lazy mlock must take
 53 * the mmap_lock for read, and verify that the vma really is locked
 54 * (see mm/rmap.c).
 55 */
 56
 57/*
 58 *  LRU accounting for clear_page_mlock()
 59 */
 60void clear_page_mlock(struct page *page)
 61{
 62	int nr_pages;
 63
 64	if (!TestClearPageMlocked(page))
 65		return;
 66
 67	nr_pages = thp_nr_pages(page);
 68	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 69	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 70	/*
 71	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
 72	 * in __pagevec_lru_add_fn().
 73	 *
 74	 * See __pagevec_lru_add_fn for more explanation.
 75	 */
 76	if (!isolate_lru_page(page)) {
 77		putback_lru_page(page);
 78	} else {
 79		/*
 80		 * We lost the race: the page already moved to the evictable list.
 81		 */
 82		if (PageUnevictable(page))
 83			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 84	}
 85}
 86
 87/*
 88 * Mark page as mlocked if not already.
 89 * If page on LRU, isolate and putback to move to unevictable list.
 90 */
 91void mlock_vma_page(struct page *page)
 92{
 93	/* Serialize with page migration */
 94	BUG_ON(!PageLocked(page));
 95
 96	VM_BUG_ON_PAGE(PageTail(page), page);
 97	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 98
 99	if (!TestSetPageMlocked(page)) {
100		int nr_pages = thp_nr_pages(page);
101
102		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
103		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
104		if (!isolate_lru_page(page))
105			putback_lru_page(page);
106	}
107}
108
109/*
110 * Finish munlock after successful page isolation
111 *
112 * Page must be locked. This is a wrapper for page_mlock()
113 * and putback_lru_page() with munlock accounting.
114 */
115static void __munlock_isolated_page(struct page *page)
116{
117	/*
118	 * Optimization: if the page was mapped just once, that's our mapping
119	 * and we don't need to check all the other vmas.
120	 */
121	if (page_mapcount(page) > 1)
122		page_mlock(page);
123
124	/* Did page_mlock() succeed or punt? */
125	if (!PageMlocked(page))
126		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
127
128	putback_lru_page(page);
129}
130
131/*
132 * Accounting for page isolation fail during munlock
133 *
134 * Performs accounting when page isolation fails in munlock. There is nothing
135 * else to do because it means some other task has already removed the page
136 * from the LRU. putback_lru_page() will take care of removing the page from
137 * the unevictable list, if necessary. vmscan [page_referenced()] will move
138 * the page back to the unevictable list if some other vma has it mlocked.
139 */
140static void __munlock_isolation_failed(struct page *page)
141{
142	int nr_pages = thp_nr_pages(page);
143
144	if (PageUnevictable(page))
145		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
146	else
147		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
148}
149
150/**
151 * munlock_vma_page - munlock a vma page
152 * @page: page to be unlocked, either a normal page or THP page head
153 *
154 * returns the size of the page as a page mask (0 for normal page,
155 *         HPAGE_PMD_NR - 1 for THP head page)
156 *
157 * called from munlock()/munmap() path with page supposedly on the LRU.
158 * When we munlock a page, because the vma where we found the page is being
159 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
160 * page locked so that we can leave it on the unevictable lru list and not
161 * bother vmscan with it.  However, to walk the page's rmap list in
162 * page_mlock() we must isolate the page from the LRU.  If some other
163 * task has removed the page from the LRU, we won't be able to do that.
164 * So we clear the PageMlocked as we might not get another chance.  If we
165 * can't isolate the page, we leave it for putback_lru_page() and vmscan
166 * [page_referenced()/try_to_unmap()] to deal with.
167 */
168unsigned int munlock_vma_page(struct page *page)
169{
170	int nr_pages;
171
172	/* For page_mlock() and to serialize with page migration */
173	BUG_ON(!PageLocked(page));
174	VM_BUG_ON_PAGE(PageTail(page), page);
175
176	if (!TestClearPageMlocked(page)) {
177		/* Potentially, PTE-mapped THP: do not skip the remaining PTEs */
178		return 0;
179	}
180
181	nr_pages = thp_nr_pages(page);
182	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
183
184	if (!isolate_lru_page(page))
185		__munlock_isolated_page(page);
186	else
187		__munlock_isolation_failed(page);
188
189	return nr_pages - 1;
190}
191
192/*
193 * convert get_user_pages() return value to posix mlock() error
194 */
195static int __mlock_posix_error_return(long retval)
196{
197	if (retval == -EFAULT)
198		retval = -ENOMEM;
199	else if (retval == -ENOMEM)
200		retval = -EAGAIN;
201	return retval;
202}
203
204/*
205 * Prepare page for fast batched LRU putback via __putback_lru_fast()
206 *
207 * The fast path is available only for evictable pages with a single mapping.
208 * Then we can bypass the per-cpu pvec and get better performance.
209 * When mapcount > 1 we need page_mlock(), which can fail.
210 * When !page_evictable(), we need the full redo logic of putback_lru_page to
211 * avoid leaving an evictable page in the unevictable list.
212 *
213 * In case of success, @page is added to @pvec and @pgrescued is incremented
214 * in case that the page was previously unevictable. @page is also unlocked.
215 */
216static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
217		int *pgrescued)
218{
219	VM_BUG_ON_PAGE(PageLRU(page), page);
220	VM_BUG_ON_PAGE(!PageLocked(page), page);
221
222	if (page_mapcount(page) <= 1 && page_evictable(page)) {
223		pagevec_add(pvec, page);
224		if (TestClearPageUnevictable(page))
225			(*pgrescued)++;
226		unlock_page(page);
227		return true;
228	}
229
230	return false;
231}
232
233/*
234 * Putback multiple evictable pages to the LRU
235 *
236 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
237 * the pages might have meanwhile become unevictable but that is OK.
238 */
239static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
240{
241	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
242	/*
243	 *__pagevec_lru_add() calls release_pages() so we don't call
244	 * put_page() explicitly
245	 */
246	__pagevec_lru_add(pvec);
247	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
248}
249
250/*
251 * Munlock a batch of pages from the same zone
252 *
253 * The work is split to two main phases. First phase clears the Mlocked flag
254 * and attempts to isolate the pages, all under a single zone lru lock.
255 * The second phase finishes the munlock only for pages where isolation
256 * succeeded.
257 *
258 * Note that the pagevec may be modified during the process.
259 */
260static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
261{
262	int i;
263	int nr = pagevec_count(pvec);
264	int delta_munlocked = -nr;
265	struct pagevec pvec_putback;
266	struct lruvec *lruvec = NULL;
267	int pgrescued = 0;
268
269	pagevec_init(&pvec_putback);
270
271	/* Phase 1: page isolation */
272	for (i = 0; i < nr; i++) {
273		struct page *page = pvec->pages[i];
274
275		if (TestClearPageMlocked(page)) {
276			/*
277			 * We already have pin from follow_page_mask()
278			 * so we can spare the get_page() here.
279			 */
280			if (TestClearPageLRU(page)) {
281				lruvec = relock_page_lruvec_irq(page, lruvec);
282				del_page_from_lru_list(page, lruvec);
283				continue;
284			} else
285				__munlock_isolation_failed(page);
286		} else {
287			delta_munlocked++;
288		}
289
290		/*
291		 * We won't be munlocking this page in the next phase
292		 * but we still need to release the follow_page_mask()
293		 * pin. We cannot do it under lru_lock however. If it's
294		 * the last pin, __page_cache_release() would deadlock.
295		 */
296		pagevec_add(&pvec_putback, pvec->pages[i]);
297		pvec->pages[i] = NULL;
298	}
299	if (lruvec) {
300		__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
301		unlock_page_lruvec_irq(lruvec);
302	} else if (delta_munlocked) {
303		mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
304	}
305
306	/* Now we can release pins of pages that we are not munlocking */
307	pagevec_release(&pvec_putback);
308
309	/* Phase 2: page munlock */
310	for (i = 0; i < nr; i++) {
311		struct page *page = pvec->pages[i];
312
313		if (page) {
314			lock_page(page);
315			if (!__putback_lru_fast_prepare(page, &pvec_putback,
316					&pgrescued)) {
317				/*
318				 * Slow path. We don't want to lose the last
319				 * pin before unlock_page()
320				 */
321				get_page(page); /* for putback_lru_page() */
322				__munlock_isolated_page(page);
323				unlock_page(page);
324				put_page(page); /* from follow_page_mask() */
325			}
326		}
327	}
328
329	/*
330	 * Phase 3: page putback for pages that qualified for the fast path
331	 * This will also call put_page() to return pin from follow_page_mask()
332	 */
333	if (pagevec_count(&pvec_putback))
334		__putback_lru_fast(&pvec_putback, pgrescued);
335}
336
337/*
338 * Fill up pagevec for __munlock_pagevec using pte walk
339 *
340 * The function expects that the struct page corresponding to @start address is
341 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
342 *
343 * The rest of @pvec is filled by subsequent pages within the same pmd and same
344 * zone, as long as the ptes are present and vm_normal_page() succeeds. These
345 * pages also get pinned.
346 *
347 * Returns the address of the next page that should be scanned. This equals
348 * @start + PAGE_SIZE when no page could be added by the pte walk.
349 */
350static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
351			struct vm_area_struct *vma, struct zone *zone,
352			unsigned long start, unsigned long end)
353{
354	pte_t *pte;
355	spinlock_t *ptl;
356
357	/*
358	 * Initialize pte walk starting at the already pinned page where we
359	 * are sure that there is a pte, as it was pinned under the same
360	 * mmap_lock write op.
361	 */
362	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
363	/* Make sure we do not cross the page table boundary */
364	end = pgd_addr_end(start, end);
365	end = p4d_addr_end(start, end);
366	end = pud_addr_end(start, end);
367	end = pmd_addr_end(start, end);
368
369	/* The page next to the pinned page is the first we will try to get */
370	start += PAGE_SIZE;
371	while (start < end) {
372		struct page *page = NULL;
373		pte++;
374		if (pte_present(*pte))
375			page = vm_normal_page(vma, start, *pte);
376		/*
377		 * Break if page could not be obtained or the page's node+zone does not
378		 * match
379		 */
380		if (!page || page_zone(page) != zone)
381			break;
382
383		/*
384		 * Do not use pagevec for PTE-mapped THP,
385		 * munlock_vma_pages_range() will handle them.
386		 */
387		if (PageTransCompound(page))
388			break;
389
390		get_page(page);
391		/*
392		 * Increase the address that will be returned *before* the
393		 * eventual break due to pvec becoming full by adding the page
394		 */
395		start += PAGE_SIZE;
396		if (pagevec_add(pvec, page) == 0)
397			break;
398	}
399	pte_unmap_unlock(pte, ptl);
400	return start;
401}
402
403/*
404 * munlock_vma_pages_range() - munlock all pages in the vma range.
405 * @vma - vma containing range to be munlock()ed.
406 * @start - start address in @vma of the range
407 * @end - end of range in @vma.
408 *
409 *  For mremap(), munmap() and exit().
410 *
411 * Called with @vma VM_LOCKED.
412 *
413 * Returns with VM_LOCKED cleared.  Callers must be prepared to
414 * deal with this.
415 *
416 * We don't save and restore VM_LOCKED here because pages are
417 * still on lru.  In unmap path, pages might be scanned by reclaim
418 * and re-mlocked by page_mlock/try_to_unmap before we unmap and
419 * free them.  This will result in freeing mlocked pages.
420 */
421void munlock_vma_pages_range(struct vm_area_struct *vma,
422			     unsigned long start, unsigned long end)
423{
424	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
425
426	while (start < end) {
427		struct page *page;
428		unsigned int page_mask = 0;
429		unsigned long page_increm;
430		struct pagevec pvec;
431		struct zone *zone;
432
433		pagevec_init(&pvec);
434		/*
435		 * Although FOLL_DUMP is intended for get_dump_page(),
436		 * it just so happens that its special treatment of the
437		 * ZERO_PAGE (returning an error instead of doing get_page)
438		 * suits munlock very well (and if somehow an abnormal page
439		 * has sneaked into the range, we won't oops here: great).
440		 */
441		page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
442
443		if (page && !IS_ERR(page)) {
444			if (PageTransTail(page)) {
445				VM_BUG_ON_PAGE(PageMlocked(page), page);
446				put_page(page); /* follow_page_mask() */
447			} else if (PageTransHuge(page)) {
448				lock_page(page);
449				/*
450				 * Any THP page found by follow_page_mask() may
451				 * have gotten split before reaching
452				 * munlock_vma_page(), so we need to compute
453				 * the page_mask here instead.
454				 */
455				page_mask = munlock_vma_page(page);
456				unlock_page(page);
457				put_page(page); /* follow_page_mask() */
458			} else {
459				/*
460				 * Non-huge pages are handled in batches via
461				 * pagevec. The pin from follow_page_mask()
462				 * prevents them from collapsing by THP.
463				 */
464				pagevec_add(&pvec, page);
465				zone = page_zone(page);
466
467				/*
468				 * Try to fill the rest of pagevec using fast
469				 * pte walk. This will also update start to
470				 * the next page to process. Then munlock the
471				 * pagevec.
472				 */
473				start = __munlock_pagevec_fill(&pvec, vma,
474						zone, start, end);
475				__munlock_pagevec(&pvec, zone);
476				goto next;
477			}
478		}
479		page_increm = 1 + page_mask;
480		start += page_increm * PAGE_SIZE;
481next:
482		cond_resched();
483	}
484}
485
486/*
487 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
488 *
489 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
490 * munlock is a no-op.  However, for some special vmas, we go ahead and
491 * populate the ptes.
492 *
493 * For vmas that pass the filters, merge/split as appropriate.
494 */
495static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
496	unsigned long start, unsigned long end, vm_flags_t newflags)
497{
498	struct mm_struct *mm = vma->vm_mm;
499	pgoff_t pgoff;
500	int nr_pages;
501	int ret = 0;
502	int lock = !!(newflags & VM_LOCKED);
503	vm_flags_t old_flags = vma->vm_flags;
504
505	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
506	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) ||
507	    vma_is_dax(vma) || vma_is_secretmem(vma))
508		/* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */
509		goto out;
510
511	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
512	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
513			  vma->vm_file, pgoff, vma_policy(vma),
514			  vma->vm_userfaultfd_ctx);
515	if (*prev) {
516		vma = *prev;
517		goto success;
518	}
519
520	if (start != vma->vm_start) {
521		ret = split_vma(mm, vma, start, 1);
522		if (ret)
523			goto out;
524	}
525
526	if (end != vma->vm_end) {
527		ret = split_vma(mm, vma, end, 0);
528		if (ret)
529			goto out;
530	}
531
532success:
533	/*
534	 * Keep track of amount of locked VM.
535	 */
536	nr_pages = (end - start) >> PAGE_SHIFT;
537	if (!lock)
538		nr_pages = -nr_pages;
539	else if (old_flags & VM_LOCKED)
540		nr_pages = 0;
541	mm->locked_vm += nr_pages;
542
543	/*
544	 * vm_flags is protected by the mmap_lock held in write mode.
545	 * It's okay if try_to_unmap_one unmaps a page just after we
546	 * set VM_LOCKED, populate_vma_page_range will bring it back.
547	 */
548
549	if (lock)
550		vma->vm_flags = newflags;
551	else
552		munlock_vma_pages_range(vma, start, end);
553
554out:
555	*prev = vma;
556	return ret;
557}
558
559static int apply_vma_lock_flags(unsigned long start, size_t len,
560				vm_flags_t flags)
561{
562	unsigned long nstart, end, tmp;
563	struct vm_area_struct *vma, *prev;
564	int error;
565
566	VM_BUG_ON(offset_in_page(start));
567	VM_BUG_ON(len != PAGE_ALIGN(len));
568	end = start + len;
569	if (end < start)
570		return -EINVAL;
571	if (end == start)
572		return 0;
573	vma = find_vma(current->mm, start);
574	if (!vma || vma->vm_start > start)
575		return -ENOMEM;
576
577	prev = vma->vm_prev;
578	if (start > vma->vm_start)
579		prev = vma;
580
581	for (nstart = start ; ; ) {
582		vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
583
584		newflags |= flags;
585
586		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
587		tmp = vma->vm_end;
588		if (tmp > end)
589			tmp = end;
590		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
591		if (error)
592			break;
593		nstart = tmp;
594		if (nstart < prev->vm_end)
595			nstart = prev->vm_end;
596		if (nstart >= end)
597			break;
598
599		vma = prev->vm_next;
600		if (!vma || vma->vm_start != nstart) {
601			error = -ENOMEM;
602			break;
603		}
604	}
605	return error;
606}
607
608/*
609 * Go through the vma areas and sum the size of the mlocked
610 * vma pages, as the return value.
611 * Note that the deferred memory locking case (mlock2(..., MLOCK_ONFAULT))
612 * is also counted.
613 * Return value: count of previously mlocked pages
614 */
615static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
616		unsigned long start, size_t len)
617{
618	struct vm_area_struct *vma;
619	unsigned long count = 0;
620
621	if (mm == NULL)
622		mm = current->mm;
623
624	vma = find_vma(mm, start);
625	if (vma == NULL)
626		return 0;
627
628	for (; vma ; vma = vma->vm_next) {
629		if (start >= vma->vm_end)
630			continue;
631		if (start + len <=  vma->vm_start)
632			break;
633		if (vma->vm_flags & VM_LOCKED) {
634			if (start > vma->vm_start)
635				count -= (start - vma->vm_start);
636			if (start + len < vma->vm_end) {
637				count += start + len - vma->vm_start;
638				break;
639			}
640			count += vma->vm_end - vma->vm_start;
641		}
642	}
643
644	return count >> PAGE_SHIFT;
645}
646
647static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
648{
649	unsigned long locked;
650	unsigned long lock_limit;
651	int error = -ENOMEM;
652
653	start = untagged_addr(start);
654
655	if (!can_do_mlock())
656		return -EPERM;
657
658	len = PAGE_ALIGN(len + (offset_in_page(start)));
659	start &= PAGE_MASK;
660
661	lock_limit = rlimit(RLIMIT_MEMLOCK);
662	lock_limit >>= PAGE_SHIFT;
663	locked = len >> PAGE_SHIFT;
664
665	if (mmap_write_lock_killable(current->mm))
666		return -EINTR;
667
668	locked += current->mm->locked_vm;
669	if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) {
670		/*
671		 * It is possible that the regions requested intersect with
672		 * previously mlocked areas; that part, already in "mm->locked_vm",
673		 * should not be counted again toward the new mlock total. So check
674		 * and adjust the locked count if necessary.
675		 */
676		locked -= count_mm_mlocked_page_nr(current->mm,
677				start, len);
678	}
679
680	/* check against resource limits */
681	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
682		error = apply_vma_lock_flags(start, len, flags);
683
684	mmap_write_unlock(current->mm);
685	if (error)
686		return error;
687
688	error = __mm_populate(start, len, 0);
689	if (error)
690		return __mlock_posix_error_return(error);
691	return 0;
692}
693
694SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
695{
696	return do_mlock(start, len, VM_LOCKED);
697}
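
/*
 * Editor's aside -- a minimal userspace use of the mlock() syscall defined
 * above (illustrative sketch, not part of mlock.c). do_mlock() page-aligns
 * the range itself; EPERM comes from can_do_mlock(), ENOMEM from the
 * RLIMIT_MEMLOCK check, and EAGAIN from __mlock_posix_error_return() when
 * the populate step fails.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;

	if (posix_memalign(&buf, page, 4 * page))
		return 1;
	if (mlock(buf, 4 * page) != 0) {
		fprintf(stderr, "mlock: %s\n", strerror(errno));
		return 1;
	}
	memset(buf, 0, 4 * page);	/* resident and locked: not reclaimable */
	munlock(buf, 4 * page);
	free(buf);
	return 0;
}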
698
699SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags)
700{
701	vm_flags_t vm_flags = VM_LOCKED;
702
703	if (flags & ~MLOCK_ONFAULT)
704		return -EINVAL;
705
706	if (flags & MLOCK_ONFAULT)
707		vm_flags |= VM_LOCKONFAULT;
708
709	return do_mlock(start, len, vm_flags);
710}
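
/*
 * Editor's aside -- MLOCK_ONFAULT from userspace (illustrative sketch,
 * assuming the glibc >= 2.27 mlock2() wrapper; older systems must use
 * syscall(__NR_mlock2, ...)). VM_LOCKONFAULT locks pages as they are
 * faulted in rather than populating the whole range up front.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	char *buf = malloc(len);

	if (!buf)
		return 1;
	if (mlock2(buf, len, MLOCK_ONFAULT) != 0) {
		perror("mlock2");
		return 1;
	}
	buf[0] = 1;	/* this page is now resident and locked */
	munlock(buf, len);
	free(buf);
	return 0;
}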
711
712SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
713{
714	int ret;
715
716	start = untagged_addr(start);
717
718	len = PAGE_ALIGN(len + (offset_in_page(start)));
719	start &= PAGE_MASK;
720
721	if (mmap_write_lock_killable(current->mm))
722		return -EINTR;
723	ret = apply_vma_lock_flags(start, len, 0);
724	mmap_write_unlock(current->mm);
725
726	return ret;
727}
728
729/*
730 * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall)
731 * and translate into the appropriate modifications to mm->def_flags and/or the
732 * flags for all current VMAs.
733 *
734 * There are a couple of subtleties with this.  If mlockall() is called multiple
735 * times with different flags, the values do not necessarily stack.  If mlockall
736 * is called once including the MCL_FUTURE flag and then a second time without
737 * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags.
738 */
739static int apply_mlockall_flags(int flags)
740{
741	struct vm_area_struct *vma, *prev = NULL;
742	vm_flags_t to_add = 0;
743
744	current->mm->def_flags &= VM_LOCKED_CLEAR_MASK;
745	if (flags & MCL_FUTURE) {
746		current->mm->def_flags |= VM_LOCKED;
747
748		if (flags & MCL_ONFAULT)
749			current->mm->def_flags |= VM_LOCKONFAULT;
750
751		if (!(flags & MCL_CURRENT))
752			goto out;
753	}
754
755	if (flags & MCL_CURRENT) {
756		to_add |= VM_LOCKED;
757		if (flags & MCL_ONFAULT)
758			to_add |= VM_LOCKONFAULT;
759	}
760
761	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
762		vm_flags_t newflags;
763
764		newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
765		newflags |= to_add;
766
767		/* Ignore errors */
768		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
769		cond_resched();
770	}
771out:
772	return 0;
773}
774
775SYSCALL_DEFINE1(mlockall, int, flags)
776{
777	unsigned long lock_limit;
778	int ret;
779
780	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) ||
781	    flags == MCL_ONFAULT)
782		return -EINVAL;
783
784	if (!can_do_mlock())
785		return -EPERM;
786
787	lock_limit = rlimit(RLIMIT_MEMLOCK);
788	lock_limit >>= PAGE_SHIFT;
789
790	if (mmap_write_lock_killable(current->mm))
791		return -EINTR;
792
793	ret = -ENOMEM;
794	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
795	    capable(CAP_IPC_LOCK))
796		ret = apply_mlockall_flags(flags);
797	mmap_write_unlock(current->mm);
798	if (!ret && (flags & MCL_CURRENT))
799		mm_populate(0, TASK_SIZE);
800
801	return ret;
802}
803
804SYSCALL_DEFINE0(munlockall)
805{
806	int ret;
807
808	if (mmap_write_lock_killable(current->mm))
809		return -EINTR;
810	ret = apply_mlockall_flags(0);
811	mmap_write_unlock(current->mm);
812	return ret;
813}
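
/*
 * Editor's aside -- typical realtime use of the mlockall()/munlockall()
 * syscalls above (illustrative sketch): MCL_CURRENT | MCL_FUTURE pins both
 * present and future mappings so the critical section takes no page faults.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
		perror("mlockall");
		return 1;
	}
	/* ... latency-sensitive work ... */
	munlockall();
	return 0;
}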
814
815/*
816 * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
817 * shm segments) get accounted against the ucounts instead.
818 */
819static DEFINE_SPINLOCK(shmlock_user_lock);
820
821int user_shm_lock(size_t size, struct ucounts *ucounts)
822{
823	unsigned long lock_limit, locked;
824	long memlock;
825	int allowed = 0;
826
827	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
828	lock_limit = rlimit(RLIMIT_MEMLOCK);
829	if (lock_limit == RLIM_INFINITY)
830		allowed = 1;
831	lock_limit >>= PAGE_SHIFT;
832	spin_lock(&shmlock_user_lock);
833	memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
834
835	if (!allowed && (memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) {
836		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
837		goto out;
838	}
839	if (!get_ucounts(ucounts)) {
840		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked);
841		goto out;
842	}
843	allowed = 1;
844out:
845	spin_unlock(&shmlock_user_lock);
846	return allowed;
847}
848
849void user_shm_unlock(size_t size, struct ucounts *ucounts)
850{
851	spin_lock(&shmlock_user_lock);
852	dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
853	spin_unlock(&shmlock_user_lock);
854	put_ucounts(ucounts);
855}
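
/*
 * Editor's aside -- the userspace path into user_shm_lock() and
 * user_shm_unlock() above (illustrative sketch): shmctl(SHM_LOCK) on a
 * SysV segment, accounted against the owner's ucounts rather than
 * mm->locked_vm.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	if (shmctl(id, SHM_LOCK, NULL) != 0)
		perror("shmctl(SHM_LOCK)");
	shmctl(id, SHM_UNLOCK, NULL);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}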
v3.15
 
  1/*
  2 *	linux/mm/mlock.c
  3 *
  4 *  (C) Copyright 1995 Linus Torvalds
  5 *  (C) Copyright 2002 Christoph Hellwig
  6 */
  7
  8#include <linux/capability.h>
  9#include <linux/mman.h>
 10#include <linux/mm.h>
 11#include <linux/swap.h>
 12#include <linux/swapops.h>
 13#include <linux/pagemap.h>
 14#include <linux/pagevec.h>
 15#include <linux/mempolicy.h>
 16#include <linux/syscalls.h>
 17#include <linux/sched.h>
 18#include <linux/export.h>
 19#include <linux/rmap.h>
 20#include <linux/mmzone.h>
 21#include <linux/hugetlb.h>
 22#include <linux/memcontrol.h>
 23#include <linux/mm_inline.h>
 24
 25#include "internal.h"
 26
 27int can_do_mlock(void)
 28{
 29	if (capable(CAP_IPC_LOCK))
 30		return 1;
 31	if (rlimit(RLIMIT_MEMLOCK) != 0)
 32		return 1;
 33	return 0;
 34}
 35EXPORT_SYMBOL(can_do_mlock);
 36
 37/*
 38 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 39 * in vmscan and, possibly, the fault path; and to support semi-accurate
 40 * statistics.
 41 *
 42 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 43 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 44 * The unevictable list is an LRU sibling list to the [in]active lists.
 45 * PageUnevictable is set to indicate the unevictable state.
 46 *
 47 * When lazy mlocking via vmscan, it is important to ensure that the
 48 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 49 * may have mlocked a page that is being munlocked. So lazy mlock must take
 50 * the mmap_sem for read, and verify that the vma really is locked
 51 * (see mm/rmap.c).
 52 */
 53
 54/*
 55 *  LRU accounting for clear_page_mlock()
 56 */
 57void clear_page_mlock(struct page *page)
 58{
 59	if (!TestClearPageMlocked(page))
 60		return;
 61
 62	mod_zone_page_state(page_zone(page), NR_MLOCK,
 63			    -hpage_nr_pages(page));
 64	count_vm_event(UNEVICTABLE_PGCLEARED);
 65	if (!isolate_lru_page(page)) {
 66		putback_lru_page(page);
 67	} else {
 68		/*
 69		 * We lost the race: the page already moved to the evictable list.
 70		 */
 71		if (PageUnevictable(page))
 72			count_vm_event(UNEVICTABLE_PGSTRANDED);
 73	}
 74}
 75
 76/*
 77 * Mark page as mlocked if not already.
 78 * If page on LRU, isolate and putback to move to unevictable list.
 79 */
 80void mlock_vma_page(struct page *page)
 81{
 82	/* Serialize with page migration */
 83	BUG_ON(!PageLocked(page));
 84
 85	if (!TestSetPageMlocked(page)) {
 86		mod_zone_page_state(page_zone(page), NR_MLOCK,
 87				    hpage_nr_pages(page));
 88		count_vm_event(UNEVICTABLE_PGMLOCKED);
 89		if (!isolate_lru_page(page))
 90			putback_lru_page(page);
 91	}
 92}
 93
 94/*
 95 * Isolate a page from LRU with optional get_page() pin.
 96 * Assumes lru_lock already held and page already pinned.
 97 */
 98static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
 99{
100	if (PageLRU(page)) {
101		struct lruvec *lruvec;
102
103		lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
104		if (getpage)
105			get_page(page);
106		ClearPageLRU(page);
107		del_page_from_lru_list(page, lruvec, page_lru(page));
108		return true;
109	}
110
111	return false;
112}
113
114/*
115 * Finish munlock after successful page isolation
116 *
117 * Page must be locked. This is a wrapper for try_to_munlock()
118 * and putback_lru_page() with munlock accounting.
119 */
120static void __munlock_isolated_page(struct page *page)
121{
122	int ret = SWAP_AGAIN;
123
124	/*
125	 * Optimization: if the page was mapped just once, that's our mapping
126	 * and we don't need to check all the other vmas.
127	 */
128	if (page_mapcount(page) > 1)
129		ret = try_to_munlock(page);
130
131	/* Did try_to_munlock() succeed or punt? */
132	if (ret != SWAP_MLOCK)
133		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
134
135	putback_lru_page(page);
136}
137
138/*
139 * Accounting for page isolation fail during munlock
140 *
141 * Performs accounting when page isolation fails in munlock. There is nothing
142 * else to do because it means some other task has already removed the page
143 * from the LRU. putback_lru_page() will take care of removing the page from
144 * the unevictable list, if necessary. vmscan [page_referenced()] will move
145 * the page back to the unevictable list if some other vma has it mlocked.
146 */
147static void __munlock_isolation_failed(struct page *page)
148{
149	if (PageUnevictable(page))
150		__count_vm_event(UNEVICTABLE_PGSTRANDED);
151	else
152		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
153}
154
155/**
156 * munlock_vma_page - munlock a vma page
157 * @page - page to be unlocked, either a normal page or THP page head
158 *
159 * returns the size of the page as a page mask (0 for normal page,
160 *         HPAGE_PMD_NR - 1 for THP head page)
161 *
162 * called from munlock()/munmap() path with page supposedly on the LRU.
163 * When we munlock a page, because the vma where we found the page is being
164 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
165 * page locked so that we can leave it on the unevictable lru list and not
166 * bother vmscan with it.  However, to walk the page's rmap list in
167 * try_to_munlock() we must isolate the page from the LRU.  If some other
168 * task has removed the page from the LRU, we won't be able to do that.
169 * So we clear the PageMlocked as we might not get another chance.  If we
170 * can't isolate the page, we leave it for putback_lru_page() and vmscan
171 * [page_referenced()/try_to_unmap()] to deal with.
172 */
173unsigned int munlock_vma_page(struct page *page)
174{
175	unsigned int nr_pages;
176	struct zone *zone = page_zone(page);
177
178	/* For try_to_munlock() and to serialize with page migration */
179	BUG_ON(!PageLocked(page));
180
181	/*
182	 * Serialize with any parallel __split_huge_page_refcount() which
183	 * might otherwise copy PageMlocked to part of the tail pages before
184	 * we clear it in the head page. It also stabilizes hpage_nr_pages().
185	 */
186	spin_lock_irq(&zone->lru_lock);
187
188	nr_pages = hpage_nr_pages(page);
189	if (!TestClearPageMlocked(page))
190		goto unlock_out;
191
192	__mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
193
194	if (__munlock_isolate_lru_page(page, true)) {
195		spin_unlock_irq(&zone->lru_lock);
196		__munlock_isolated_page(page);
197		goto out;
198	}
199	__munlock_isolation_failed(page);
200
201unlock_out:
202	spin_unlock_irq(&zone->lru_lock);
203
204out:
205	return nr_pages - 1;
206}
207
208/**
209 * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
210 * @vma:   target vma
211 * @start: start address
212 * @end:   end address
213 *
214 * This takes care of making the pages present too.
215 *
216 * return 0 on success, negative error code on error.
217 *
218 * vma->vm_mm->mmap_sem must be held for at least read.
219 */
220long __mlock_vma_pages_range(struct vm_area_struct *vma,
221		unsigned long start, unsigned long end, int *nonblocking)
222{
223	struct mm_struct *mm = vma->vm_mm;
224	unsigned long nr_pages = (end - start) / PAGE_SIZE;
225	int gup_flags;
226
227	VM_BUG_ON(start & ~PAGE_MASK);
228	VM_BUG_ON(end   & ~PAGE_MASK);
229	VM_BUG_ON(start < vma->vm_start);
230	VM_BUG_ON(end   > vma->vm_end);
231	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
232
233	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
234	/*
235	 * We want to touch writable mappings with a write fault in order
236	 * to break COW, except for shared mappings because these don't COW
237	 * and we would not want to dirty them for nothing.
238	 */
239	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
240		gup_flags |= FOLL_WRITE;
241
242	/*
243	 * We want mlock to succeed for regions that have any permissions
244	 * other than PROT_NONE.
245	 */
246	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
247		gup_flags |= FOLL_FORCE;
248
249	/*
250	 * We made sure addr is within a VMA, so the following will
251	 * not result in a stack expansion that recurses back here.
252	 */
253	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
254				NULL, NULL, nonblocking);
255}
256
257/*
258 * convert get_user_pages() return value to posix mlock() error
259 */
260static int __mlock_posix_error_return(long retval)
261{
262	if (retval == -EFAULT)
263		retval = -ENOMEM;
264	else if (retval == -ENOMEM)
265		retval = -EAGAIN;
266	return retval;
267}
268
269/*
270 * Prepare page for fast batched LRU putback via __putback_lru_fast()
271 *
272 * The fast path is available only for evictable pages with a single mapping.
273 * Then we can bypass the per-cpu pvec and get better performance.
274 * When mapcount > 1 we need try_to_munlock(), which can fail.
275 * When !page_evictable(), we need the full redo logic of putback_lru_page to
276 * avoid leaving an evictable page in the unevictable list.
277 *
278 * In case of success, @page is added to @pvec and @pgrescued is incremented
279 * in case that the page was previously unevictable. @page is also unlocked.
280 */
281static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
282		int *pgrescued)
283{
284	VM_BUG_ON_PAGE(PageLRU(page), page);
285	VM_BUG_ON_PAGE(!PageLocked(page), page);
286
287	if (page_mapcount(page) <= 1 && page_evictable(page)) {
288		pagevec_add(pvec, page);
289		if (TestClearPageUnevictable(page))
290			(*pgrescued)++;
291		unlock_page(page);
292		return true;
293	}
294
295	return false;
296}
297
298/*
299 * Putback multiple evictable pages to the LRU
300 *
301 * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
302 * the pages might have meanwhile become unevictable but that is OK.
303 */
304static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
305{
306	count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
307	/*
308	 *__pagevec_lru_add() calls release_pages() so we don't call
309	 * put_page() explicitly
310	 */
311	__pagevec_lru_add(pvec);
312	count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
313}
314
315/*
316 * Munlock a batch of pages from the same zone
317 *
318 * The work is split to two main phases. First phase clears the Mlocked flag
319 * and attempts to isolate the pages, all under a single zone lru lock.
320 * The second phase finishes the munlock only for pages where isolation
321 * succeeded.
322 *
323 * Note that the pagevec may be modified during the process.
324 */
325static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
326{
327	int i;
328	int nr = pagevec_count(pvec);
329	int delta_munlocked;
330	struct pagevec pvec_putback;
331	int pgrescued = 0;
332
333	pagevec_init(&pvec_putback, 0);
334
335	/* Phase 1: page isolation */
336	spin_lock_irq(&zone->lru_lock);
337	for (i = 0; i < nr; i++) {
338		struct page *page = pvec->pages[i];
339
340		if (TestClearPageMlocked(page)) {
341			/*
342			 * We already have pin from follow_page_mask()
343			 * so we can spare the get_page() here.
344			 */
345			if (__munlock_isolate_lru_page(page, false))
346				continue;
347			else
348				__munlock_isolation_failed(page);
349		}
350
351		/*
352		 * We won't be munlocking this page in the next phase
353		 * but we still need to release the follow_page_mask()
354		 * pin. We cannot do it under lru_lock however. If it's
355		 * the last pin, __page_cache_release() would deadlock.
356		 */
357		pagevec_add(&pvec_putback, pvec->pages[i]);
358		pvec->pages[i] = NULL;
359	}
360	delta_munlocked = -nr + pagevec_count(&pvec_putback);
361	__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
362	spin_unlock_irq(&zone->lru_lock);
363
364	/* Now we can release pins of pages that we are not munlocking */
365	pagevec_release(&pvec_putback);
366
367	/* Phase 2: page munlock */
368	for (i = 0; i < nr; i++) {
369		struct page *page = pvec->pages[i];
370
371		if (page) {
372			lock_page(page);
373			if (!__putback_lru_fast_prepare(page, &pvec_putback,
374					&pgrescued)) {
375				/*
376				 * Slow path. We don't want to lose the last
377				 * pin before unlock_page()
378				 */
379				get_page(page); /* for putback_lru_page() */
380				__munlock_isolated_page(page);
381				unlock_page(page);
382				put_page(page); /* from follow_page_mask() */
383			}
384		}
385	}
386
387	/*
388	 * Phase 3: page putback for pages that qualified for the fast path
389	 * This will also call put_page() to return pin from follow_page_mask()
390	 */
391	if (pagevec_count(&pvec_putback))
392		__putback_lru_fast(&pvec_putback, pgrescued);
393}
394
395/*
396 * Fill up pagevec for __munlock_pagevec using pte walk
397 *
398 * The function expects that the struct page corresponding to @start address is
399 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
400 *
401 * The rest of @pvec is filled by subsequent pages within the same pmd and same
402 * zone, as long as the ptes are present and vm_normal_page() succeeds. These
403 * pages also get pinned.
404 *
405 * Returns the address of the next page that should be scanned. This equals
406 * @start + PAGE_SIZE when no page could be added by the pte walk.
407 */
408static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
409		struct vm_area_struct *vma, int zoneid,	unsigned long start,
410		unsigned long end)
411{
412	pte_t *pte;
413	spinlock_t *ptl;
414
415	/*
416	 * Initialize pte walk starting at the already pinned page where we
417	 * are sure that there is a pte, as it was pinned under the same
418	 * mmap_sem write op.
419	 */
420	pte = get_locked_pte(vma->vm_mm, start,	&ptl);
421	/* Make sure we do not cross the page table boundary */
422	end = pgd_addr_end(start, end);
423	end = pud_addr_end(start, end);
424	end = pmd_addr_end(start, end);
425
426	/* The page next to the pinned page is the first we will try to get */
427	start += PAGE_SIZE;
428	while (start < end) {
429		struct page *page = NULL;
430		pte++;
431		if (pte_present(*pte))
432			page = vm_normal_page(vma, start, *pte);
433		/*
434		 * Break if page could not be obtained or the page's node+zone does not
435		 * match
436		 */
437		if (!page || page_zone_id(page) != zoneid)
438			break;
439
440		get_page(page);
441		/*
442		 * Increase the address that will be returned *before* the
443		 * eventual break due to pvec becoming full by adding the page
444		 */
445		start += PAGE_SIZE;
446		if (pagevec_add(pvec, page) == 0)
447			break;
448	}
449	pte_unmap_unlock(pte, ptl);
450	return start;
451}
452
453/*
454 * munlock_vma_pages_range() - munlock all pages in the vma range.
455 * @vma - vma containing range to be munlock()ed.
456 * @start - start address in @vma of the range
457 * @end - end of range in @vma.
458 *
459 *  For mremap(), munmap() and exit().
460 *
461 * Called with @vma VM_LOCKED.
462 *
463 * Returns with VM_LOCKED cleared.  Callers must be prepared to
464 * deal with this.
465 *
466 * We don't save and restore VM_LOCKED here because pages are
467 * still on lru.  In unmap path, pages might be scanned by reclaim
468 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
469 * free them.  This will result in freeing mlocked pages.
470 */
471void munlock_vma_pages_range(struct vm_area_struct *vma,
472			     unsigned long start, unsigned long end)
473{
474	vma->vm_flags &= ~VM_LOCKED;
475
476	while (start < end) {
477		struct page *page = NULL;
478		unsigned int page_mask;
479		unsigned long page_increm;
480		struct pagevec pvec;
481		struct zone *zone;
482		int zoneid;
483
484		pagevec_init(&pvec, 0);
485		/*
486		 * Although FOLL_DUMP is intended for get_dump_page(),
487		 * it just so happens that its special treatment of the
488		 * ZERO_PAGE (returning an error instead of doing get_page)
489		 * suits munlock very well (and if somehow an abnormal page
490		 * has sneaked into the range, we won't oops here: great).
491		 */
492		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
493				&page_mask);
494
495		if (page && !IS_ERR(page)) {
496			if (PageTransHuge(page)) {
497				lock_page(page);
498				/*
499				 * Any THP page found by follow_page_mask() may
500				 * have gotten split before reaching
501				 * munlock_vma_page(), so we need to recompute
502				 * the page_mask here.
503				 */
504				page_mask = munlock_vma_page(page);
505				unlock_page(page);
506				put_page(page); /* follow_page_mask() */
507			} else {
508				/*
509				 * Non-huge pages are handled in batches via
510				 * pagevec. The pin from follow_page_mask()
511				 * prevents them from collapsing by THP.
512				 */
513				pagevec_add(&pvec, page);
514				zone = page_zone(page);
515				zoneid = page_zone_id(page);
516
517				/*
518				 * Try to fill the rest of pagevec using fast
519				 * pte walk. This will also update start to
520				 * the next page to process. Then munlock the
521				 * pagevec.
522				 */
523				start = __munlock_pagevec_fill(&pvec, vma,
524						zoneid, start, end);
525				__munlock_pagevec(&pvec, zone);
526				goto next;
527			}
528		}
529		/* It's a bug to munlock in the middle of a THP page */
530		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
531		page_increm = 1 + page_mask;
532		start += page_increm * PAGE_SIZE;
533next:
534		cond_resched();
535	}
536}
537
538/*
539 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
540 *
541 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
542 * munlock is a no-op.  However, for some special vmas, we go ahead and
543 * populate the ptes.
544 *
545 * For vmas that pass the filters, merge/split as appropriate.
546 */
547static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
548	unsigned long start, unsigned long end, vm_flags_t newflags)
549{
550	struct mm_struct *mm = vma->vm_mm;
551	pgoff_t pgoff;
552	int nr_pages;
553	int ret = 0;
554	int lock = !!(newflags & VM_LOCKED);
555
556	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
557	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
558		goto out;	/* don't set VM_LOCKED,  don't count */
559
560	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
561	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
562			  vma->vm_file, pgoff, vma_policy(vma));
563	if (*prev) {
564		vma = *prev;
565		goto success;
566	}
567
568	if (start != vma->vm_start) {
569		ret = split_vma(mm, vma, start, 1);
570		if (ret)
571			goto out;
572	}
573
574	if (end != vma->vm_end) {
575		ret = split_vma(mm, vma, end, 0);
576		if (ret)
577			goto out;
578	}
579
580success:
581	/*
582	 * Keep track of amount of locked VM.
583	 */
584	nr_pages = (end - start) >> PAGE_SHIFT;
585	if (!lock)
586		nr_pages = -nr_pages;
587	mm->locked_vm += nr_pages;
588
589	/*
590	 * vm_flags is protected by the mmap_sem held in write mode.
591	 * It's okay if try_to_unmap_one unmaps a page just after we
592	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
593	 */
594
595	if (lock)
596		vma->vm_flags = newflags;
597	else
598		munlock_vma_pages_range(vma, start, end);
599
600out:
601	*prev = vma;
602	return ret;
603}
604
605static int do_mlock(unsigned long start, size_t len, int on)
606{
607	unsigned long nstart, end, tmp;
608	struct vm_area_struct * vma, * prev;
609	int error;
610
611	VM_BUG_ON(start & ~PAGE_MASK);
612	VM_BUG_ON(len != PAGE_ALIGN(len));
613	end = start + len;
614	if (end < start)
615		return -EINVAL;
616	if (end == start)
617		return 0;
618	vma = find_vma(current->mm, start);
619	if (!vma || vma->vm_start > start)
620		return -ENOMEM;
621
622	prev = vma->vm_prev;
623	if (start > vma->vm_start)
624		prev = vma;
625
626	for (nstart = start ; ; ) {
627		vm_flags_t newflags;
628
629		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
630
631		newflags = vma->vm_flags & ~VM_LOCKED;
632		if (on)
633			newflags |= VM_LOCKED;
634
635		tmp = vma->vm_end;
636		if (tmp > end)
637			tmp = end;
638		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
639		if (error)
640			break;
641		nstart = tmp;
642		if (nstart < prev->vm_end)
643			nstart = prev->vm_end;
644		if (nstart >= end)
645			break;
646
647		vma = prev->vm_next;
648		if (!vma || vma->vm_start != nstart) {
649			error = -ENOMEM;
650			break;
651		}
652	}
653	return error;
654}
655
656/*
657 * __mm_populate - populate and/or mlock pages within a range of address space.
658 *
659 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
660 * flags. VMAs must be already marked with the desired vm_flags, and
661 * mmap_sem must not be held.
662 */
663int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
664{
665	struct mm_struct *mm = current->mm;
666	unsigned long end, nstart, nend;
667	struct vm_area_struct *vma = NULL;
668	int locked = 0;
669	long ret = 0;
670
671	VM_BUG_ON(start & ~PAGE_MASK);
672	VM_BUG_ON(len != PAGE_ALIGN(len));
673	end = start + len;
674
675	for (nstart = start; nstart < end; nstart = nend) {
676		/*
677		 * We want to fault in pages for [nstart; end) address range.
678		 * Find first corresponding VMA.
679		 */
680		if (!locked) {
681			locked = 1;
682			down_read(&mm->mmap_sem);
683			vma = find_vma(mm, nstart);
684		} else if (nstart >= vma->vm_end)
685			vma = vma->vm_next;
686		if (!vma || vma->vm_start >= end)
687			break;
688		/*
689		 * Set [nstart; nend) to intersection of desired address
690		 * range with the first VMA. Also, skip undesirable VMA types.
691		 */
692		nend = min(end, vma->vm_end);
693		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
694			continue;
695		if (nstart < vma->vm_start)
696			nstart = vma->vm_start;
697		/*
698		 * Now fault in a range of pages. __mlock_vma_pages_range()
699		 * double checks the vma flags, so that it won't mlock pages
700		 * if the vma was already munlocked.
701		 */
702		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
703		if (ret < 0) {
704			if (ignore_errors) {
705				ret = 0;
706				continue;	/* continue at next VMA */
707			}
708			ret = __mlock_posix_error_return(ret);
709			break;
710		}
711		nend = nstart + ret * PAGE_SIZE;
712		ret = 0;
713	}
714	if (locked)
715		up_read(&mm->mmap_sem);
716	return ret;	/* 0 or negative error code */
717}
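
/*
 * Editor's aside -- __mm_populate() is also reached from mmap() when
 * MAP_POPULATE or MAP_LOCKED is passed, per the comment above
 * (illustrative userspace sketch):
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_LOCKED,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	munmap(p, len);	/* unmapping implicitly unlocks the range */
	return 0;
}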
718
719SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
720{
721	unsigned long locked;
722	unsigned long lock_limit;
723	int error = -ENOMEM;
724
725	if (!can_do_mlock())
726		return -EPERM;
727
728	lru_add_drain_all();	/* flush pagevec */
729
730	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
731	start &= PAGE_MASK;
732
733	lock_limit = rlimit(RLIMIT_MEMLOCK);
734	lock_limit >>= PAGE_SHIFT;
735	locked = len >> PAGE_SHIFT;
736
737	down_write(&current->mm->mmap_sem);
738
739	locked += current->mm->locked_vm;
740
741	/* check against resource limits */
742	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
743		error = do_mlock(start, len, 1);
744
745	up_write(&current->mm->mmap_sem);
746	if (!error)
747		error = __mm_populate(start, len, 0);
748	return error;
749}
750
751SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
752{
753	int ret;
754
755	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
756	start &= PAGE_MASK;
757
758	down_write(&current->mm->mmap_sem);
759	ret = do_mlock(start, len, 0);
760	up_write(&current->mm->mmap_sem);
761
762	return ret;
763}
764
765static int do_mlockall(int flags)
766{
767	struct vm_area_struct * vma, * prev = NULL;
768
769	if (flags & MCL_FUTURE)
770		current->mm->def_flags |= VM_LOCKED;
771	else
772		current->mm->def_flags &= ~VM_LOCKED;
773	if (flags == MCL_FUTURE)
774		goto out;
775
776	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
777		vm_flags_t newflags;
778
779		newflags = vma->vm_flags & ~VM_LOCKED;
780		if (flags & MCL_CURRENT)
781			newflags |= VM_LOCKED;
782
783		/* Ignore errors */
784		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
785		cond_resched();
786	}
787out:
788	return 0;
789}
790
791SYSCALL_DEFINE1(mlockall, int, flags)
792{
793	unsigned long lock_limit;
794	int ret = -EINVAL;
795
796	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
797		goto out;
798
799	ret = -EPERM;
800	if (!can_do_mlock())
801		goto out;
802
803	if (flags & MCL_CURRENT)
804		lru_add_drain_all();	/* flush pagevec */
805
806	lock_limit = rlimit(RLIMIT_MEMLOCK);
807	lock_limit >>= PAGE_SHIFT;
808
809	ret = -ENOMEM;
810	down_write(&current->mm->mmap_sem);
811
812	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
813	    capable(CAP_IPC_LOCK))
814		ret = do_mlockall(flags);
815	up_write(&current->mm->mmap_sem);
816	if (!ret && (flags & MCL_CURRENT))
817		mm_populate(0, TASK_SIZE);
818out:
819	return ret;
820}
821
822SYSCALL_DEFINE0(munlockall)
823{
824	int ret;
825
826	down_write(&current->mm->mmap_sem);
827	ret = do_mlockall(0);
828	up_write(&current->mm->mmap_sem);
829	return ret;
830}
831
832/*
833 * Objects with a different lifetime than processes (SHM_LOCK and SHM_HUGETLB
834 * shm segments) get accounted against the user_struct instead.
835 */
836static DEFINE_SPINLOCK(shmlock_user_lock);
837
838int user_shm_lock(size_t size, struct user_struct *user)
839{
840	unsigned long lock_limit, locked;
841	int allowed = 0;
842
843	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
844	lock_limit = rlimit(RLIMIT_MEMLOCK);
845	if (lock_limit == RLIM_INFINITY)
846		allowed = 1;
847	lock_limit >>= PAGE_SHIFT;
848	spin_lock(&shmlock_user_lock);
849	if (!allowed &&
850	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
851		goto out;
852	get_uid(user);
853	user->locked_shm += locked;
854	allowed = 1;
855out:
856	spin_unlock(&shmlock_user_lock);
857	return allowed;
858}
859
860void user_shm_unlock(size_t size, struct user_struct *user)
861{
862	spin_lock(&shmlock_user_lock);
863	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
864	spin_unlock(&shmlock_user_lock);
865	free_uid(user);
866}