v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  mm/mprotect.c
  4 *
  5 *  (C) Copyright 1994 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 *
  8 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  9 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 10 */
 11
 12#include <linux/pagewalk.h>
 13#include <linux/hugetlb.h>
 14#include <linux/shm.h>
 15#include <linux/mman.h>
 16#include <linux/fs.h>
 17#include <linux/highmem.h>
 18#include <linux/security.h>
 19#include <linux/mempolicy.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/swap.h>
 23#include <linux/swapops.h>
 24#include <linux/mmu_notifier.h>
 25#include <linux/migrate.h>
 26#include <linux/perf_event.h>
 27#include <linux/pkeys.h>
 28#include <linux/ksm.h>
 29#include <linux/uaccess.h>
 30#include <linux/mm_inline.h>
 31#include <linux/pgtable.h>
 32#include <asm/cacheflush.h>
 33#include <asm/mmu_context.h>
 34#include <asm/tlbflush.h>
 35
 36#include "internal.h"
 37
 38static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 39		unsigned long addr, unsigned long end, pgprot_t newprot,
 40		unsigned long cp_flags)
 41{
 42	pte_t *pte, oldpte;
 43	spinlock_t *ptl;
 44	unsigned long pages = 0;
 45	int target_node = NUMA_NO_NODE;
 46	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
 47	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 48	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 49	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 50
 51	/*
 52	 * Can be called with only the mmap_lock for reading by
 53	 * prot_numa so we must check the pmd isn't constantly
 54	 * changing from under us from pmd_none to pmd_trans_huge
 55	 * and/or the other way around.
 56	 */
 57	if (pmd_trans_unstable(pmd))
 58		return 0;
 59
 60	/*
 61	 * The pmd points to a regular pte so the pmd can't change
 62	 * from under us even if the mmap_lock is only held for
 63	 * reading.
 64	 */
 65	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 66
 67	/* Get target node for single threaded private VMAs */
 68	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
 69	    atomic_read(&vma->vm_mm->mm_users) == 1)
 70		target_node = numa_node_id();
 71
 72	flush_tlb_batched_pending(vma->vm_mm);
 73	arch_enter_lazy_mmu_mode();
 74	do {
 75		oldpte = *pte;
 76		if (pte_present(oldpte)) {
 77			pte_t ptent;
 78			bool preserve_write = prot_numa && pte_write(oldpte);
 79
 80			/*
 81			 * Avoid trapping faults against the zero or KSM
 82			 * pages. See similar comment in change_huge_pmd.
 83			 */
 84			if (prot_numa) {
 85				struct page *page;
 86
 87				/* Avoid TLB flush if possible */
 88				if (pte_protnone(oldpte))
 89					continue;
 90
 91				page = vm_normal_page(vma, addr, oldpte);
 92				if (!page || PageKsm(page))
 93					continue;
 94
 95				/* Also skip shared copy-on-write pages */
 96				if (is_cow_mapping(vma->vm_flags) &&
 97				    page_mapcount(page) != 1)
 98					continue;
 99
100				/*
101				 * While migration can move some dirty pages,
102				 * it cannot move them all from MIGRATE_ASYNC
103				 * context.
104				 */
105				if (page_is_file_lru(page) && PageDirty(page))
106					continue;
107
108				/*
109				 * Don't mess with PTEs if page is already on the node
110				 * a single-threaded process is running on.
111				 */
112				if (target_node == page_to_nid(page))
113					continue;
114			}
115
116			oldpte = ptep_modify_prot_start(vma, addr, pte);
117			ptent = pte_modify(oldpte, newprot);
118			if (preserve_write)
119				ptent = pte_mk_savedwrite(ptent);
120
121			if (uffd_wp) {
122				ptent = pte_wrprotect(ptent);
123				ptent = pte_mkuffd_wp(ptent);
124			} else if (uffd_wp_resolve) {
125				/*
126				 * Leave the write bit to be handled
127				 * by the page fault handler, so that
128				 * things like COW can be properly
129				 * handled.
130				 */
131				ptent = pte_clear_uffd_wp(ptent);
132			}
133
134			/* Avoid taking write faults for known dirty pages */
135			if (dirty_accountable && pte_dirty(ptent) &&
136					(pte_soft_dirty(ptent) ||
137					 !(vma->vm_flags & VM_SOFTDIRTY))) {
138				ptent = pte_mkwrite(ptent);
139			}
140			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
141			pages++;
142		} else if (is_swap_pte(oldpte)) {
143			swp_entry_t entry = pte_to_swp_entry(oldpte);
144			pte_t newpte;
145
146			if (is_writable_migration_entry(entry)) {
147				/*
148				 * A protection check is difficult so
149				 * just be safe and disable write
150				 */
151				entry = make_readable_migration_entry(
152							swp_offset(entry));
153				newpte = swp_entry_to_pte(entry);
154				if (pte_swp_soft_dirty(oldpte))
155					newpte = pte_swp_mksoft_dirty(newpte);
156				if (pte_swp_uffd_wp(oldpte))
157					newpte = pte_swp_mkuffd_wp(newpte);
158			} else if (is_writable_device_private_entry(entry)) {
159				/*
160				 * We do not preserve soft-dirtiness. See
161				 * copy_one_pte() for explanation.
162				 */
163				entry = make_readable_device_private_entry(
164							swp_offset(entry));
165				newpte = swp_entry_to_pte(entry);
166				if (pte_swp_uffd_wp(oldpte))
167					newpte = pte_swp_mkuffd_wp(newpte);
168			} else if (is_writable_device_exclusive_entry(entry)) {
169				entry = make_readable_device_exclusive_entry(
170							swp_offset(entry));
171				newpte = swp_entry_to_pte(entry);
172				if (pte_swp_soft_dirty(oldpte))
173					newpte = pte_swp_mksoft_dirty(newpte);
174				if (pte_swp_uffd_wp(oldpte))
175					newpte = pte_swp_mkuffd_wp(newpte);
176			} else {
177				newpte = oldpte;
178			}
179
180			if (uffd_wp)
181				newpte = pte_swp_mkuffd_wp(newpte);
182			else if (uffd_wp_resolve)
183				newpte = pte_swp_clear_uffd_wp(newpte);
184
185			if (!pte_same(oldpte, newpte)) {
186				set_pte_at(vma->vm_mm, addr, pte, newpte);
187				pages++;
188			}
189		}
190	} while (pte++, addr += PAGE_SIZE, addr != end);
191	arch_leave_lazy_mmu_mode();
192	pte_unmap_unlock(pte - 1, ptl);
193
194	return pages;
195}
196
197/*
198 * Used when setting automatic NUMA hinting protection where it is
199 * critical that a numa hinting PMD is not confused with a bad PMD.
200 */
201static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
202{
203	pmd_t pmdval = pmd_read_atomic(pmd);
204
205	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
206#ifdef CONFIG_TRANSPARENT_HUGEPAGE
207	barrier();
208#endif
209
210	if (pmd_none(pmdval))
211		return 1;
212	if (pmd_trans_huge(pmdval))
213		return 0;
214	if (unlikely(pmd_bad(pmdval))) {
215		pmd_clear_bad(pmd);
216		return 1;
217	}
218
219	return 0;
220}
221
222static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
223		pud_t *pud, unsigned long addr, unsigned long end,
224		pgprot_t newprot, unsigned long cp_flags)
225{
226	pmd_t *pmd;
227	unsigned long next;
228	unsigned long pages = 0;
229	unsigned long nr_huge_updates = 0;
230	struct mmu_notifier_range range;
231
232	range.start = 0;
233
234	pmd = pmd_offset(pud, addr);
235	do {
236		unsigned long this_pages;
237
238		next = pmd_addr_end(addr, end);
239
240		/*
241		 * Automatic NUMA balancing walks the tables with mmap_lock
242		 * held for read. It's possible for a parallel update to occur
243		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
244		 * check leading to a false positive and clearing.
245		 * Hence, it's necessary to atomically read the PMD value
246		 * for all the checks.
247		 */
248		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
249		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
250			goto next;
251
252		/* invoke the mmu notifier if the pmd is populated */
253		if (!range.start) {
254			mmu_notifier_range_init(&range,
255				MMU_NOTIFY_PROTECTION_VMA, 0,
256				vma, vma->vm_mm, addr, end);
257			mmu_notifier_invalidate_range_start(&range);
258		}
259
260		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
261			if (next - addr != HPAGE_PMD_SIZE) {
262				__split_huge_pmd(vma, pmd, addr, false, NULL);
263			} else {
264				int nr_ptes = change_huge_pmd(vma, pmd, addr,
265							      newprot, cp_flags);
266
267				if (nr_ptes) {
268					if (nr_ptes == HPAGE_PMD_NR) {
269						pages += HPAGE_PMD_NR;
270						nr_huge_updates++;
271					}
272
273					/* huge pmd was handled */
274					goto next;
275				}
276			}
277			/* fall through, the trans huge pmd just split */
278		}
279		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
280					      cp_flags);
281		pages += this_pages;
282next:
283		cond_resched();
284	} while (pmd++, addr = next, addr != end);
285
286	if (range.start)
287		mmu_notifier_invalidate_range_end(&range);
288
289	if (nr_huge_updates)
290		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
291	return pages;
292}
293
294static inline unsigned long change_pud_range(struct vm_area_struct *vma,
295		p4d_t *p4d, unsigned long addr, unsigned long end,
296		pgprot_t newprot, unsigned long cp_flags)
297{
298	pud_t *pud;
299	unsigned long next;
300	unsigned long pages = 0;
301
302	pud = pud_offset(p4d, addr);
303	do {
304		next = pud_addr_end(addr, end);
305		if (pud_none_or_clear_bad(pud))
306			continue;
307		pages += change_pmd_range(vma, pud, addr, next, newprot,
308					  cp_flags);
309	} while (pud++, addr = next, addr != end);
310
311	return pages;
312}
313
314static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
315		pgd_t *pgd, unsigned long addr, unsigned long end,
316		pgprot_t newprot, unsigned long cp_flags)
317{
318	p4d_t *p4d;
319	unsigned long next;
320	unsigned long pages = 0;
321
322	p4d = p4d_offset(pgd, addr);
323	do {
324		next = p4d_addr_end(addr, end);
325		if (p4d_none_or_clear_bad(p4d))
326			continue;
327		pages += change_pud_range(vma, p4d, addr, next, newprot,
328					  cp_flags);
329	} while (p4d++, addr = next, addr != end);
330
331	return pages;
332}
333
334static unsigned long change_protection_range(struct vm_area_struct *vma,
335		unsigned long addr, unsigned long end, pgprot_t newprot,
336		unsigned long cp_flags)
337{
338	struct mm_struct *mm = vma->vm_mm;
339	pgd_t *pgd;
340	unsigned long next;
341	unsigned long start = addr;
342	unsigned long pages = 0;
343
344	BUG_ON(addr >= end);
345	pgd = pgd_offset(mm, addr);
346	flush_cache_range(vma, addr, end);
347	inc_tlb_flush_pending(mm);
348	do {
349		next = pgd_addr_end(addr, end);
350		if (pgd_none_or_clear_bad(pgd))
351			continue;
352		pages += change_p4d_range(vma, pgd, addr, next, newprot,
353					  cp_flags);
354	} while (pgd++, addr = next, addr != end);
355
356	/* Only flush the TLB if we actually modified any entries: */
357	if (pages)
358		flush_tlb_range(vma, start, end);
359	dec_tlb_flush_pending(mm);
360
361	return pages;
362}
363
364unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
365		       unsigned long end, pgprot_t newprot,
366		       unsigned long cp_flags)
367{
368	unsigned long pages;
369
370	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
371
372	if (is_vm_hugetlb_page(vma))
373		pages = hugetlb_change_protection(vma, start, end, newprot);
374	else
375		pages = change_protection_range(vma, start, end, newprot,
376						cp_flags);
377
378	return pages;
379}
380
381static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
382			       unsigned long next, struct mm_walk *walk)
383{
384	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
385		0 : -EACCES;
386}
387
388static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
389				   unsigned long addr, unsigned long next,
390				   struct mm_walk *walk)
391{
392	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
393		0 : -EACCES;
394}
395
396static int prot_none_test(unsigned long addr, unsigned long next,
397			  struct mm_walk *walk)
398{
399	return 0;
400}
401
402static const struct mm_walk_ops prot_none_walk_ops = {
403	.pte_entry		= prot_none_pte_entry,
404	.hugetlb_entry		= prot_none_hugetlb_entry,
405	.test_walk		= prot_none_test,
406};
407
408int
409mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
410	unsigned long start, unsigned long end, unsigned long newflags)
411{
412	struct mm_struct *mm = vma->vm_mm;
413	unsigned long oldflags = vma->vm_flags;
414	long nrpages = (end - start) >> PAGE_SHIFT;
415	unsigned long charged = 0;
416	pgoff_t pgoff;
417	int error;
418	int dirty_accountable = 0;
419
420	if (newflags == oldflags) {
421		*pprev = vma;
422		return 0;
423	}
424
425	/*
426	 * Do PROT_NONE PFN permission checks here when we can still
427	 * bail out without undoing a lot of state. This is a rather
428	 * uncommon case, so doesn't need to be very optimized.
429	 */
430	if (arch_has_pfn_modify_check() &&
431	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
432	    (newflags & VM_ACCESS_FLAGS) == 0) {
433		pgprot_t new_pgprot = vm_get_page_prot(newflags);
434
435		error = walk_page_range(current->mm, start, end,
436				&prot_none_walk_ops, &new_pgprot);
437		if (error)
438			return error;
439	}
440
441	/*
442	 * If we make a private mapping writable we increase our commit;
443	 * but (without finer accounting) cannot reduce our commit if we
444	 * make it unwritable again. hugetlb mappings were accounted for
445	 * even if read-only, so there is no need to account for them here
446	 */
447	if (newflags & VM_WRITE) {
448		/* Check space limits when area turns into data. */
449		if (!may_expand_vm(mm, newflags, nrpages) &&
450				may_expand_vm(mm, oldflags, nrpages))
451			return -ENOMEM;
452		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
453						VM_SHARED|VM_NORESERVE))) {
454			charged = nrpages;
455			if (security_vm_enough_memory_mm(mm, charged))
456				return -ENOMEM;
457			newflags |= VM_ACCOUNT;
458		}
459	}
460
461	/*
462	 * First try to merge with previous and/or next vma.
463	 */
464	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
465	*pprev = vma_merge(mm, *pprev, start, end, newflags,
466			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
467			   vma->vm_userfaultfd_ctx);
468	if (*pprev) {
469		vma = *pprev;
470		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
471		goto success;
472	}
473
474	*pprev = vma;
475
476	if (start != vma->vm_start) {
477		error = split_vma(mm, vma, start, 1);
478		if (error)
479			goto fail;
480	}
481
482	if (end != vma->vm_end) {
483		error = split_vma(mm, vma, end, 0);
484		if (error)
485			goto fail;
486	}
487
488success:
489	/*
490	 * vm_flags and vm_page_prot are protected by the mmap_lock
491	 * held in write mode.
492	 */
493	vma->vm_flags = newflags;
494	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
495	vma_set_page_prot(vma);
496
497	change_protection(vma, start, end, vma->vm_page_prot,
498			  dirty_accountable ? MM_CP_DIRTY_ACCT : 0);
499
500	/*
501	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
502	 * fault on access.
503	 */
504	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
505			(newflags & VM_WRITE)) {
506		populate_vma_page_range(vma, start, end, NULL);
507	}
508
509	vm_stat_account(mm, oldflags, -nrpages);
510	vm_stat_account(mm, newflags, nrpages);
511	perf_event_mmap(vma);
512	return 0;
513
514fail:
515	vm_unacct_memory(charged);
516	return error;
517}
518
519/*
520 * pkey==-1 when doing a legacy mprotect()
521 */
522static int do_mprotect_pkey(unsigned long start, size_t len,
523		unsigned long prot, int pkey)
524{
525	unsigned long nstart, end, tmp, reqprot;
526	struct vm_area_struct *vma, *prev;
527	int error = -EINVAL;
528	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
529	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
530				(prot & PROT_READ);
531
532	start = untagged_addr(start);
533
534	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
535	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
536		return -EINVAL;
537
538	if (start & ~PAGE_MASK)
539		return -EINVAL;
540	if (!len)
541		return 0;
542	len = PAGE_ALIGN(len);
543	end = start + len;
544	if (end <= start)
545		return -ENOMEM;
546	if (!arch_validate_prot(prot, start))
547		return -EINVAL;
548
549	reqprot = prot;
550
551	if (mmap_write_lock_killable(current->mm))
552		return -EINTR;
553
554	/*
555	 * If userspace did not allocate the pkey, do not let
556	 * them use it here.
557	 */
558	error = -EINVAL;
559	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
560		goto out;
561
562	vma = find_vma(current->mm, start);
563	error = -ENOMEM;
564	if (!vma)
565		goto out;
566	prev = vma->vm_prev;
567	if (unlikely(grows & PROT_GROWSDOWN)) {
568		if (vma->vm_start >= end)
569			goto out;
570		start = vma->vm_start;
571		error = -EINVAL;
572		if (!(vma->vm_flags & VM_GROWSDOWN))
573			goto out;
574	} else {
575		if (vma->vm_start > start)
576			goto out;
577		if (unlikely(grows & PROT_GROWSUP)) {
578			end = vma->vm_end;
579			error = -EINVAL;
580			if (!(vma->vm_flags & VM_GROWSUP))
581				goto out;
582		}
583	}
584	if (start > vma->vm_start)
585		prev = vma;
586
587	for (nstart = start ; ; ) {
588		unsigned long mask_off_old_flags;
589		unsigned long newflags;
590		int new_vma_pkey;
591
592		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
593
594		/* Does the application expect PROT_READ to imply PROT_EXEC */
595		if (rier && (vma->vm_flags & VM_MAYEXEC))
596			prot |= PROT_EXEC;
597
598		/*
599		 * Each mprotect() call explicitly passes r/w/x permissions.
600		 * If a permission is not passed to mprotect(), it must be
601		 * cleared from the VMA.
602		 */
603		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
604					VM_FLAGS_CLEAR;
605
606		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
607		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
608		newflags |= (vma->vm_flags & ~mask_off_old_flags);
609
610		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
611		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
612			error = -EACCES;
613			goto out;
614		}
615
616		/* Allow architectures to sanity-check the new flags */
617		if (!arch_validate_flags(newflags)) {
618			error = -EINVAL;
619			goto out;
620		}
621
622		error = security_file_mprotect(vma, reqprot, prot);
623		if (error)
624			goto out;
625
626		tmp = vma->vm_end;
627		if (tmp > end)
628			tmp = end;
629
630		if (vma->vm_ops && vma->vm_ops->mprotect) {
631			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
632			if (error)
633				goto out;
634		}
635
636		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
637		if (error)
638			goto out;
639
640		nstart = tmp;
641
642		if (nstart < prev->vm_end)
643			nstart = prev->vm_end;
644		if (nstart >= end)
645			goto out;
646
647		vma = prev->vm_next;
648		if (!vma || vma->vm_start != nstart) {
649			error = -ENOMEM;
650			goto out;
651		}
652		prot = reqprot;
653	}
654out:
655	mmap_write_unlock(current->mm);
656	return error;
657}
658
659SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
660		unsigned long, prot)
661{
662	return do_mprotect_pkey(start, len, prot, -1);
663}
664
665#ifdef CONFIG_ARCH_HAS_PKEYS
666
667SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
668		unsigned long, prot, int, pkey)
669{
670	return do_mprotect_pkey(start, len, prot, pkey);
671}
672
673SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
674{
675	int pkey;
676	int ret;
677
678	/* No flags supported yet. */
679	if (flags)
680		return -EINVAL;
681	/* check for unsupported init values */
682	if (init_val & ~PKEY_ACCESS_MASK)
683		return -EINVAL;
684
685	mmap_write_lock(current->mm);
686	pkey = mm_pkey_alloc(current->mm);
687
688	ret = -ENOSPC;
689	if (pkey == -1)
690		goto out;
691
692	ret = arch_set_user_pkey_access(current, pkey, init_val);
693	if (ret) {
694		mm_pkey_free(current->mm, pkey);
695		goto out;
696	}
697	ret = pkey;
698out:
699	mmap_write_unlock(current->mm);
700	return ret;
701}
702
703SYSCALL_DEFINE1(pkey_free, int, pkey)
704{
705	int ret;
706
707	mmap_write_lock(current->mm);
708	ret = mm_pkey_free(current->mm, pkey);
709	mmap_write_unlock(current->mm);
710
711	/*
712	 * We could provide warnings or errors if any VMA still
713	 * has the pkey set here.
714	 */
715	return ret;
716}
717
718#endif /* CONFIG_ARCH_HAS_PKEYS */
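
A minimal userspace sketch of the legacy mprotect() path above (do_mprotect_pkey() with pkey == -1), illustrative only and assuming a POSIX system: it exercises the page-alignment check (start & ~PAGE_MASK) and relies on anonymous private mappings keeping their VM_MAY* bits, so upgrading PROT_NONE to read/write succeeds.

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		/* PROT_NONE mapping; the VM_MAY* bits stay set. */
		void *p = mmap(NULL, 2 * page, PROT_NONE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		/* Enters do_mprotect_pkey() with pkey == -1. */
		if (mprotect(p, 2 * page, PROT_READ | PROT_WRITE))
			perror("mprotect");

		/* start must be page aligned: the kernel returns -EINVAL. */
		if (mprotect((char *)p + 1, page, PROT_READ))
			printf("unaligned start: %s\n", strerror(errno));

		munmap(p, 2 * page);
		return 0;
	}
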
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  mm/mprotect.c
  4 *
  5 *  (C) Copyright 1994 Linus Torvalds
  6 *  (C) Copyright 2002 Christoph Hellwig
  7 *
  8 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
  9 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 10 */
 11
 12#include <linux/pagewalk.h>
 13#include <linux/hugetlb.h>
 14#include <linux/shm.h>
 15#include <linux/mman.h>
 16#include <linux/fs.h>
 17#include <linux/highmem.h>
 18#include <linux/security.h>
 19#include <linux/mempolicy.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/swap.h>
 23#include <linux/swapops.h>
 24#include <linux/mmu_notifier.h>
 25#include <linux/migrate.h>
 26#include <linux/perf_event.h>
 27#include <linux/pkeys.h>
 28#include <linux/ksm.h>
 29#include <linux/uaccess.h>
 30#include <linux/mm_inline.h>
 31#include <linux/pgtable.h>
 32#include <linux/sched/sysctl.h>
 33#include <linux/userfaultfd_k.h>
 34#include <linux/memory-tiers.h>
 35#include <asm/cacheflush.h>
 36#include <asm/mmu_context.h>
 37#include <asm/tlbflush.h>
 38#include <asm/tlb.h>
 39
 40#include "internal.h"
 41
 42bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
 43			     pte_t pte)
 44{
 45	struct page *page;
 46
 47	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
 48		return false;
 49
 50	/* Don't touch entries that are not even readable. */
 51	if (pte_protnone(pte))
 52		return false;
 53
 54	/* Do we need write faults for softdirty tracking? */
 55	if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
 56		return false;
 57
 58	/* Do we need write faults for uffd-wp tracking? */
 59	if (userfaultfd_pte_wp(vma, pte))
 60		return false;
 61
 62	if (!(vma->vm_flags & VM_SHARED)) {
 63		/*
 64		 * Writable MAP_PRIVATE mapping: We can only special-case on
 65		 * exclusive anonymous pages, because we know that our
 66		 * write-fault handler similarly would map them writable without
 67		 * any additional checks while holding the PT lock.
 68		 */
 69		page = vm_normal_page(vma, addr, pte);
 70		return page && PageAnon(page) && PageAnonExclusive(page);
 71	}
 72
 73	/*
 74	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
 75	 * needs a real write-fault for writenotify
 76	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
 77	 * FS was already notified and we can simply mark the PTE writable
 78	 * just like the write-fault handler would do.
 79	 */
 80	return pte_dirty(pte);
 81}
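/*
 * Illustration (not part of the kernel file): for a MAP_PRIVATE
 * anonymous mapping, after
 *
 *	p[0] = 1;			writes an exclusive anon page
 *	mprotect(p, len, PROT_READ);
 *	mprotect(p, len, PROT_READ | PROT_WRITE);
 *
 * the second mprotect() can map the PTE writable immediately via this
 * helper (under MM_CP_TRY_CHANGE_WRITABLE) instead of leaving it
 * read-only and taking a write fault on the first store.
 */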
 82
 83static unsigned long change_pte_range(struct mmu_gather *tlb,
 84		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
 85		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
 86{
 87	pte_t *pte, oldpte;
 88	spinlock_t *ptl;
 89	unsigned long pages = 0;
 90	int target_node = NUMA_NO_NODE;
 91	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 92	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 93	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
 94
 95	tlb_change_page_size(tlb, PAGE_SIZE);
 96
 97	/*
 98	 * Can be called with only the mmap_lock for reading by
 99	 * prot_numa so we must check the pmd isn't constantly
100	 * changing from under us from pmd_none to pmd_trans_huge
101	 * and/or the other way around.
102	 */
103	if (pmd_trans_unstable(pmd))
104		return 0;
105
106	/*
107	 * The pmd points to a regular pte so the pmd can't change
108	 * from under us even if the mmap_lock is only held for
109	 * reading.
110	 */
111	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
112
113	/* Get target node for single threaded private VMAs */
114	if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
115	    atomic_read(&vma->vm_mm->mm_users) == 1)
116		target_node = numa_node_id();
117
118	flush_tlb_batched_pending(vma->vm_mm);
119	arch_enter_lazy_mmu_mode();
120	do {
121		oldpte = *pte;
122		if (pte_present(oldpte)) {
123			pte_t ptent;
124
125			/*
126			 * Avoid trapping faults against the zero or KSM
127			 * pages. See similar comment in change_huge_pmd.
128			 */
129			if (prot_numa) {
130				struct page *page;
131				int nid;
132				bool toptier;
133
134				/* Avoid TLB flush if possible */
135				if (pte_protnone(oldpte))
136					continue;
137
138				page = vm_normal_page(vma, addr, oldpte);
139				if (!page || is_zone_device_page(page) || PageKsm(page))
140					continue;
141
142				/* Also skip shared copy-on-write pages */
143				if (is_cow_mapping(vma->vm_flags) &&
144				    page_count(page) != 1)
145					continue;
146
147				/*
148				 * While migration can move some dirty pages,
149				 * it cannot move them all from MIGRATE_ASYNC
150				 * context.
151				 */
152				if (page_is_file_lru(page) && PageDirty(page))
153					continue;
154
155				/*
156				 * Don't mess with PTEs if page is already on the node
157				 * a single-threaded process is running on.
158				 */
159				nid = page_to_nid(page);
160				if (target_node == nid)
161					continue;
162				toptier = node_is_toptier(nid);
163
164				/*
165				 * Skip scanning top tier node if normal numa
166				 * balancing is disabled
167				 */
168				if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
169				    toptier)
170					continue;
171				if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
172				    !toptier)
173					xchg_page_access_time(page,
174						jiffies_to_msecs(jiffies));
175			}
176
177			oldpte = ptep_modify_prot_start(vma, addr, pte);
178			ptent = pte_modify(oldpte, newprot);
179
180			if (uffd_wp) {
181				ptent = pte_wrprotect(ptent);
182				ptent = pte_mkuffd_wp(ptent);
183			} else if (uffd_wp_resolve) {
184				ptent = pte_clear_uffd_wp(ptent);
185			}
186
187			/*
188			 * In some writable, shared mappings, we might want
189			 * to catch actual write access -- see
190			 * vma_wants_writenotify().
191			 *
192			 * In all writable, private mappings, we have to
193			 * properly handle COW.
194			 *
195			 * In both cases, we can sometimes still change PTEs
196			 * writable and avoid the write-fault handler, for
197			 * example, if a PTE is already dirty and no other
198			 * COW or special handling is required.
199			 */
200			if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
201			    !pte_write(ptent) &&
202			    can_change_pte_writable(vma, addr, ptent))
203				ptent = pte_mkwrite(ptent);
204
205			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
206			if (pte_needs_flush(oldpte, ptent))
207				tlb_flush_pte_range(tlb, addr, PAGE_SIZE);
208			pages++;
209		} else if (is_swap_pte(oldpte)) {
210			swp_entry_t entry = pte_to_swp_entry(oldpte);
211			pte_t newpte;
212
213			if (is_writable_migration_entry(entry)) {
214				struct page *page = pfn_swap_entry_to_page(entry);
215
216				/*
217				 * A protection check is difficult so
218				 * just be safe and disable write
219				 */
220				if (PageAnon(page))
221					entry = make_readable_exclusive_migration_entry(
222							     swp_offset(entry));
223				else
224					entry = make_readable_migration_entry(swp_offset(entry));
225				newpte = swp_entry_to_pte(entry);
226				if (pte_swp_soft_dirty(oldpte))
227					newpte = pte_swp_mksoft_dirty(newpte);
228				if (pte_swp_uffd_wp(oldpte))
229					newpte = pte_swp_mkuffd_wp(newpte);
230			} else if (is_writable_device_private_entry(entry)) {
231				/*
232				 * We do not preserve soft-dirtiness. See
233				 * copy_one_pte() for explanation.
234				 */
235				entry = make_readable_device_private_entry(
236							swp_offset(entry));
237				newpte = swp_entry_to_pte(entry);
238				if (pte_swp_uffd_wp(oldpte))
239					newpte = pte_swp_mkuffd_wp(newpte);
240			} else if (is_writable_device_exclusive_entry(entry)) {
241				entry = make_readable_device_exclusive_entry(
242							swp_offset(entry));
243				newpte = swp_entry_to_pte(entry);
244				if (pte_swp_soft_dirty(oldpte))
245					newpte = pte_swp_mksoft_dirty(newpte);
246				if (pte_swp_uffd_wp(oldpte))
247					newpte = pte_swp_mkuffd_wp(newpte);
248			} else if (is_pte_marker_entry(entry)) {
249				/*
250				 * Ignore swapin errors unconditionally,
251				 * because any access should sigbus anyway.
252				 */
253				if (is_swapin_error_entry(entry))
254					continue;
255				/*
256				 * If this is a uffd-wp pte marker and we'd like
257				 * to unprotect it, drop it; the next page
258				 * fault will trigger without uffd trapping.
259				 */
260				if (uffd_wp_resolve) {
261					pte_clear(vma->vm_mm, addr, pte);
262					pages++;
263				}
264				continue;
265			} else {
266				newpte = oldpte;
267			}
268
269			if (uffd_wp)
270				newpte = pte_swp_mkuffd_wp(newpte);
271			else if (uffd_wp_resolve)
272				newpte = pte_swp_clear_uffd_wp(newpte);
273
274			if (!pte_same(oldpte, newpte)) {
275				set_pte_at(vma->vm_mm, addr, pte, newpte);
276				pages++;
277			}
278		} else {
279			/* It must be a none page, or what else?.. */
280			WARN_ON_ONCE(!pte_none(oldpte));
281			if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
282				/*
283				 * For file-backed mem, we need to be able to
284				 * wr-protect a none pte, because even if the
285				 * pte is none, the page/swap cache could
286				 * exist.  Do that by installing a marker.
287				 */
288				set_pte_at(vma->vm_mm, addr, pte,
289					   make_pte_marker(PTE_MARKER_UFFD_WP));
290				pages++;
291			}
292		}
293	} while (pte++, addr += PAGE_SIZE, addr != end);
294	arch_leave_lazy_mmu_mode();
295	pte_unmap_unlock(pte - 1, ptl);
296
297	return pages;
298}
299
300/*
301 * Used when setting automatic NUMA hinting protection where it is
302 * critical that a numa hinting PMD is not confused with a bad PMD.
303 */
304static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
305{
306	pmd_t pmdval = pmdp_get_lockless(pmd);
307
308	/* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
309#ifdef CONFIG_TRANSPARENT_HUGEPAGE
310	barrier();
311#endif
312
313	if (pmd_none(pmdval))
314		return 1;
315	if (pmd_trans_huge(pmdval))
316		return 0;
317	if (unlikely(pmd_bad(pmdval))) {
318		pmd_clear_bad(pmd);
319		return 1;
320	}
321
322	return 0;
323}
324
325/* Return true if we're uffd wr-protecting file-backed memory, or false */
326static inline bool
327uffd_wp_protect_file(struct vm_area_struct *vma, unsigned long cp_flags)
328{
329	return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
330}
331
332/*
333 * If wr-protecting the range for file-backed, populate pgtable for the case
334 * when pgtable is empty but page cache exists.  When {pte|pmd|...}_alloc()
335 * fails, it means no memory; we have no better option but to stop.
336 */
337#define  change_pmd_prepare(vma, pmd, cp_flags)				\
338	do {								\
339		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
340			if (WARN_ON_ONCE(pte_alloc(vma->vm_mm, pmd)))	\
341				break;					\
342		}							\
343	} while (0)
344/*
345 * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to
346 * have separate change_pmd_prepare() because pte_alloc() returns 0 on success,
347 * while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
348 */
349#define  change_prepare(vma, high, low, addr, cp_flags)			\
350	do {								\
351		if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {	\
352			low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
353			if (WARN_ON_ONCE(p == NULL))			\
354				break;					\
355		}							\
356	} while (0)
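/*
 * Illustration (not part of the kernel file): with high == pud and
 * low == pmd, change_prepare(vma, pud, pmd, addr, cp_flags) expands
 * to roughly:
 *
 *	if (unlikely(uffd_wp_protect_file(vma, cp_flags))) {
 *		pmd_t *p = pmd_alloc(vma->vm_mm, pud, addr);
 *		if (WARN_ON_ONCE(p == NULL))
 *			break;
 *	}
 */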
357
358static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
359		struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
360		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
361{
362	pmd_t *pmd;
363	unsigned long next;
364	unsigned long pages = 0;
365	unsigned long nr_huge_updates = 0;
366	struct mmu_notifier_range range;
367
368	range.start = 0;
369
370	pmd = pmd_offset(pud, addr);
371	do {
372		unsigned long this_pages;
373
374		next = pmd_addr_end(addr, end);
375
376		change_pmd_prepare(vma, pmd, cp_flags);
377		/*
378		 * Automatic NUMA balancing walks the tables with mmap_lock
379		 * held for read. It's possible for a parallel update to occur
380		 * between pmd_trans_huge() and a pmd_none_or_clear_bad()
381		 * check leading to a false positive and clearing.
382		 * Hence, it's necessary to atomically read the PMD value
383		 * for all the checks.
384		 */
385		if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
386		     pmd_none_or_clear_bad_unless_trans_huge(pmd))
387			goto next;
388
389		/* invoke the mmu notifier if the pmd is populated */
390		if (!range.start) {
391			mmu_notifier_range_init(&range,
392				MMU_NOTIFY_PROTECTION_VMA, 0,
393				vma, vma->vm_mm, addr, end);
394			mmu_notifier_invalidate_range_start(&range);
395		}
396
397		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
398			if ((next - addr != HPAGE_PMD_SIZE) ||
399			    uffd_wp_protect_file(vma, cp_flags)) {
400				__split_huge_pmd(vma, pmd, addr, false, NULL);
401				/*
402				 * For file-backed, the pmd could have been
403				 * cleared; make sure the pmd is populated if
404				 * necessary, then fall through to the pte level.
405				 */
406				change_pmd_prepare(vma, pmd, cp_flags);
407			} else {
408				/*
409				 * change_huge_pmd() does not defer TLB flushes,
410				 * so no need to propagate the tlb argument.
411				 */
412				int nr_ptes = change_huge_pmd(tlb, vma, pmd,
413						addr, newprot, cp_flags);
414
415				if (nr_ptes) {
416					if (nr_ptes == HPAGE_PMD_NR) {
417						pages += HPAGE_PMD_NR;
418						nr_huge_updates++;
419					}
420
421					/* huge pmd was handled */
422					goto next;
423				}
424			}
425			/* fall through, the trans huge pmd just split */
426		}
427		this_pages = change_pte_range(tlb, vma, pmd, addr, next,
428					      newprot, cp_flags);
429		pages += this_pages;
430next:
431		cond_resched();
432	} while (pmd++, addr = next, addr != end);
433
434	if (range.start)
435		mmu_notifier_invalidate_range_end(&range);
436
437	if (nr_huge_updates)
438		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
439	return pages;
440}
441
442static inline unsigned long change_pud_range(struct mmu_gather *tlb,
443		struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
444		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
445{
446	pud_t *pud;
447	unsigned long next;
448	unsigned long pages = 0;
449
450	pud = pud_offset(p4d, addr);
451	do {
452		next = pud_addr_end(addr, end);
453		change_prepare(vma, pud, pmd, addr, cp_flags);
454		if (pud_none_or_clear_bad(pud))
455			continue;
456		pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
457					  cp_flags);
458	} while (pud++, addr = next, addr != end);
459
460	return pages;
461}
462
463static inline unsigned long change_p4d_range(struct mmu_gather *tlb,
464		struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
465		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
466{
467	p4d_t *p4d;
468	unsigned long next;
469	unsigned long pages = 0;
470
471	p4d = p4d_offset(pgd, addr);
472	do {
473		next = p4d_addr_end(addr, end);
474		change_prepare(vma, p4d, pud, addr, cp_flags);
475		if (p4d_none_or_clear_bad(p4d))
476			continue;
477		pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
478					  cp_flags);
479	} while (p4d++, addr = next, addr != end);
480
481	return pages;
482}
483
484static unsigned long change_protection_range(struct mmu_gather *tlb,
485		struct vm_area_struct *vma, unsigned long addr,
486		unsigned long end, pgprot_t newprot, unsigned long cp_flags)
487{
488	struct mm_struct *mm = vma->vm_mm;
489	pgd_t *pgd;
490	unsigned long next;
491	unsigned long pages = 0;
492
493	BUG_ON(addr >= end);
494	pgd = pgd_offset(mm, addr);
495	tlb_start_vma(tlb, vma);
496	do {
497		next = pgd_addr_end(addr, end);
498		change_prepare(vma, pgd, p4d, addr, cp_flags);
499		if (pgd_none_or_clear_bad(pgd))
500			continue;
501		pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
502					  cp_flags);
503	} while (pgd++, addr = next, addr != end);
504
505	tlb_end_vma(tlb, vma);
506
507	return pages;
508}
509
510unsigned long change_protection(struct mmu_gather *tlb,
511		       struct vm_area_struct *vma, unsigned long start,
512		       unsigned long end, pgprot_t newprot,
513		       unsigned long cp_flags)
514{
515	unsigned long pages;
516
517	BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
518
519	if (is_vm_hugetlb_page(vma))
520		pages = hugetlb_change_protection(vma, start, end, newprot,
521						  cp_flags);
522	else
523		pages = change_protection_range(tlb, vma, start, end, newprot,
524						cp_flags);
525
526	return pages;
527}
528
529static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
530			       unsigned long next, struct mm_walk *walk)
531{
532	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
533		0 : -EACCES;
534}
535
536static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
537				   unsigned long addr, unsigned long next,
538				   struct mm_walk *walk)
539{
540	return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
541		0 : -EACCES;
542}
543
544static int prot_none_test(unsigned long addr, unsigned long next,
545			  struct mm_walk *walk)
546{
547	return 0;
548}
549
550static const struct mm_walk_ops prot_none_walk_ops = {
551	.pte_entry		= prot_none_pte_entry,
552	.hugetlb_entry		= prot_none_hugetlb_entry,
553	.test_walk		= prot_none_test,
554};
555
556int
557mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
558	       struct vm_area_struct **pprev, unsigned long start,
559	       unsigned long end, unsigned long newflags)
560{
561	struct mm_struct *mm = vma->vm_mm;
562	unsigned long oldflags = vma->vm_flags;
563	long nrpages = (end - start) >> PAGE_SHIFT;
564	unsigned int mm_cp_flags = 0;
565	unsigned long charged = 0;
566	pgoff_t pgoff;
567	int error;
568
569	if (newflags == oldflags) {
570		*pprev = vma;
571		return 0;
572	}
573
574	/*
575	 * Do PROT_NONE PFN permission checks here when we can still
576	 * bail out without undoing a lot of state. This is a rather
577	 * uncommon case, so doesn't need to be very optimized.
578	 */
579	if (arch_has_pfn_modify_check() &&
580	    (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
581	    (newflags & VM_ACCESS_FLAGS) == 0) {
582		pgprot_t new_pgprot = vm_get_page_prot(newflags);
583
584		error = walk_page_range(current->mm, start, end,
585				&prot_none_walk_ops, &new_pgprot);
586		if (error)
587			return error;
588	}
589
590	/*
591	 * If we make a private mapping writable we increase our commit;
592	 * but (without finer accounting) cannot reduce our commit if we
593	 * make it unwritable again. hugetlb mappings were accounted for
594	 * even if read-only, so there is no need to account for them here
595	 */
596	if (newflags & VM_WRITE) {
597		/* Check space limits when area turns into data. */
598		if (!may_expand_vm(mm, newflags, nrpages) &&
599				may_expand_vm(mm, oldflags, nrpages))
600			return -ENOMEM;
601		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
602						VM_SHARED|VM_NORESERVE))) {
603			charged = nrpages;
604			if (security_vm_enough_memory_mm(mm, charged))
605				return -ENOMEM;
606			newflags |= VM_ACCOUNT;
607		}
608	}
609
610	/*
611	 * First try to merge with previous and/or next vma.
612	 */
613	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
614	*pprev = vma_merge(mm, *pprev, start, end, newflags,
615			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
616			   vma->vm_userfaultfd_ctx, anon_vma_name(vma));
617	if (*pprev) {
618		vma = *pprev;
619		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
620		goto success;
621	}
622
623	*pprev = vma;
624
625	if (start != vma->vm_start) {
626		error = split_vma(mm, vma, start, 1);
627		if (error)
628			goto fail;
629	}
630
631	if (end != vma->vm_end) {
632		error = split_vma(mm, vma, end, 0);
633		if (error)
634			goto fail;
635	}
636
637success:
638	/*
639	 * vm_flags and vm_page_prot are protected by the mmap_lock
640	 * held in write mode.
641	 */
642	vma->vm_flags = newflags;
643	if (vma_wants_manual_pte_write_upgrade(vma))
644		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
645	vma_set_page_prot(vma);
646
647	change_protection(tlb, vma, start, end, vma->vm_page_prot, mm_cp_flags);
648
649	/*
650	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
651	 * fault on access.
652	 */
653	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
654			(newflags & VM_WRITE)) {
655		populate_vma_page_range(vma, start, end, NULL);
656	}
657
658	vm_stat_account(mm, oldflags, -nrpages);
659	vm_stat_account(mm, newflags, nrpages);
660	perf_event_mmap(vma);
661	return 0;
662
663fail:
664	vm_unacct_memory(charged);
665	return error;
666}
667
668/*
669 * pkey==-1 when doing a legacy mprotect()
670 */
671static int do_mprotect_pkey(unsigned long start, size_t len,
672		unsigned long prot, int pkey)
673{
674	unsigned long nstart, end, tmp, reqprot;
675	struct vm_area_struct *vma, *prev;
676	int error;
677	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
678	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
679				(prot & PROT_READ);
680	struct mmu_gather tlb;
681	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
682
683	start = untagged_addr(start);
684
685	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
686	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
687		return -EINVAL;
688
689	if (start & ~PAGE_MASK)
690		return -EINVAL;
691	if (!len)
692		return 0;
693	len = PAGE_ALIGN(len);
694	end = start + len;
695	if (end <= start)
696		return -ENOMEM;
697	if (!arch_validate_prot(prot, start))
698		return -EINVAL;
699
700	reqprot = prot;
701
702	if (mmap_write_lock_killable(current->mm))
703		return -EINTR;
704
705	/*
706	 * If userspace did not allocate the pkey, do not let
707	 * them use it here.
708	 */
709	error = -EINVAL;
710	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
711		goto out;
712
713	mas_set(&mas, start);
714	vma = mas_find(&mas, ULONG_MAX);
715	error = -ENOMEM;
716	if (!vma)
717		goto out;
718
719	if (unlikely(grows & PROT_GROWSDOWN)) {
720		if (vma->vm_start >= end)
721			goto out;
722		start = vma->vm_start;
723		error = -EINVAL;
724		if (!(vma->vm_flags & VM_GROWSDOWN))
725			goto out;
726	} else {
727		if (vma->vm_start > start)
728			goto out;
729		if (unlikely(grows & PROT_GROWSUP)) {
730			end = vma->vm_end;
731			error = -EINVAL;
732			if (!(vma->vm_flags & VM_GROWSUP))
733				goto out;
734		}
735	}
736
737	if (start > vma->vm_start)
738		prev = vma;
739	else
740		prev = mas_prev(&mas, 0);
741
742	tlb_gather_mmu(&tlb, current->mm);
743	for (nstart = start ; ; ) {
744		unsigned long mask_off_old_flags;
745		unsigned long newflags;
746		int new_vma_pkey;
747
748		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */
749
750		/* Does the application expect PROT_READ to imply PROT_EXEC */
751		if (rier && (vma->vm_flags & VM_MAYEXEC))
752			prot |= PROT_EXEC;
753
754		/*
755		 * Each mprotect() call explicitly passes r/w/x permissions.
756		 * If a permission is not passed to mprotect(), it must be
757		 * cleared from the VMA.
758		 */
759		mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR;
760
761		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
762		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
763		newflags |= (vma->vm_flags & ~mask_off_old_flags);
764
765		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
766		if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) {
767			error = -EACCES;
768			break;
769		}
770
771		/* Allow architectures to sanity-check the new flags */
772		if (!arch_validate_flags(newflags)) {
773			error = -EINVAL;
774			break;
775		}
776
777		error = security_file_mprotect(vma, reqprot, prot);
778		if (error)
779			break;
780
781		tmp = vma->vm_end;
782		if (tmp > end)
783			tmp = end;
784
785		if (vma->vm_ops && vma->vm_ops->mprotect) {
786			error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
787			if (error)
788				break;
789		}
790
791		error = mprotect_fixup(&tlb, vma, &prev, nstart, tmp, newflags);
792		if (error)
793			break;
794
795		nstart = tmp;
796
797		if (nstart < prev->vm_end)
798			nstart = prev->vm_end;
799		if (nstart >= end)
800			break;
801
802		vma = find_vma(current->mm, prev->vm_end);
803		if (!vma || vma->vm_start != nstart) {
804			error = -ENOMEM;
805			break;
806		}
807		prot = reqprot;
808	}
809	tlb_finish_mmu(&tlb);
810out:
811	mmap_write_unlock(current->mm);
812	return error;
813}
814
815SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
816		unsigned long, prot)
817{
818	return do_mprotect_pkey(start, len, prot, -1);
819}
820
821#ifdef CONFIG_ARCH_HAS_PKEYS
822
823SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
824		unsigned long, prot, int, pkey)
825{
826	return do_mprotect_pkey(start, len, prot, pkey);
827}
828
829SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
830{
831	int pkey;
832	int ret;
833
834	/* No flags supported yet. */
835	if (flags)
836		return -EINVAL;
837	/* check for unsupported init values */
838	if (init_val & ~PKEY_ACCESS_MASK)
839		return -EINVAL;
840
841	mmap_write_lock(current->mm);
842	pkey = mm_pkey_alloc(current->mm);
843
844	ret = -ENOSPC;
845	if (pkey == -1)
846		goto out;
847
848	ret = arch_set_user_pkey_access(current, pkey, init_val);
849	if (ret) {
850		mm_pkey_free(current->mm, pkey);
851		goto out;
852	}
853	ret = pkey;
854out:
855	mmap_write_unlock(current->mm);
856	return ret;
857}
858
859SYSCALL_DEFINE1(pkey_free, int, pkey)
860{
861	int ret;
862
863	mmap_write_lock(current->mm);
864	ret = mm_pkey_free(current->mm, pkey);
865	mmap_write_unlock(current->mm);
866
867	/*
868	 * We could provide warnings or errors if any VMA still
869	 * has the pkey set here.
870	 */
871	return ret;
872}
873
874#endif /* CONFIG_ARCH_HAS_PKEYS */
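
A minimal userspace sketch of the pkey interface above, assuming pkey-capable x86-64 hardware (CONFIG_ARCH_HAS_PKEYS) and the glibc 2.27+ pkey_alloc()/pkey_mprotect()/pkey_free() wrappers; illustrative only:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		/* flags must be 0; init_val comes from PKEY_ACCESS_MASK. */
		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
		if (pkey < 0) {
			perror("pkey_alloc");	/* ENOSPC: keys exhausted */
			return 1;
		}

		/* Tags the VMA with the key via do_mprotect_pkey(). */
		if (pkey_mprotect(p, page, PROT_READ | PROT_WRITE, pkey))
			perror("pkey_mprotect");

		*(volatile char *)p;	/* reads still allowed ...        */
		/* *(char *)p = 1;	   ... but a write would SIGSEGV */

		pkey_free(pkey);
		munmap(p, page);
		return 0;
	}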